diff --git "a/437.jsonl" "b/437.jsonl" new file mode 100644--- /dev/null +++ "b/437.jsonl" @@ -0,0 +1,731 @@ +{"seq_id":"471708278","text":"# The difference between the highest and the lowest temperature values \n# predicted for the 10 day forecast.\ndef diff():\n high = float(input('The highest temperature values predicted for the 10 day forecast: '))\n low = float(input('The lowest temperature values predicted for the 10 day forecast: '))\n diff = high - low\n print('The difference is:', diff)\n\n# The average temperature at noon predicted for the 10 day forecast.\ndef average():\n tem_sum = 0\n for i in range(10):\n tem = float(input('The temperature at noon for day {:d}: '.format(i+1)))\n tem_sum += tem\n tem_ave = tem_sum/10\n print('The average temperature at noon predicted for the 10 day forecas is:', tem_ave) \n\n# The highest temperature predicted for the 10 day forecast, converted from Celsius to Fahrenheit.\ndef convert():\n tem_cel = float(input('The highest temperature in Celsius: '))\n tem_fah = tem_cel * 1.8 + 32\n print('The highest temperature in Fahrenheit:', tem_fah)\n\ndiff()\naverage()\nconvert()","sub_path":"lab02_Chenxi_Cai/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142476030","text":"import math, numpy as np\nimport os\n\noptim_type = 'SGD'\n\n\ndef learning_rate(args, epoch):\n decay_epochs = np.array([50, 80, 120, 150, 200])\n \n optim_factor = sum(decay_epochs width or self.x - self.R <= 0:\n self.dx = -self.dx\n if self.y + self.R > height or self.y - self.R <= 0:\n self.dy = -self.dy\n\n def show(self):\n canvas.move(self.ball_id, self.dx, self.dy)\n\n\ndef main():\n global root, canvas, balls\n\n root = Tk()\n canvas = Canvas(root, width=width, height=height)\n canvas.pack()\n balls = [Ball() for ball in range(2)]\n\n tick()\n root.mainloop()\n\n\ndef tick():\n for ball in balls:\n ball.move()\n ball.show()\n root.after(50, tick)\n\n\nmain()\n","sub_path":"moving_balls.py","file_name":"moving_balls.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"103094929","text":"import json\nimport re\nfrom datetime import datetime\n\nfrom paycar.models import PayCar\nfrom product.models import Product\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nimport os\n# Create your views here.\nfrom tools.login_check import login_check\nimport html\nimport time\n\nfrom user.models import UserProfile\nfrom user.views import make_token\n\n\n# 驗證後將商品送進購物車\ndef into_cart(json_obj):\n try:\n cid = json_obj.get('id')\n # print('==================================cid:', cid)\n except:\n return {'code': 213, 'error': 'NO_cid'}\n try:\n title = json_obj.get('title')\n # print('==================================title:', title)\n except:\n return {'code': 213, 'error': 'NO_tt'}\n try:\n id_num = json_obj.get('id_num')\n # print('==================================id_num:', id_num)\n except:\n return {'code': 213, 'error': 'NO_num'}\n try:\n price = json_obj.get('price')\n # print('==================================price:', price)\n except:\n return {'code': 213, 'error': 'NO_pp'}\n return [cid, title, id_num, price]\n\n\n# 確認商品及數量\ndef sure_pro_info_buy_amount(into_cart_data, json_obj):\n cid = into_cart_data[0]\n try:\n car_pro = Product.objects.get(id=cid, amount__gt=0)\n except Exception as e:\n return JsonResponse({'code': 
233, 'error': '商品已被通知即將下架'})\n # 這裡處理一下購買方案\n try:\n buy_amount = json_obj.get('buy_amount')\n if buy_amount is None:\n buy_amount = '1'\n return buy_amount\n except:\n buy_amount = '1'\n return buy_amount\n\n\n# 存進購物車(創建商品data\ndef create_data_into_cart(request, into_cart_data, buy_amount):\n cid = into_cart_data[0]\n title = into_cart_data[1]\n id_num = into_cart_data[2]\n price = into_cart_data[3]\n try:\n car_ori = PayCar.objects.get(title=title, pro_id=cid, id_num=id_num, price=price,\n author=request.user)\n car_ori.buy_amount = int(car_ori.buy_amount) + int(buy_amount)\n car_ori.save()\n result = ({'code': 200, 'error': 'success'})\n return result\n\n\n except:\n try:\n car_pro = Product.objects.get(id=cid, amount__gt=0)\n print('buy_amount:', buy_amount)\n\n new_car_pro = PayCar.objects.create(title=title, pro_id=cid, id_num=id_num, price=price,\n buy_amount=int(buy_amount),\n author=request.user, market=car_pro)\n except Exception as e:\n raise\n # return JsonResponse({'code': 202, 'error': 'server save error'})\n try:\n new_car_pro.avatar = car_pro.avatar\n new_car_pro.save()\n\n except:\n raise\n result = ({'code': 200, 'error': 'success'})\n return result\n\n\n# !!#確認修改內容\ndef check_info_to_ch(json_obj):\n if 'buy_amount' not in json_obj:\n return {'code': 207, 'error': 'buy_amount'}\n if 'id' not in json_obj:\n return {'code': 207, 'error': 'no id'}\n buy_amount = json_obj.get('buy_amount', '')\n car_id = json_obj.get('id', '')\n # print(car_id, buy_amount)\n # int(buy_amount)\n # str(car_id)\n try:\n car_pro = PayCar.objects.get(id=car_id)\n return [buy_amount, car_id, car_pro]\n except:\n # raise\n return {'code': 202, 'error': 'db no this product'}\n\n\n# 確認移出的商品\ndef check_cancel_pro(url):\n try:\n car_pro = PayCar.objects.get(id=url)\n # '
删除
  • ';\n return car_pro\n except:\n return {'code': 318, 'error': 'cant get data!'}\n\n\n# author_id: root(url傳的),跟後端使用這確認是否相同\n# 透過外鍵確認\ndef check_author(car_pro, request):\n if car_pro.author.id != request.user.id:\n return {'code': 311, 'error': 'cant user.id:!! '}\n\n\ndef del_pro_in_db(car_pro):\n try:\n car_pro.delete()\n res = {'code': 200}\n return res\n except:\n\n raise\n\n\n# 透過購物車紀錄去找商品剩餘數量\ndef find_db_date(checked_info):\n buy_amount = checked_info[0]\n car_id = checked_info[1]\n car_pro = checked_info[2]\n try: # 透過購物車紀錄去找商品剩餘數量\n\n pro_id = car_pro.pro_id\n pro = Product.objects.get(id=pro_id, amount__gte=int(buy_amount))\n\n except:\n res = {'code': 277, 'data': {}, 'error': '購物車數量超出'}\n data = {}\n re_amount = []\n # raise\n pro = Product.objects.get(id=pro_id)\n amount = pro.amount\n re_amount.append(amount)\n data['re_amount'] = re_amount\n res['data'] = data\n return res\n\n\n# !!存取修改內容\ndef save_check_info_in_db(checked_info):\n buy_amount = checked_info[0]\n car_pro = checked_info[2]\n try:\n car_pro.buy_amount = int(buy_amount)\n car_pro.save()\n result = {'code': 200, 'error': 'success'}\n return result\n except:\n return {'code': 202, 'error': 'server error'}\n\n\n# 資料庫取出購物車購買商品\ndef get_pros_in_db(request):\n try:\n car_pros = PayCar.objects.filter(author=request.user.id) # 購買者id\n # , ]>\n info = {}\n car_pros_list = []\n # [, ]>]\n\n for i in car_pros:\n f = {}\n f['id'] = i.id\n f['pro_id'] = i.pro_id # 購物車中對應商品項目的pro_id\n f['title'] = i.title\n f['buy_amount'] = i.buy_amount # 購買數量\n car_pros_list.append(f)\n info['car_pros'] = car_pros_list\n return info['car_pros']\n except:\n res = {'code': 600, 'error': 'no pro in cart '}\n return res\n\n\n# 此循環驗證有無超出數量\ndef check_buy_amount(car_info):\n num = 0\n for i in car_info:\n pid = car_info[num]['id']\n print(\"+++++++++++++++\", pid)\n pro_id = car_info[num]['pro_id']\n print(\"+++++++++++++++\", pro_id)\n title = car_info[num]['title']\n print(\"+++++++++++++++\", title)\n buy_amount = car_info[num]['buy_amount']\n print(\"+++++++++++++++\", buy_amount)\n num += 1\n try:\n pro = Product.objects.get(id=pro_id)\n print('取商品核對')\n amount = pro.amount\n except:\n res = {'code': 311, 'error': 'match error '}\n print('核對失敗')\n return res\n if amount >= buy_amount:\n print('核對數量成功進行操作')\n pass\n else:\n print('核對數量超出存貨進行操作')\n res = {'code': 400, 'data': {}}\n data = {}\n error_pro_list = []\n error_pro = {'pro_id': pro_id, 'title': title, 'amount': amount}\n error_pro_list.append(error_pro)\n data['error_pro'] = error_pro_list\n res['data'] = data\n print(\"!!!!!!!!!!!!\", pro_id, title, amount)\n return res\n\n\n# 驗證無超出數量進入此循環購買\ndef into_purchase_procedure(car_info, username):\n num2 = 0\n ms_ms = \"\"\n for i in car_info:\n pid = car_info[num2]['id']\n print(\"+++++++++++++++\", pid)\n pro_id = car_info[num2]['pro_id']\n print(\"+++++++++++++++\", pro_id)\n title = car_info[num2]['title']\n print(\"+++++++++++++++\", title)\n buy_amount = car_info[num2]['buy_amount']\n print(\"+++++++++++++++\", buy_amount)\n num2 += 1\n try:\n pro = Product.objects.get(id=pro_id)\n print('取商品核對')\n amount = pro.amount\n amount = amount - int(buy_amount)\n print('扣除數量成功')\n pro.amount = amount\n pro.save()\n print('成功改變商品項目剩餘參數')\n del_pay = PayCar.objects.get(id=pid, pro_id=pro_id)\n print('取出要刪除的購物車商品')\n del_pay.delete()\n print('刪除購物車商品:', title)\n print('商品結帳成功數據為:%s,購買數量:%d:' % (title, buy_amount))\n ms: str = '商品結帳成功數據為:%s,購買數量:%d\\n' % (title, buy_amount)\n ms_ms += ms\n try:\n user = 
UserProfile.objects.get(username=username)\n business_deal = user.business_deal\n business_deal = business_deal + 1\n user.business_deal = business_deal\n user.save()\n print('紀錄交易成功')\n except:\n print('紀錄交易失敗,聯絡官方')\n\n except:\n res = {'code': 333, 'error': '購買失敗聯絡官方 '}\n print('核對失敗,購買失敗聯絡官方')\n return res\n print(\"+++++++ms_ms\", ms_ms)\n return ms_ms\n\n\n# email function\ndef send_email(username, ms_ms):\n user = UserProfile.objects.get(username=username)\n try:\n print(\"into email\")\n import email.message\n # 計送email程式\n # email寄送訊息物件設定\n try:\n em = user.email\n except Exception:\n raise\n msg = email.message.EmailMessage()\n print('msg = email.message.EmailMessage() 成功')\n msg[\"From\"] = \"andywordsup@gmail.com\"\n # 這裡是變量\n msg[\"To\"] = em\n print('em', em)\n msg[\"Subject\"] = \"訂單通知\"\n msg.set_content(ms_ms)\n print('ms_ms', ms_ms)\n # msg.add_alternative(\"\\'ms_ms\\'\", subtype=\"HTML\")\n import smtplib\n server = smtplib.SMTP_SSL(\"smtp.gmail.com\", 465)\n server.login(\"andywordsup@gmail.com\", \"gimjvgqnanadbmlv\")\n server.send_message(msg)\n except:\n res = {'code': 465, 'error': '郵寄失敗聯絡官方 '}\n print('購買成功,郵寄失敗聯絡官方')\n return res\n","sub_path":"YGO/paycar/function_to_views.py","file_name":"function_to_views.py","file_ext":"py","file_size_in_byte":10149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"161563157","text":"'''\n\n슬�� 봇 프로젝트\n\n> 구조 :\n ┏ 파이썬 코드 ┓\n 이벤트(호출) ┬ request - 처리 - Slacker - 출력\n 일반 입력 ┘\n\n> 이벤트\n slack의 event api 사용\n hear() 함수에서 처리\n\n> 텍스트 입력\n\n출력 : Slacker\n\n'''\n# -*- coding: utf-8 -*-\nimport time\nfrom flask import Flask, request, make_response\nfrom slacker import Slacker\nimport websocket\nimport re\nfrom threading import Thread\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport urllib.request\n\napp = Flask(__name__)\n\nslack_token = \"SECRET\"\nslack_client_id = \"SECRET\"\nslack_client_secret = \"SECRET\"\nslack_verification = \"SECRET\"\nslack = Slacker(slack_token)\n\ncaller = []\nchannel_list = []\n\n\ndef get_name(id):\n user = slack.users.info(id)\n name = user.body[\"user\"][\"profile\"][\"display_name\"]\n return name\n\n\ndef check_user(user, msg):\n if 'user' in msg and 'text' in msg:\n if user == msg['user']:\n return True\n return False\n\n\ndef proc(slack_response):\n call_elice(slack_response[\"event\"])\n\n\ndef interact():\n while True:\n if len(caller) > 0:\n res = caller.pop()\n proc(res)\n channel_list.remove(res[\"event\"][\"channel\"])\n\n\n# 앨리스 호출\ndef call_elice(event):\n res = slack.rtm.connect()\n endpoint = res.body['url']\n ws = websocket.create_connection(endpoint)\n\n slack.chat.post_message(event['channel'], \"부르셨어요, {}님? \\n `응` / `아니`\".format(get_name(event['user'])))\n while True:\n msg = json.loads(ws.recv())\n if check_user(event['user'], msg):\n name = get_name(msg['user'])\n channel = event[\"channel\"]\n re_yes = re.compile('((yes))|(응)|(ㅇ)+', re.I)\n re_no = re.compile('((no)|(아니))|(ㄴ)+', re.I)\n if msg['text'] == '<@UEXBA7A0K>':\n slack.chat.post_message(channel, \"저 여기 있어요~\"\n \"부르셨어요, {}님? 
\\n `응` / `아니`\".format(get_name(event['user'])))\n elif re_yes.match(msg['text']):\n elice(ws, event, msg)\n break;\n elif re_no.match(msg['text']):\n slack.chat.post_message(channel, \"필요할때 불러주세요:wink:\".format(name))\n break;\n else:\n slack.chat.post_message(channel, \"`응` / `아니`로 대답해주세요\")\n ws.close()\n\n\n#앨리스 실행\ndef elice(ws, event, msg):\n name = get_name(msg['user'])\n channel = event[\"channel\"]\n slack.chat.post_message(channel, \"안녕하세요! {}님\\n\"\n \"필요하신 기능을 말씀해주세요!\\n\"\n \"`넌 누구니?`/`맞춤법검사해줘`/`문법경찰출동`/`번역해줘`/`잘가`\".format(name))\n hello_c = 0\n while True:\n msg = json.loads(ws.recv())\n if check_user(event['user'], msg):\n msg = msg['text']\n msg = msg.replace(' ', '').strip()\n if msg == '<@UEXBA7A0K>':\n slack.chat.post_message(channel, \"저 여기 있어요~\")\n elif '안녕' in msg:\n hi(channel, hello_c, name)\n hello_c +=1\n elif '누구' in msg:\n who_is_elice(channel)\n elif '검사' in msg:\n lets_check(ws, channel, name, event['user'])\n elif '경찰' in msg:\n police(channel)\n elif '번역' in msg:\n trans(ws, channel, name, event['user'])\n elif '잘가' in msg:\n slack.chat.post_message(channel, \"필요할때 언제든 다시 불러주세요 :wink: 찡긋찡긋\")\n break;\n else:\n slack.chat.post_message(channel, \"다시 말해주세요\\n\")\n slack.chat.post_message(channel,\"\\n\\n\" \n \"이제 뭘 할까요?\\n\"\n \"`넌 누구니?`/`맞춤법검사해줘`/`문법경찰출동`/`번역해줘`/`잘가`\")\n else :\n if 'user' in msg and 'text' in msg :\n if event['user'] != msg['user']:\n slack.chat.post_message(channel, \"{}님이랑 작업중이에요! 끝날때까지 기다려주세요!\".format(name))\n\n\ndef hi(channel, hello_count, name):\n if hello_count == 0:\n slack.chat.post_message(channel, \"하하, {}님! 정말정말 반가워요!:heart_eyes:\".format(name))\n elif hello_count == 1:\n slack.chat.post_message(channel, \"음? 우리 이미 인사하지 않았었나요?:sweat_smile:\")\n elif hello_count == 2:\n slack.chat.post_message(channel, \"음? 우리 이미 인사하지 않았었나요?:thinking_face:\")\n elif hello_count == 3:\n slack.chat.post_message(channel, \"저도 나름 바쁜 봇이에요! 이제 대답 안할거에요!:expressionless:\")\n elif hello_count == 10:\n slack.chat.post_message(channel, \"ㅎㅎ.. 근성은 인정해드릴게요, 이제 진짜 끝이에요!:kissing_heart:\")\n else:\n pass\n\n\ndef who_is_elice(channel):\n slack.chat.post_message(channel, \"```저는 앨리스입니다!!\\n\"\n \"한국말을 잘 할 수 있게 도와주는 봇이에요\\n\"\n \"저를 유용하게 이용해 주셨으면 좋겠네요!\\n\"\n \"근데 우리, 인사는 했나요..?```\\n\")\n\n\ndef lets_check(ws, channel, name, user_id):\n slack.chat.post_message(channel, \"\"\"지금부터 {}님의 입력을 받을게요!\n 입력을 다 하셨으면 {}이라고 말씀해주세요!\"\"\".format(name, \"<@UEXBA7A0K>\"))\n user_text=[]\n while True:\n msg = json.loads(ws.recv())\n if check_user(user_id, msg):\n msg = msg['text']\n if \"<@UEXBA7A0K>\" in msg:\n user_text.append(msg[:msg.find('<@UEXBA7A0K>')])\n slack.chat.post_message(channel, \"입력받았습니다, 잠시만 기다려주세요!\")\n break;\n user_text.append(msg)\n elif 'user' in msg and 'text' in msg:\n slack.chat.post_message(channel, \"{}님이랑 문법검사중이에요! 
끝날때까지 기다려주세요!\".format(name))\n befo = '\\n'.join(user_text)\n after = spellCorrection(befo)\n\n slack.chat.post_message(channel, \"\"\"```수정된 내용은 다음과 같습니다.\n ==수정 전==\n {}\n ==수정 후==\n {}\n ```\"\"\".format(befo, after))\n\n\ndef police(channel):\n slack.chat.post_message(channel, \"경찰 출동!:rotating_light:\")\n time.sleep(0.5)\n slack.chat.post_message(channel, \".\")\n time.sleep(0.5)\n slack.chat.post_message(channel, \".\")\n time.sleep(0.5)\n slack.chat.post_message(channel, \".\")\n time.sleep(0.5)\n slack.chat.post_message(channel, \"죄송해요, 아직 기능이 없어요ㅠ\")\n\n\ndef spellCorrection(q):\n datas = {'text1': q}\n post_result = requests.post(\"http://speller.cs.pusan.ac.kr/PnuWebSpeller/lib/check.asp\", data=datas)\n if post_result.status_code != 200 :\n return \"\"\"지금 번역기에 문제가 있어서 검사가 안될것같아요ㅠ\n 조금 있다가 다시 시도해주세요!\"\"\"\n res = post_result.text\n soup = BeautifulSoup(res, \"html.parser\")\n tables = soup.find_all(\"table\", class_=\"tableErrCorrect\")\n replaced_list=[]\n for table in tables:\n err = table.find(\"td\", class_=\"tdErrWord\").get_text()\n cor = table.find(\"td\", class_=\"tdReplace\").get_text()\n replaced_list.append([err, cor])\n after = q;\n mody_list = []\n for replace_word in replaced_list:\n after = after.replace(replace_word[0], replace_word[1])\n mody_list.append(replace_word[0]+\" -> \"+replace_word[1])\n after+= \"\\n\"+'\\n'.join(mody_list)\n return after\n\n\ndef trans(ws, channel, name, user_id):\n slack.chat.post_message(channel, \"\"\"번역하고 싶은 말을 입력해주세요!\n입력이 끝나셨으면 {}이라고 말씀해주세요!\"\"\".format(\"<@UEXBA7A0K>\"))\n user_text=[]\n while True:\n msg = json.loads(ws.recv())\n if check_user(user_id, msg):\n msg = msg['text']\n if \"<@UEXBA7A0K>\" in msg:\n user_text.append(msg[:msg.find('<@UEXBA7A0K>')])\n slack.chat.post_message(channel, \"입력받았습니다, 잠시만 기다려주세요!\")\n break;\n user_text.append(msg)\n elif 'user' in msg and 'text' in msg:\n slack.chat.post_message(channel, \"{}님이랑 번역중이에요! 끝날때까지 기다려주세요!\".format(name))\n from_text = '\\n'.join(user_text)\n\n slack.chat.post_message(channel, \"```{}```\".format(trans_naver('\\n'.join(user_text))))\n\n\ndef trans_naver(q):\n encText = urllib.parse.quote(q)\n client_id = \"SECRET\"\n client_secret = \"SECRET\"\n #언어감지\n data = \"query=\" + encText\n url = \"https://openapi.naver.com/v1/papago/detectLangs\"\n request = urllib.request.Request(url)\n request.add_header(\"X-Naver-Client-Id\", client_id)\n request.add_header(\"X-Naver-Client-Secret\", client_secret)\n response = urllib.request.urlopen(request, data=data.encode(\"utf-8\"))\n rescode = response.getcode()\n from_type = 'ko'\n to_type = 'en'\n if (rescode == 200):\n response_body = response.read()\n res = json.loads(response_body.decode('utf-8'))\n from_type = res['langCode']\n if from_type == 'en':\n to_type = 'ko'\n elif from_type != 'ko':\n return \"번역이 안되는 언어입니다! 
죄송해요!\"\n else:\n print(\"Error Code:\" + rescode)\n return \"Error Code:\" + rescode\n #번역\n data = \"source={}&target={}&text={}\".format(from_type, to_type, encText)\n url = \"https://openapi.naver.com/v1/papago/n2mt\"\n request = urllib.request.Request(url)\n request.add_header(\"X-Naver-Client-Id\",client_id)\n request.add_header(\"X-Naver-Client-Secret\",client_secret)\n response = urllib.request.urlopen(request, data=data.encode(\"utf-8\"))\n rescode = response.getcode()\n\n #번역 결과\n if(rescode==200):\n response_body = response.read()\n res = json.loads(response_body.decode('utf-8'))\n from_type = res['message']['result']['srcLangType']\n to_type = res['message']['result']['tarLangType']\n translated = res['message']['result']['translatedText']\n return \"== 원문 ==\\n{}\\n== {} -> {} ==\\n{}\"\"\".format(q, from_type, to_type, translated)\n else:\n print(\"Error Code:\" + rescode)\n return rescode\n\n\n# 이벤트 핸들하는 함수\ndef _event_handler(event_type, slack_response):\n global caller, channel_list\n if event_type == \"app_mention\":\n channel = slack_response[\"event\"][\"channel\"]\n if slack_response[\"event\"][\"channel\"] in channel_list:\n pass\n else:\n caller.append(slack_response)\n channel_list.append(channel)\n return make_response(\"App mention message has been sent\", 200, )\n\n\n # ============= Event Type Not Found! ============= #\n # If the event_type does not have a handler\n message = \"You have not added an event handler for the %s\" % event_type\n # Return a helpful error message\n return make_response(message, 200, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/event\", methods=[\"GET\", \"POST\"])\ndef hears():\n slack_response = json.loads(request.data)\n\n if \"challenge\" in slack_response:\n return make_response(slack_response[\"challenge\"], 200, {\"content_type\":\"application/json\"})\n\n if slack_verification != slack_response.get(\"token\"):\n message = \"Invalid Slack verification token: %s\" % (slack_response[\"token\"])\n make_response(message, 403, {\"X-Slack-No-Retry\": 1})\n\n if \"event\" in slack_response:\n event_type = slack_response[\"event\"][\"type\"]\n _event_handler(event_type, slack_response)\n return make_response(event_type, 200, {\"content_type\":\"application/json\"})\n\n # If our bot hears things that are not events we've subscribed to,\n # send a quirky but helpful error response\n return make_response(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\n you're looking for.\", 404, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n return \"
    앨리스 서버에용! 어떻게 알고 오셨을까?ㅎ_ㅎ
    \"\n\n\nif __name__ == '__main__':\n p = Thread(target=interact)\n p.start()\n app.run('127.0.0.1', port=5000)\n p.join()\n\n","sub_path":"chatbot_elice.py","file_name":"chatbot_elice.py","file_ext":"py","file_size_in_byte":12588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80676894","text":"import asyncio\nimport websockets\nimport base64\nimport random\nimport requests\nimport json\nimport time\nfrom ecdsa import ellipticcurve\nfrom ecdsa import curves\nfrom ecdsa import SigningKey\nfrom hashlib import sha224\n\nclass args:\n auth= False\n tag=123 #any integer tag which will be returned in the authentication response from the exchnage\n url=\"wss://api.coinflex.com/v1\" #websocket URL for LIVE\n cookie='' #this is the API key from your CoinFLEX account\n id=0 #this is core ID for your CoinFLEX account\n phrase='' #this is password for your CoinFLEX account\n\n\n# Connects to the CoinFLEX /assets/ REST endpoint to retrieve the asset ID's\ndef get_assets():\n Assets = {}\n response= requests.get(url=\"https://webapi.coinflex.com/assets/\",headers={'Content-type': 'application/x-www-form-urlencoded'})\n asset_list = response.json()\n for item in asset_list:\n Assets[item['name']] = {}\n Assets[item['name']]['id'] = item['id']\n Assets[item['name']]['scale'] = item['scale'] \n return Assets\nAssets = get_assets()\n\ndef get_markets():\n Markets = {}\n response= requests.get(url=\"https://webapi.coinflex.com/markets/\",headers={'Content-type': 'application/x-www-form-urlencoded'})\n market_list = response.json()\n for item in market_list:\n Markets[item['base']] = {}\n Markets[item['base']]['counter'] = item['counter']\n if item.get('start')!=None: \n Markets[item['base']]['start'] = item['start']\n if item.get('expires')!=None: \n Markets[item['base']]['expires'] = item['expires']\n return Markets\nMarkets = get_markets() \n\ndef secp224k1():\n _a = 0x0000000000000000000000000000000000000000000000000000000000\n _b = 0x0000000000000000000000000000000000000000000000000000000005\n _p = 0x00fffffffffffffffffffffffffffffffffffffffffffffffeffffe56d\n _Gx = 0x00a1455b334df099df30fc28a169a467e9e47075a90f7e650eb6b7a45c\n _Gy = 0x007e089fed7fba344282cafbd6f7e319f7c0b0bd59e2ca4bdb556d61a5\n _r = 0x010000000000000000000000000001dce8d2ec6184caf0a971769fb1f7\n curve_secp224k1 = ellipticcurve.CurveFp(_p, _a, _b)\n generator_secp224k1 = ellipticcurve.Point(curve_secp224k1, _Gx, _Gy, _r)\n secp224k1_instance = curves.Curve(\n \"SECP224k1\",\n curve_secp224k1,\n generator_secp224k1,\n (1, 3, 132, 0, 20),\n \"secp256k1\"\n )\n return secp224k1_instance\n\ndef compute_ecdsa_signature(user_id,passphrase,server_nonce,client_nonce):\n #sys_random = random.SystemRandom()\n #ecdsa_nonce = sys_random.getrandbits(224)\n user_bytes = int(user_id).to_bytes(8, \"big\")\n message = b\"\".join([user_bytes, server_nonce, client_nonce])\n key = b\"\".join([user_bytes, passphrase])\n key_hash = sha224(key).digest()\n exponent = int.from_bytes(key_hash, \"big\", signed = False)\n #secp224k1 = secp224k1()\n priv_key = SigningKey.from_secret_exponent(exponent, curve = secp224k1(), hashfunc = sha224)\n r, s = priv_key.sign_deterministic(message, hashfunc = sha224,\n sigencode = lambda r, s, order: (r, s)\n )\n r = r.to_bytes(28, \"big\")\n s = s.to_bytes(28, \"big\")\n r = base64.b64encode(r).decode()\n s = base64.b64encode(s).decode()\n\n return r, s\n\ndef authenticate(tag,user_id,cookie,passphrase,server_nonce):\n client_nonce = random.getrandbits(16 * 
8).to_bytes(16, \"big\")\n signature = compute_ecdsa_signature(user_id,passphrase.encode(),server_nonce,client_nonce)\n send_signature= {\"tag\":tag, \"method\": \"Authenticate\",\"user_id\": user_id,\"cookie\": cookie,\n \"nonce\": base64.b64encode(client_nonce).decode(),\"signature\": signature}\n return send_signature\n\n\nasync def subscribe():\n global ws\n async with websockets.connect(args.url) as ws: \n while True:\n #If there's a websocket disconnection then this will attempt a re-connection\n if not ws.open: \n ws = await websockets.connect(args.url)\n try:\n response = await ws.recv()\n msg = json.loads(response)\n print(msg)\n #if websocket connection is successful a welomce notice and a NONCE will be returned in a repsonse from the exchange\n if 'nonce' in msg:\n #once websocket is connected then send commands to subscribe to the public endpoints such as WatchTicker and WatchOrders\n payload_ticker = {\"method\": \"WatchTicker\", \"base\": Assets['XBT']['id'],\"counter\": Assets['USDT']['id'], \"watch\": True}\n await ws.send(json.dumps(payload_ticker))\n \n payload_ticker = {\"method\": \"WatchOrders\", \"base\": Assets['XBT']['id'],\"counter\": Assets['USDT']['id'], \"watch\": True}\n await ws.send(json.dumps(payload_ticker))\n \n #If client wants to authenticate then set args.auth=True in the args class above and this will run\n if args.auth:\n server_nonce = base64.b64decode(msg['nonce'])\n payload_auth = authenticate(args.tag,args.id,args.cookie,args.phrase,server_nonce)\n await ws.send(json.dumps(payload_auth))\n \n #if the authenticate tag is returned in the repsonse then authentication was sucessful\n if 'tag' in msg and msg['tag']==args.tag:\n print('Authentication Successful')\n #once websocket is authenticated then send commands to subscribe to the private endpoints such as GetBalances\n payload_balances = {\"method\": \"GetBalances\"}\n await ws.send(json.dumps(payload_balances))\n \n except Exception as error:\n err_msg = 'Error: '+str(time.time())+' '+repr(error)\n print(err_msg)\n \nasyncio.get_event_loop().run_until_complete(subscribe())\n","sub_path":"Websocket API (Method 2)/clientExample (websockets & asyncio).py","file_name":"clientExample (websockets & asyncio).py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53020606","text":"import urllib3\nfrom urllib.parse import urlencode\nfrom urllib3.response import HTTPResponse\n\nurl = 'https://movie.douban.com/j/search_subjects'\nua = '\tMozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0'\nd = {\n 'type': 'tv',\n 'tag': '热门',\n 'page_limit': '50',\n 'page_start': '0'\n}\nurl = '{}?{}'.format(url, urlencode(d))\n\n\nwith urllib3.PoolManager() as http:\n response = http.urlopen(method='GET', url=url, headers={\n 'User-Agent': ua\n })\n print(type(response))\n # response: HTTPResponse = HTTPResponse() # 写出来是为了要response的属性\n print(response.status)\n print(response.data)\n","sub_path":"Spider/spider/u9.py","file_name":"u9.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6763840","text":"#앙상블 모델로 만드시오.(input:output=2:1)\nfrom numpy import array\nfrom keras.models import Model\nfrom keras.layers import Dense,LSTM,Input\n\n\nx1=array([[1,2,3], [2,3,4], [3,4,5], [4,5,6],\n [5,6,7],[6,7,8],[7,8,9],[8,9,10],\n [9,10,11],[10,11,12],[20,30,40],[30,40,50],[40,50,60]])\nx2=array([[10,20,30], [20,30,40], 
[30,40,50], [40,50,60],\n [50,60,70],[60,70,80],[70,80,90],[80,90,100],\n [90,100,110],[100,110,120],[2,3,4],[3,4,5],[4,5,6]]) \ny=array([4,5,6,7,8,9,10,11,12,13,50,60,70])\nx1_predict=array([55,65,75])\nx2_predict=array([65,75,85])\n\nx1=x1.reshape(x1.shape[0],x1.shape[1],1) \nx2=x2.reshape(x2.shape[0],x2.shape[1],1)\nprint(\"x1.shape:\",x1.shape) \nprint(\"x2.shape:\",x2.shape)\n\n#2. 모델구성\ninput1=Input(shape=(3,1)) # 변수명은 소문자(암묵적약속)\ndense1_1=LSTM(30,activation='relu',name='A1')(input1) #input명시해주어야 함\ndense1_2=Dense(40,activation='relu',name='A2')(dense1_1)\ndense1_3=Dense(50,activation='relu',name='A3')(dense1_2)\ndense1_4=Dense(20,activation='relu',name='A4')(dense1_3)\ndense1_4=Dense(30,activation='relu',name='A4')(dense1_3)\ndense1_4=Dense(20,activation='relu',name='A4')(dense1_3)\ndense1_4=Dense(10,activation='relu',name='A4')(dense1_3)\n\n\ninput2=Input(shape=(3,1)) # 변수명은 소문자(암묵적약속)\ndense2_1=LSTM(30,activation='relu',name='B1')(input2) #input명시해주어야 함\ndense2_2=Dense(40,activation='relu',name='B2')(dense2_1)\ndense2_3=Dense(50,activation='relu',name='B3')(dense2_2)\ndense2_4=Dense(30,activation='relu',name='B4')(dense2_3)\ndense2_4=Dense(10,activation='relu',name='B4')(dense2_3)\ndense2_4=Dense(30,activation='relu',name='B4')(dense2_3)\ndense2_4=Dense(20,activation='relu',name='B4')(dense2_3)\n\nfrom keras.layers.merge import concatenate\nmerge1=concatenate([dense1_4,dense2_4],name='merge1')\n\nmiddle1=Dense(30,name='m1')(merge1)\nmiddle1=Dense(50,name='m2')(middle1)\nmiddle1=Dense(70,name='m3')(middle1)\n\noutput1=Dense(10,name='o1')(middle1)\noutput1_2=Dense(70,name='o1_2')(output1)\noutput1_3=Dense(25,name='o1_3')(output1_2) \noutput1_4=Dense(25,name='o1_4')(output1_3) \noutput1_5=Dense(1,name='o1_5')(output1_4)\n\nmodel=Model(inputs=[input1, input2],output=output1_5)\nmodel.summary()\n\n#3. 실행\nmodel.compile(optimizer='adam',loss='mse',metrics=['mse']) #metrics하나 안하나 상관없다.\nmodel.fit([x1,x2],y,epochs=1000,batch_size=1)\n\n#그러나 예측을 할 때는 데이터의 개수가 주어지고 그것의 형태를 맞춰주어야 한다. \n#(3,) 와꾸가 안맞음--->(1,3,1)로 변환 (행, 열, 몇개로 쪼갤건지)\nx1_predict=x1_predict.reshape(1,3,1)\nx2_predict=x2_predict.reshape(1,3,1)\n# print(x1_predict.shape)\n# print(x2_predict.shape)\n\n# print(x1.shape)\n# print(x2.shape)\n\ny_predict=model.predict([x1_predict,x2_predict]) #처음 모델이 x 2개를 넣어서 y하나 예측하는 것이었음 따라서 predict도 동일하게!\n\n #대괄호 써주어야 함 리스트로 만들기\nprint(\"y_predict:\",y_predict)\n\n\n\n","sub_path":"keras/keras33_lstm_ensemble.py","file_name":"keras33_lstm_ensemble.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529331171","text":"\"\"\"\nThis file is part of CLIMADA-papers.\n\nEberenz, S., Stocker, D., Röösli, T., and Bresch, D. N.: Asset exposure data for global physical risk assessment, Earth Syst. Sci. 
Data, 12, 817–833, https://doi.org/10.5194/essd-12-817-2020, 2020.\n\n\nFunctionality: LitPop exposure data model evaluation for 14 countries and plotting of scatter and box plots\nSections 2.6, 3.2, 3.3\nFigures 3, 5\nTables (A1), A2, A3\n\nRequires https://github.com/CLIMADA-project/climada_python/releases/tag/v1.2.0\nor later\n\nThe required gridded population data GPWv4.10 is available from SEDAC's Beta site, please see\nhttps://beta.sedac.ciesin.columbia.edu/data/collection/gpw-v4/sets/browse\n\nFor more guidance on the LitPop module please refer to the CLIMADA tutorial:\nhttps://climada-python.readthedocs.io/en/latest/tutorial/climada_entity_LitPop.html\n\n@author: Samuel Eberenz\n\"\"\"\n\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\n\nfrom climada.entity.exposures import litpop as lp\nfrom climada.util.constants import DATA_DIR\nfrom climada.util.finance import income_group\n\n# set output directory:\noutput_path = os.path.join(DATA_DIR, 'results')\nif not os.path.isdir(output_path):\n os.mkdir(output_path)\n\nexperiment_name = 'v1'\n\n# SWITCH FEATURES ON/OFF:\n\"\"\"\n- compute_validation:\nCompute full validation statistics for all selected countries:\nPearson correlation coefficient (rho), linear regression slope beta,\nand root mean squared fraction RMSF for variations of Lit^n * Pop^m.\n# warning: computational intensive. This can take several hours.\nPlots scatter plots per country (Figure 5).\n\n- validation_plots:\nMake and save box plots (Figure 3).\nThis requires compute_validation to be run for all selected countries first.\n\"\"\"\ncompute_validation = True # default: True\nvalidation_plots = True # default: True\n\n# quick_test: make quick test to check whether engine works\nquick_test = False\n\n# countries: list of countries taken into account.\n# each country requires regional gross regional product (GRP) data file as XLS in data > system > GSDP\ncountries = ['AUS', 'BRA', 'CAN', 'CHE', 'CHN', 'DEU', 'FRA', 'IDN', 'IND', \\\n 'JPN', 'MEX', 'TUR', 'USA', 'ZAF']\n\ncountries = sorted(countries)\nif quick_test:\n countries_sel = [3] # Switzerland only\n resolution = 120 # reduced resolution\n experiment_name = 'test'\nelse:\n countries_sel = np.arange(0, len(countries)) # all countries in list\n # set resolution of exposure in arcsec. set to 30 for best results (slow):\n resolution = 30\n\n# name per method (i.e. 
combination of Lit and Pop with varying exponents) evaluated:\nmethods = ['Lit', 'Lit2', 'Lit3', 'Lit4', 'Lit5', 'Pop', 'Pop2', 'Lit3Pop', 'Lit2Pop', 'LitPop']\n# exponents per method:\nexponents_list = [[1, 0], [2, 0], [3, 0], [4, 0], [5, 0], [0, 1], [0, 2], [3, 1], [2, 1], [1, 1]]\n# choose which method to include in plots + marker style per method:\nmethods_show = [True, False, True, False, True, True, False, False, False, True]\nmarkers_list = ['o', 's', '^', 'd', 'p', 'o', 's', '^', 's', 'o']\n\n# coefficients to be calculated per country and method:\ncoeff_types = ['rp', 'rs', 'rmse', 'rmsf']\n\n# initiating variables...\ncc = 0\nall_coeffs = list()\nrmsf_coeffs = list()\nslope_coeffs = list()\npval_coeffs = list()\nfor meth in methods:\n rmsf_coeffs.append('rmsf_' + meth)\n slope_coeffs.append('slope_' + meth)\n pval_coeffs.append('pval_' + meth)\n for coeff in coeff_types:\n all_coeffs.append(coeff + '_' + meth)\n cc = cc + 1\n\n# indicies of each skill metric:\nrp_i = np.arange(0, cc, len(coeff_types))\nrs_i = np.arange(1, cc, len(coeff_types))\nrmse_i = np.arange(2, cc, len(coeff_types))\nrmsf_i = np.arange(3, cc, len(coeff_types))\n\ncolors3 = ['#1b9e77', '#7570b3', '#d95f02']\nc3_10 = [0, 0, 0, 0, 0, 1, 1, 2, 2, 2]\n\nincome_groups = list()\nfor cntry in countries:\n income_groups.append(income_group(cntry, 2016)[1])\n\n\nif compute_validation:\n \"\"\"computation of normalized Gross Regional Product nGRP, skill metrics,\n and make scatter plots\"\"\"\n rho = dict()\n adm0 = dict()\n adm1 = dict()\n # loop over countries, computing nGRP and skill\n for i in countries_sel:\n print('*** ' + countries[i] + ' *** ')\n start_time_c = time.time()\n rho[countries[i]], adm0[countries[i]], adm1[countries[i]] =\\\n lp.admin1_validation(countries[i], methods, exponents_list, \\\n res_arcsec=resolution, check_plot=False)\n\n plt.figure() # Scatter plot per country\n lit3_scatter = plt.scatter(adm1[countries[i]]['Lit3'], \\\n adm0[countries[i]]['Lit3'], c=colors3[0], marker='^')\n pop_scatter = plt.scatter(adm1[countries[i]]['Pop'], \\\n adm0[countries[i]]['Pop'], c=colors3[1])\n litpop_scatter = plt.scatter(adm1[countries[i]]['LitPop'], \\\n adm0[countries[i]]['LitPop'], c=colors3[2])\n plt.plot([0, np.max([plt.gca().get_xlim()[1], plt.gca().get_ylim()[1]])],\n [0, np.max([plt.gca().get_xlim()[1], plt.gca().get_ylim()[1]])],\\\n ls=\"--\", c=\".3\")\n\n plt.legend((litpop_scatter, lit3_scatter, pop_scatter),\\\n (r'$LitPop$', r'$Lit^3$', r'$Pop$',))\n plt.xlabel('Reference nGRP')\n plt.ylabel('Modeled nGRP')\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + countries[i] + str(resolution) + '_.pdf'), \\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()\n\n df = pd.DataFrame(adm0[countries[i]])\n df.to_csv(os.path.join(output_path, experiment_name + '_' + countries[i] + \\\n str(resolution) + '_adm0.csv'))\n df = pd.DataFrame(adm1[countries[i]])\n df.to_csv(os.path.join(output_path, experiment_name + '_' + countries[i] + \\\n str(resolution) + '_adm1_ref.csv'))\n\n df_r = pd.DataFrame(rho)\n df_r['COEFF'] = all_coeffs\n cols = df_r.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n df_r = df_r[cols]\n # df_r.to_csv(os.path.join(output_path, experiment_name + '_' + countries[i] + str(resolution) + \\\n # '_corr_coeffs_zwschnspchr.csv'))\n print('Computing admin1-validation for ' + countries[i] + ' took ' + \\\n 
str(round(time.time()-start_time_c, 2)) + 's')\n\n df_r = pd.DataFrame(rho)\n df_r['COEFF'] = all_coeffs\n cols = df_r.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n df_r = df_r[cols]\n df_r.to_csv(os.path.join(output_path, experiment_name + '_' + str(resolution) + '_corr_coeffs.csv'))\n\n# defining plotting functions:\ndef plot_skillpercountry(data_df, **args):\n \"\"\"\n Make plot of skill scores with countries on x-axis, methods in legend\n \"\"\"\n\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n dd = args.get('dd', 5.8) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n\n plt.figure(facecolor='w', figsize=(wdth, hght))\n\n for i in np.arange(0, len(methods_show)):\n if not methods_show[i]:\n markers_list[i] = ''\n else:\n plt.scatter([], [], marker=markers_list[i], lw=1, c=colors3[c3_10[i]], \\\n s=markersize, edgecolor='black', linewidth='.4', label=meth_labels[i])\n plt.legend()\n # legendspace:\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white')\n\n # actual plotting:\n for i in countries_sel: # country\n for j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([i], data_df[countries[i]][idx[j]], marker=markers_list[j], \\\n c=colors3[c3_10[j]],\\\n s=markersize, edgecolor='black', linewidth='.5',\\\n alpha=1., zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, i], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(countries_sel, [countries[i] for i in countries_sel], color='black')\n plt.grid(axis='y')\n plt.xlabel('Country')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()\n\ndef plot_countryperskill(data_df, **args):\n \"\"\"\n Make plot of skill scores with method on x-axis, countries in legend\n \"\"\"\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n order = args.get('order', np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int))\n dd = args.get('dd', .7) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n colors14 = args.get('colors14', ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', \\\n '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', \\\n '#cab2d6', '#6a3d9a', '#ffff99', '#b15928', \\\n '#dd1c77', '#8dd3c7'])\n plt.figure(facecolor='w', figsize=(wdth, hght))\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n idx = idx[order]\n meth_labels = [meth_labels[i] for i in order]\n # empty plots for legend handlers:\n for i in np.arange(0, len(countries_sel)): # country\n plt.scatter([], [], marker='o', s=markersize, edgecolor='black', linewidth='.4',\\\n c=colors14[i], label=countries[countries_sel[i]])\n plt.legend()\n\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white') # legendspace\n\n # actual plotting:\n for i in np.arange(0, len(countries_sel)): # country\n for 
j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([j], data_df[countries[countries_sel[i]]][idx[j]], marker='o', \\\n s=markersize, edgecolor='black', linewidth='.4',\\\n alpha=1., c=colors14[i], zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, j], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(np.arange(0, len(idx)), meth_labels, color='black', rotation=30)\n plt.grid(axis='y')\n # plt.xlabel('Method')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_perScore_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()\n\ndef boxplot_skillpermethod(data_df, **args):\n \"\"\"\n Make boxplot of skill scores with method on x-axis\n \"\"\"\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n order = args.get('order', np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int))\n # dd = args.get('dd', .7) # 3.3\n wdth = args.get('wdth', 6) # 7\n hght = args.get('hght', 3.5)\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n ticks_y = args.get('ticks_y', None)\n\n meth_labels = [r'$Lit^1$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop^1$', r'$Pop^2$', r'$Lit^3Pop^1$', r'$Lit^2Pop^1$', r'$Lit^1Pop^1$']\n idx = idx[order]\n meth_labels = [meth_labels[i] for i in order]\n data_df = data_df.set_index('COEFF')\n\n f_h = plt.figure(facecolor='w', figsize=(wdth, hght))\n ax_h = f_h.add_subplot(1,1,1)\n\n if not target_y == 'none':\n ax_h.plot([0, len(idx)+1], [target_y, target_y], c='black', alpha=.25, lw=3, ls='-', zorder=1)\n\n data_df.iloc[np.array(idx)].T.boxplot(ax=ax_h)\n plt.xticks(np.arange(1, len(idx)+1), meth_labels, color='black', rotation=30)\n if not ticks_y is None:\n ax_h.set_yscale('log')\n ax_h.yaxis.set_ticks(ticks_y)\n ax_h.yaxis.set_ticklabels( ['%1.1f' % i for i in ticks_y] )\n\n plt.grid(axis='x')\n plt.ylabel(label_y)\n # plt.title(name)\n f_h.tight_layout()\n\n f_h.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_BOX_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n f_h.show()\n\n\n\nif validation_plots:\n \"\"\" make boxplots of skills per country ecetera (for evaluation)\"\"\"\n adm1_gdp_share_all = pd.DataFrame()\n # load GDP-share for each country and combine into 1 dataframe:\n rmsf = dict()\n slopes = dict()\n p_vals = dict()\n for i in countries_sel:\n adm1_gdp_share = pd.read_csv(os.path.join(output_path, experiment_name + '_' + countries[i] + \\\n str(resolution) + '_adm0.csv'), index_col=0)\n adm1_reference = pd.read_csv(os.path.join(output_path, experiment_name + '_' + countries[i] + \\\n str(resolution) + '_adm1_ref.csv'), index_col=0)\n\n adm1_gdp_share['Reference'] = adm1_reference['LitPop']\n adm1_gdp_share = adm1_gdp_share[adm1_gdp_share > 1e-12]\n adm1_gdp_share['country_num'] = i\n adm1_gdp_share['country'] = countries[i]\n adm1_gdp_share_all = pd.concat([adm1_gdp_share_all, adm1_gdp_share])\n\n\n # compute RMSF (Root mean squared fraction) for each method:\n rmsf[countries[i]] = list()\n slopes[countries[i]] = list()\n p_vals[countries[i]] = list()\n for i_meth in np.arange(0, len(methods)):\n 
rmsf[countries[i]].append(np.exp(np.sqrt(np.sum( \\\n (np.log(adm1_gdp_share[methods[i_meth]]/ \\\n adm1_gdp_share['Reference']))**2)/adm1_gdp_share.shape[0])))\n\n val1 = ~np.isnan(adm1_gdp_share['Reference'])\n val2 = ~np.isnan(adm1_gdp_share[methods[i_meth]])\n slopes[countries[i]].append(stats.linregress( \\\n adm1_gdp_share['Reference'][val1 & val2],\\\n adm1_gdp_share[methods[i_meth]][val1 & val2])[0])\n p_vals[countries[i]].append(stats.linregress( \\\n adm1_gdp_share['Reference'][val1 & val2],\\\n adm1_gdp_share[methods[i_meth]][val1 & val2])[3])\n #rmsf = np.exp(np.sqrt(np.sum((np.log(adm0_data/adm1_data))**2)/ \\\n # adm0_data.shape[0]))\n\n df_rmsf = pd.DataFrame(rmsf)\n df_rmsf['COEFF'] = rmsf_coeffs\n df_rmsf.to_csv(os.path.join(output_path, experiment_name + '_' + str(resolution) + '_rmsf.csv'))\n df_slope = pd.DataFrame(slopes)\n df_slope['COEFF'] = slope_coeffs\n df_slope.to_csv(os.path.join(output_path, experiment_name + '_' + str(resolution) + '_slope.csv'))\n df_p = pd.DataFrame(p_vals)\n df_p['COEFF'] = pval_coeffs\n df_p.to_csv(os.path.join(output_path, experiment_name + '_' + str(resolution) + '_p_val.csv'))\n\n r = pd.read_csv(os.path.join(output_path, experiment_name + '_' + str(resolution) + '_corr_coeffs.csv'))\n r.__delitem__('Unnamed: 0')\n# r['all countries']=rho_all_df['ALL']\n\n # PLOTTING:\n order=np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int)\n plot_skillpercountry(r, idx=rp_i, name='Pearson Correlation', countries=countries, \\\n countries_sel=countries_sel, label_y=r'$\\rho$', \\\n methods_show=methods_show)\n plot_skillpercountry(df_slope, name='Linear Regression Slope', countries=countries, \\\n countries_sel=countries_sel, label_y=r'$\\beta$', methods_show=methods_show)\n plot_skillpercountry(r, idx=rmsf_i, name='Root Mean Squared Fraction', countries=countries, \\\n countries_sel=countries_sel, label_y='RMSF', methods_show=methods_show)\n\n plot_countryperskill(r, idx=rp_i, order=order, name='Pearson Correlation', countries=countries, \\\n countries_sel=countries_sel, label_y=r'$\\rho$', \\\n methods_show=methods_show)\n plot_countryperskill(df_slope, order=order, name='Linear Regression Slope', countries=countries, \\\n countries_sel=countries_sel, label_y=r'$\\beta$', methods_show=methods_show)\n plot_countryperskill(r, idx=rmsf_i, order=order, name='Root Mean Squared Fraction', countries=countries, \\\n countries_sel=countries_sel, label_y='RMSF', methods_show=methods_show)\n boxplot_skillpermethod(r, idx=rp_i, order=order, name='Pearson Correlation', countries=countries, \\\n countries_sel=countries_sel, label_y=r'$\\rho$', \\\n methods_show=methods_show)\n boxplot_skillpermethod(df_slope, order=order, name='Linear Regression Slope', countries=countries, \\\n countries_sel=countries_sel, label_y=r'$\\beta$', methods_show=methods_show)\n boxplot_skillpermethod(r, idx=rmsf_i, order=order, name='Root Mean Squared Fraction', countries=countries, \\\n countries_sel=countries_sel, label_y='RMSF', methods_show=methods_show, \\\n ticks_y = np.array([1, 2, 5, 10, 20]))\n\n # Rearrange Data frames for nice CSV output\n methods = [methods[i] for i in order]\n countries_ = [countries[i] for i in countries_sel]\n r_rp = r[countries_].iloc[rp_i[order]]\n r_rp['COEFF'] = r['COEFF'].iloc[rp_i[order]]\n\n r_slope = df_slope[countries_].iloc[order]\n r_slope['COEFF'] = df_slope['COEFF'].iloc[order]\n\n r_rmsf = r[countries_].iloc[rmsf_i[order]]\n r_rmsf['COEFF'] = r['COEFF'].iloc[rmsf_i[order]]\n\n cols = r_rp.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n r_rp = 
r_rp[cols]\n r_rp.set_index('COEFF')\n r_slope = r_slope[cols]\n r_slope.set_index('COEFF')\n r_rmsf = r_rmsf[cols]\n r_rmsf.set_index('COEFF')\n r_rp = r_rp.reset_index(drop=True)\n r_slope = r_slope.reset_index(drop=True)\n r_rmsf = r_rmsf.reset_index(drop=True)\n\n #Create a DataFrame\n statistics_ = {'Method':methods}\n r_stat = pd.DataFrame(statistics_)\n r_stat['rp_median'] = r_rp.quantile(q=0.5, axis=1, numeric_only=True, interpolation='linear')\n r_stat['rp_IQR'] = r_rp.quantile(q=0.75, axis=1, numeric_only=True, interpolation='linear')\\\n -r_rp.quantile(q=0.25, axis=1, numeric_only=True, interpolation='linear')\n r_stat['slope_median'] = r_slope.quantile(q=0.5, axis=1, numeric_only=True, interpolation='linear')\n r_stat['slope_IQR'] = r_slope.quantile(q=0.75, axis=1, numeric_only=True, interpolation='linear')\\\n -r_slope.quantile(q=0.25, axis=1, numeric_only=True, interpolation='linear')\n r_stat['rmsf_median'] = r_rmsf.quantile(q=0.5, axis=1, numeric_only=True, interpolation='linear')\n r_stat['rmsf_IQR'] = r_rmsf.quantile(q=0.75, axis=1, numeric_only=True, interpolation='linear')\\\n -r_rmsf.quantile(q=0.25, axis=1, numeric_only=True, interpolation='linear')\n r_stat = r_stat.round(2)\n r_rp = r_rp.round(2)\n r_slope = r_slope.round(2)\n r_rmsf = r_rmsf.round(2)\n\n # save to CSV:\n r_stat.to_csv(os.path.join(output_path, experiment_name + '_STAT_' + str(resolution) + '.csv'))\n r_rp.to_csv(os.path.join(output_path, experiment_name + '_RP_' + str(resolution) + '.csv'))\n r_slope.to_csv(os.path.join(output_path, experiment_name + '_SLOPE_' + str(resolution) + '.csv'))\n r_rmsf.to_csv(os.path.join(output_path, experiment_name + '_RMSF_' + str(resolution) + '.csv'))\n","sub_path":"201903_litpop_exposure_data_model/climada_v1.3.1/litpop_evaluation.py","file_name":"litpop_evaluation.py","file_ext":"py","file_size_in_byte":20437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"471750899","text":"# Copyright (c) 2020-2023 Antmicro \n#\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nWrappers for drawing plots for reports.\n\"\"\"\n\nimport sys\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom typing import List, Tuple, Optional, Dict, Union, Iterable, Any\nimport numpy as np\nimport itertools\nfrom pathlib import Path\nfrom matplotlib import gridspec\nfrom matplotlib import patheffects\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.colors import ListedColormap\nfrom math import floor, pi\nfrom scipy.signal import savgol_filter\nfrom contextlib import contextmanager\nif sys.version_info.minor < 9:\n from importlib_resources import path\nelse:\n from importlib.resources import path\n\nfrom kenning.resources import reports\n\n\nBOKEH_THEME_FILE = path(reports, 'bokeh_theme.yml')\nMATPLOTLIB_THEME_FILE = path(reports, 'matplotlib_theme_rc')\n\nRED = '#d52a2a'\nGREEN = '#1c7d4d'\n# Creating colormap for confusion matrix\ncmap_values = np.ones((256, 4))\nfor channel in range(3):\n pos = 1 + 2*channel\n cmap_values[:, channel] = np.linspace(\n int(RED[pos:pos + 2], 16),\n int(GREEN[pos:pos + 2], 16), 256)\ncmap_values[:, :3] /= 255\nRED_GREEN_CMAP = ListedColormap(cmap_values, name='red_green_colormap')\n\nIMMATERIAL_COLORS = [\n \"#ef5552\", # red\n # \"#e92063\", # pink\n \"#ab47bd\", # purple\n \"#7e56c2\", # deep-purple\n \"#4051b5\", # indigo\n \"#2094f3\", # blue\n \"#00bdd6\", # cyan\n \"#009485\", # teal\n \"#4cae4f\", # green\n \"#cbdc38\", # lime\n # \"#ffec3d\", # yellow\n 
\"#ffa724\", # orange\n \"#795649\", # brown\n \"#546d78\", # deep-blue\n]\n\n\ndef get_comparison_color_scheme(n_colors: int) -> List[Tuple]:\n \"\"\"\n Creates default color schema to use for comparison plots (such as violin\n plot, bubble chart etc.).\n\n Parameters\n ----------\n n_colors : int\n Number of colors to return.\n\n Returns\n -------\n List[Tuple] :\n List of colors to use for plotting.\n \"\"\"\n CMAP_NAME = \"nipy_spectral\"\n cmap = plt.get_cmap(CMAP_NAME)\n return [cmap(i) for i in np.linspace(0.0, 1.0, n_colors)]\n\n\ndef time_series_plot(\n outpath: Optional[Path],\n title: str,\n xtitle: str,\n xunit: str,\n ytitle: str,\n yunit: str,\n xdata: List,\n ydata: List,\n trimxvalues: bool = True,\n skipfirst: bool = False,\n figsize: Tuple = (15, 8.5),\n bins: int = 20):\n \"\"\"\n Draws time series plot.\n\n Used i.e. for timeline of resource usage.\n\n It also draws the histogram of values that appeared throughout the\n experiment.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Output path for the plot image. If None, the plot will be displayed.\n title : str\n Title of the plot.\n xtitle : str\n Name of the X axis.\n xunit : str\n Unit for the X axis.\n ytitle : str\n Name of the Y axis.\n yunit : str\n Unit for the Y axis.\n xdata : List\n The values for X dimension.\n ydata : List\n The values for Y dimension.\n trimxvalues : bool\n True if all values for the X dimension should be subtracted by\n the minimal value on this dimension.\n skipfirst : bool\n True if the first entry should be removed from plotting.\n figsize : Tuple\n The size of the figure.\n bins : int\n Number of bins for value histograms.\n \"\"\"\n start = 1 if skipfirst else 0\n xdata = np.array(xdata[start:], copy=True)\n ydata = np.array(ydata[start:], copy=True)\n if trimxvalues:\n minx = min(xdata)\n xdata = [x - minx for x in xdata]\n fig, (axplot, axhist) = plt.subplots(\n ncols=2,\n tight_layout=True,\n figsize=figsize,\n sharey=True,\n gridspec_kw={'width_ratios': (8, 3)}\n )\n if title:\n fig.suptitle(title, fontsize='x-large')\n axplot.scatter(xdata, ydata, c='purple', alpha=0.5)\n xlabel = xtitle\n if xunit is not None:\n xlabel += f' [{xunit}]'\n ylabel = ytitle\n if yunit is not None:\n ylabel += f' [{yunit}]'\n axplot.set_xlabel(xlabel, fontsize='large')\n axplot.set_ylabel(ylabel, fontsize='large')\n axplot.grid()\n\n axhist.hist(ydata, bins=bins, orientation='horizontal', color='purple')\n axhist.set_xscale('log')\n axhist.set_xlabel('Value histogram', fontsize='large')\n axhist.grid(which='both')\n plt.setp(axhist.get_yticklabels(), visible=False)\n\n if outpath is None:\n plt.show()\n else:\n plt.savefig(outpath)\n plt.close()\n\n\ndef draw_multiple_time_series(\n outpath: Optional[Path],\n title: str,\n xdata: Dict[str, List],\n xtitle: str,\n ydata: Dict[str, List],\n ytitle: str,\n skipfirst: bool = False,\n smooth: Optional[int] = None,\n figsize: Tuple = (11, 8.5),\n colors: Optional[List] = None,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws multiple time series plots.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Path where the plot will be saved. 
If None, the plot will be displayed.\n title : str\n Title of the plot.\n xdata : Dict[str, List]\n Mapping between name of the model and x coordinates of samples.\n xtitle : str\n Name of the X axis.\n ydata : Dict[str, List]\n Mapping between name of the model and y coordinates of samples.\n ytitle : str\n Name of the Y axis.\n skipfirst : bool\n True if the first entry should be removed from plotting.\n smooth : Optional[int]\n If None, raw point coordinates are plotted in a line plot.\n If int, samples are plotted in a scatter plot in a background,\n and smoothing is performed with Savitzky–Golay filter to the line,\n where `smooth` is the window size parameter.\n figsize : Tuple\n The size of the figure.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n start = 1 if skipfirst else 0\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n if title:\n fig.suptitle(title, fontsize=\"x-large\")\n if colors is None:\n colors = get_comparison_color_scheme(len(xdata))\n for color, (samplename, sample) in zip(colors, ydata.items()):\n x_sample = xdata[samplename][start:]\n x_sample = np.array(x_sample)\n x_sample -= np.min(x_sample)\n y_sample = sample[start:]\n if smooth is None:\n ax.plot(x_sample, y_sample, label=samplename, color=color)\n else:\n ax.scatter(x_sample, y_sample, alpha=0.15, marker='.',\n s=10, color=color)\n smoothed = savgol_filter(y_sample, smooth, 3)\n ax.plot(x_sample, smoothed, label=samplename,\n linewidth=3, color=color)\n\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n plt.legend()\n plt.grid()\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(f\"{outpath}.{ext}\")\n plt.close()\n\n\ndef draw_violin_comparison_plot(\n outpath: Optional[Path],\n title: str,\n xnames: List[str],\n data: Dict[str, List],\n colors: Optional[List] = None,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws violin plots comparing different metrics.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Path where the plot will be saved without extension. 
If None,\n the plot will be displayed.\n title : str\n Title of the plot.\n xnames : List[str]\n Names of the metrics in order.\n data : Dict[str, List]\n Map between name of the model and list of metrics to visualize.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n\n num_plots = len(xnames)\n legend_lines, legend_labels = [], []\n fig, axs = plt.subplots(num_plots, 1, figsize=(12, 3.5*num_plots))\n if num_plots == 1:\n axs = np.array([axs])\n axs = axs.flatten()\n bbox_extra = []\n if title:\n bbox_extra.append(fig.suptitle(title, fontsize='x-large'))\n if colors is None:\n colors = get_comparison_color_scheme(len(data))\n\n for i, (samplename, samples) in enumerate(data.items()):\n for ax, metric_sample in zip(axs, samples):\n vp = ax.violinplot(metric_sample, positions=[i], vert=False)\n for body in vp['bodies']:\n body.set_color(colors[i])\n vp['cbars'].set_color(colors[i])\n vp['cmins'].set_color(colors[i])\n vp['cmaxes'].set_color(colors[i])\n # dummy plot used to create a legend\n line, = plt.plot([], label=samplename, color=colors[i])\n legend_lines.append(line)\n legend_labels.append(samplename)\n\n for ax, metricname in zip(axs, xnames):\n ax.set_title(metricname)\n ax.tick_params(\n axis='y',\n which='both',\n left=False,\n labelleft=False\n )\n\n bbox_extra.append(fig.legend(\n legend_lines,\n legend_labels,\n loc=\"lower center\",\n fontsize=\"large\",\n ncol=2\n ))\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(\n f\"{outpath}.{ext}\",\n bbox_extra_artists=bbox_extra,\n bbox_inches='tight')\n plt.close()\n\n\ndef draw_radar_chart(\n outpath: Optional[Path],\n title: str,\n data: Dict[str, List],\n labelnames: List,\n figsize: Tuple = (11, 12),\n colors: Optional[List] = None,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws radar plot.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Path where the plot will be saved. 
If None, the plot will be displayed.\n title : str\n Title of the plot.\n data : Dict[str, List]\n Map between name of the model and list of metrics to visualize.\n labelnames : List[str]\n Names of the labels in order.\n figsize : Optional[Tuple]\n The size of the plot.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n n_categories = len(labelnames)\n\n angles = [n / n_categories * 2 * pi for n in range(n_categories)]\n fig, ax = plt.subplots(1, 1, figsize=figsize,\n subplot_kw={'projection': 'polar'})\n ax.set_theta_offset(pi / 2)\n ax.set_theta_direction(-1)\n ax.set_xticks(angles, labelnames)\n ax.set_rlabel_position(1 / (n_categories * 2) * 2 * pi)\n ax.set_yticks([0.25, 0.5, 0.75], [\"25%\", \"50%\", \"75%\"])\n ax.set_ylim((0, 1.0))\n bbox_extra = []\n if title:\n bbox_extra.append(fig.suptitle(title, fontsize='x-large'))\n if colors is None:\n colors = get_comparison_color_scheme(len(data))\n\n angles += [0]\n linestyles = ['-', '--', '-.', ':']\n for i, (color, (samplename, sample)) in enumerate(zip(colors, data.items())): # noqa: E501\n sample += sample[:1]\n ax.plot(\n angles,\n sample,\n label=samplename,\n color=color,\n alpha=0.5,\n linestyle=linestyles[i % len(linestyles)]\n )\n ax.fill(\n angles,\n sample,\n alpha=0.1,\n color=color\n )\n bbox_extra.append(\n plt.legend(fontsize=\"large\", bbox_to_anchor=[0.50, -0.05],\n loc=\"upper center\", ncol=2))\n\n angles = np.array(angles)\n angles[np.cos(angles) <= -1e-5] += pi\n angles = np.rad2deg(angles)\n for i in range(n_categories):\n label = ax.get_xticklabels()[i]\n labelname, angle = labelnames[i], angles[i]\n x, y = label.get_position()\n lab = ax.text(\n x, y, labelname,\n transform=label.get_transform(),\n ha=label.get_ha(),\n va=label.get_va()\n )\n lab.set_rotation(-angle)\n lab.set_fontsize('large')\n bbox_extra.append(lab)\n ax.set_xticklabels([])\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(\n f\"{outpath}.{ext}\",\n bbox_extra_artists=bbox_extra,\n bbox_inches='tight')\n plt.close()\n\n\ndef draw_bubble_plot(\n outpath: Optional[Path],\n title: str,\n xdata: List[float],\n xlabel: str,\n ydata: List[float],\n ylabel: str,\n bubblesize: List[float],\n bubblename: List[str],\n figsize: Tuple = (11, 10),\n colors: Optional[List] = None,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws bubble plot.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Path where the plot will be saved. 
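An illustrative sketch (made-up metrics, not from the dataset) of the polygon-closing trick draw_radar_chart relies on: the first angle and the first value are appended at the end so the plotted outline returns to its starting point.

from math import pi
import matplotlib.pyplot as plt

labels = ['accuracy', 'latency', 'size']  # hypothetical metric names
values = [0.9, 0.6, 0.75]
n = len(labels)

angles = [i / n * 2 * pi for i in range(n)]
angles += angles[:1]  # close the loop: repeat the first angle...
values += values[:1]  # ...and the first value

fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(angles, values)
ax.fill(angles, values, alpha=0.1)
ax.set_xticks(angles[:-1], labels)
plt.show()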
If None, the plot will be displayed.\n title : str\n Title of the plot.\n xdata : List[float]\n The values for X dimension.\n xlabel : str\n Name of the X axis.\n ydata : List[float]\n The values for Y dimension.\n ylabel : str\n Name of the Y axis.\n bubblesize : List[float]\n Sizes of subsequent bubbles.\n bubblename : List[str]\n Labels for consecutive bubbles.\n figsize : Tuple\n The size of the plot.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n if colors is None:\n colors = get_comparison_color_scheme(len(xdata))\n markers = []\n bbox_extra = []\n maxsize = max(bubblesize)\n minsize = min(bubblesize)\n for x, y, bsize, label, c in zip(xdata, ydata, bubblesize,\n bubblename, colors):\n size = (bsize - minsize) / (maxsize - minsize + 1) * 100\n marker = ax.scatter(x, y, s=(15 + size**1.75), label=label, color=c,\n alpha=0.5, edgecolors='black')\n markers.append(marker)\n legend = ax.legend(\n loc='upper center',\n handles=markers,\n bbox_to_anchor=[0.5, -0.08],\n ncol=2\n )\n for handler in legend.legendHandles:\n handler.set_sizes([40.0])\n ax.add_artist(legend)\n bbox_extra.append(legend)\n\n bubblemarkers, bubblelabels = [], []\n for i in [0, 25, 50, 75, 100]:\n bubblemarker = ax.scatter(\n [], [], s=(15 + i**1.75), color='None',\n edgecolors=mpl.rcParams['legend.labelcolor'])\n bubblemarkers.append(bubblemarker)\n bubblelabels.append(f\"{(minsize + i / 100 * (maxsize - minsize)) / 1024 ** 2:.4f} MB\") # noqa: E501\n bubblelegend = ax.legend(\n bubblemarkers,\n bubblelabels,\n handletextpad=3,\n labelspacing=4.5,\n borderpad=3,\n title=\"Model size\",\n frameon=False,\n bbox_to_anchor=[1.05, 0.5],\n loc='center left'\n )\n bubblelegend._legend_box.sep = 20\n ax.add_artist(bubblelegend)\n bbox_extra.append(bubblelegend)\n\n box = ax.get_position()\n ax.set_position(\n [box.x0, box.y0 + 0.05, box.width * 0.85, box.height - 0.05])\n\n if title:\n bbox_extra.append(fig.suptitle(title))\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(\n f\"{outpath}.{ext}\",\n bbox_extra_artists=bbox_extra,\n bbox_inches='tight'\n )\n plt.close()\n\n\ndef _value_to_nondiagonal_color(\n value: Union[float, np.ndarray], cmap: Optional[Any]) -> np.ndarray:\n \"\"\"\n Calculates colors of non-diagonal cells in confusion matrix.\n\n Parameters\n ----------\n value : float | np.ndarray\n Values from confusion matrix.\n cmap : Optional[Any]\n Color map associating values with colors.\n\n Returns\n -------\n np.ndarray :\n Calculated colors.\n \"\"\"\n color = np.asarray(cmap(1 - np.log2(99*value + 1) / np.log2(100)))\n color[..., 3] = np.log2(99*value + 1) / np.log2(100)\n return color\n\n\ndef draw_confusion_matrix(\n confusion_matrix: np.ndarray,\n outpath: Optional[Path],\n title: str,\n class_names: List[str],\n cmap: Optional[Any] = None,\n figsize: Optional[Tuple] = None,\n dpi: Optional[int] = None,\n backend: str = 'matplotlib',\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Creates a confusion matrix plot.\n\n Parameters\n ----------\n confusion_matrix : ArrayLike\n Square numpy matrix containing the confusion matrix.\n 0-th axis stands for ground truth, 1-st axis stands for predictions.\n outpath : Optional[Path]\n Path where the plot will be saved. 
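A quick numeric check (plain numpy, no other assumptions) of the mapping used by _value_to_nondiagonal_color above: log2(99*v + 1) / log2(100) maps [0, 1] onto [0, 1] while boosting small values, so rare off-diagonal confusions stay visible.

import numpy as np

v = np.array([0.0, 0.01, 0.1, 0.5, 1.0])
alpha = np.log2(99 * v + 1) / np.log2(100)
print(np.round(alpha, 3))  # -> [0.    0.149 0.519 0.852 1.   ]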
If None, the plot will be displayed.\n    title : str\n        Title of the plot.\n    class_names : List[str]\n        List of the class names.\n    cmap : Optional[Any]\n        Color map for the plot.\n    figsize : Optional[Tuple]\n        The size of the plot.\n    dpi : Optional[int]\n        The dpi of the plot.\n    backend : str\n        Which library should be used to generate plot - bokeh or matplotlib.\n    outext : Iterable[str]\n        List with files extensions, should be supported by chosen backend.\n    \"\"\"\n    available_backends = ('matplotlib', 'bokeh')\n    assert backend in available_backends, (\n        f\"Backend has to be one of: {' '.join(available_backends)}\")\n    if cmap is None:\n        if len(class_names) < 50:\n            cmap = plt.get_cmap('BuPu')\n        else:\n            cmap = plt.get_cmap('nipy_spectral_r')\n\n    confusion_matrix = np.array(confusion_matrix, dtype=np.float32, copy=True)\n\n    # compute sensitivity\n    correctactual = confusion_matrix.diagonal() / confusion_matrix.sum(axis=1)\n    correctactual = correctactual.reshape(1, len(class_names))\n\n    # compute precision\n    correctpredicted = \\\n        confusion_matrix.diagonal() / confusion_matrix.sum(axis=0)\n    correctpredicted = correctpredicted.reshape(len(class_names), 1)\n    # change nan to 0\n    correctpredicted[np.isnan(correctpredicted)] = 0.0\n\n    # compute overall accuracy\n    accuracy = np.trace(confusion_matrix) / np.sum(confusion_matrix)\n\n    # normalize confusion matrix\n    confusion_matrix /= confusion_matrix.sum(axis=0)\n    confusion_matrix = confusion_matrix.transpose()\n    # change nan to 0\n    confusion_matrix[np.isnan(confusion_matrix)] = 0.0\n\n    # Calculate colors for confusion matrix\n    colors = np.zeros(confusion_matrix.shape + (4,))\n    for i in range(confusion_matrix.shape[0]):\n        for j in range(confusion_matrix.shape[1]):\n            if i == j:\n                colors[i, j] = cmap(confusion_matrix[i, j])\n            else:\n                colors[i, j] = _value_to_nondiagonal_color(\n                    confusion_matrix[i, j], cmap\n                )\n\n    if backend == \"bokeh\" or \"html\" in outext:\n        draw_confusion_matrix_bokeh(\n            output_path=outpath,\n            class_names=class_names,\n            confusion_matrix=confusion_matrix,\n            confusion_matrix_colors=colors,\n            sensitivity=correctactual,\n            precision=correctpredicted,\n            accuracy=accuracy,\n            width=figsize[0] if figsize else None,\n            height=figsize[1] if figsize else None,\n            title=title,\n            cmap=cmap,\n            formats=outext if backend == 'bokeh' else ['html'])\n        outext = set(outext)\n        outext.discard('html')\n    draw_confusion_matrix_matplotlib(\n        output_path=outpath,\n        class_names=class_names,\n        confusion_matrix=confusion_matrix,\n        confusion_matrix_colors=colors,\n        sensitivity=correctactual,\n        precision=correctpredicted,\n        accuracy=accuracy,\n        figsize=figsize,\n        dpi=dpi,\n        title=title,\n        cmap=cmap,\n        outext=outext)\n\n\ndef draw_confusion_matrix_matplotlib(\n        confusion_matrix: np.ndarray,\n        confusion_matrix_colors: np.ndarray,\n        sensitivity: np.ndarray,\n        precision: np.ndarray,\n        accuracy: np.ndarray,\n        class_names: List[str],\n        figsize: Optional[Tuple[int]] = None,\n        dpi: Optional[int] = None,\n        output_path: Optional[str] = None,\n        title: Optional[str] = None,\n        cmap: Optional[Any] = None,\n        outext: Iterable[str] = ['png'],\n):\n    \"\"\"\n    Function drawing confusion matrix with matplotlib backend.\n\n    Parameters\n    ----------\n    confusion_matrix : np.ndarray\n        Values of confusion matrix, from 0 to 1.\n    confusion_matrix_colors : np.ndarray\n        Colors calculated based on the confusion matrix.\n    sensitivity : np.ndarray\n        Ordered values with sensitivity.\n    precision : np.ndarray\n        Ordered values with precision.\n    accuracy : np.ndarray | float\n        Overall accuracy.\n    
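A small worked example (made-up 2x2 matrix) of the statistics computed above: with ground truth on axis 0 and predictions on axis 1, sensitivity is the diagonal over row sums, precision is the diagonal over column sums, and accuracy is the trace over the total.

import numpy as np

cm = np.array([[8., 2.],
               [1., 9.]])  # rows: actual class, columns: predicted class

sensitivity = cm.diagonal() / cm.sum(axis=1)  # [0.8, 0.9]
precision = cm.diagonal() / cm.sum(axis=0)    # [0.889, 0.818]
accuracy = np.trace(cm) / cm.sum()            # 0.85
print(sensitivity, precision, accuracy)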
class_names : List[str]\n List with names of classes.\n figsize : Optional[Tuple[int]]\n Tuple with width and height of figure.\n dpi : Optional[int]\n DPI of the output plot.\n output_path : str | None\n Path to the file, where plot will be saved to. If not specified,\n result won't be saved.\n title : str | None\n Title of the plot.\n cmap : Optional[Any]\n Color map which will be used for drawing plot. If not specified,\n 'RdYlGn' color map from matplotlib will be chosen.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n\n if figsize is None:\n figsize = [15, 15]\n\n if dpi is None:\n dpi = 216\n\n base_font_size = 18\n percent_font_size = min(12, 12*20/confusion_matrix.shape[0])\n\n # create axes\n fig = plt.figure(figsize=figsize, dpi=dpi)\n vectors = 1\n if len(class_names) >= 50:\n vectors = 0\n gs = gridspec.GridSpec(\n len(class_names) + vectors, len(class_names) + vectors\n )\n axConfMatrix = fig.add_subplot(gs[0:len(class_names), 0:len(class_names)])\n plots = [axConfMatrix]\n if len(class_names) < 50:\n axPredicted = fig.add_subplot(\n gs[len(class_names), 0:len(class_names)],\n sharex=axConfMatrix\n )\n axActual = fig.add_subplot(\n gs[0:len(class_names), len(class_names)],\n sharey=axConfMatrix\n )\n axTotal = fig.add_subplot(\n gs[len(class_names), len(class_names)],\n sharex=axActual,\n sharey=axPredicted\n )\n plots = [axPredicted, axConfMatrix, axActual, axTotal]\n # define ticks for classes\n ticks = np.arange(len(class_names))\n\n # configure and draw confusion matrix\n if len(class_names) < 50:\n axConfMatrix.set_xticks(ticks)\n axConfMatrix.set_xticklabels(\n class_names,\n fontsize=base_font_size,\n rotation=90,\n fontweight='bold'\n )\n axConfMatrix.set_yticks(ticks)\n axConfMatrix.set_yticklabels(\n class_names, fontsize=base_font_size, fontweight='bold'\n )\n axConfMatrix.xaxis.set_ticks_position('top')\n else:\n # plt.setp(axConfMatrix.get_yticklabels(), visible=False)\n # plt.setp(axConfMatrix.get_xticklabels(), visible=False)\n axConfMatrix.tick_params(\n top=False,\n bottom=False,\n left=False,\n right=False,\n labelleft=False,\n labelbottom=False\n )\n axConfMatrix.xaxis.set_label_position('top')\n axConfMatrix.set_xlabel(\n 'Actual class', fontsize=base_font_size*1.2, fontweight='bold')\n axConfMatrix.set_ylabel(\n 'Predicted class', fontsize=base_font_size*1.2, fontweight='bold')\n img = axConfMatrix.imshow(\n confusion_matrix_colors,\n # norm=colors.PowerNorm(0.5),\n interpolation='nearest',\n cmap=cmap,\n aspect='auto',\n vmin=0.0,\n vmax=1.0\n )\n\n if len(class_names) < 50:\n # add percentages for confusion matrix\n for i, j in itertools.product(\n range(len(class_names)),\n range(len(class_names))):\n txt = axConfMatrix.text(\n j, i,\n ('100' if confusion_matrix[i, j] == 1.0\n else f'{100.0 * confusion_matrix[i,j]:3.1f}'),\n ha='center',\n va='center',\n color='black',\n fontsize=percent_font_size)\n txt.set_path_effects([\n patheffects.withStroke(linewidth=5, foreground='w')\n ])\n\n # configure and draw sensitivity percentages\n axPredicted.set_xticks(ticks)\n axPredicted.set_yticks([0])\n axPredicted.set_xlabel(\n 'Sensitivity', fontsize=base_font_size, fontweight='bold')\n axPredicted.imshow(\n sensitivity,\n interpolation='nearest',\n cmap='RdYlGn' if cmap is None else cmap,\n aspect='auto',\n vmin=0.0,\n vmax=1.0\n )\n for i in range(len(class_names)):\n txt = axPredicted.text(\n i, 0,\n ('100' if sensitivity[0, i] == 1.0\n else f'{100.0 * sensitivity[0, i]:3.1f}'),\n ha='center',\n 
va='center',\n color='black',\n fontsize=percent_font_size)\n txt.set_path_effects([\n patheffects.withStroke(linewidth=5, foreground='w')\n ])\n\n # configure and draw precision percentages\n axActual.set_xticks([0])\n axActual.set_yticks(ticks)\n axActual.set_ylabel(\n 'Precision', fontsize=base_font_size, fontweight='bold')\n axActual.yaxis.set_label_position('right')\n axActual.imshow(\n precision,\n interpolation='nearest',\n cmap='RdYlGn' if cmap is None else cmap,\n aspect='auto',\n vmin=0.0,\n vmax=1.0\n )\n for i in range(len(class_names)):\n txt = axActual.text(\n 0, i,\n ('100' if precision[i, 0] == 1.0\n else f'{100.0 * precision[i, 0]:3.1f}'),\n ha='center',\n va='center',\n color='black',\n fontsize=percent_font_size)\n txt.set_path_effects([\n patheffects.withStroke(linewidth=5, foreground='w')\n ])\n\n # configure and draw total accuracy\n axTotal.set_xticks([0])\n axTotal.set_yticks([0])\n axTotal.set_xlabel(\n 'Accuracy', fontsize=base_font_size, fontweight='bold')\n axTotal.imshow(\n np.array([[accuracy]]),\n interpolation='nearest',\n cmap='RdYlGn' if cmap is None else cmap,\n aspect='auto',\n vmin=0.0,\n vmax=1.0\n )\n txt = axTotal.text(\n 0, 0,\n f'{100 * accuracy:3.1f}',\n ha='center',\n va='center',\n color='black',\n fontsize=percent_font_size\n )\n txt.set_path_effects([\n patheffects.withStroke(linewidth=5, foreground='w')\n ])\n\n # disable axes for other matrices than confusion matrix\n for a in (axPredicted, axActual, axTotal):\n plt.setp(a.get_yticklabels(), visible=False)\n plt.setp(a.get_xticklabels(), visible=False)\n\n # draw colorbar for confusion matrix\n cbar = fig.colorbar(\n img,\n ax=plots,\n shrink=0.5,\n ticks=np.linspace(0.0, 1.0, 11),\n pad=0.1\n )\n cbar.ax.set_yticks(np.linspace(0.0, 1.0, 11),\n labels=list(range(0, 101, 10)))\n for t in cbar.ax.get_yticklabels():\n t.set_fontsize('medium')\n suptitlehandle = None\n if title:\n suptitlehandle = fig.suptitle(\n f'{title} (ACC={accuracy:.5f})',\n fontsize=base_font_size*1.4\n )\n if output_path is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(\n f\"{output_path}.{ext}\",\n dpi=dpi,\n bbox_inches='tight',\n bbox_extra_artists=[\n suptitlehandle] if suptitlehandle else None,\n pad_inches=0.1,\n )\n plt.close()\n\n\ndef _create_custom_hover_template(\n names: List[str],\n values: List[str] = None,\n units: List[str] = None\n) -> str:\n \"\"\"\n Function creating custom template for tooltip displaying when hover\n event occurs. This tooltip is part of bokeh features.\n\n Parameters\n ----------\n names : List[str]\n List with names, displayed before values.\n values : List[str]\n List with names of fields (in source object) containing values.\n units : List[str]\n List with units, displayed after values.\n\n Returns\n -------\n str :\n HTML template for tooltip.\n \"\"\"\n if values is None:\n values = names\n if units is None:\n units = ['' for _ in names]\n else:\n units = ['' if unit is None else unit for unit in units]\n template = \"\"\"\n \n %s\n @{%s}%s\n \n \"\"\"\n result = \"\"\n for name, value, unit in zip(names, values, units):\n result += template % (name, value, unit)\n result += \"
    \"\n return result\n\n\ndef draw_confusion_matrix_bokeh(\n confusion_matrix: np.ndarray,\n confusion_matrix_colors: np.ndarray,\n sensitivity: np.ndarray,\n precision: np.ndarray,\n accuracy: np.ndarray,\n class_names: List[str],\n width: Optional[int] = None,\n height: Optional[int] = None,\n output_path: Optional[str] = None,\n title: Optional[str] = None,\n cmap: Optional[Any] = None,\n formats: Tuple[str] = ('html',),\n):\n \"\"\"\n Function drawing interactive confusion matrix with bokeh backend.\n\n Parameters\n ----------\n confusion_matrix : np.ndarray\n Values of confusion matrix, from 0 to 1.\n confusion_matrix_colors : np.ndarray\n Colors for calculated based on confusion matrix.\n sensitivity : np.ndarray\n Ordered values with sensitivity.\n precision : np.ndarray\n Ordered values with precision.\n accuracy : np.ndarray | float\n Overall accuracy.\n class_names : List[str]\n List with names of classes.\n width : int\n Width of the generated plot.\n height : int\n Height of the generated plot.\n output_path : str | None\n Path to the file, where plot will be saved to. If not specified,\n result won't be saved.\n title : str | None\n Title of the plot.\n cmap : Optional[Any]\n Color map which will be used for drawing plot. If not specified,\n 'RdYlGn' color map from matplotlib will be chosen.\n formats : Tuple[str]\n Tuple with formats names.\n \"\"\"\n from bokeh.plotting import figure, output_file, save, row, show\n from bokeh.models import ColumnDataSource, HoverTool, FactorRange, Range1d\n from bokeh.layouts import Spacer, gridplot\n from bokeh.io import export_png, export_svg\n\n if cmap is None:\n cmap = plt.get_cmap('RdYlGn')\n\n if width is None:\n width = 900\n if height is None:\n height = 778\n\n # === Confusion Matrix ===\n\n # Calculate confusion matrix sizes\n cm_width = int(width / (1 + 1/15 + 1/13 + 1/11))\n cm_height = int(height / (1 + 1/15))\n\n # Prepare figure\n confusion_matrix_fig = figure(\n title=None, x_range=FactorRange(\n factors=class_names, bounds=(0, len(class_names))),\n y_range=FactorRange(\n factors=class_names[::-1], bounds=(0, len(class_names))),\n tools=\"pan,box_zoom,wheel_zoom,reset,save\",\n toolbar_location=None,\n x_axis_location=\"above\",\n width=cm_width,\n height=cm_height,\n output_backend='webgl',\n )\n\n # Preprocess data\n confusion_matrix_colors = np.rot90(\n confusion_matrix_colors, k=-1).reshape((-1, 4))\n coords = np.array(list(itertools.product(\n class_names, class_names)), dtype=str)\n coords[:, 1] = coords[::-1, 1]\n percentage = np.rot90(confusion_matrix, k=-1).flatten() * 100\n source = ColumnDataSource(data={\n 'Actual class': coords[:, 0],\n 'Predicted class': coords[:, 1],\n 'color': confusion_matrix_colors,\n 'Percentage': percentage,\n })\n\n # Draw confusion matrix\n confusion_matrix_fig.rect(\n x='Actual class', y='Predicted class',\n color='color',\n line_color=None,\n width=1, height=1,\n source=source,)\n\n # Set labels and styles\n confusion_matrix_fig.xaxis.axis_label = \"Actual class\"\n confusion_matrix_fig.yaxis.axis_label = \"Predicted class\"\n if len(class_names) < 50:\n confusion_matrix_fig.xaxis.major_label_orientation = 'vertical'\n else:\n confusion_matrix_fig.xaxis.major_label_text_alpha = 0.0\n confusion_matrix_fig.yaxis.major_label_text_alpha = 0.0\n confusion_matrix_fig.xaxis.major_tick_line_alpha = 0.0\n confusion_matrix_fig.yaxis.major_tick_line_alpha = 0.0\n confusion_matrix_fig.xaxis.axis_line_alpha = 0.0\n confusion_matrix_fig.yaxis.axis_line_alpha = 0.0\n 
confusion_matrix_fig.grid.visible = False\n\n    # Set custom tooltips\n    confusion_matrix_fig.add_tools(HoverTool(\n        tooltips=_create_custom_hover_template(\n            [\"Actual class\", \"Predicted class\", \"Percentage\"],\n            units=[None, None, '%']\n        )\n    ))\n\n    # === Sensitivity ===\n\n    # Prepare figure\n    sensitivity_fig = figure(\n        title=None,\n        x_range=confusion_matrix_fig.x_range,\n        y_range=FactorRange(factors=['Sensitivity'], bounds=(0, 1)),\n        width=confusion_matrix_fig.width,\n        height=confusion_matrix_fig.height // 15,\n        toolbar_location=None,\n        output_backend='webgl',\n    )\n\n    # Preprocess data\n    cc = cmap(sensitivity).reshape((-1, 4))\n    sensitivity_source = ColumnDataSource(data={\n        'y': ['Sensitivity' for _ in class_names],\n        'Class': class_names,\n        'color': cc,\n        \"Sensitivity\": sensitivity.flatten() * 100,\n    })\n\n    # Draw sensitivity\n    sensitivity_fig.rect(\n        x='Class', y='y', color='color',\n        source=sensitivity_source,\n        line_color='black',\n        line_width=0.1,\n        width=1,\n        height=1,\n    )\n\n    # Add label and custom tooltip\n    sensitivity_fig.xaxis.axis_label = \"Sensitivity\"\n    sensitivity_fig.add_tools(HoverTool(\n        tooltips=_create_custom_hover_template(\n            [\"Class\", \"Sensitivity\"],\n            units=[None, '%']\n        ),\n        attachment='above',\n    ))\n\n    # === Precision ===\n\n    # Prepare figure\n    precision_fig = figure(\n        title=None,\n        x_range=FactorRange(factors=['Precision'], bounds=(0, 1)),\n        y_range=confusion_matrix_fig.y_range,\n        width=confusion_matrix_fig.width // 15,\n        height=confusion_matrix_fig.height,\n        toolbar_location=None,\n        y_axis_location='right',\n        output_backend='webgl',\n    )\n\n    # Preprocess data\n    cc2 = cmap(precision).reshape((-1, 4))\n    precision_source = ColumnDataSource(data={\n        'x': ['Precision' for _ in class_names],\n        'Class': class_names,\n        'color': cc2,\n        'Precision': precision.flatten() * 100,\n    })\n\n    # Draw precision\n    precision_fig.rect(\n        x='x',\n        y='Class',\n        color='color',\n        source=precision_source,\n        height=1,\n        width=1,\n        line_color='black',\n        line_width=0.1,\n    )\n\n    # Add label and custom tooltip\n    precision_fig.yaxis.axis_label = \"Precision\"\n    precision_fig.add_tools(HoverTool(\n        tooltips=_create_custom_hover_template(\n            [\"Class\", \"Precision\"],\n            units=[None, '%']\n        ),\n        attachment='left',\n    ))\n\n    # === Accuracy ===\n\n    # Prepare figure\n    accuracy_fig = figure(\n        title=None,\n        x_range=FactorRange(factors=['x'], bounds=(0, 1)),\n        y_range=FactorRange(factors=['y'], bounds=(0, 1)),\n        width=precision_fig.width,\n        height=sensitivity_fig.height,\n        toolbar_location=None,\n        output_backend='webgl',\n    )\n\n    # Preprocess data\n    c = cmap(accuracy)\n    color_str = (f\"#{int(255 * c[0]):02X}{int(255 * c[1]):02X}\"\n                 f\"{int(255 * c[2]):02X}\")\n    accuracy_source = ColumnDataSource(data={\n        'x': ['x'], 'y': ['y'],\n        'Accuracy': [float(accuracy) * 100],\n    })\n\n    # Draw accuracy\n    accuracy_fig.rect(\n        x='x',\n        y='y',\n        color=color_str,\n        source=accuracy_source,\n        width=1,\n        height=1,\n        line_color='black',\n        line_width=0.1,\n    )\n\n    # Add label and custom tooltip\n    accuracy_fig.xaxis.axis_label = \"ACC\"\n    accuracy_fig.add_tools(HoverTool(\n        tooltips=_create_custom_hover_template(\n            ['Accuracy'], units=['%']\n        ),\n        attachment='above',\n    ))\n\n    # Set style for Sensitivity, Precision and Accuracy\n    for fig in (sensitivity_fig, precision_fig, accuracy_fig):\n        fig.yaxis.major_label_text_alpha = 0.0\n        fig.xaxis.major_label_text_alpha = 0.0\n        fig.yaxis.major_tick_line_alpha = 0.0\n        fig.xaxis.major_tick_line_alpha = 0.0\n        fig.xaxis.axis_line_alpha = 0.0\n        fig.yaxis.axis_line_alpha = 0.0\n    
fig.grid.visible = False\n\n # === Scale ===\n\n # Prepare figure\n scale_fig = figure(\n title=None,\n x_range=['color'],\n y_range=Range1d(0.0, 100.0),\n width=confusion_matrix_fig.width // 11,\n height=height // 2,\n tools=\"\",\n toolbar_location=None,\n x_axis_location='above',\n y_axis_location='right',\n margin=(height // 4, 0, height // 4, 0),\n output_backend='webgl',\n )\n # Draw scale\n scale_fig.hbar(\n y=np.linspace(0.0, 100.0, 256),\n left=[0.0] * 256,\n right=[1.0] * 256,\n color=cmap(np.linspace(0.0, 1.0, 256))\n )\n\n # Set styles for scale\n scale_fig.xaxis.major_tick_line_alpha = 0.0\n scale_fig.yaxis.major_tick_line_alpha = 0.0\n scale_fig.yaxis.minor_tick_line_alpha = 0.0\n scale_fig.xaxis.axis_line_alpha = 0.0\n scale_fig.yaxis.axis_line_alpha = 0.0\n scale_fig.xaxis.major_label_text_alpha = 0.0\n\n # === Saving to file ===\n\n grid_fig = gridplot(\n [\n [confusion_matrix_fig, precision_fig, ],\n [sensitivity_fig, accuracy_fig, ]\n ],\n merge_tools=True,\n toolbar_location='above',\n toolbar_options={'logo': None},\n )\n plot_with_scale = row(\n grid_fig,\n Spacer(width=confusion_matrix_fig.width // 13),\n scale_fig,\n )\n if output_path is None:\n show(plot_with_scale)\n return\n\n if 'html' in formats:\n output_file(f\"{output_path}.html\", mode='inline')\n save(plot_with_scale)\n\n grid_fig = gridplot(\n [\n [confusion_matrix_fig, precision_fig, ],\n [sensitivity_fig, accuracy_fig, ]\n ]\n )\n plot_with_scale = row(\n grid_fig,\n Spacer(width=confusion_matrix_fig.width // 13),\n scale_fig,\n )\n if 'png' in formats:\n export_png(plot_with_scale, f\"{output_path}.png\")\n if 'svg' in formats:\n export_svg(plot_with_scale, f\"{output_path}.svg\")\n\n\ndef recall_precision_curves(\n outpath: Optional[Path],\n title: str,\n lines: List[Tuple[List, List]],\n class_names: List[str],\n figsize: Tuple = (15, 15),\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws Recall-Precision curves for AP measurements.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Output path for the plot image. 
If None, the plot will be displayed.\n title : str\n Title of the plot.\n lines : List[List[List]]\n Per-class list of tuples with list of recall values and precision\n values.\n class_names : List[str]\n List of the class names.\n figsize : Tuple\n The size of the figure.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n colormap = plt.cm.nipy_spectral\n colors = [colormap(i) for i in np.linspace(0, 1, len(class_names))]\n linestyles = ['-', '--', '-.', ':']\n for i, (cls, line) in enumerate(zip(class_names, lines)):\n ax.plot(\n line[0], line[1],\n label=cls, c=colors[i], linewidth=3,\n linestyle=linestyles[i % len(linestyles)],\n alpha=0.8\n )\n legendhandle = ax.legend(\n bbox_to_anchor=(0.5, -0.3),\n loc='lower center',\n ncol=10\n )\n ax.set_xlabel('recall')\n ax.set_ylabel('precision')\n ax.set_xlim((0.0, 1.01))\n ax.set_ylim((0.0, 1.01))\n ax.grid('on')\n ax.set_xticks(np.arange(0, 1.1, 0.1))\n ax.set_yticks(np.arange(0, 1.1, 0.1))\n ax.set_title(title)\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n fig.savefig(\n f\"{outpath}.{ext}\",\n bbox_extra_artists=[legendhandle],\n bbox_inches='tight'\n )\n plt.close()\n\n\ndef true_positive_iou_histogram(\n outpath: Optional[Path],\n title: str,\n lines: List[float],\n class_names: List[str],\n figsize: Tuple = (10, 25),\n colors: Optional[List] = None,\n color_offset: int = 0,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws per-class True Positive IoU precision plot.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Output path for the plot image. If None, the plot will be displayed.\n title : str\n Title of the plot.\n lines : List[float]\n Per-class list of floats with IoU values.\n class_names : List[str]\n List of the class names.\n figsize : Tuple\n The size of the figure.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n color_offset : int\n How many colors from default color list should be skipped.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n if colors is None:\n color = 'purple'\n else:\n color = colors[color_offset]\n plt.figure(figsize=figsize)\n plt.barh(\n class_names,\n np.array(lines),\n orientation='horizontal',\n color=color\n )\n plt.ylim((-1, len(class_names)))\n plt.yticks(np.arange(0, len(class_names)))\n plt.xticks(np.arange(0, 1.1, 0.1))\n plt.xlabel('IoU precision')\n plt.ylabel('classes')\n if title:\n plt.title(f'{title}')\n plt.tight_layout()\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(f\"{outpath}.{ext}\")\n plt.close()\n\n\ndef true_positives_per_iou_range_histogram(\n outpath: Optional[Path],\n title: str,\n lines: List[float],\n range_fraction: float = 0.05,\n figsize: Tuple = (10, 10),\n colors: Optional[List] = None,\n color_offset: int = 0,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws histogram of True Positive IoU values.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Output path for the plot image. 
If None, the plot will be displayed.\n title : str\n Title of the plot.\n lines : List[float]\n All True Positive IoU values.\n range_fraction : float\n Fraction by which the range should be divided (1/number_of_segments).\n figsize : Tuple\n The size of the figure.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n color_offset : int\n How many colors from default color list should be skipped.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n if colors is None:\n color = 'purple'\n else:\n color = colors[color_offset]\n lower_bound = floor(10*min(lines)) / 10\n x_range = np.arange(lower_bound, 1.01, (1 - lower_bound) * range_fraction)\n plt.figure(figsize=figsize)\n plt.hist(\n lines,\n x_range,\n color=color\n )\n plt.xlabel('IoU ranges')\n plt.xticks(x_range, rotation=45)\n plt.ylabel('Number of masks in IoU range')\n if title:\n plt.title(f'{title}')\n plt.tight_layout()\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(f\"{outpath}.{ext}\")\n plt.close()\n\n\ndef recall_precision_gradients(\n outpath: Optional[Path],\n title: str,\n lines: List[Tuple[List, List]],\n class_names: List[str],\n aps: List[float],\n map: float,\n figsize: Tuple = (10, 25),\n cmap: Optional[Any] = None,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws per-class gradients of precision dependent to recall.\n\n Provides per-class AP and mAP values.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Output path for the plot image. If None, the plot will be displayed.\n title : str\n Title of the plot.\n lines : List[Tuple[List, List]]\n Per-class list of tuples with list of recall values and precision\n values.\n class_names : List[str]\n List of the class names.\n aps : List[float]\n Per-class AP values.\n map : float\n The mAP value.\n figsize : Tuple\n The size of the figure.\n cmap : Optional[Any]\n Color map for the plot.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n \"\"\"\n if cmap is None:\n cmap = plt.get_cmap('RdYlGn')\n plt.figure(figsize=figsize)\n clsticks = []\n for i, (cls, line, averageprecision) \\\n in enumerate(zip(class_names, lines, aps)):\n clscoords = np.ones(len(line[0])) * i\n points = np.array([line[0], clscoords]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(\n segments,\n cmap=cmap,\n norm=plt.Normalize(0, 1.0)\n )\n lc.set_array(line[1])\n lc.set_linewidth(10)\n plt.gca().add_collection(lc)\n clsticks.append(f'{cls} (AP={averageprecision:.4f})')\n plt.ylim((-1, len(class_names)))\n plt.yticks(np.arange(0, len(clsticks)), labels=clsticks)\n plt.xticks(np.arange(0, 1.1, 0.1))\n plt.xlabel('recall')\n plt.ylabel('classes')\n plt.colorbar(\n plt.cm.ScalarMappable(norm=plt.Normalize(0, 1.0), cmap=cmap),\n orientation='horizontal',\n label='precision',\n fraction=0.1,\n pad=0.05\n )\n if title:\n plt.title(f'{title} (mAP={map})')\n plt.tight_layout()\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(f\"{outpath}.{ext}\")\n plt.close()\n\n\ndef draw_plot(\n outpath: Optional[Path],\n title: str,\n xtitle: str,\n xunit: str,\n ytitle: str,\n yunit: str,\n lines: List[Tuple[List, List]],\n linelabels: Optional[List[str]] = None,\n figsize: Tuple = (15, 15),\n colors: Optional[List] = None,\n color_offset: int = 0,\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws plot.\n\n Parameters\n ----------\n outpath : Optional[Path]\n 
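A standalone sketch (illustrative values) of the LineCollection technique recall_precision_gradients uses: a horizontal line is cut into segments, and each segment is colored through a colormap norm by its precision value.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

recall = np.linspace(0, 1, 50)
precision = np.linspace(1, 0.4, 50)  # hypothetical decreasing precision

# build (start, end) point pairs for every consecutive segment of the line
points = np.array([recall, np.zeros_like(recall)]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)

lc = LineCollection(segments, cmap='RdYlGn', norm=plt.Normalize(0, 1.0))
lc.set_array(precision[:-1])  # one color-driving value per segment
lc.set_linewidth(10)
ax = plt.gca()
ax.add_collection(lc)
ax.set_xlim(0, 1)
ax.set_ylim(-1, 1)
plt.show()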
Output path for the plot image. If None, the plot will be displayed.\n    title : str\n        Title of the plot.\n    xtitle : str\n        Name of the X axis.\n    xunit : str\n        Unit for the X axis.\n    ytitle : str\n        Name of the Y axis.\n    yunit : str\n        Unit for the Y axis.\n    lines : List[Tuple[List, List]]\n        List of tuples, each containing the X and Y value lists for one\n        line to draw.\n    linelabels : Optional[List[str]]\n        Optional list of labels naming each line.\n    figsize : Tuple\n        The size of the figure.\n    colors : Optional[List]\n        List with colors which should be used to draw plots.\n    color_offset : int\n        How many colors from default color list should be skipped.\n    outext : Iterable[str]\n        List with files extensions, should be supported by matplotlib.\n    \"\"\"\n    plt.figure(figsize=figsize)\n\n    bbox_extra = []\n    if colors is None:\n        colors = get_comparison_color_scheme(len(lines))\n    else:\n        colors = colors[color_offset:]\n    for color, line in zip(colors, lines):\n        plt.plot(line[0], line[1], c=color, linewidth=3)\n    xlabel = xtitle\n    if xunit is not None:\n        xlabel += f' [{xunit}]'\n    ylabel = ytitle\n    if yunit is not None:\n        ylabel += f' [{yunit}]'\n    plt.xlabel(xlabel, fontsize='large')\n    plt.ylabel(ylabel, fontsize='large')\n    plt.grid()\n    if title:\n        bbox_extra.append(plt.title(title))\n    if linelabels is not None:\n        bbox_extra.append(\n            plt.legend(\n                linelabels,\n                loc='upper center',\n                bbox_to_anchor=[0.5, -0.06],\n                ncols=2)\n        )\n\n    if outpath is None:\n        plt.show()\n    else:\n        for ext in outext:\n            plt.savefig(\n                f\"{outpath}.{ext}\",\n                bbox_extra_artists=bbox_extra,\n                bbox_inches='tight')\n        plt.close()\n\n\ndef draw_barplot(\n        outpath: Optional[Path],\n        title: str,\n        xtitle: str,\n        xunit: str,\n        ytitle: str,\n        yunit: str,\n        xdata: List[Any],\n        ydata: Dict[str, List[Union[int, float]]],\n        figsize: Optional[Tuple] = None,\n        colors: Optional[List] = None,\n        backend: str = 'matplotlib',\n        outext: Iterable[str] = ['png'],\n        max_bars_matplotlib: Optional[int] = None\n):\n    \"\"\"\n    Draws barplot.\n\n    Parameters\n    ----------\n    outpath : Optional[Path]\n        Output path for the plot image. 
If None, the plot will be displayed.\n title : str\n Title of the plot.\n xtitle : str\n Name of the X axis.\n xunit : str\n Unit for the X axis.\n ytitle : str\n Name of the Y axis.\n yunit : str\n Unit for the Y axis.\n xdata : List[Any]\n List of x labels for bars.\n ydata : Dict[str, List[Union[int, float]]]\n Dictionary of values.\n figsize : Tuple\n The size of the figure.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n backend : str\n Which library should be used to generate plot - bokeh or matplotlib.\n outext : Iterable[str]\n List with files extensions, should be supported by matplotlib.\n max_bars_matplotlib : Optional[int]\n Max number of bars for matplotlib backend.\n \"\"\"\n xlabel = xtitle\n if xunit is not None:\n xlabel += f' [{xunit}]'\n ylabel = ytitle\n if yunit is not None:\n ylabel += f' [{yunit}]'\n\n bar_width = .8 / len(ydata)\n if len(ydata) == 1:\n bar_offset = [0.]\n else:\n bar_offset = np.linspace(\n -.4 + bar_width/2,\n .4 - bar_width/2,\n len(ydata)\n ).tolist()\n\n if colors is None:\n colors = get_comparison_color_scheme(len(ydata))\n\n if backend == 'bokeh' or 'html' in outext:\n draw_barplot_bokeh(\n outpath=outpath,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n xdata=xdata,\n ydata=ydata,\n width=figsize[0] if figsize else None,\n height=figsize[1] if figsize else None,\n colors=colors,\n bar_width=bar_width,\n bar_offset=bar_offset,\n formats=outext if backend == 'bokeh' else ['html']\n )\n outext = set(outext)\n outext.discard('html')\n\n if max_bars_matplotlib is not None:\n xdata = xdata[:max_bars_matplotlib]\n ydata = {\n name: values[:max_bars_matplotlib]\n for name, values in ydata.items()\n }\n\n draw_barplot_matplotlib(\n outpath=outpath,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n xdata=xdata,\n ydata=ydata,\n figsize=figsize,\n colors=colors,\n bar_width=bar_width,\n bar_offset=bar_offset,\n outext=outext\n )\n\n\ndef draw_barplot_bokeh(\n outpath: Optional[Path],\n title: str,\n xlabel: str,\n ylabel: str,\n xdata: List[Any],\n ydata: Dict[str, List[Union[int, float]]],\n width: Optional[int] = None,\n height: Optional[int] = None,\n colors: Optional[List] = None,\n bar_width: float = 0.,\n bar_offset: List[float] = [0.],\n formats: Tuple[str] = ('html',)\n):\n \"\"\"\n Draws barplot using bokeh library.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Output path for the plot image. 
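A standalone sketch of the grouped-bar offset computation in draw_barplot above: each of the N series is shifted around the tick so the whole group spans a width of 0.8 and stays centered.

import numpy as np

n_series = 3
bar_width = .8 / n_series
if n_series == 1:
    bar_offset = [0.]
else:
    bar_offset = np.linspace(
        -.4 + bar_width / 2,
        .4 - bar_width / 2,
        n_series
    ).tolist()
print(round(bar_width, 4), [round(o, 4) for o in bar_offset])
# -> 0.2667 [-0.2667, 0.0, 0.2667]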
If None, the plot will be displayed.\n title : str\n Title of the plot.\n xlabel : str\n Label of the X axis.\n ylabel : str\n Label of the Y axis.\n xdata : List[Any]\n List of x labels for bars.\n ydata : Dict[str, List[Union[int, float]]]\n Dictionary of values.\n width : Optional[int]\n Width of the plot.\n height : Optional[int]\n Height of the plot.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n bar_width : float\n Width of the single bar.\n bar_offset : List[float]\n Offsets of the bars from different groups.\n formats : Tuple[str]\n Tuple with formats names.\n \"\"\"\n from bokeh.plotting import figure, output_file, save, show\n from bokeh.models import Range1d, HoverTool\n from bokeh.io import export_png, export_svg\n from bokeh.transform import dodge\n\n if width is None:\n width = 900\n if height is None:\n height = 778\n\n barplot_fig = figure(\n title=title,\n x_range=xdata,\n y_range=Range1d(0, max([max(y) for y in ydata.values()])),\n tools='pan,box_zoom,wheel_zoom,reset,save',\n toolbar_location='above',\n width=width,\n height=height,\n x_axis_label=xlabel,\n y_axis_label=ylabel,\n output_backend='webgl',\n )\n\n data = dict(ydata, xdata=xdata)\n\n for i, label in enumerate(ydata.keys()):\n vbar = barplot_fig.vbar(\n x=dodge('xdata', bar_offset[i], range=barplot_fig.x_range),\n top=label,\n source=data,\n bottom=0,\n fill_color=colors[i],\n width=bar_width\n )\n tooltips = [(xlabel, '@xdata'), (ylabel, f'@{{{label}}}')]\n if len(ydata) > 1:\n tooltips.insert(0, ('File', label))\n barplot_fig.add_tools(HoverTool(\n renderers=[vbar],\n tooltips=tooltips,\n ))\n\n barplot_fig.xaxis.major_label_orientation = 'vertical'\n\n if outpath is None:\n show(barplot_fig)\n return\n\n if 'html' in formats:\n output_file(f'{outpath}.html', mode='inline')\n save(barplot_fig)\n if 'png' in formats:\n export_png(barplot_fig, filename=f'{outpath}.png')\n if 'svg' in formats:\n export_svg(barplot_fig, filename=f'{outpath}.svg')\n\n\ndef draw_barplot_matplotlib(\n outpath: Optional[Path],\n title: str,\n xlabel: str,\n ylabel: str,\n xdata: List[Any],\n ydata: Dict[str, List[Union[int, float]]],\n figsize: Tuple = (15, 15),\n colors: Optional[List] = None,\n bar_width: float = 0.,\n bar_offset: List[float] = [0.],\n outext: Iterable[str] = ['png'],\n):\n \"\"\"\n Draws barplot using matplotlib library.\n\n Parameters\n ----------\n outpath : Optional[Path]\n Output path for the plot image. 
If None, the plot will be displayed.\n title : str\n Title of the plot.\n xlabel : str\n Label of the X axis.\n ylabel : str\n Label of the Y axis.\n xdata : List[Any]\n List of x labels for bars.\n ydata : Dict[str, List[Union[int, float]]]\n Dictionary of values.\n figsize : Tuple\n The size of the figure.\n colors : Optional[List]\n List with colors which should be used to draw plots.\n bar_width : float\n Width of the single bar.\n bar_offset : List[float]\n Offsets of the bars from different groups.\n outext : Iterable[str]\n List with files extensions.\n \"\"\"\n plt.figure(figsize=figsize)\n\n x_range = np.arange(0, len(xdata))\n\n bbox_extra = []\n for i, (label, values) in enumerate(ydata.items()):\n plt.bar(\n x_range + bar_offset[i],\n values,\n width=bar_width,\n color=colors[i],\n label=label\n )\n\n plt.xticks(x_range, xdata, rotation=90)\n plt.xlabel(xlabel, fontsize='large')\n plt.ylabel(ylabel, fontsize='large')\n plt.ticklabel_format(style='plain', axis='y')\n if len(ydata) > 1:\n plt.legend()\n plt.grid()\n if title:\n bbox_extra.append(plt.title(title))\n\n if outpath is None:\n plt.show()\n else:\n for ext in outext:\n plt.savefig(\n f\"{outpath}.{ext}\",\n bbox_extra_artists=bbox_extra,\n bbox_inches='tight')\n plt.close()\n\n\n@contextmanager\ndef choose_theme(\n custom_bokeh_theme: Union[bool, str, Path] = False,\n custom_matplotlib_theme: Union[bool, str, Path] = False,\n):\n \"\"\"\n Context manager, allowing to temporally set theme.\n\n Parameters\n ----------\n custom_bokeh_theme : bool | str | Path\n If True uses BOKEH_THEME_FILE, if str or Path uses file specified\n by this path.\n custom_matplotlib_theme : bool | str | Path\n If True uses MATPLOTLIB_THEME_FILE, if str or Path uses file specified\n by this path.\n \"\"\"\n # Backup current setups\n if custom_bokeh_theme:\n from bokeh.io import curdoc\n from bokeh.themes import Theme\n _copy_bokeh_theme = curdoc().theme\n # Set theme for bokeh\n if isinstance(custom_bokeh_theme, bool):\n with BOKEH_THEME_FILE as bokeh_theme_file:\n filename = bokeh_theme_file\n else:\n filename = custom_bokeh_theme\n theme = Theme(filename=filename)\n curdoc().theme = theme\n\n # Create temporary context for matplotlib\n with mpl.rc_context():\n # Set matplotlib theme\n if custom_matplotlib_theme:\n if isinstance(custom_matplotlib_theme, bool):\n with MATPLOTLIB_THEME_FILE as matplotlib_theme_file:\n filename = matplotlib_theme_file\n else:\n filename = custom_matplotlib_theme\n plt.style.use(filename)\n\n yield\n # Cleanup\n if custom_bokeh_theme:\n curdoc().theme = _copy_bokeh_theme\n","sub_path":"kenning/core/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":58921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653084366","text":"from insert_sort import insert_sort\n\n\ndef merge(left, right):\n result = []\n i, j = 0, 0\n\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n\n result += left[i:]\n result += right[j:]\n return result\n\n\ndef merge_sort(list, optimize=False, size=10):\n length = len(list)\n if optimize and length <= size:\n insert_sort(list)\n return list\n elif length <= 1:\n return list\n\n mid = int(len(list) / 2)\n left = merge_sort(list[:mid])\n right = merge_sort(list[mid:])\n\n return merge(left, right)\n\n\ndef merge_sort_list_part(list, first, last):\n return 
merge_sort(list[first:last])\n","sub_path":"src/lib/sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"251154142","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom copy import copy\n\ndef dvdt(q,p):\n gauss_rand = np.random.normal(0.0,1.0)\n return q #+ gamma*p -(2*beta*gamma)**0.5*gauss_rand\n\ndef potential(q):\n return 0.5*q**2\n\n\n\n\ndeltat = np.pi/1000\nN = 1\ndimension = 1\ngamma = 1.0\nbeta = 1.0\n\n\n\ndef velocity_verlet_step(pspace,dvdt,deltat,etherm):\n q = pspace[0]\n p = pspace[1]\n \n c1 = np.exp(-(deltat/2.0)*gamma)\n c2 = (1-c1**2)**0.5\n \n \n etherm+=p**2/2.0\n gauss_rand = np.random.normal(0.0,1.0)\n p = c1*p + (1/beta)**0.5*c2*gauss_rand # Thermostat step\n etherm-=p**2/2.0\n acc = -dvdt(q,p)\n p = p + (deltat/2.0)*acc # Momentum update step\n q = q + (deltat)*p # Position update step\n acc = -dvdt(q,p)\n p = p + (deltat/2.0)*acc # Momentum update step\n \n etherm+=p**2/2.0\n gauss_rand = np.random.normal(0.0,1.0)\n p = c1*p + (1/beta)**0.5*c2*gauss_rand # Thermostat step\n etherm-=p**2/2.0\n \n \n #evol_q = q + p*deltat + 0.5*acc*deltat**2\n \n #evol_acc = -dvdt(evol_q)\n #evol_p = p + 0.5*(acc+evol_acc)*deltat\n return [q,p]\n \n \n\ndef velocity_verlet(q,p,dvdt,deltat,Tarr):\n t=0.0\n T = Tarr[len(Tarr)-1]\n pspace = [q,p]\n etherm = np.zeros_like(p)\n etherm = np.array(etherm)\n tarr.append(t)\n qarr.append(pspace[0])\n parr.append(pspace[1])\n ethermarr.append(etherm)\n count=0\n while (t<=T):\n pspace = velocity_verlet_step(pspace,dvdt,deltat,etherm)\n t+=deltat\n count+=1\n #print(pspace)\n #print(t)\n if(count%10 ==0):\n tarr.append(t)\n qarr.append(pspace[0])\n parr.append(pspace[1])\n ethermarr.append(etherm.copy())\n #print(ethermarr)\n #print('here')\n #print(etherm.copy)\n \n \nq = np.zeros((N,dimension))\np = np.zeros((N,dimension))\nqarr = []\nparr = []\ntarr = []\nethermarr= []\n\nq = [[1.0]]\np = [[0.0]]\n\nq = np.array(q)\np = np.array(p)\n\nT = 2000*np.pi\nTarr = np.arange(0,T+0.01,np.pi/20)\n\nvelocity_verlet(q,p,dvdt,deltat,Tarr)\n\nqarr = np.array(qarr)\nparr = np.array(parr)\n\nenergyarr = parr**2/2 + potential(qarr)\nethermarr = np.array(ethermarr)\n\nkinarr = parr[:,0,0]**2/2\n\nkinmean = np.zeros_like(kinarr)\n\nfor i in range(len(kinmean)):\n kinmean[i] = np.sum(kinarr[:i])/(i+1)\n \nplt.plot(tarr,kinmean)\n\n\n#plt.plot(tarr[1:],ethermarr[1:,0,0]+energyarr[1:,0,0])\n","sub_path":".config/spyder-py3/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"209373411","text":"import heapq\ndef solution(scoville, K):\n heapq.heapify(scoville)\n cnt = 0\n while True:\n # 가장 작은 요소가 K보다 크면 끝난 것\n notspicy1 = heapq.heappop(scoville)\n if notspicy1 > K:\n return cnt\n # 가장 작은 요소가 K보다 작은 경우\n else :\n # 그런데 한개만 남고 더 섞을 수 없는 경우는 -1을 반환\n if len(scoville) == 0:\n return -1\n # 섞을 소스가 존재\n else :\n # 두번째로 안매운 소스를 뺀다\n notspicy2 = heapq.heappop(scoville)\n # 새로운 소스를 만든다\n newsauce = notspicy1 + (notspicy2*2)\n # 새로운 소스를 넣는다\n heapq.heappush(scoville, newsauce)\n # 횟수 추가\n cnt += 1\n\n# 효율성 통과 못함\n# def mix(a, b):\n# return a + b*2\n# def solution(scoville, K):\n# cnt = 0 \n# while min(scoville) < K :\n# scoville.sort()\n# temp = mix(scoville[0],scoville[1])\n# del scoville[0:2]\n# 
scoville.insert(0, temp)\n#         cnt += 1\n#     return cnt","sub_path":"Programmers/힙/더 맵게.py","file_name":"더 맵게.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"390466730","text":"import cv2\nimport numpy\nfrom os.path import join\ndatapath = \"/home/d3athmast/dev/python/Cardata/TrainImage/\"\ndef path(cls, i):\n    return \"%s/%s%d.pgm\" % (datapath, cls, i+1)\npos, neg = \"pos-\", \"neg-\"\ndetect = cv2.xfeatures2d_SIFT.create()\nextract = cv2.xfeatures2d_SIFT.create()\nflann_param = dict(algorithm=1, trees=5)\nflann=cv2.FlannBasedMatcher(flann_param, {})\nbow_kmeans_trainer=cv2.BOWKMeansTrainer(40)\nextract_bow = cv2.BOWImgDescriptorExtractor(extract,flann)\n\ndef extract_sift(fn):\n    im = cv2.imread(fn,0)\n    return extract.compute(im,detect.detect(im))[1]\n\nfor i in range(8):\n    bow_kmeans_trainer.add(extract_sift(path(pos,i)))\n    bow_kmeans_trainer.add(extract_sift(path(neg,i)))\n\nvoc = bow_kmeans_trainer.cluster()\nextract_bow.setVocabulary(voc)","sub_path":"openCV/CarDetect.py","file_name":"CarDetect.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"505163290","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport argparse\nimport requests\nimport dns.resolver\n\nrequests.packages.urllib3.disable_warnings()\n\n\ndef get_ip_addresses(hostname):\n    results = []\n    answers = dns.resolver.query(hostname, 'A')\n    for a in answers:\n        results.append(str(a))\n    return results\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='checks HTTP but uses every IP the hostname resolves to')\n    parser.add_argument('--proto', default='http', help='Protocol to be used, default http')\n    parser.add_argument('--hostname', required=True, help='hostname e.g. www.example.com')\n    parser.add_argument('--port', default=80, help='Port to be used, default 80')\n    parser.add_argument('--path', default=\"/\", help='path e.g. 
/path/to, default /')\n parser.add_argument('--timeout', type=int, default=10, help='HTTP timeout in seconds')\n\n args = parser.parse_args()\n\n proto = args.proto\n hostname = args.hostname\n port = args.port\n timeout = args.timeout\n\n if not args.path.startswith('/'):\n path = \"/\" + args.path\n else:\n path = args.path\n\n ips = get_ip_addresses(hostname)\n\n headers = {'host': hostname}\n response = {}\n for ip in ips:\n try:\n url = \"%s://%s:%s%s\" % (proto, hostname, port, path)\n r = requests.get(url, headers=headers, timeout=timeout)\n\n status_code = r.status_code\n if status_code != 200:\n continue\n\n content = r.json()\n\n for k, v in content.items():\n if not isinstance(v, int):\n continue\n\n if k not in response:\n response[k] = v\n else:\n response[k] += v\n\n except Exception:\n continue\n\n response_string = \"nodes_in_dns=%s\" % len(ips)\n for k, v in response.items():\n response_string += \" %s=%s\" % (k, v)\n\n print(\"OK | \" + response_string)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"check_hbbtv_restreamer_stats.py","file_name":"check_hbbtv_restreamer_stats.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346674860","text":"from flask import current_app as app, abort\nfrom pyzotero import zotero\nfrom pyzotero.zotero_errors import UserNotAuthorised\n\nschema = {\n 'libraryId': {\n 'type': 'string',\n 'required': True\n },\n 'libraryType': {\n 'type': 'string',\n 'required': True,\n 'allowed': ['user', 'group']\n },\n 'apiKey': {\n 'type': 'string',\n 'required': True\n },\n 'name': {\n 'type': 'string',\n }\n}\n\nconfig = {\n 'public_methods': ['POST'],\n 'public_item_methods': [],\n 'allowed_filters': [],\n 'item_method': ['GET', 'PATCH'],\n 'resource_methods': ['GET', 'POST'],\n 'schema': schema\n}\n\ndef init(app):\n ''' (LocalProxy) -> NoneType\n Adds this route's specific hooks to this route.\n '''\n\n app.on_insert_users += onInsert\n app.on_inserted_users += onInserted\n\n# on_insert_users\ndef onInsert(insertUsers):\n ''' (list of dict) -> NoneType\n An Eve hook used prior to insertion.\n '''\n\n import models.users as users\n for user in insertUsers:\n\n # remove non-digits from libraryId\n user['libraryId'] = ''.join(\n [c for c in user['libraryId'] if c in '1234567890']\n )\n\n # check for duplicates\n try:\n users.User.findOne(\n app.data.driver.db,\n libraryId=user['libraryId'],\n libraryType=user['libraryType']\n )\n abort(422, 'Library already exists.')\n except KeyError:\n pass \n\n # verify that apiKey works\n try:\n zotUserInfo = zotero.Zotero(\n user['libraryId'],\n user['libraryType'],\n user['apiKey']\n ).key_info()\n except UserNotAuthorised:\n abort(422, 'Invalid APIKey.')\n\n # add supplementary information to user\n user['name'] = zotUserInfo['username']\n\n# on_inserted_users\ndef onInserted(insertUsers):\n ''' (list of dict) -> NoneType\n An Eve hook used prior to insertion.\n '''\n\n import models.users as users\n for userData in insertUsers:\n\n # set owner to self\n users.User(\n app.data.driver.db,\n users.User.collection,\n **userData\n ).selfOwn().commit()\n","sub_path":"api/routes/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"466405470","text":"\r\nimport os\r\nfrom os import path\r\nimport argparse\r\nfrom pprint import pprint\r\nfrom bisect import bisect\r\n\r\ndef 
main():\r\n args = parse_arguments()\r\n parse_and_solve(args.input, args.output)\r\n\r\ndef parse_arguments():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('input', type=argparse.FileType('r'),\r\n nargs='?', default='input.txt', help='input file')\r\n parser.add_argument('output', type=argparse.FileType('w'),\r\n nargs='?', default='output.txt', help='output file')\r\n\r\n return parser.parse_args()\r\n\r\ndef parse_and_solve(input, output):\r\n content = input.read()\r\n\r\n cases = parse_input(content)\r\n\r\n solutions = [solve(case) for case in cases]\r\n\r\n formatted_solutions = [format_solution(solution, i) for i, solution in enumerate(solutions)]\r\n joined_solutions = '\\n'.join(formatted_solutions)\r\n\r\n output.write(joined_solutions)\r\n\r\ndef parse_input(input):\r\n numbers = [float(x) for x in input.split()]\r\n numbers.reverse()\r\n\r\n cases_count = numbers.pop()\r\n\r\n cases =[]\r\n for i in xrange(int(cases_count)):\r\n blocks_count = numbers.pop()\r\n naomi_blocks = [numbers.pop() for _ in xrange(int(blocks_count))]\r\n ken_blocks = [numbers.pop() for _ in xrange(int(blocks_count))]\r\n cases.append((naomi_blocks, ken_blocks))\r\n\r\n assert(len(numbers) == 0)\r\n\r\n return cases\r\n\r\ndef solve(case):\r\n naomi_blocks, ken_blocks = case\r\n\r\n naomi_blocks.sort()\r\n ken_blocks.sort()\r\n\r\n deceitful_war_solution = solve_deceitful_war(naomi_blocks, ken_blocks)\r\n war_solution = solve_war(naomi_blocks, ken_blocks)\r\n\r\n return deceitful_war_solution, war_solution\r\n\r\ndef solve_deceitful_war(naomi_blocks, ken_blocks):\r\n naomi_blocks = naomi_blocks[:]\r\n ken_blocks = ken_blocks[:]\r\n naomi_score = 0\r\n while len(naomi_blocks) > 0:\r\n if ken_blocks[-1] > naomi_blocks[-1]:\r\n naomi_blocks.pop(0)\r\n ken_blocks.pop(-1)\r\n else:\r\n naomi_blocks.pop(-1)\r\n ken_blocks.pop(-1)\r\n naomi_score += 1\r\n\r\n return naomi_score\r\n\r\ndef solve_war(naomi_blocks, ken_blocks):\r\n naomi_blocks = naomi_blocks[:]\r\n ken_blocks = ken_blocks[:]\r\n naomi_score = 0\r\n while len(naomi_blocks) > 0:\r\n naomi_block = naomi_blocks.pop()\r\n ken_block_index = bisect(ken_blocks, naomi_block)\r\n if ken_block_index == len(ken_blocks):\r\n ken_blocks.pop(0)\r\n naomi_score += 1\r\n else:\r\n ken_blocks.pop(ken_block_index)\r\n\r\n return naomi_score\r\n\r\ndef format_solution(solution, index):\r\n format = 'Case #%d: %d %d'\r\n return format % (index + 1, solution[0], solution[1])\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"solutions_5644738749267968_1/Python/robocode/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37487783","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests\n\nimport datetime\n\n# apply method\ndef format_ids_helper(x):\n if x != x.upper():\n x = x.upper()\n if len(x) < 6 and x.isdigit():\n zrs = 6-len(x)\n for i in range(zrs):\n x = '0' + x\n return x\n\n\n# loc method \ndef format_ids_loc(df, col):\n # format station ids\n for i in df.index:\n if df[col].loc[i] != df[col].loc[i].upper():\n df[col].loc[i] = df[col].loc[i].upper()\n \n if len(df[col].loc[i]) < 6 and str(df[col].loc[i])[0].isdigit():\n zrs = 6-len(df[col].loc[i])\n for i in range(zrs):\n df[col].loc[i] = '0'+df[col].loc[i]\n \n return df\n\n\n# iterrows method \ndef format_ids(df, col):\n # format station ids\n for index, val in df.iterrows():\n if val[col] != 
val[col].upper():\n            df[col].loc[index] = df[col].loc[index].upper()\n        \n        if len(val[col]) < 6 and str(val[col])[0].isdigit():\n            zrs = 6-len(val[col])\n            for i in range(zrs):\n                df[col].loc[index] = '0'+df[col].loc[index]\n    \n    return df\n\n\n''' API ids'''\n\nroot = 'https://environment.data.gov.uk/flood-monitoring/id/stations?parameter=rainfall'\ndata = requests.get(root).json()\n\napi_ids = pd.DataFrame(columns=['id', 'easting', 'northing'])\nfor i in data['items']:\n    if all(['easting' in i, 'northing' in i]):\n        api_ids = api_ids.append({'id': i['notation'], 'easting': i['easting'], 'northing': i['northing']}, ignore_index=True)\n\n\n\no = datetime.datetime.now()\napi_idss = format_ids(api_ids, 'id')\nprint(datetime.datetime.now()-o)\n\no = datetime.datetime.now()\napi_idszs = format_ids_loc(api_ids, 'id')\nprint(datetime.datetime.now()-o)\n\no = datetime.datetime.now()\napi_ids['id'] = api_ids['id'].apply(format_ids_helper)\nprint(datetime.datetime.now()-o)\n","sub_path":"zzz_scripts/check_EA_api.py","file_name":"check_EA_api.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"373116064","text":"# @copyright@\n# Copyright (c) 2006 - 2017 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n#\n# @rocks@\n# Copyright (c) 2000 - 2010 The Regents of the University of California\n# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org\n# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt\n# @rocks@\n\n\nimport stack.commands\n\n\nclass Command(stack.commands.list.host.command):\n\t\"\"\"\n\tLists the current boot action for hosts. For each host supplied on the\n\tcommand line, this command prints the hostname and boot action for\n\tthat host. The boot action describes what the host will do the next\n\ttime it is booted.\n\n\t\n\tZero, one or more host names. 
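A sketch (hypothetical frame) of a vectorized alternative to the three format_ids variants above: pandas' str accessor uppercases the ids and left-pads numeric ones to six digits without any explicit loop.

import pandas as pd

df = pd.DataFrame({'id': ['ab12c', '52', '431']})
ids = df['id'].str.upper()
numeric = ids.str[0].str.isdigit()  # pad only ids that start with a digit
df['id'] = ids.where(~numeric, ids.str.zfill(6))
print(df['id'].tolist())  # -> ['AB12C', '000052', '000431']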
If no host names are supplied, info about\n\tall the known hosts is listed.\n\t\n\n\t\n\tList the current boot action for backend-0-0.\n\t\n\n\t\n\tList the current boot action for all known hosts.\n\t\n\t\"\"\"\n\n\tdef run(self, params, args):\n\n\t\tboot = {}\n\t\tfor h, b in self.db.select(\n\t\t\t\"\"\"\n\t\t\tn.name, b.action from nodes n\n\t\t\tleft join boot b on\n\t\t\tn.id = b.node\n\t\t\t\"\"\"):\n\t\t\tboot[h] = b\n\n\t\tattrs = {}\n\t\tfor row in self.call('list.host.attr', [ 'attr=nuke*' ]):\n\t\t\thost = row['host']\n\t\t\tattr = row['attr']\n\t\t\tvalue = self.str2bool(row['value'])\n\t\t\tif host not in attrs:\n\t\t\t\tattrs[host] = {}\n\t\t\tattrs[host][attr] = value\n\n\n\t\tself.beginOutput()\n\t\tfor host in self.getHostnames(args):\n\t\t\tnukedisks = False\n\t\t\tnukecontroller = False\n\t\t\tif host in attrs:\n\t\t\t\ta = attrs[host]\n\t\t\t\tnukedisks = a.get('nukedisks')\n\t\t\t\tnukecontroller = a.get('nukecontroller')\n\t\t\t\t\n\t\t\tself.addOutput(host, (boot[host], nukedisks, nukecontroller))\n\t\tself.endOutput(header=['host', 'action', 'nukedisks', 'nukecontroller'])\n\n","sub_path":"common/src/stack/command/stack/commands/list/host/boot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108542209","text":"# Implementation of the Queue ADT using a circular array.\nclass Queue:\n # Creates an empty queue.\n def __init__( self, maxSize ) :\n self._count = 0\n self._front = 0\n self._back = maxSize - 1\n self._qArray = [None] * maxSize\n\n # Returns True if the queue is empty.\n def isEmpty( self ) :\n return self._count == 0 # the backing array always holds maxSize slots, so test the count\n\n # Returns True if the queue is full.\n def isFull( self ) :\n return self._count == len(self._qArray)\n\n # Returns the number of items in the queue.\n def __len__( self ):\n return self._count\n\n # Adds the given item to the queue.\n def enqueue( self, item ):\n assert not self.isFull(), \"Cannot enqueue to a full queue.\"\n maxSize = len(self._qArray)\n self._back = (self._back + 1) % maxSize\n self._qArray[self._back] = item\n self._count += 1\n\n # Removes and returns the first item in the queue.\n def dequeue( self ):\n assert not self.isEmpty(), \"Cannot dequeue from an empty queue.\"\n item = self._qArray[self._front]\n maxSize = len(self._qArray)\n self._front = (self._front + 1) % maxSize\n self._count -= 1\n return item\n\n # Return the content of the queue (with array index in square\n # brackets).\n def __str__( self ) :\n maxSize = len(self._qArray)\n outStr = ''\n for i in range(self._count):\n outStr += ('[' + str((self._front + i) % maxSize) + ']:')\n outStr += (str(self._qArray[(self._front + i) % maxSize]) + ' ')\n return outStr\n\nif __name__ == '__main__':\n\n PROMPT = \"Enter a number (ctrl-D to end): \"\n myQueue = Queue(5)\n myQueue.enqueue(10)\n myQueue.enqueue(20)\n myQueue.enqueue(30)\n myQueue.enqueue(40)\n myQueue.enqueue(50)\n print(\"Testing with a queue of maximum size of 5 items\")\n print(\"After enqueuing 5 items to an empty queue: \")\n print(myQueue)\n\n myQueue.dequeue()\n myQueue.dequeue()\n print(\"\\nAfter dequeuing 2 items from the queue: \")\n print(myQueue)\n\n myQueue.enqueue(60)\n myQueue.enqueue(70)\n print(\"\\nAfter enqueuing 2 items to the queue: \")\n print(myQueue)\n","sub_path":"Week 
14/Practical/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185562717","text":"from django.contrib.auth.decorators import permission_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom .models import Todo\nfrom .forms import TodoForm, TodoDeleteForm\n\n\ndef index(request):\n todos = Todo.objects.all()\n \n paginator = Paginator(todos, 2)\n page = request.GET.get('page')\n todos = paginator.get_page(page)\n\n return render(request, \n 'todo/index.html',\n {'todos': todos}\n )\n\n\ndef detail(request, slug=None):\n todo = get_object_or_404(Todo, slug=slug)\n return render(request, \n 'todo/detail.html',\n {'todo': todo}\n )\n\n\n@permission_required('todo.add_todo')\ndef add(request):\n if request.method == 'POST':\n form = TodoForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('todo:index')\n else:\n form = TodoForm()\n return render(request, 'todo/add.html',\n {'form': form})\n\n\n@permission_required('todo.change_todo')\ndef edit(request, pk=None):\n todo = get_object_or_404(Todo, pk=pk)\n if request.method == 'POST':\n form = TodoForm(request.POST,\n instance=todo)\n if form.is_valid():\n form.save()\n return redirect('todo:index')\n else:\n form = TodoForm(instance=todo)\n return render(request, 'todo/edit.html',\n {'form': form,\n\t\t 'todo': todo})\n\n\n@permission_required('todo.delete_todo')\ndef delete(request, pk=None):\n todo = get_object_or_404(Todo, pk=pk)\n if request.method == 'POST':\n form = TodoDeleteForm(request.POST,\n instance=todo)\n if form.is_valid():\n todo.delete()\n return redirect('todo:index')\n else:\n form = TodoDeleteForm(instance=todo)\n return render(request,\n 'todo/delete.html',\n {'form': form,\n\t\t 'todo': todo})\n","sub_path":"project-1/todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"505684755","text":"import argparse\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nimport numpy as np\n\nimport psl\nimport slim\nfrom neuromancer import loggers\nfrom neuromancer.datasets import EmulatorDataset, FileDataset, systems\nfrom neuromancer import blocks\nfrom neuromancer import dynamics\nfrom neuromancer import estimators\nfrom neuromancer.problem import Problem, Objective\nfrom neuromancer.activations import BLU, SoftExponential\nfrom neuromancer import policies\nfrom neuromancer.train_scripts.common.common import get_base_parser\n\n\ndef get_parser(parser=None, add_prefix=False):\n if parser is None:\n parser = get_base_parser()\n\n # maybe prefix arg with \"ctrl_\"\n pfx = (lambda x: f\"-ctrl_{x.strip('-')}\") if add_prefix else (lambda x: x)\n\n # optimization parameters\n opt_group = parser.add_argument_group(\"OPTIMIZATION PARAMETERS\")\n opt_group.add_argument(pfx(\"-epochs\"), type=int, default=200)\n opt_group.add_argument(\n pfx(\"-lr\"), type=float, default=0.001, help=\"Step size for gradient descent.\"\n )\n opt_group.add_argument(\n pfx(\"-patience\"),\n type=int,\n default=20,\n help=\"How many epochs to allow for no improvement in eval metric before early stopping.\",\n )\n opt_group.add_argument(\n pfx(\"-warmup\"),\n type=int,\n default=10,\n help=\"Number of epochs to wait before enacting early stopping policy.\",\n )\n opt_group.add_argument(\n 
pfx(\"-skip_eval_sim\"),\n action=\"store_true\",\n help=\"Whether to run simulator during evaluation phase of training.\",\n )\n\n # data parameters\n data_group = parser.add_argument_group(\"DATA PARAMETERS\")\n data_group.add_argument(\n pfx(\"-nsteps\"),\n type=int,\n default=64,\n help=\"Number of steps for open loop during training.\",\n )\n # TODO: update emulator model\n data_group.add_argument(\n pfx(\"-system\"),\n type=str,\n default=\"Reno_full\",\n help=\"select particular dataset with keyword\",\n )\n data_group.add_argument(\n pfx(\"-nsim\"),\n type=int,\n default=10000,\n help=\"Number of time steps for full dataset. (ntrain + ndev + ntest)\"\n \"train, dev, and test will be split evenly from contiguous, sequential, \"\n \"non-overlapping chunks of nsim datapoints, e.g. first nsim/3 art train,\"\n \"next nsim/3 are dev and next nsim/3 simulation steps are test points.\"\n \"None will use a default nsim from the selected dataset or emulator\",\n )\n data_group.add_argument(\n pfx(\"-norm\"),\n nargs=\"+\",\n default=[\"U\", \"D\", \"Y\"],\n choices=[\"U\", \"D\", \"Y\", \"X\"],\n help=\"List of sequences to max-min normalize\",\n )\n data_group.add_argument(\n pfx(\"-data_seed\"), type=int, default=408, help=\"Random seed used for simulated data\"\n )\n\n # TODO: update trained system ID model path\n path = f\"./test/Reno_full_best_model.pth\"\n data_group.add_argument('-model_file', type=str, default=path)\n\n ##################\n # POLICY PARAMETERS\n policy_group = parser.add_argument_group(\"POLICY PARAMETERS\")\n policy_group.add_argument(\n pfx(\"-policy\"), type=str, choices=[\"mlp\", \"linear\"], default=\"mlp\"\n )\n policy_group.add_argument(\n \"-controlled_outputs\", nargs='+', default=[0, 1],\n help=\"list of indices of controlled outputs len(default)<=ny\"\n )\n policy_group.add_argument(\n pfx(\"-n_hidden\"), type=int, default=60, help=\"Number of hidden states\"\n )\n policy_group.add_argument(\n pfx(\"-n_layers\"),\n type=int,\n default=3,\n help=\"Number of hidden layers of single time-step state transition\",\n )\n policy_group.add_argument(\n pfx(\"-bias\"),\n action=\"store_true\",\n help=\"Whether to use bias in the neural network models.\",\n )\n policy_group.add_argument(\n pfx(\"-policy_features\"),\n nargs=\"+\",\n default=['Y_ctrl_p', 'Rf', 'Y_maxf', 'Y_minf'],\n help=\"Policy features\",\n ) # reference tracking option\n policy_group.add_argument(\n pfx(\"-activation\"),\n choices=[\"gelu\", \"softexp\"],\n default=\"gelu\",\n help=\"Activation function for neural networks\",\n )\n policy_group.add_argument(\n pfx(\"-perturbation\"),\n choices=[\"white_noise_sine_wave\", \"white_noise\"],\n default=\"white_noise\",\n )\n policy_group.add_argument(\n pfx(\"-seed\"),\n type=int,\n default=408,\n help=\"Random seed used for weight initialization.\",\n )\n\n # linear parameters\n linear_group = parser.add_argument_group(\"LINEAR PARAMETERS\")\n linear_group.add_argument(\n pfx(\"-linear_map\"), type=str, choices=[\"linear\", \"softSVD\", \"pf\"], default=\"softSVD\"\n )\n linear_group.add_argument(pfx(\"-sigma_min\"), type=float, default=0.1)\n linear_group.add_argument(pfx(\"-sigma_max\"), type=float, default=1.0)\n\n # layers\n layers_group = parser.add_argument_group(\"LAYERS PARAMETERS\")\n # TODO: generalize freeze unfreeze - we want to unfreeze only policy network\n layers_group.add_argument(\n \"-freeze\", nargs=\"+\", default=[\"\"], help=\"sets requires grad to False\"\n )\n layers_group.add_argument(\n \"-unfreeze\", 
default=[\"components.2\"], help=\"sets requires grad to True\"\n )\n\n # weight parameters\n weight_group = parser.add_argument_group(\"WEIGHT PARAMETERS\")\n weight_group.add_argument(\n pfx(\"-Q_con_x\"),\n type=float,\n default=1.0,\n help=\"Hidden state constraints penalty weight.\",\n )\n weight_group.add_argument(\n pfx(\"-Q_con_y\"),\n type=float,\n default=10.0,\n help=\"Observable constraints penalty weight.\",\n )\n weight_group.add_argument(\n pfx(\"-Q_dx\"),\n type=float,\n default=0.0,\n help=\"Penalty weight on hidden state difference in one time step.\",\n )\n weight_group.add_argument(\n pfx(\"-Q_sub\"), type=float, default=1.0, help=\"Linear maps regularization weight.\"\n )\n weight_group.add_argument(\n pfx(\"-Q_con_fdu\"),\n type=float,\n default=0.0,\n help=\"Penalty weight on control actions and disturbances.\",\n )\n weight_group.add_argument(\n pfx(\"-Q_con_u\"), type=float, default=2.0, help=\"Input constraints penalty weight.\"\n )\n weight_group.add_argument(\n pfx(\"-Q_r\"), type=float, default=1.0, help=\"Reference tracking penalty weight\"\n )\n weight_group.add_argument(\n pfx(\"-Q_du\"),\n type=float,\n default=0.1,\n help=\"control action difference penalty weight\",\n )\n\n # objective and constraints variations\n weight_group.add_argument(pfx(\"-con_tighten\"), action=\"store_true\")\n weight_group.add_argument(\n pfx(\"-tighten\"),\n type=float,\n default=0.0,\n help=\"control action difference penalty weight\",\n )\n weight_group.add_argument(pfx(\"-loss_clip\"), action=\"store_true\")\n weight_group.add_argument(pfx(\"-noise\"), action=\"store_true\")\n\n return parser\n\n\ndef update_system_id_inputs(args, dataset, estimator, dynamics_model):\n dynamics_model.input_keys[dynamics_model.input_keys.index('Uf')] = 'U_pred_policy'\n dynamics_model.fe = None\n dynamics_model.fyu = None\n\n estimator.input_keys[0] = 'Y_ctrl_p'\n estimator.data_dims = dataset.dims\n estimator.data_dims['Y_ctrl_p'] = dataset.dims['Yp']\n # estimator.data_dims = {**dataset.dims, 'Y_ctrl_p': estimator.data_dims['Yp']}\n estimator.nsteps = args.nsteps\n\n return estimator, dynamics_model\n\n\ndef get_policy_components(args, dataset, dynamics_model, policy_name=\"policy\"):\n torch.manual_seed(args.seed)\n\n # control policy setup\n activation = {\n \"gelu\": nn.GELU,\n \"relu\": nn.ReLU,\n \"blu\": BLU,\n \"softexp\": SoftExponential,\n }[args.activation]\n\n linmap = slim.maps[args.linear_map]\n linargs = {\"sigma_min\": args.sigma_min, \"sigma_max\": args.sigma_max}\n nh_policy = args.n_hidden\n\n policy = {\n \"linear\": policies.LinearPolicy,\n \"mlp\": policies.MLPPolicy,\n \"rnn\": policies.RNNPolicy,\n }[args.policy](\n {\"x0_estim\": (dynamics_model.nx,), **dataset.dims},\n nsteps=args.nsteps,\n bias=args.bias,\n linear_map=linmap,\n nonlin=activation,\n hsizes=[nh_policy] * args.n_layers,\n input_keys=args.policy_features,\n linargs=linargs,\n name=policy_name,\n )\n return policy\n\n\ndef get_objective_terms(args, policy):\n if args.noise:\n output_key = \"Y_pred_dynamics_noise\"\n else:\n output_key = \"Y_pred_dynamics\"\n\n reference_loss = Objective(\n [output_key, \"Rf\"],\n lambda pred, ref: F.mse_loss(pred[:, :, args.controlled_outputs], ref),\n weight=args.Q_r,\n name=\"ref_loss\",\n )\n regularization = Objective(\n [f\"reg_error_{policy.name}\"], lambda reg: reg, weight=args.Q_sub, name=\"reg_loss\",\n )\n control_smoothing = Objective(\n [f\"U_pred_{policy.name}\"],\n lambda x: F.mse_loss(x[1:], x[:-1]),\n weight=args.Q_du,\n name=\"control_smoothing\",\n )\n 
observation_lower_bound_penalty = Objective(\n [output_key, \"Y_minf\"],\n lambda x, xmin: torch.mean(F.relu(-x[:, :, args.controlled_outputs] + xmin)),\n weight=args.Q_con_y,\n name=\"observation_lower_bound\",\n )\n observation_upper_bound_penalty = Objective(\n [output_key, \"Y_maxf\"],\n lambda x, xmax: torch.mean(F.relu(x[:, :, args.controlled_outputs] - xmax)),\n weight=args.Q_con_y,\n name=\"observation_upper_bound\",\n )\n inputs_lower_bound_penalty = Objective(\n [f\"U_pred_{policy.name}\", \"U_minf\"],\n lambda x, xmin: torch.mean(F.relu(-x + xmin)),\n weight=args.Q_con_u,\n name=\"input_lower_bound\",\n )\n inputs_upper_bound_penalty = Objective(\n [f\"U_pred_{policy.name}\", \"U_maxf\"],\n lambda x, xmax: torch.mean(F.relu(x - xmax)),\n weight=args.Q_con_u,\n name=\"input_upper_bound\",\n )\n\n # Constraints tightening\n if args.con_tighten:\n observation_lower_bound_penalty = Objective(\n [output_key, \"Y_minf\"],\n lambda x, xmin: torch.mean(F.relu(-x[:, :, args.controlled_outputs] + xmin + args.tighten)),\n weight=args.Q_con_y,\n name=\"observation_lower_bound\",\n )\n observation_upper_bound_penalty = Objective(\n [output_key, \"Y_maxf\"],\n lambda x, xmax: torch.mean(F.relu(x[:, :, args.controlled_outputs] - xmax + args.tighten)),\n weight=args.Q_con_y,\n name=\"observation_upper_bound\",\n )\n inputs_lower_bound_penalty = Objective(\n [f\"U_pred_{policy.name}\", \"U_minf\"],\n lambda x, xmin: torch.mean(F.relu(-x + xmin + args.tighten)),\n weight=args.Q_con_u,\n name=\"input_lower_bound\",\n )\n inputs_upper_bound_penalty = Objective(\n [f\"U_pred_{policy.name}\", \"U_maxf\"],\n lambda x, xmax: torch.mean(F.relu(x - xmax + args.tighten)),\n weight=args.Q_con_u,\n name=\"input_upper_bound\",\n )\n\n # Loss clipping\n if args.loss_clip:\n reference_loss = Objective(\n [output_key, \"Rf\", \"Y_minf\", \"Y_maxf\"],\n lambda pred, ref, xmin, xmax: F.mse_loss(\n pred[:, :, args.controlled_outputs] * torch.gt(ref, xmin).int() * torch.lt(ref, xmax).int(),\n ref * torch.gt(ref, xmin).int() * torch.lt(ref, xmax).int(),\n ),\n weight=args.Q_r,\n name=\"ref_loss\",\n )\n\n objectives = [regularization, reference_loss]\n constraints = [\n observation_lower_bound_penalty,\n observation_upper_bound_penalty,\n inputs_lower_bound_penalty,\n inputs_upper_bound_penalty,\n ]\n\n return objectives, constraints\n\n\ndef add_reference_features(args, dataset, dynamics_model):\n \"\"\"\n \"\"\"\n ny = dynamics_model.fy.out_features\n if ny != dataset.data[\"Y\"].shape[1]:\n new_sequences = {\"Y\": dataset.data[\"Y\"][:, :1]}\n dataset.add_data(new_sequences, overwrite=True)\n dataset.min_max_norms[\"Ymin\"] = dataset.min_max_norms[\"Ymin\"][0]\n dataset.min_max_norms[\"Ymax\"] = dataset.min_max_norms[\"Ymax\"][0]\n\n # nsim = dataset.data[\"Y\"].shape[0]\n nsim = dataset.dims['nsim']\n nu = dataset.data[\"U\"].shape[1]\n ny = len(args.controlled_outputs)\n dataset.add_data({\n \"Y_max\": psl.Periodic(nx=ny, nsim=nsim, numPeriods=30, xmax=0.9, xmin=0.6)[:nsim,:],\n \"Y_min\": psl.Periodic(nx=ny, nsim=nsim, numPeriods=24, xmax=0.4, xmin=0.1)[:nsim,:],\n \"U_max\": np.ones([nsim, nu]),\n \"U_min\": np.zeros([nsim, nu]),\n \"R\": psl.Periodic(nx=ny, nsim=nsim, numPeriods=20, xmax=0.8, xmin=0.2)[:nsim,:]\n # 'Y_ctrl_': psl.WhiteNoise(nx=ny, nsim=nsim, xmax=[1.0] * ny, xmin=[0.0] * ny)\n })\n # indices of controlled states, e.g. 
[0, 1, 3] out of 5 outputs\n dataset.ctrl_outputs = args.controlled_outputs\n return dataset","sub_path":"neuromancer/train_scripts/papers/nmpc2020_buildings/setup_control.py","file_name":"setup_control.py","file_ext":"py","file_size_in_byte":13043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"550434031","text":"\"\"\"\r\nID: kevin173\r\nLANG: PYTHON3\r\nTASK: beads\r\n\"\"\"\r\nimport sys\r\n\r\ndef read_left_right(string, index):\r\n\tchar = 'w'\r\n\ti = index\r\n\tl = 0\r\n\tlength = len(string)\r\n\r\n\twhile l < length:\r\n\t\tif string[i] != char:\r\n\t\t\tif char == 'w':\r\n\t\t\t\tchar = string[i]\r\n\t\t\telif string[i] != 'w':\r\n\t\t\t\tbreak\r\n\t\ti = (i + 1) % length\r\n\t\tl += 1\r\n\r\n\ti = (index - 1) % length\r\n\tchar = 'w'\r\n\twhile l < length:\r\n\t\tif string[i] != char:\r\n\t\t\tif char == 'w':\r\n\t\t\t\tchar = string[i]\r\n\t\t\telif string[i] != 'w':\r\n\t\t\t\tbreak\r\n\t\ti = (i - 1) % length\r\n\t\tl += 1\r\n\treturn l\r\n\r\n\r\n\r\ntry:\r\n\tfio = open(\"beads.in\", 'r')\r\nexcept OSError:\r\n\tprint(\"Cannot open input file\")\r\n\tsys.exit()\r\n\r\nn = int(fio.readline())\r\nbeads = fio.readline().replace('\\n', '')\r\nfio.close()\r\nmax_beads = 0\r\n\r\nfor i in range(0, n):\r\n\t# We split at i\r\n\tl = read_left_right(beads, i)\r\n\tif max_beads < l:\r\n\t\tmax_beads = l\r\n\r\ntry:\r\n\tfio = open(\"beads.out\", 'w')\r\nexcept OSError:\r\n\tprint(\"Cannot open output file\")\r\n\tsys.exit()\r\n\r\nfio.write(str(max_beads) + '\\n')\r\nfio.close()\r\n\r\n","sub_path":"usaco/chapter1/sec1-2/beads/beads.py","file_name":"beads.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540538002","text":"from contextlib import asynccontextmanager\r\n\r\nfrom aiomysql import create_pool\r\nfrom scrapy.settings import Settings\r\n\r\nfrom aioscrapy.utils.tools import singleton\r\n\r\n\r\n@singleton\r\nclass AioMysqlManager(object):\r\n _clients = {}\r\n\r\n @staticmethod\r\n def get_alias(params):\r\n if params is None:\r\n raise ValueError('connection params are required')\r\n host = params.get('host')\r\n port = params.get('port')\r\n alias = params.pop('alias', host + str(port))\r\n return alias, params\r\n\r\n async def create(self, params, alias=None):\r\n if alias is None:\r\n alias, params = self.get_alias(params)\r\n pool = await create_pool(**params)\r\n return self._clients.setdefault(alias, pool)\r\n\r\n @asynccontextmanager\r\n async def get(self, alias_or_params, ping=False):\r\n if isinstance(alias_or_params, dict):\r\n alias, params = self.get_alias(alias_or_params)\r\n pool = self._clients.get(alias)\r\n if not pool:\r\n pool = await self.create(params, alias=alias)\r\n elif isinstance(alias_or_params, str):\r\n alias = alias_or_params\r\n pool = self._clients.get(alias)\r\n if not pool:\r\n raise KeyError('no mysql pool registered for alias %r' % alias)\r\n else:\r\n raise TypeError('alias_or_params must be a params dict or an alias string')\r\n\r\n conn = await pool.acquire()\r\n if ping:\r\n await conn.ping()\r\n cur = await conn.cursor()\r\n yield conn, cur\r\n await cur.close()\r\n await pool.release(conn)\r\n\r\n async def close(self, alias_or_params):\r\n if isinstance(alias_or_params, dict):\r\n alias, _ = self.get_alias(alias_or_params)\r\n elif isinstance(alias_or_params, str):\r\n alias = alias_or_params\r\n else:\r\n raise TypeError('alias_or_params must be a params dict or an alias string')\r\n pool = self._clients.get(alias)\r\n if pool:\r\n pool.close()\r\n await pool.wait_closed()\r\n\r\n async def close_all(self):\r\n for pool in self._clients.values():\r\n pool.close()\r\n await pool.wait_closed()\r\n\r\n\r\nmysql_manager = AioMysqlManager()\r\n\r\nif __name__ == '__main__':\r\n import asyncio\r\n\r\n\r\n async def t():\r\n await mysql_manager.create({\r\n 
'db': 'test',\r\n 'user': 'root',\r\n 'password': '123456',\r\n 'host': '172.16.177.22',\r\n 'port': 3306,\r\n 'charset': 'utf8',\r\n }, alias='xc')\r\n async with mysql_manager.get('xc') as (conn, cur):\r\n await cur.execute('select 1') # execute is a coroutine, so it must be awaited\r\n print(await cur.fetchone())\r\n # print(conn.commit())\r\n await mysql_manager.close_all()\r\n\r\n\r\n asyncio.run(t())\r\n","sub_path":"aioscrapy/connection/_aiomysql.py","file_name":"_aiomysql.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"205404762","text":"# Andreza Santana\n# Receives three pairs of values and returns the average of the larger values\n\nduplas = int(input())\nmaior = cont = 0\nfor i in range(duplas):\n n1, n2 = input().split(' ')\n if int(n1) > int(n2):\n maior += int(n1)\n cont += 1\n elif int(n2) > int(n1):\n maior += int(n2)\n cont += 1\n elif int(n1) == int(n2):\n maior += 0\n\nif maior != 0:\n media = maior / cont\n print(f\"{media:.2f}\")\nelse:\n print('It is not possible to compute the average.')\n\n\n","sub_path":"u4/mediadasduplas/mediadasduplas.py","file_name":"mediadasduplas.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481204626","text":"import argparse\nimport datetime\nimport time\nimport tensorflow as tf\nfrom kungfu import current_cluster_size, current_rank\nfrom kungfu.tensorflow.ops import reshape_strategy\nfrom kungfu.tensorflow.optimizers import (PairAveragingOptimizer,\n SynchronousAveragingOptimizer,\n SynchronousSGDOptimizer)\n\n\nparser = argparse.ArgumentParser(description='KungFu mnist example.')\nparser.add_argument('--kf-optimizer',\n type=str,\n default='sync-sgd',\n help='available options: sync-sgd, async-sgd, sma')\nparser.add_argument('--name',\n type=str,\n required=True,\n help='name this experiment run for TensorBoard logging')\nparser.add_argument('--reshape-on', \n action='store_true',\n default=False,\n help='turn on reshape strategy method')\n\nargs = parser.parse_args()\nreshape = 1 if args.reshape_on else 0\n\nDATASET_SIZE = 300\nTRAIN_VAL_SPLIT = 0.8\nNUM_EPOCHS = 15\nBATCH_SIZE = 8\n# adjust number of steps based on number of workers\nNUM_STEPS = (DATASET_SIZE // BATCH_SIZE) // current_cluster_size()\n\n\ndef load_data():\n\n (mnist_images, mnist_labels), _ = \\\n tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % current_rank())\n print(len(mnist_images))\n\n dataset = tf.data.Dataset.from_tensor_slices(\n (tf.cast(mnist_images[..., tf.newaxis] / 255.0,\n tf.float32), tf.cast(mnist_labels, tf.int64)))\n\n # smaller dataset for quick testing\n smaller_dataset = dataset.take(DATASET_SIZE)\n split = int(DATASET_SIZE*TRAIN_VAL_SPLIT)\n train_dataset = smaller_dataset.take(split-120).batch(BATCH_SIZE)\n test_dataset = smaller_dataset.skip(split).batch(BATCH_SIZE)\n return train_dataset, test_dataset\n\n\ndef build_model():\n\n mnist_model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),\n tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n # tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation='relu'),\n # tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n return mnist_model\n\n\ndef build_optimizer():\n # KungFu: adjust learning rate based on number of GPUs.\n # opt = tf.keras.optimizers.SGD(0.001 * current_cluster_size())\n opt = 
tf.compat.v1.train.AdamOptimizer(0.001 * current_cluster_size())\n\n # KungFu: wrap tf.compat.v1.train.Optimizer.\n if args.kf_optimizer == 'sync-sgd':\n opt = SynchronousSGDOptimizer(opt)\n elif args.kf_optimizer == 'async-sgd':\n opt = PairAveragingOptimizer(opt)\n elif args.kf_optimizer == 'sma':\n opt = SynchronousAveragingOptimizer(opt)\n else:\n raise RuntimeError('Unknown KungFu optimizer')\n\n return opt\n\n\ndef show_duration(duration):\n if duration < 1:\n return '%.2fms' % (duration * 1e3)\n if duration < 60:\n return '%.2fs' % duration\n sec = int(duration)\n mm, ss = sec / 60, sec % 60\n if duration < 3600:\n return '%dm%ds' % (mm, ss)\n return '%dh%dm%ds' % (mm / 60, mm % 60, ss)\n\n\n@tf.function\ndef training_step(mnist_model, opt, images, labels, first_batch):\n with tf.GradientTape() as tape:\n probs = mnist_model(images, training=True)\n loss = tf.losses.SparseCategoricalCrossentropy()\n loss_value = loss(labels, probs)\n\n grads = tape.gradient(loss_value, mnist_model.trainable_variables)\n opt.apply_gradients(zip(grads, mnist_model.trainable_variables))\n\n # KungFu: broadcast is done after the first gradient step to ensure optimizer initialization.\n if first_batch:\n from kungfu.tensorflow.initializer import broadcast_variables\n broadcast_variables(mnist_model.variables)\n broadcast_variables(opt.variables())\n\n return probs, loss_value\n\n\nif __name__ == \"__main__\":\n train_dataset, test_dataset = load_data()\n opt = build_optimizer()\n mnist_model = build_model()\n\n # Prepare the metrics.\n train_acc_metric = tf.metrics.SparseCategoricalAccuracy()\n val_acc_metric = tf.metrics.SparseCategoricalAccuracy()\n best_val_acc = 0\n\n time_log = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n log_dir = f\"tensorboard-logs/{args.name}/{time_log}\"\n summary_writer = tf.summary.create_file_writer(\n log_dir, flush_millis=10000)\n\n step = 0\n with summary_writer.as_default():\n for epoch in range(NUM_EPOCHS):\n print('Start of epoch %d' % (epoch+1,))\n for batch, (images, labels) in enumerate(train_dataset.take(NUM_STEPS)):\n\n\n # reshape strategy before apply_gradients (and therefore AllReduce is called in KungFu)\n reshape_strategy(reshape)\n \n t0 = time.time()\n probs, loss_value = training_step(\n mnist_model, opt, images, labels, batch == 0)\n\n print('training step %d, took %s' %\n (step, show_duration(time.time() - t0)))\n step += 1\n # print(f\"batch number here is {batch}\")\n # update training metric\n train_acc_metric(labels, probs)\n\n # Log loss metric every 10th step only on the 0th worker\n if step % 3 == 0 and current_rank() == 0:\n # print('Training step #%d\\tLoss: %.6f' %\n # (step, loss_value))\n # print('Training acc : %s' %\n # float(train_acc_metric.result()))\n tf.summary.scalar(\n 'training-loss', loss_value, step=step)\n tf.summary.scalar('training-accuracy',\n float(train_acc_metric.result()), step=step)\n summary_writer.flush()\n\n # Display metric at the end of each epoch\n train_acc = train_acc_metric.result()\n # print('Training acc over epoch: %s' % (float(train_acc),))\n # Reset training metric\n train_acc_metric.reset_states()\n\n # Run a validation loop at the end of each epoch.\n for x_batch_val, y_batch_val in test_dataset:\n val_logits = mnist_model(x_batch_val)\n # Update val metrics\n val_acc_metric(y_batch_val, val_logits)\n\n # log only on 0th worker to prevent corruption\n val_acc = val_acc_metric.result()\n if current_rank() == 0:\n tf.summary.scalar('val_accuracy', float(val_acc), step=step)\n 
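# summary_writer was created with flush_millis=10000; the explicit flush() below\n # pushes the buffered validation scalar to disk immediately so TensorBoard shows\n # per-epoch accuracy without waiting for that interval.\n 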
summary_writer.flush()\n\n best_val_acc = max(val_acc, best_val_acc)\n val_acc_metric.reset_states()\n print(\n f\"VALIDATION ACCURACY : worker {current_rank()} | epoch {epoch+1} | val_acc {float(val_acc)}\")\n print(\n f\"BEST VAL ACC OVER TRAINING: worker {current_rank()} | best_val_acc {best_val_acc}\")\n","sub_path":"examples/tf2_mnist_gradient_tape.py","file_name":"tf2_mnist_gradient_tape.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88232220","text":"# -*- coding: utf-8 -*-\n\nsum = 0\npayRate6 = 0.0\npay = 0.0\npayRate3 = 0.0\n# 0.6% interest per month, i.e. an annual rate of 7.2%\nyearRate = 1.072\nmonthRate1 = 1.003\nmonthRate2 = 1.006\nprint(\"Monthly interest rate: 0.3%\")\nfor i in range(1, 31):\n if i >= 4:\n sum += i\n if i <= 10:\n # pay = (pay * yearRate) + 8.4\n for j in range(1, 13):\n payRate6 = (payRate6 * monthRate1) + 0.7\n pay += 8.4\n else:\n for j in range(1, 13):\n payRate6 = (payRate6 * monthRate1)\n\n print(\"Year %d: cumulative income %d thousand yuan, including interest payout %.3f thousand yuan, actual outlay %.2f thousand yuan\" % (i, sum, payRate6, pay))\n\n\ndef cacuMoney(money, rate=0.0, times=1):\n if times <= 0:\n return money\n return cacuMoney(money * (1 + rate), rate, times - 1)\n\n# Over a ten-year term, monthly vs. yearly compounding differ by roughly 2.5%;\n# averaged out, that is a gap of about 0.2% in the annual rate.\nprint(cacuMoney(100.0, 0.006, 12 * 10))\nprint(cacuMoney(100.0, 0.072, 10))\n","sub_path":"jingle/demo/InterestRate.py","file_name":"InterestRate.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"328123883","text":"from cassiopeia import riotapi\nfrom sklearn.cluster import DBSCAN\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport random\nimport matplotlib.cm as cmx\nimport matplotlib.colors as colors\n\n# testFile = pickle.load(open('goldDeaths.txt'))\n# points = []\n# for eventSet in testFile:\n# \tfor event in eventSet:\n# \t\tpoints.append([event.position.x, event.position.y])\n\n\n# print len(points)\n# points = np.array(points)\n# pickle.dump(points, open('points', 'w'))\nX = pickle.load(open('points'))\nX = np.array(random.sample(X, 5000))\n\n\n\n'''MeanShift'''\n# bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)\n\n# ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n# ms.fit(X)\n# labels = ms.labels_\n# cluster_centers = ms.cluster_centers_\n\n# labels_unique = np.unique(labels)\n# n_clusters_ = len(labels_unique)\n\n# print(\"number of estimated clusters : %d\" % n_clusters_)\n\n# ###############################################################################\n# # Plot result\n# import matplotlib.pyplot as plt\n# from itertools import cycle\n\n# plt.figure(1)\n# plt.clf()\n\n# colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\n# for k, col in zip(range(n_clusters_), colors):\n# my_members = labels == k\n# cluster_center = cluster_centers[k]\n# plt.plot(X[my_members, 0], X[my_members, 1], col + '.')\n# plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,\n# markeredgecolor='k', markersize=14)\n# plt.title('Estimated number of clusters: %d' % n_clusters_)\n# plt.show()\n\n\n\n'''DBSCAN'''\ndb = DBSCAN(eps=120, min_samples=3).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\n\ndef get_cmap(N):\n '''Returns a function that maps each index in 0, 1, ... 
N-1 to a distinct \n RGB color.'''\n color_norm = colors.Normalize(vmin=0, vmax=N-1)\n scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') \n def map_index_to_rgb_color(index):\n return scalar_map.to_rgba(index)\n return map_index_to_rgb_color\n\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\nunique_labels = set(labels)\ncmap = get_cmap(len(unique_labels))\ncolors = [cmap(i) for i in range(len(unique_labels))]\n# colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = 'k'\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=4)\n\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=4)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()","sub_path":"deathCluster.py","file_name":"deathCluster.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"581452813","text":"\"\"\"\r\nD E P A R T M E N T  O F  P H Y S I C S\r\nReizkian Yesaya_15/383192/PA/16852\r\nprogram: Pembakaran (combustion), Euler method\r\ncreated: 31 July 2018\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef area(u,t):\r\n area=4*np.pi*(u**2)\r\n return area\r\n\r\ndef volume(u,t):\r\n volume=(4*np.pi*(u**3))/3\r\n return volume\r\n\r\ndef du_dt(u,t):\r\n du_dt=(area(u,t)-volume(u,t))\r\n return du_dt\r\n\r\n\r\nu0=0.05\r\nt0=0\r\nh=0.02\r\n\r\nu=[]\r\nt=[]\r\nA=[]\r\nV=[]\r\n\r\nif t0<10: \r\n while t0<=2:\r\n u_tp1=u0+h*du_dt(u0,t0)\r\n u0=u_tp1\r\n u.append(u0)\r\n t0=t0+h\r\n t.append(t0)\r\n \r\n v=volume(u0,t)\r\n a=area(u0,t)\r\n V.append(v)\r\n A.append(a)\r\nprint(t0,\" \",u0)\r\n \r\nplt.grid(True)\r\nplt.plot(t,u,'g--')\r\n#plt.plot(t,V,'r--')\r\n#plt.plot(t,A,'b--')\r\nplt.show()\r\n","sub_path":"Pembakaran_Euler.py","file_name":"Pembakaran_Euler.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"342962373","text":"import webbrowser\n\n\nclass Movie():\n def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n def show_trailer(self): # indented so the method belongs to the class\n webbrowser.open(self.trailer_youtube_url)\n\n\ndeadpool = Movie('Deadpool 2',\n 'Crazy hero',\n 'https://pt.wikipedia.org/wiki/Deadpool#/media/File:Deadpool_Vol_4_7.jpg',\n 'https://www.youtube.com/watch?v=DiQjnWELurw')\nprint(deadpool.storyline)\nprint(deadpool.poster_image_url)\n","sub_path":"Fun/09.1 - class_movie.py","file_name":"09.1 - class_movie.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"252009613","text":"#!/usr/bin/env python3\nimport itertools\nimport os\n\nimport lib\nimport widgets\n\n\n# Params\nOUTPUT_DIR = 'throughput-interference-poolsize'\nPOOL_SIZES = list(range(10, 200, 10))\nN_CONNS = [50, 100, 200]\nW = [\n {\n 'name': 'noop-20k',\n 'vcl': os.path.join(widgets.WIDGETS_DIR, 'noop.vcl'),\n 'url': '/rand/20k.txt',\n 'cid': 1,\n 'script': 'custid.lua',\n },\n {\n 'name': 'noop-1k',\n 'vcl': os.path.join(widgets.WIDGETS_DIR, 'noop.vcl'),\n 'url': 
'/rand/1k.txt',\n 'cid': 2,\n 'script': 'custid.lua',\n },\n]\n\nthreads_conns = list(itertools.product(POOL_SIZES, N_CONNS))\n\nlogs_dir = os.path.join(lib.LOGS_DIR, OUTPUT_DIR)\nif not os.path.exists(logs_dir):\n os.makedirs(logs_dir)\n\nlogs_dirs = list(map(lambda tc: os.path.join(logs_dir, '{0}-threads_{1}-conns'.format(*tc)), threads_conns))\nfor l in logs_dirs:\n if not os.path.exists(l):\n os.makedirs(l)\n\nthreads_conns = list(map(lambda tc,d: (*tc,d), threads_conns, logs_dirs))\n\nsys_configs = list(map(lambda tcd: {\n 'clients': [lib.CLIENT0_HOSTNAME, lib.CLIENT1_HOSTNAME],\n 'varnishs': [\n {\n 'host': lib.VARNISH_HOSTNAME,\n 'sys_cores': list(range(8)),\n 'varnish_cores': list(range(8)),\n 'varnish_nthreads': tcd[0] + 1, # +1 for acceptor\n }\n ],\n 'varnish_commit': 'mr3', # lib.ACCEPT_FIX_COMMIT,\n}, threads_conns))\n\nexps_params = list(map(lambda tcd: {\n 'sites': W,\n 'logs_dir': tcd[2],\n 'type': 'tput',\n 'nsamples': 1,\n 'test_secs': 30,\n 'nconns': tcd[1],\n}, threads_conns))\n\nlib.save_params(sys_configs, exps_params, logs_dir)\nfor sys_config, exp_params in zip(sys_configs, exps_params):\n lib.setup_system(sys_config)\n\n iso_configs = lib.gen_exp_configs(sys_config, exp_params, 1)\n combi_configs = lib.gen_exp_configs(sys_config, exp_params, 2)\n configs = iso_configs + combi_configs\n\n for c in configs:\n lib.run_exp(c)\n","sub_path":"data-collection/throughput-interference-poolsize.py","file_name":"throughput-interference-poolsize.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"145318550","text":"import sys\n\nclass minHeap:\n # For getting the index of the parent Node.\n def parent(self, idx): return (idx // 2)\n\n # For getting the index of the leftChild Node.\n def leftChild(self, idx): return (idx * 2)\n\n # For getting the index of the rightChild Node.\n def rightChild(self, idx): return (idx * 2 + 1)\n\n # Initializing an empty heap.\n def __init__(self):\n self.heapSize = 0\n self.heapArr = [0]\n\n # Min-heapify function for sifting the parent node down to its proper position.\n def heapify(self, idx):\n l = self.leftChild(idx)\n r = self.rightChild(idx)\n\n # Comparing with the left child.\n if l <= self.heapSize and self.heapArr[l] < self.heapArr[idx]:\n smallest = l\n else:\n smallest = idx\n\n # Comparing with the right child.\n if r <= self.heapSize and self.heapArr[r] < self.heapArr[smallest]:\n smallest = r\n\n # If the parent Node is greater than the left child or right child.\n if smallest != idx:\n\n # Swap the smallest of the three nodes with the parent.\n self.heapArr[idx], self.heapArr[smallest] = self.heapArr[smallest], self.heapArr[idx]\n self.heapify(smallest)\n\n # Extracting the root (minimum) of the heap.\n def extractMin(self):\n if self.heapSize > 0:\n res = self.heapArr[1]\n self.heapArr[1], self.heapArr[self.heapSize] = self.heapArr[self.heapSize], self.heapArr[1]\n self.heapSize -= 1\n if self.heapSize != 0:\n self.heapify(1)\n return res\n\n # HeapSort function (built on a min-heap, it leaves the array in descending order).\n def heapSort(self):\n for i in range(self.heapSize, 0, -1):\n val = self.extractMin()\n self.heapArr[i] = val\n\n # Building a min-heap from a given array.\n def buildHeap(self, A):\n self.heapSize = len(A)\n self.heapArr = [0] + A\n idx = self.parent(self.heapSize)\n for i in range(idx, 0, -1):\n self.heapify(i)\n\n def PrintSorted(self):\n for i in range(1, len(self.heapArr)):\n print(self.heapArr[i], end = ' ')\n print()\n\n# HeapSort\n# Taking in input.\ntry:\n gotdata = sys.argv[1]\nexcept 
IndexError:\n print(\"Enter proper input\")\n exit()\n\nA = list(map(int,sys.argv[1].split(',')))\n\n# Creating an empty heap.\nh = minHeap()\n\n# Building a heap from the given array.\nh.buildHeap(A)\n\n# Sorting the given array using heapSort.\nh.heapSort()\n\n# Displaying the sorted array.\nh.PrintSorted()\n","sub_path":"additionques/Asn03G07P02.py","file_name":"Asn03G07P02.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591232494","text":"# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport os\n\nimport third_party.json_schema_compiler.model as model\nfrom docs_server_utils import SanitizeAPIName\n\nclass APIListDataSource(object):\n \"\"\" This class creates a list of chrome.* APIs and chrome.experimental.* APIs\n that are used in the api_index.html and experimental.html pages.\n \"\"\"\n def __init__(self, cache_builder, file_system, api_path, public_path):\n self._cache = cache_builder.build(self._ListAPIs)\n self._file_system = file_system\n self._api_path = api_path + '/'\n self._public_path = public_path + '/'\n\n def _GetAPIsInSubdirectory(self, api_names, doc_type):\n public_templates = self._file_system.ReadSingle(\n self._public_path + doc_type + '/')\n template_names = [os.path.splitext(name)[0]\n for name in public_templates]\n experimental_apis = []\n chrome_apis = []\n for template_name in sorted(template_names):\n if model.UnixName(template_name) in api_names:\n if template_name.startswith('experimental'):\n experimental_apis.append({\n 'name': template_name.replace('_', '.')\n })\n else:\n chrome_apis.append({ 'name': template_name.replace('_', '.') })\n # Mark the last entry of each list once the loop is done, guarding\n # against either list being empty.\n if chrome_apis:\n chrome_apis[-1]['last'] = True\n if experimental_apis:\n experimental_apis[-1]['last'] = True\n return {\n 'chrome': chrome_apis,\n 'experimental': experimental_apis\n }\n\n def _ListAPIs(self, apis):\n api_names = set(SanitizeAPIName(name, self._api_path) for name in apis)\n return {\n 'apps': self._GetAPIsInSubdirectory(api_names, 'apps'),\n 'extensions': self._GetAPIsInSubdirectory(api_names, 'extensions')\n }\n\n def __getitem__(self, key):\n return self.get(key)\n\n def get(self, key):\n try:\n return self._cache.GetFromFileListing(self._api_path)[key]\n except Exception as e:\n return None\n","sub_path":"chrome/common/extensions/docs/server2/api_list_data_source.py","file_name":"api_list_data_source.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530575596","text":"import datetime\nfrom django.utils import timezone\nfrom dateutil.relativedelta import relativedelta\n\nfrom betterforms.multiform import MultiModelForm\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm, UsernameField\nfrom django.contrib.auth.models import User\n\nfrom connect_therapy.forms.practitioner.custom_duration_field import DurationField, decompress_duration\nfrom connect_therapy.models import Practitioner\n\n\nclass PractitionerSignUpForm(UserCreationForm):\n address_line_1 = forms.CharField(max_length=100)\n address_line_2 = forms.CharField(max_length=100, required=False)\n postcode = forms.CharField(max_length=10)\n mobile = forms.CharField(max_length=20)\n bio = forms.CharField(widget=forms.Textarea)\n\n def clean_email(self):\n email = self.cleaned_data['email']\n if 
User.objects.filter(username=email).exists():\n raise forms.ValidationError(\"You've already signed up!\",\n code='exists'\n )\n return email\n\n class Meta:\n model = User\n fields = ('first_name',\n 'last_name',\n 'email',\n 'mobile',\n 'address_line_1',\n 'address_line_2',\n 'postcode',\n 'bio',\n 'password1',\n 'password2')\n\n widgets = {\n 'first_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'last_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'email': forms.TextInput(attrs={'size': 35, 'class': 'form-control'}),\n }\n\n\nclass PractitionerLoginForm(AuthenticationForm):\n username = UsernameField(\n max_length=254,\n widget=forms.TextInput(attrs={'autofocus': True,\n 'size': 35,\n 'class': 'form-control'}, ),\n label=\"Email\"\n )\n\n def confirm_login_allowed(self, user):\n try:\n user.practitioner\n except Practitioner.DoesNotExist:\n raise forms.ValidationError(\n \"You are not a practitioner\",\n code='not-practitioner'\n )\n\n if not user.practitioner.is_approved:\n raise forms.ValidationError(\n \"You have not been approved\",\n code='not-approved'\n )\n\n if not user.practitioner.email_confirmed:\n raise forms.ValidationError(\n \"Your email address hasn't been verified yet. Please check your inbox and junk folder for the \"\n \"activation email. Visit the help pages to resend the verification email.\",\n code='email_unconfirmed'\n )\n super().confirm_login_allowed(user)\n\n\nclass PractitionerNotesForm(forms.Form):\n practitioner_notes = forms.CharField(label=\"Notes for Practitioner\",\n widget=forms.Textarea(\n attrs={'class': 'form-control'}))\n patient_notes_by_practitioner = forms.CharField(label=\"Notes for Patient\",\n widget=forms.Textarea(\n attrs={'class': 'form-control'}\n ))\n\n\nclass PractitionerForm(forms.ModelForm):\n address_line_1 = forms.CharField(max_length=100)\n address_line_2 = forms.CharField(max_length=100, required=False)\n postcode = forms.CharField(max_length=10)\n mobile = forms.CharField(max_length=20)\n bio = forms.Textarea\n\n class Meta:\n model = Practitioner\n fields = ('address_line_1',\n 'address_line_2',\n 'postcode',\n 'mobile',\n 'bio')\n\n # Prevents a user from editing these fields in the form.\n def __init__(self, *args, **kwargs):\n super(PractitionerForm, self).__init__(*args, **kwargs)\n\n\nclass PractitionerUserForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('first_name',\n 'last_name',\n 'email')\n\n widgets = {\n 'email': forms.TextInput(attrs={'size': 35})\n }\n\n # Prevents a user from editing these fields in the form.\n def __init__(self, *args, **kwargs):\n super(PractitionerUserForm, self).__init__(*args, **kwargs)\n self.fields['first_name'].widget.attrs['readonly'] = True\n self.fields['last_name'].widget.attrs['readonly'] = True\n\n\nclass PractitionerEditMultiForm(MultiModelForm):\n form_classes = {\n 'user': PractitionerUserForm,\n 'practitioner': PractitionerForm\n }\n\n\nclass PractitionerDefineAppointmentForm(forms.Form):\n start_date_and_time = forms.DateTimeField(help_text=\" Format: DD/MM/YYYY H:M\",\n required=True,\n input_formats=['%d/%m/%Y %H:%M'],\n widget=forms.DateInput(attrs={'id': 'datetimepicker',\n 'class': 'form-control'}))\n minute_interval_choices = (\n (00, '00'),\n (30, '30'),\n )\n length = DurationField(required=True,\n minute_interval_choices=minute_interval_choices,\n help_text=\"Hour(s) Minute(s)\")\n\n def clean_start_date_and_time(self):\n start_datetime = self.cleaned_data['start_date_and_time']\n\n # Check appointment date and time is not in the 
past.\n if start_datetime < timezone.now():\n raise forms.ValidationError(\"Invalid date, cannot enter a time that has already passed!\",\n code='invalid'\n )\n # Check appointment date is not greater than three months\n if start_datetime.date() > datetime.date.today() + relativedelta(months=+3):\n raise forms.ValidationError(\"Invalid date, cannot enter a date more than 3 months ahead!\",\n code='invalid'\n )\n\n return start_datetime\n\n def clean_length(self):\n length_ = self.cleaned_data['length']\n if decompress_duration(length_) == [0, 0]:\n raise forms.ValidationError(\n \"Invalid length, minimum length required is 30 minutes\",\n code='invalid'\n )\n return length_\n\n\n\n\n","sub_path":"connect_therapy/forms/practitioner/practitioner.py","file_name":"practitioner.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95998746","text":"import random\nimport os\nimport matplotlib.pyplot as plt\n\nfrom config import *\n\n\ndef create_reads(ref):\n with open(ref, 'r') as f:\n myseq = f.read()\n mylength = len(myseq)\n nuc_list = list(myseq)\n for coverage in coverages:\n for average_length in average_length_list:\n nr_of_reads = int(mylength * coverage / average_length)\n dist_till_next_read = int(mylength / nr_of_reads)\n foo = \"mkdir {}\".format(DIR + ref.split('.')[0] + \"shuffled_c{}_l{}\".format(coverage, average_length))\n os.system(foo)\n filename = DIR + ref.split('.')[0] + \"shuffled_c{0}_l{1}/shuffled_c{0}_l{1}\".format(coverage,\n average_length)\n pre_list = []\n for i in range(nr_of_reads):\n startindex = random.randint(0, mylength)\n length = average_length + random.randint(-average_length * variance, average_length * variance)\n pre_list.append((startindex, length))\n pre_list = sorted(pre_list, key=lambda tup: tup[0])\n with open(\"{}.fasta\".format(filename, nr_of_reads, average_length), \"w\") as handle:\n for i, pre_elem in enumerate(pre_list): # enumerate so every FASTA header gets its own read index\n startindex = pre_elem[0]\n length = pre_elem[1]\n endindex = int(min(startindex + length, mylength))\n if endindex - startindex >= 30:\n currentseq = nuc_list[startindex:endindex]\n handle.write(\n \">NC_005816_Yersinia_pestis_biovar_Microtus/{}/{}_{}\\n\".format(i + 1, startindex, endindex))\n handle.write(\"\".join(currentseq) + \"\\n\")\n # os.system(\"{}fasta2DB -v {} {}.fasta\".format(DAZZ_DB, filename, filename))\n\n\ndef make_latex_stat_table(stats, ref):\n output = '\\\\begin{tabular}{|c|c|c|c|c|c|}\\\\hline\\n& \\\\multicolumn{4}{c|}{Read Length} & \\\\\\\\\\nCoverage & Average & Median & Shortest' \\\n '& Longest & \\\\#Reads \\\\\\\\\\n\\\\hline\\n'\n for coverage in coverages:\n for average_length in average_length_list:\n val = stats[(coverage, average_length)]\n output = output + '{0:.1f} & {1:.1f} & {2} & {3} & {4} & {5} \\\\\\\\\\n'.format(val[0], val[2], val[3],\n val[4], val[5], val[1])\n output = output + '\\\\hline\\n'\n output = output + '\\\\end{tabular}'\n with open(DIR + ref.split('.')[0] + '_stats.tex', 'w') as f:\n f.write(output)\n print(output)\n\n\ndef plot(prelist, filename, coverage, average_length):\n #plt.figure()\n plt.title(\"Coverage: {}, Average Length: {}, Reads: {}\\nReads below average: {}, Reads above average: {}\".format(\n coverage, average_length, len(prelist), len([x for x in prelist if x[1] < average_length]),\n len([x for x in prelist if x[1] > average_length])))\n #plt.hist([tup[1] for tup in prelist], bins=[0,average_length, maximum_length*average_length])\n plt.hist([tup[1] for tup in 
prelist], bins=50)\n plt.grid(True)\n #plt.xticks(xrange(len(list)))\n # plt.yticks(xrange(0,max(nr_best_edge_used)+10, 5))\n plt.ylabel(\"Frequency\")\n plt.xlabel(\"Read Length\")\n plt.savefig(filename + \"_plot.pdf\")\n plt.close()\n\n\ndef create_reads_shotgun_style(ref):\n '''\n create reads shotgun sequencing style\n :param ref:\n :return:\n '''\n stats = {}\n with open(ref, 'r') as f:\n ref_seq = f.read()\n ref_length = len(ref_seq)\n nuc_list = list(ref_seq)\n for coverage in coverages:\n for average_length in average_length_list:\n nr_of_shots = int(ref_length / average_length)\n os.system(\"mkdir {}\".format(DIR + ref.split('.')[0] + \"shuffled_c{}_l{}\".format(coverage, average_length)))\n filename = DIR + ref.split('.')[0] + \"shuffled_c{0}_l{1}/shuffled_c{0}_l{1}\".format(coverage,\n average_length)\n pre_list = []\n while sum([tup[1] for tup in pre_list]) < coverage * ref_length:\n hits = [random.randint(0, 20)] # stores the hits of the shotgun sequence in one run\n for i in range(nr_of_shots):\n hits.append(random.randint(0, ref_length))\n hits = sorted(hits)\n foolist = [(hits[i], hits[i + 1] - hits[i]) for i in range(len(hits) - 1)\n if minimum_length <= hits[i + 1] - hits[i] <= maximum_length * average_length]\n pre_list = pre_list + foolist\n pre_list = sorted(pre_list, key=lambda tup: tup[0])\n with open(\"{}.stat\".format(filename), \"w\") as handle:\n handle.write('Real Coverage: {}\\n'\n 'Number of reads: {}\\n'\n 'Average read length: {}\\n'\n 'Median read length: {}\\n'\n 'Shortest read length: {}\\n'\n 'Longest read length: {}'.format(\n sum([tup[1] for tup in pre_list]) / ref_length,\n len(pre_list),\n sum([tup[1] for tup in pre_list]) / len(pre_list),\n sorted([tup[1] for tup in pre_list])[len(pre_list) // 2],\n min([tup[1] for tup in pre_list]),\n max([tup[1] for tup in pre_list])\n ))\n # save stats in dict for later creation of latex table\n stats[(coverage, average_length)] = (\n sum([tup[1] for tup in pre_list]) / ref_length,\n len(pre_list),\n sum([tup[1] for tup in pre_list]) / len(pre_list),\n sorted([tup[1] for tup in pre_list])[len(pre_list) // 2],\n min([tup[1] for tup in pre_list]),\n max([tup[1] for tup in pre_list])\n )\n with open(\"{}.fasta\".format(filename), \"w\") as handle:\n for i, pre_elem in enumerate(pre_list): # enumerate so every FASTA header gets its own read index\n startindex = pre_elem[0]\n length = pre_elem[1]\n endindex = int(min(startindex + length, ref_length))\n if endindex - startindex >= 30:\n currentseq = nuc_list[startindex:endindex]\n handle.write(\n \">NC_005816_Yersinia_pestis_biovar_Microtus/{}/{}_{}\\n\".format(i + 1, startindex, endindex))\n handle.write(\"\".join(currentseq) + \"\\n\")\n # os.system(\"{}fasta2DB -v {} {}.fasta\".format(DAZZ_DB, filename, filename))\n plot(pre_list, filename, coverage, average_length)\n make_latex_stat_table(stats, ref)\n\n\ncreate_reads_shotgun_style(ref1)\ncreate_reads_shotgun_style(ref2)\ncreate_reads_shotgun_style(ref3)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527213982","text":"# -*- coding: utf-8 -*-\nfrom matplotlib import pyplot as plt\nimport pandas\nimport numpy as np\nfrom dateutil.parser import parse\nfrom multiprocessing import Pool\nimport csv\n\ndef parse_header_line(line):\n \"\"\"Parse header lines.\n \n >>> s = ' a b c '\n >>> parse_header_line(s)\n ['a', 'b', 'c']\n >>> s = ' a, b , c '\n >>> parse_header_line(s)\n ['a', 'b', 'c']\n \"\"\"\n line = line.strip('#')\n if ',' in line:\n 
newline = line.split(',')\n else:\n newline = line.split()\n return [i.strip() for i in newline]\n \ndef get_headers_pprint(fname):\n \"\"\"Get headers from pprint output.\n \n >>> fname = '/Users/maye/data/diviner/noise2.tab'\n >>> headers = get_headers_pprint(fname)\n >>> headers[:7]\n ['date', 'month', 'year', 'hour', 'minute', 'second', 'jdate']\n \"\"\"\n with open(fname) as f:\n headers = parse_header_line(f.readline())\n return headers\n\ndef get_headers_pds(fname):\n \"\"\"Get headers from PDS RDR files.\n \n >>> fname = '/Users/maye/data/diviner/201204090110_RDR.TAB'\n >>> headers = get_headers_pds(fname)\n >>> headers[:7]\n ['utc', 'jdate', 'orbit', 'sundist', 'sunlat', 'sunlon', 'sclk']\n \"\"\"\n with open(fname) as f:\n for i in range(3):\n f.readline()\n headers = parse_header_line(f.readline())\n return headers\n \ndef read_pprint(fname):\n \"\"\"Read tabular diviner data into pandas data frame and return it.\n \n Lower level function. Use read_div_data which calls this as appropriate. \n \"\"\"\n\n # pandas parser does not read this file correctly, but loadtxt does.\n # first will get the column headers\n\n headers = get_headers_pprint(fname)\n print(\"Found {0} headers: {1}\".format(len(headers),headers))\n\n # use numpy's loadtxt to read the tabulated file into a numpy array\n ndata = np.loadtxt(fname, skiprows=1)\n dataframe = pandas.DataFrame(ndata)\n dataframe.columns = headers\n dataframe.sort('jdate',inplace=True)\n return dataframe\n\ndef read_pds(fname,nrows=None):\n \"\"\"Read tabular files from the PDS depository.\n \n Lower level function. Use read_div_data which calls this as appropriate.\n \"\"\"\n headers = get_headers_pds(fname)\n with open(fname) as f:\n dialect = csv.Sniffer().sniff(f.read(2048))\n return pandas.io.parsers.read_csv(fname,\n dialect = dialect,\n comment='#',\n names=headers, \n na_values=['-9999.0'], \n skiprows=4, \n nrows=nrows,\n parse_dates=[[0,1]],\n index_col=0,\n )\n \ndef read_div_data(fname, **kwargs):\n with open(fname) as f:\n line = f.readline()\n if any(['dlre_edr.c' in line, 'Header' in line]):\n return read_pds(fname, **kwargs)\n elif fname.endswith('.h5'):\n return get_df_from_h5(fname)\n else:\n return read_pprint(fname)\n\ndef make_date_index(dataframe):\n \"\"\"Parse date fields/columns with pandas date converter parsers.\n\n Parse the date columns and create a date index from it\n In: pandas dataframe read in from diviner div38 data\n Out: DatetimeIndex\n \"\"\"\n d = dataframe\n di = pandas.io.date_converters.parse_all_fields(\n d.year, d.month, d.date, d.hour, d.minute, d.second)\n return di\n\ndef divplot(df, col, c=1, det=11):\n plt.plot(df[col][(df.c==c) & (df.det==det)])\n \n \ndef read_rdrplus(fpath,nrows):\n with open(fpath) as f:\n line = f.readline()\n headers = parse_header_line(line)\n \n return pandas.io.parsers.read_csv(fpath, names=headers, na_values=['-9999'],\n skiprows=1, nrows=nrows)\n\ndef get_df_from_h5(fname):\n \"\"\"Provide df from h5 file.\"\"\"\n store = pandas.HDFStore(fname)\n return store[store.keys()[0]]\n\ndef get_channel_mean(df, col_str, channel):\n \"The dataframe has to contain c and jdate for this to work.\"\n return df.groupby(['c',df.index])[col_str].mean()[channel]","sub_path":"diviner.py","file_name":"diviner.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"411608393","text":"\"\"\"\n COCO keypoint detection (2D multiple human pose estimation) dataset (for Lightweight 
OpenPose).\n\"\"\"\n\nimport os\nimport json\nimport math\nimport cv2\nimport numpy as np\nfrom mxnet.gluon.data import dataset\nfrom .dataset_metainfo import DatasetMetaInfo\n\n\nclass CocoHpe2Dataset(dataset.Dataset):\n \"\"\"\n COCO keypoint detection (2D multiple human pose estimation) dataset.\n\n Parameters\n ----------\n root : string\n Path to `annotations`, `train2017`, and `val2017` folders.\n mode : string, default 'train'\n 'train', 'val', 'test', or 'demo'.\n transform : callable, optional\n A function that transforms the image.\n \"\"\"\n def __init__(self,\n root,\n mode=\"train\",\n transform=None):\n super(CocoHpe2Dataset, self).__init__()\n self._root = os.path.expanduser(root)\n self.mode = mode\n self.transform = transform\n\n mode_name = \"train\" if mode == \"train\" else \"val\"\n annotations_dir_path = os.path.join(root, \"annotations\")\n annotations_file_path = os.path.join(annotations_dir_path, \"person_keypoints_\" + mode_name + \"2017.json\")\n with open(annotations_file_path, \"r\") as f:\n self.file_names = json.load(f)[\"images\"]\n self.image_dir_path = os.path.join(root, mode_name + \"2017\")\n self.annotations_file_path = annotations_file_path\n\n def __str__(self):\n return self.__class__.__name__ + \"(\" + self._root + \")\"\n\n def __len__(self):\n return len(self.file_names)\n\n def __getitem__(self, idx):\n file_name = self.file_names[idx][\"file_name\"]\n image_file_path = os.path.join(self.image_dir_path, file_name)\n image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)\n # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)\n\n img_mean = (128, 128, 128)\n img_scale = 1.0 / 256\n base_height = 368\n stride = 8\n pad_value = (0, 0, 0)\n\n height, width, _ = image.shape\n image = self.normalize(image, img_mean, img_scale)\n ratio = base_height / float(image.shape[0])\n image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)\n min_dims = [base_height, max(image.shape[1], base_height)]\n image, pad = self.pad_width(\n image,\n stride,\n pad_value,\n min_dims)\n image = image.astype(np.float32)\n image = image.transpose((2, 0, 1))\n # image = torch.from_numpy(image)\n\n # if self.transform is not None:\n # image = self.transform(image)\n\n image_id = int(os.path.splitext(os.path.basename(file_name))[0])\n\n label = np.array([image_id] + pad + [height, width], np.float32)\n # label = torch.from_numpy(label)\n\n return image, label\n\n @staticmethod\n def normalize(img,\n img_mean,\n img_scale):\n img = np.array(img, dtype=np.float32)\n img = (img - img_mean) * img_scale\n return img\n\n @staticmethod\n def pad_width(img,\n stride,\n pad_value,\n min_dims):\n h, w, _ = img.shape\n h = min(min_dims[0], h)\n min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride\n min_dims[1] = max(min_dims[1], w)\n min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride\n top = int(math.floor((min_dims[0] - h) / 2.0))\n left = int(math.floor((min_dims[1] - w) / 2.0))\n bottom = int(min_dims[0] - h - top)\n right = int(min_dims[1] - w - left)\n pad = [top, left, bottom, right]\n padded_img = cv2.copyMakeBorder(\n src=img,\n top=top,\n bottom=bottom,\n left=left,\n right=right,\n borderType=cv2.BORDER_CONSTANT,\n value=pad_value)\n return padded_img, pad\n\n# ---------------------------------------------------------------------------------------------------------------------\n\n\nclass CocoHpe2ValTransform(object):\n def __init__(self,\n ds_metainfo):\n self.ds_metainfo = ds_metainfo\n\n def __call__(self, src, label):\n return 
src, label\n\n# ---------------------------------------------------------------------------------------------------------------------\n\n\nclass CocoHpe2MetaInfo(DatasetMetaInfo):\n    def __init__(self):\n        super(CocoHpe2MetaInfo, self).__init__()\n        self.label = \"COCO\"\n        self.short_label = \"coco\"\n        self.root_dir_name = \"coco\"\n        self.dataset_class = CocoHpe2Dataset\n        self.num_training_samples = None\n        self.in_channels = 3\n        self.num_classes = 17\n        self.input_image_size = (368, 368)\n        self.train_metric_capts = None\n        self.train_metric_names = None\n        self.train_metric_extra_kwargs = None\n        self.val_metric_capts = None\n        self.val_metric_names = None\n        self.test_metric_capts = [\"Val.CocoOksAp\"]\n        self.test_metric_names = [\"CocoHpe2OksApMetric\"]\n        self.test_metric_extra_kwargs = [\n            {\"name\": \"OksAp\",\n             \"annotations_file_path\": None}]\n        self.saver_acc_ind = 0\n        self.do_transform = True\n        self.val_transform = CocoHpe2ValTransform\n        self.test_transform = CocoHpe2ValTransform\n        self.ml_type = \"hpe\"\n        self.net_extra_kwargs = {}\n        self.mean_rgb = (0.485, 0.456, 0.406)\n        self.std_rgb = (0.229, 0.224, 0.225)\n        self.load_ignore_extra = False\n\n    def add_dataset_parser_arguments(self,\n                                     parser,\n                                     work_dir_path):\n        \"\"\"\n        Create python script parameters (for COCO dataset metainfo).\n\n        Parameters:\n        ----------\n        parser : ArgumentParser\n            ArgumentParser instance.\n        work_dir_path : str\n            Path to working directory.\n        \"\"\"\n        super(CocoHpe2MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)\n        parser.add_argument(\n            \"--input-size\",\n            type=int,\n            nargs=2,\n            default=self.input_image_size,\n            help=\"size of the input for model\")\n        parser.add_argument(\n            \"--load-ignore-extra\",\n            action=\"store_true\",\n            help=\"ignore extra layers in the source PyTorch model\")\n\n    def update(self,\n               args):\n        \"\"\"\n        Update COCO dataset metainfo after user customizing.\n\n        Parameters:\n        ----------\n        args : ArgumentParser\n            Main script arguments.\n        \"\"\"\n        super(CocoHpe2MetaInfo, self).update(args)\n        self.input_image_size = args.input_size\n        self.load_ignore_extra = args.load_ignore_extra\n\n    def update_from_dataset(self,\n                            dataset):\n        \"\"\"\n        Update dataset metainfo after a dataset class instance creation.\n\n        Parameters:\n        ----------\n        dataset : obj\n            A dataset class instance.\n        \"\"\"\n        self.test_metric_extra_kwargs[0][\"annotations_file_path\"] = dataset.annotations_file_path\n","sub_path":"gluon/datasets/coco_hpe2_dataset.py","file_name":"coco_hpe2_dataset.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"206303819","text":"class Solution(object):\n    def singleNumber(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        dic = {}\n        for x in nums:\n            if x in dic:  # dict.has_key() is Python 2 only; use the 'in' operator\n                dic[x] += 1\n            else:\n                dic[x] = 1\n        for x in dic.keys():\n            if dic[x] == 1:\n                return x","sub_path":"Python/136. Single Number.py","file_name":"136. 
Single Number.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"436298099","text":"import numpy as np\n\n\nclass CalibrationManager(object):\n\n\tdef __init__(self):\n\t\tself.path = np.array([[0,0,0],[-0.1,0.01,0.15],[-0.12,0.07,0.2]])\n\n\tdef simulate_path(self):\n\t\tall_paths = [[],[],[]]\n\t\tfor i in range(len(self.path)-1):  # xrange is Python 2 only\n\t\t\tpoint = self.path[i]\n\t\t\tnext_point = self.path[i+1]\n\t\t\tpath = np.vstack([np.linspace(point[0],next_point[0],10),np.linspace(point[1],next_point[1],10),np.linspace(point[2],next_point[2],10)])\n\t\t\tall_paths = np.hstack([all_paths, path])\n\t\tdraw_path(all_paths)\n\t\tself.all_paths = all_paths  # store the result; it was previously a local that get_motion_tracking_data() could not see\n\t\treturn all_paths\n\n\tdef get_motion_tracking_data(self):\n\t\treturn self.all_paths.transpose()\n\n\tdef start_calibration(self):\n\t\tpass\n\n\ndef draw_path(path):\n\timport matplotlib.pyplot as plt\n\tfrom mpl_toolkits.mplot3d import Axes3D\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tax.plot(path[0],path[1],path[2])\n\tfig.show()\n\nif __name__ == '__main__':\n\tcal = CalibrationManager()\n\tcal.simulate_path()\n\tinput()\n","sub_path":"calibration_manager.py","file_name":"calibration_manager.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"604973498","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"AIS Cursor-on-Target Gateway Function Tests.\"\"\"\n\nimport unittest\n\nimport aiscot.functions\n\n__author__ = 'Greg Albrecht W2GMD '\n__copyright__ = 'Copyright 2020 Orion Labs, Inc.'\n__license__ = 'Apache License, Version 2.0'\n\n\nclass FunctionsTestCase(unittest.TestCase):\n    \"\"\"\n    Test class for functions... functions.\n    \"\"\"\n\n    def test_ais_to_cot(self):\n        \"\"\"\n        Tests that ais_to_cot decodes an AIS Sentence into a Cursor-on-Target\n        message.\n        \"\"\"\n        test_sentence = {\n            'id': 1,\n            'repeat_indicator': 0,\n            'mmsi': 211433000,\n            'nav_status': 0,\n            'rot_over_range': True,\n            'rot': -731.386474609375,\n            'sog': 1.100000023841858,\n            'position_accuracy': 1,\n            'x': -122.65529333333333,\n            'y': 37.72890666666667,\n            'cog': 80.30000305175781,\n            'true_heading': 511,\n            'timestamp': 25,\n            'special_manoeuvre': 0,\n            'spare': 0,\n            'raim': True,\n            'sync_state': 0,\n            'slot_timeout': 3,\n            'received_stations': 133,\n            'nmea': '!AIVDM,1,1,,B,139`n:0P0;o>Qm@EUc838wvj2<25,0*4E\\n'\n        }\n        cot_msg = aiscot.functions.ais_to_cot(test_sentence)\n        self.assertEqual(cot_msg.event_type, 'a-f-G-E-V-C')\n        self.assertEqual(cot_msg.uid, 'AIS.211433000')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_functions.py","file_name":"test_functions.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"419604006","text":"# encoding: utf-8\n# ---------------------------------------------------------------------------\n# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.\n# Distributed under the terms of the BSD License. 
See COPYING.rst.\n# ---------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport numpy\nfrom distarray.externals.six.moves import input\n\nfrom distarray.dist import Context, Distribution\nfrom distarray.dist.decorators import local\nfrom pprint import pprint\n\ncontext = Context()\n\nnumpy.set_printoptions(precision=2, linewidth=1000)\n\n\n@local\ndef local_sin(da):\n \"\"\"A simple @local function.\"\"\"\n return numpy.sin(da)\n\n\n@local\ndef local_sin_plus_50(da):\n \"\"\"An @local function that calls another.\"\"\"\n return local_sin(da) + 50\n\n\n@local\ndef global_sum(da):\n \"\"\"Reproducing the `sum` function in LocalArray.\"\"\"\n from distarray.local.mpiutils import MPI\n from distarray.local import LocalArray\n from distarray.local.maps import Distribution\n\n local_sum = da.ndarray.sum()\n global_sum = da.distribution.comm.allreduce(local_sum, None, op=MPI.SUM)\n\n new_arr = numpy.array([global_sum])\n distribution = Distribution.from_shape((1,), comm=da.comm)\n new_distarray = LocalArray(distribution, buf=new_arr)\n return new_distarray\n\n\nif __name__ == '__main__':\n\n arr_len = 40\n\n print()\n input(\"Basic creation:\")\n dist_b = Distribution.from_shape(context, (arr_len,), dist={0: 'b'})\n dap_b = context.empty(dist_b)\n dist_c = Distribution.from_shape(context, (arr_len,), dist={0: 'c'})\n dap_c = context.empty(dist_c)\n print(\"dap_b is a \", type(dap_b))\n print(\"dap_c is a \", type(dap_c))\n\n print()\n input(\"__setitem__:\")\n for x in range(arr_len):\n dap_b[x] = x\n dap_c[x] = x\n pprint(dap_b.get_localarrays())\n pprint(dap_c.get_localarrays())\n\n# print\n# input(\"__getitem__ with slicing:\")\n# print dap_b[19:34:2]\n# print dap_c[19:34:2]\n\n print()\n input(\"@local functions:\")\n dap1 = local_sin(dap_b)\n pprint(dap1.get_localarrays())\n\n print()\n input(\"calling @local functions from each other:\")\n dap2 = local_sin_plus_50(dap_b)\n pprint(dap2.get_localarrays())\n\n print()\n input(\"calling MPI from @local functions:\")\n dap3 = global_sum(dap_b)\n pprint(dap3.get_localarrays())\n","sub_path":"examples/basic_demo.py","file_name":"basic_demo.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74673272","text":"card_number_in = input('What is the credit card number that you would like to evaluate? 
> ')\n\nif len(card_number_in) != 16:\n    print('You entered an invalid number of digits or characters')\n    quit()\n\ndef convert_string(a):\n    print(a)\n    x = list(a)\n    for i in range(len(a)):\n        x[i] = int(a[i])\n    return x\n\ndef double_element(b):\n    tempList = []\n    for i in range(0, len(b)):\n        if i % 2 == 0:\n            tempList.append(b[i] * 2)\n\n        else:\n            tempList.append(b[i])\n    return tempList\n\ndef find_second_digit(c):\n    into_list = list(str(c))\n    out_list = int(into_list[1])\n    return out_list\n\n\n# Calling function convert_string(card_number_in) to convert the string into integers\nconverted_card_intList = convert_string(card_number_in)\nprint(f'Converted Card Numbers Into Integers {converted_card_intList}')\n# slice off the last number and store it in a variable\npopoff1 = converted_card_intList.pop(-1)\nprint(f'You popped off number {popoff1}')\n# Reversing the digits of the list in place\nconverted_card_intList.reverse()\nprint(f'Reversed The Card Numbers {converted_card_intList}')\n# Calling function to double every other element in the list\ndouble_list = double_element(converted_card_intList)\nprint(f'Doubled The List Every Other Index {double_list}')\n# Sum all the digits in the list\nsum_list = sum(double_list)\nprint(sum_list)\n# Call function to find the second digit\nfound_second_digit = find_second_digit(sum_list)\nprint(f'The Second Digit In the number is {found_second_digit}')\n# print whether the check digit matches (True or False)\nprint(popoff1 == found_second_digit)","sub_path":"code/chad/python/lab_20/credit_card_validationv1.py","file_name":"credit_card_validationv1.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"449331388","text":"import pytest\n\nfrom os.path import join\n\nimport numpy as np\nimport pandas as pd\nfrom delphi_usafacts.geo import fips_to_state, disburse, geo_map\n\nMAP_DF = pd.read_csv(\n    join(\"..\", \"static\", \"fips_prop_pop.csv\"),\n    dtype={\"fips\": int}\n)\n\nsensor = \"new_counts\"\n\n\nclass TestFipsToState:\n\n    def test_normal(self):\n\n        assert fips_to_state(\"53003\") == \"wa\"\n        assert fips_to_state(\"48027\") == \"tx\"\n        assert fips_to_state(\"12003\") == \"fl\"\n        assert fips_to_state(\"50103\") == \"vt\"\n        assert fips_to_state(\"15003\") == \"hi\"\n\n\nclass TestDisburse:\n    def test_even(self):\n\n        df = pd.DataFrame(\n            {\n                \"fips\": [\"51093\", \"51175\", \"51620\"],\n                \"timestamp\": [\"2020-02-15\", \"2020-02-15\", \"2020-02-15\"],\n                \"new_counts\": [3, 2, 2],\n                \"cumulative_counts\": [13, 12, 12],\n                \"population\": [100, 2100, 300],\n            }\n        ).sort_values([\"fips\", \"timestamp\"])\n\n        new_df = disburse(df, \"51620\", [\"51093\", \"51175\"])\n\n        assert new_df[\"new_counts\"].values == pytest.approx([4, 3, 2])\n        assert new_df[\"cumulative_counts\"].values == pytest.approx([19, 18, 12])\n\n\nclass TestGeoMap:\n    def test_incorrect_geo(self):\n\n        df = pd.DataFrame(\n            {\n                \"fips\": [\"53003\", \"48027\", \"50103\"],\n                \"timestamp\": [\"2020-02-15\", \"2020-02-15\", \"2020-02-15\"],\n                \"new_counts\": [10, 15, 2],\n                \"cumulative_counts\": [100, 20, 45],\n                \"population\": [100, 2100, 300],\n            }\n        )\n\n        with pytest.raises(ValueError):\n            geo_map(df, \"département\", MAP_DF, sensor)\n\n    def test_county(self):\n\n        df = pd.DataFrame(\n            {\n                \"fips\": [\"53003\", \"48027\", \"50103\"],\n                \"timestamp\": [\"2020-02-15\", \"2020-02-15\", \"2020-02-15\"],\n                \"new_counts\": [10, 15, 2],\n                \"cumulative_counts\": [100, 20, 45],\n                \"population\": [100, 2100, 300],\n            }\n        )\n\n        new_df = geo_map(df, \"county\", 
MAP_DF, sensor)\n\n exp_incidence = df[\"new_counts\"] / df[\"population\"] * 100000\n exp_cprop = df[\"cumulative_counts\"] / df[\"population\"] * 100000\n\n assert set(new_df[\"geo_id\"].values) == set(df[\"fips\"].values)\n assert set(new_df[\"timestamp\"].values) == set(df[\"timestamp\"].values)\n assert set(new_df[\"incidence\"].values) == set(exp_incidence.values)\n assert set(new_df[\"cumulative_prop\"].values) == set(exp_cprop.values)\n\n def test_state(self):\n\n df = pd.DataFrame(\n {\n \"fips\": [\"04001\", \"04003\", \"04009\", \"25023\"],\n \"timestamp\": [\"2020-02-15\", \"2020-02-15\", \"2020-02-15\", \"2020-02-15\"],\n \"new_counts\": [10, 15, 2, 13],\n \"cumulative_counts\": [100, 20, 45, 60],\n \"population\": [100, 2100, 300, 25],\n }\n )\n\n new_df = geo_map(df, \"state\", MAP_DF, sensor)\n\n exp_incidence = np.array([27, 13]) / np.array([2500, 25]) * 100000\n exp_cprop = np.array([165, 60]) / np.array([2500, 25]) * 100000\n\n assert (new_df[\"geo_id\"].values == [\"az\", \"ma\"]).all()\n assert (new_df[\"timestamp\"].values == [\"2020-02-15\", \"2020-02-15\"]).all()\n assert (new_df[\"new_counts\"].values == [27, 13]).all()\n assert (new_df[\"cumulative_counts\"].values == [165, 60]).all()\n assert (new_df[\"population\"].values == [2500, 25]).all()\n assert (new_df[\"incidence\"].values == exp_incidence).all()\n assert (new_df[\"cumulative_prop\"].values == exp_cprop).all()\n\n def test_hrr(self):\n\n df = pd.DataFrame(\n {\n \"fips\": [\"13009\", \"13017\", \"13021\", \"09015\"],\n \"timestamp\": [\"2020-02-15\", \"2020-02-15\", \"2020-02-15\", \"2020-02-15\"],\n \"new_counts\": [10, 15, 2, 13],\n \"cumulative_counts\": [100, 20, 45, 60],\n \"population\": [100, 2100, 300, 25],\n }\n )\n\n new_df = geo_map(df, \"hrr\", MAP_DF, sensor)\n\n exp_incidence = np.array([13, 27]) / np.array([25, 2500]) * 100000\n exp_cprop = np.array([60, 165]) / np.array([25, 2500]) * 100000\n\n assert (new_df[\"geo_id\"].values == [110, 147]).all()\n assert (new_df[\"timestamp\"].values == [\"2020-02-15\", \"2020-02-15\"]).all()\n assert new_df[\"new_counts\"].values == pytest.approx([13.0, 27.0])\n assert new_df[\"cumulative_counts\"].values == pytest.approx([60, 165])\n assert new_df[\"population\"].values == pytest.approx([25, 2500])\n assert new_df[\"incidence\"].values == pytest.approx(exp_incidence)\n assert new_df[\"cumulative_prop\"].values == pytest.approx(exp_cprop)\n\n def test_msa(self):\n\n df = pd.DataFrame(\n {\n \"fips\": [\"13009\", \"13017\", \"13021\", \"09015\"],\n \"timestamp\": [\"2020-02-15\", \"2020-02-15\", \"2020-02-15\", \"2020-02-15\"],\n \"new_counts\": [10, 15, 2, 13],\n \"cumulative_counts\": [100, 20, 45, 60],\n \"population\": [100, 2100, 300, 25],\n }\n )\n\n new_df = geo_map(df, \"msa\", MAP_DF, sensor)\n\n exp_incidence = np.array([2, 13]) / np.array([300, 25]) * 100000\n exp_cprop = np.array([45, 60]) / np.array([300, 25]) * 100000\n\n assert (new_df[\"geo_id\"].values == [31420, 49340]).all()\n assert (new_df[\"timestamp\"].values == [\"2020-02-15\", \"2020-02-15\"]).all()\n assert new_df[\"new_counts\"].values == pytest.approx([2.0, 13.0])\n assert new_df[\"cumulative_counts\"].values == pytest.approx([45, 60])\n assert new_df[\"population\"].values == pytest.approx([300, 25])\n assert new_df[\"incidence\"].values == pytest.approx(exp_incidence)\n assert new_df[\"cumulative_prop\"].values == 
pytest.approx(exp_cprop)\n","sub_path":"usafacts/tests/test_geo.py","file_name":"test_geo.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482990914","text":"t_f = list(map(int, input().split()))\n\nt = t_f[0]\nf = t_f[1]\n\nstart_idx = dict()\nstart_idx['A'] = 0\nstart_idx['B'] = 1\nstart_idx['C'] = 2\nstart_idx['D'] = 3\nstart_idx['E'] = 4\n\nreverse_dict = ['A', 'B', 'C', 'D', 'E']\n\n\nfor test in range(t):\n start = []\n for i in range(5):\n start.append([])\n \n ans = ''\n query_left = f + 0\n # first query: figure out first character\n for i in range(119):\n query_idx = 1 + (5 * i)\n print(query_idx)\n query_left -= 1\n alphabet = str(input())\n start[start_idx[alphabet]].append(query_idx)\n\n\n target_first = -1\n for idx, s in enumerate(start):\n if len(s) < 24:\n ans += reverse_dict[idx]\n target_first = idx\n\n # second query: figure out second character\n second = []\n for i in range(5):\n second.append([])\n for i in start[target_first]:\n query_idx = i+1\n print(query_idx)\n query_left -= 1\n alphabet = str(input())\n second[start_idx[alphabet]].append(query_idx)\n \n target_second = -1\n for idx, s in enumerate(second):\n if len(s) == 0:\n continue\n elif len(s) < 6:\n ans += reverse_dict[idx]\n target_second = idx\n\n # third query: figure out third character\n third = []\n for i in range(5):\n third.append([])\n for i in second[target_second]:\n query_idx = i+1\n print(query_idx)\n query_left -= 1\n alphabet = str(input())\n third[start_idx[alphabet]].append(query_idx)\n \n target_third = -1\n for idx, s in enumerate(third):\n if len(s) == 0:\n continue\n elif len(s) < 2:\n ans += reverse_dict[idx]\n target_third = idx\n\n # fourth query: figure out fourth character\n fourth = []\n for i in range(5):\n fourth.append([])\n for i in third[target_third]:\n query_idx = i+1\n print(query_idx)\n query_left -= 1\n alphabet = str(input())\n check = [False] * 5\n for char in ans:\n check[start_idx[char]] = True\n check[start_idx[alphabet]] = True\n\n last = alphabet\n last_prev = ''\n for idx, i in enumerate(check):\n if i == False:\n last_prev = reverse_dict[idx]\n ans += last_prev\n ans += last\n\n for i in range(query_left):\n print(1)\n input()\n\n print(ans)\n is_correct = str(input()) == 'Y'\n if not is_correct:\n break\n else:\n continue\n \n","sub_path":"GoogleCodeJam_2019/Round1C/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"77642226","text":"import configparser\nfrom functools import partial\n\nfrom constraints import Constraints, constraint_included, constraint_distances, constraint_max_charge, constraint_max_num_changes\nfrom evolution import *\nfrom logger import FileLogger\nfrom utils import *\n\n# PARSING CONFIG\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\npdb_file = config['PDB']['File']\nvalue = float(config['PDB']['Value'])\ncros_prob = float(config['PARAMS']['CrosProb'])\nmut_prob = float(config['PARAMS']['MutProb'])\neval_param = float(config['PARAMS']['EvalParam'])\npop_count = int(config['PARAMS']['PopCount'])\npop_size = int(config['PARAMS']['PopSize'])\nstop_step = int(config['PARAMS']['StopStep'])\ncompute_lmb_dir = config['COMPUTING']['ComputeLambdaDir']\ncomputed_proteins_file = config['COMPUTING']['ComputedProteinsFileName']\nresult_file_name = config['COMPUTING']['ResultFileName']\npopulation_from_computed = 
config['COMPUTING']['PopulationFromComputed']\n\n# GENERATING CONSTRAINTS\nconstraints = Constraints()\n\ncoordinates = read_coordinates(pdb_file)\nsequence = read_sequence(pdb_file)\n\nf1 = partial(constraint_included, aminoacids_set=\"DE\", positions_set=PositionsSet1)\nf2 = partial(constraint_distances, min_distance=5.0, coords=coordinates, positions_set=PositionsSetUnion)\nf3 = partial(constraint_max_charge, max_charge=7)\nf4 = partial(constraint_max_num_changes, max_num_changes=10)\n\nconstraints.add(f1)\nconstraints.add(f2)\nconstraints.add(f3)\nconstraints.add(f4)\n\n# COMPUTING\nfrom_computed = None\nif population_from_computed.lower() == 'true':\n from_computed = True\nelif population_from_computed.lower() == 'false':\n from_computed = False\ncomputed_protein_saver = ProteinEvolutionSaver(computed_proteins_file)\nevolution = []\nfor i in range(pop_count):\n working_dir = os.path.join(compute_lmb_dir, f'{i + 1}')\n e = ProteinEvolution(population=None, mut_prob=mut_prob, cros_prob=cros_prob,\n working_dir=working_dir, logger=FileLogger, save_function=computed_protein_saver, checker=constraints)\n e.generate_population(default_sequence=sequence, default_value=value, pop_size=pop_size, from_computed=from_computed)\n evolution.append(e)\n\n\nasync def main():\n async def evolution_step(e):\n e.mutation(attempts=4000)\n e.crossover(attempts=4000)\n await e.compute()\n e.selection(eval_param=0.05, save_n_best=3)\n\n logger = FileLogger('logout')\n iteration, step = 1, 0\n the_best_value = 0.0\n while step < stop_step:\n logger(f\"Iteration: {iteration}\\n\")\n\n await asyncio.gather(*(evolution_step(e) for e in evolution))\n for e in evolution:\n e.print_info(iter=iteration)\n\n cur_best_value = max([e.get_best_protein().value for e in evolution])\n if the_best_value < cur_best_value:\n the_best_value = cur_best_value\n step = 0\n else:\n step += 1\n\n logger(f\"The best value: {the_best_value}\\n\"\n f\"Step/Stop {step}/{stop_step}\\n\\n\")\n\n iteration += 1\n\n\nasyncio.run(main())\n","sub_path":"run_GA.py","file_name":"run_GA.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506776279","text":"import cv2\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom keras.models import Model\nfrom keras.preprocessing import image\nimport numpy as np\n\n\ndef resize_image(img, size=(20,20)):\n\n h, w = img.shape[:2]\n \n if h == w: \n return cv2.resize(img, size, cv2.INTER_AREA)\n\n dif = h if h > w else w\n\n\n if dif > (size[0] + size[1]):\n interpolation = cv2.INTER_AREA\n else:\n interpolation = cv2.INTER_CUBIC\n\n x_pos = (dif - w)//2\n y_pos = (dif - h)//2\n\n mask = np.zeros((dif, dif), dtype=img.dtype)\n mask[y_pos:y_pos+h, x_pos:x_pos+w] = img[:h, :w]\n\n return cv2.resize(mask, size, interpolation)\n\n\ndef get_categories(dir_path):\n categories = []\n for root, subdirectories, files in os.walk(dir_path):\n for subdirectory in subdirectories:\n categories.append(subdirectory)\n \n return categories\n\n\ndef compile_model(model):\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model\n\n\ndef earlystop(nbr_patience):\n return EarlyStopping(patience=nbr_patience)\n\n\ndef learning_rate_reduction():\n return ReduceLROnPlateau(monitor='val_acc', patience=2, verbose=1, factor=0.5, min_lr=0.00001)\n\n\ndef train_model(model, epochs, train_generator, 
validation_generator, callbacks = None):\n    return model.fit_generator(train_generator, \n                               epochs=epochs, \n                               validation_data=validation_generator,\n                               callbacks = callbacks)\n\n\ndef loss_visualisation(training_loss, training_val_loss):\n    plt.plot(training_loss, color='red', label='Training loss')\n    plt.plot(training_val_loss, color='green', label='Validation loss')\n\n    plt.xlabel('Epochs')\n    plt.ylabel('Loss')\n\n    plt.legend()\n\n    plt.show()\n\n\ndef accuracy_visualisation(training_accuracy, training_val_accuracy):\n    plt.plot(training_accuracy, color='red', label='Training accuracy')\n    plt.plot(training_val_accuracy, color='green', label='Validation accuracy')\n\n    plt.xlabel('Epochs')\n    plt.ylabel('Accuracy')\n\n    plt.legend()\n\n    plt.show()\n\n\ndef init_activation(model, test_img):\n    layer_outputs = [layer.output for layer in model.layers]\n    img = image.load_img(test_img, target_size=(32,32, 3))\n    img_arr = image.img_to_array(img)\n    img_arr = np.expand_dims(img_arr, axis=0)\n    activation_model = Model(inputs=model.input, outputs=layer_outputs)\n    return activation_model.predict(img_arr)\n\n\ndef display_activation(activations, col_size, row_size, act_index): \n    activation = activations[act_index]\n    activation_index=0\n    fig, ax = plt.subplots(row_size, col_size, figsize=(row_size*13.5,col_size*2.5))\n    for row in range(0,row_size):\n        for col in range(0,col_size):\n            ax[row][col].imshow(activation[0, :, :, activation_index], cmap='plasma')\n            activation_index += 1\n\n\n\nfrom keras.preprocessing import image\nfrom keras.models import load_model\nimport pandas as pd\nimport seaborn as sns\nimport cv2 as cv\n\n\ndef predict_image(model, categories, choix):\n    test_image = image.load_img(choix, target_size = (32, 32))\n    test_image = image.img_to_array(test_image)\n    test_image = np.expand_dims(test_image, axis = 0)\n    result = model.predict(test_image)\n\n    preds = model.predict_classes(test_image)\n    prob = model.predict_proba(test_image)\n\n    index = preds[0]\n    print(f'Category {categories[preds[0]]}, Prediction {\"%.2f\" % (prob[0][index] * 100)}%')\n    \n    predictions_array = []\n    \n    for x in range(0,2):\n        predictions_array.append([categories[x], prob[0][x]])\n    \n    df = pd.DataFrame(predictions_array, columns = ['Category', 'Prediction'])\n\n    f, axarr = plt.subplots(1,2, figsize=(10,4))\n\n    img = cv.imread(choix)\n    axarr[0].imshow(img)\n    axarr[0].axis('off')\n\n    axarr[1] = sns.barplot(x=\"Prediction\", y=\"Category\", data=df)\n    sns.set_style(style='white')\n\n    axarr[1].set_ylabel('Category') \n    axarr[1].set_xlabel('Prediction')\n\n    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n\n    f.suptitle(\"Model Prediction\")\n    f.subplots_adjust(top=0.88)","sub_path":"src/modules/helper_old.py","file_name":"helper_old.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"176972615","text":"import os\nimport glob\nimport PIL\nimport scipy.io as sio\nimport h5py\nimport numpy as np\nfrom sklearn.utils.extmath import cartesian\nimport torch\nfrom torch.utils.data import Dataset, DataLoader, Subset, SubsetRandomSampler\nfrom torchvision import datasets, transforms\nimport pandas as pd\n\nfrom utils import Resize\n\n\ndef get_dataloader(args):\n    print('Loading data...')\n    if args.dataset_name == 'mnist':\n        return get_mnist_dataloader(args=args)\n\n    elif args.dataset_name == 'fashion-mnist':\n        return get_fashion_mnist_dataloaders(args=args)\n\n    elif args.dataset_name == 'svhn':\n        return get_svhn_dataloader(args=args)\n\n    
elif args.dataset_name == 'cars3d':\n return get_cars3d_dataloader(args=args)\n\n elif args.dataset_name == '3dshapes':\n return get_3dshapes_dataloader(args=args)\n\n elif args.dataset_name == 'dsprites':\n return get_dsprites_dataloader(args=args)\n\n elif args.dataset_name == 'stl10':\n return get_stl10_dataloader(args=args)\n elif args.dataset_name == 'imagenette':\n return get_imagenette_dataloader(args=args)\n elif args.dataset_name == 'cifar10subset':\n return get_cifar10_subset_dataloader(args=args)\n elif args.dataset_name == 'galaxyzoo':\n return get_galaxyzoo_dataloader(args=args)\n \n \n\ndef get_transfer_features(args):\n print('Loading pre-trained features for transfer learning...')\n if args.dataset_name == 'imagenette':\n return get_imagenette_transfer_features(args=args)\n elif args.dataset_name == 'cifar10subset':\n return get_cifar10_subset_transfer_features(args=args)\n \ndef get_imagenette_transfer_features(args, path_to_data='./data/'):\n train_data = pd.read_csv(path_to_data + 'Transfer_Features__Resnet50_features_dataframe.csv').iloc[:, 1:]\n return train_data\n\ndef get_cifar10_subset_transfer_features(args, path_to_data='./data/'):\n train_data = pd.read_csv(path_to_data + 'Transfer_Features_CIFAR10_Resnet50_features_dataframe.csv').iloc[:, 1:]\n return train_data\n\ndef get_mnist_dataloader(args, path_to_data='mnist'):\n \"\"\"MNIST dataloader with (28, 28) images.\"\"\"\n\n all_transforms = transforms.Compose([transforms.ToTensor()])\n train_data = datasets.MNIST(path_to_data, train=True, download=True, transform=all_transforms)\n train_loader = DataLoader(train_data, batch_size=args.mb_size, shuffle=args.shuffle,\n pin_memory=True, num_workers=args.workers)\n _, c, x, y = next(iter(train_loader))[0].size()\n return train_loader, c*x*y, c\n\n\ndef get_fashion_mnist_dataloaders(args, path_to_data='fashion-mnist'):\n \"\"\"FashionMNIST dataloader with (28, 28) images.\"\"\"\n\n all_transforms = transforms.Compose([transforms.ToTensor()])\n train_data = datasets.FashionMNIST(path_to_data, train=True, download=True, transform=all_transforms)\n train_loader = DataLoader(train_data, batch_size=args.mb_size, shuffle=args.shuffle,\n pin_memory=True, num_workers=args.workers)\n _, c, x, y = next(iter(train_loader))[0].size()\n return train_loader, c*x*y, c\n\n\ndef get_svhn_dataloader(args, path_to_data='svhn'):\n \"\"\"SVHN dataloader with (28, 28) images.\"\"\"\n\n all_transforms = transforms.Compose([transforms.Resize(28),\n transforms.ToTensor()])\n train_data = datasets.SVHN(path_to_data, split='train', download=True, transform=all_transforms)\n train_loader = DataLoader(train_data, batch_size=args.mb_size, shuffle=args.shuffle,\n pin_memory=True, num_workers=args.workers)\n _, c, x, y = next(iter(train_loader))[0].size()\n return train_loader, c*x*y, c\n\n\ndef get_cars3d_dataloader(args, path_to_data='cars3d'):\n \"\"\"Cars3D dataloader with (64, 64, 3) images.\"\"\"\n\n name = '{}/data/cars/'.format(path_to_data)\n if not os.path.exists(name):\n print('Data at the given path doesn\\'t exist. 
Downloading now...')\n os.system(\" mkdir cars3d/;\"\n \" wget -O cars3d/nips2015-analogy-data.tar.gz http://www.scottreed.info/files/nips2015-analogy-data.tar.gz ;\"\n \" cd cars3d/; tar xzf nips2015-analogy-data.tar.gz\")\n\n all_transforms = transforms.Compose([transforms.ToTensor()])\n\n cars3d_data = cars3dDataset(path_to_data, transform=all_transforms)\n cars3d_loader = DataLoader(cars3d_data, batch_size=args.mb_size,\n shuffle=args.shuffle, pin_memory=True, num_workers=args.workers)\n _, c, x, y = next(iter(cars3d_loader))[0].size()\n return cars3d_loader, c*x*y, c\n\ndef get_stl10_dataloader(args, path_to_data='./data/stl10'):\n \"\"\"STL10 dataloader with (64, 64, 3) images.\"\"\"\n\n name = '{}/stl10_binary/'.format(path_to_data)\n if not os.path.exists(name):\n print('Data at the given path doesn\\'t exist. Downloading now...')\n os.system(\" mkdir stl10/;\"\n \" wget -O stl10/stl10_binary.tar.gz http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz ;\"\n \" cd stl10/; tar xzf stl10_binary.tar.gz\")\n\n all_transforms = transforms.Compose([transforms.Resize(224), transforms.ToTensor()])\n\n stl10_data = datasets.STL10('stl10', split='train', transform=all_transforms, download=True)\n if args.proc == 'cpu':\n stl10_loader = DataLoader(stl10_data, batch_size=args.mb_size,\n shuffle=args.shuffle, pin_memory=False, num_workers=0)\n else:\n stl10_loader = DataLoader(stl10_data, batch_size=args.mb_size,\n shuffle=args.shuffle, pin_memory=True, num_workers=args.workers)\n _, c, x, y = next(iter(stl10_loader))[0].size()\n return stl10_loader, c*x*y, c\n\ndef get_cifar10_subset_dataloader(args, path_to_data='./data/cifar10'):\n \"\"\" CIFAR10 loader with all the transforms needed for 64x64 sized images\"\"\"\n cifar10_dataset = CIFAR10Dataset(path_to_data)\n np.random.seed(42)\n \n dog_indices, cat_indices, bird_indices, horse_indices = [], [], [], []\n dog_idx, cat_idx, bird_idx, horse_idx = cifar10_dataset.cifar_images.class_to_idx['dog'], cifar10_dataset.cifar_images.class_to_idx['cat'], cifar10_dataset.cifar_images.class_to_idx['bird'], cifar10_dataset.cifar_images.class_to_idx['horse']\n\n for i in range(len(cifar10_dataset)):\n current_class = cifar10_dataset[i][1]\n if current_class == dog_idx:\n dog_indices.append(i)\n elif current_class == cat_idx:\n cat_indices.append(i)\n elif current_class == bird_idx:\n bird_indices.append(i)\n elif current_class == horse_idx:\n horse_indices.append(i)\n \n subset_indices = dog_indices + cat_indices + bird_indices + horse_indices\n torch.manual_seed(42)\n np.random.seed(42)\n subset_indices = torch.tensor(subset_indices)\n subsetting_choice = subset_indices[torch.randperm(len(subset_indices))[:1000]]\n\n\n cifar10_dataset_subset = SubsetRandomSampler(subsetting_choice)\n ### PyTorch data loaders ###\n if args.proc == 'cpu':\n cifar_loader = DataLoader(cifar10_dataset, args.mb_size, shuffle=False, num_workers=0, pin_memory=False, sampler=cifar10_dataset_subset)\n else:\n cifar_loader = DataLoader(cifar10_dataset, args.mb_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=cifar10_dataset_subset)\n _, c, x, y = next(iter(cifar_loader))[0].size()\n return cifar_loader, c*x*y, c\n \ndef get_imagenette_dataloader(args, use_sub_sample=True, path_to_data='./data/imagenette2'):\n \"\"\" Imagenette loader with all the transforms needed for 224 sized images\"\"\"\n name = '{}/imagenette2/train/'.format(path_to_data)\n if not os.path.exists(name):\n print('Data at the given path doesn\\'t exist. 
Downloading now...')\n os.system(\" mkdir imagenette2/;\"\n \" wget -O imagenette2/imagenette2.tgz https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz ;\"\n \" cd imagenette2/; tar xzf imagenette2.tgz\")\n \n imagenette_dataset = ImagenetteDataset(path_to_data)\n np.random.seed(42)\n subsetting_choice = torch.randperm(len(imagenette_dataset))[:1000]\n imagenette_dataset_subset = SubsetRandomSampler(subsetting_choice)\n ### PyTorch data loaders ###\n if args.proc == 'cpu':\n imagenette_loader = DataLoader(imagenette_dataset, args.mb_size, shuffle=False, num_workers=0, pin_memory=False, sampler=imagenette_dataset_subset)\n else:\n imagenette_loader = DataLoader(imagenette_dataset, args.mb_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=imagenette_dataset_subset)\n _, c, x, y = next(iter(imagenette_loader))[0].size()\n return imagenette_loader, c*x*y, c\n\ndef get_galaxyzoo_dataloader(args, path_to_data='./data/'):\n \"\"\" Galaxy Zoo loader with all the transforms needed for 128 sized images\"\"\"\n\n galaxy_dataset = GalaxyZooDataset(path_to_data)\n np.random.seed(42)\n\n ### PyTorch data loaders ###\n if args.proc == 'cpu':\n galaxy_loader = DataLoader(galaxy_dataset, args.mb_size, shuffle=False, num_workers=0, pin_memory=False)\n else:\n galaxy_loader = DataLoader(galaxy_dataset, args.mb_size, shuffle=False, num_workers=args.workers, pin_memory=True)\n _, c, x, y = next(iter(galaxy_loader))[0].size()\n return galaxy_loader, c*x*y, c\n\n\ndef get_dsprites_dataloader(args, path_to_data='dsprites'):\n \"\"\"DSprites dataloader (64, 64) images\"\"\"\n\n name = '{}/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'.format(path_to_data)\n if not os.path.exists(name):\n print('Data at the given path doesn\\'t exist. Downloading now...')\n os.system(\" mkdir dsprites;\"\n \" wget -O dsprites/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz https://github.com/deepmind/dsprites-dataset/raw/master/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz\")\n\n transform = transforms.Compose([transforms.ToTensor()])\n\n dsprites_data = DSpritesDataset(name, transform=transform)\n dsprites_loader = DataLoader(dsprites_data, batch_size=args.mb_size,\n shuffle=args.shuffle, pin_memory=True, num_workers=args.workers)\n _, c, x, y = next(iter(dsprites_loader))[0].size()\n return dsprites_loader, c*x*y, c\n\n\ndef get_3dshapes_dataloader(args, path_to_data='3dshapes'):\n \"\"\"3dshapes dataloader with images rescaled to (28,28,3)\"\"\"\n\n name = '{}/3dshapes.h5'.format(path_to_data)\n if not os.path.exists(name):\n print('Data at the given path doesn\\'t exist. 
')\n os.system(\" mkdir 3dshapes;\"\n \" wget -O 3dshapes/3dshapes.h5 https://storage.googleapis.com/3d-shapes/3dshapes.h5\")\n\n transform = transforms.Compose([Resize(28), transforms.ToTensor()])\n\n d3shapes_data = d3shapesDataset(name, transform=transform)\n d3shapes_loader = DataLoader(d3shapes_data, batch_size=args.mb_size,\n shuffle=args.shuffle, pin_memory=True, num_workers=args.workers)\n _, c, x, y = next(iter(d3shapes_loader))[0].size()\n return d3shapes_loader, c*x*y, c\n\n\nclass DSpritesDataset(Dataset):\n \"\"\"DSprites dataloader class\"\"\"\n\n lat_names = ('shape', 'scale', 'orientation', 'posX', 'posY')\n lat_sizes = np.array([3, 6, 40, 32, 32])\n\n def __init__(self, path_to_data, subsample=1, transform=None):\n \"\"\"\n Parameters\n ----------\n subsample : int\n Only load every |subsample| number of images.\n \"\"\"\n dat = np.load(path_to_data)\n self.imgs = dat['imgs'][::subsample]\n self.lv = dat['latents_values'][::subsample]\n # self.lc = dat['latents_classes'][::subsample]\n self.transform = transform\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n sample = self.imgs[idx] * 255\n sample = sample.reshape(sample.shape + (1,))\n\n if self.transform:\n sample = self.transform(sample)\n return sample, self.lv[idx]\n\n\nclass d3shapesDataset(Dataset):\n \"\"\"3dshapes dataloader class\"\"\"\n\n lat_names = ('floor_hue', 'wall_hue', 'object_hue', 'scale', 'shape', 'orientation')\n lat_sizes = np.array([10, 10, 10, 8, 4, 15])\n\n def __init__(self, path_to_data, subsample=1, transform=None):\n \"\"\"\n Parameters\n ----------\n subsample : int\n Only load every |subsample| number of images.\n \"\"\"\n dataset = h5py.File(path_to_data, 'r')\n self.imgs = dataset['images'][::subsample]\n self.lat_val = dataset['labels'][::subsample]\n self.transform = transform\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n sample = self.imgs[idx] / 255\n if self.transform:\n sample = self.transform(sample)\n return sample, self.lat_val[idx]\n\n\nclass cars3dDataset(Dataset):\n \"\"\"Cars3D dataloader class\n\n The data set was first used in the paper \"Deep Visual Analogy-Making\"\n (https://papers.nips.cc/paper/5845-deep-visual-analogy-making) and can be\n downloaded from http://www.scottreed.info/. 
The images are rescaled to 64x64.\n\n The ground-truth factors of variation are:\n 0 - elevation (4 different values)\n 1 - azimuth (24 different values)\n 2 - object type (183 different values)\n\n Reference: Code adapted from\n https://github.com/google-research/disentanglement_lib/blob/master/disentanglement_lib/data/ground_truth/cars3d.py\n \"\"\"\n lat_names = ('elevation', 'azimuth', 'object_type')\n lat_sizes = np.array([4, 24, 183])\n\n def __init__(self, path_to_data, subsample=1, transform=None):\n \"\"\"\n Parameters\n ----------\n subsample : int\n Only load every |subsample| number of images.\n \"\"\"\n self.imgs = self._load_data()[::subsample]\n self.lat_val = cartesian([np.array(list(range(i))) for i in self.lat_sizes])[::subsample]\n self.transform = transform\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n if self.transform:\n sample = self.transform(self.imgs[idx])\n return sample.float(), self.lat_val[idx]\n\n def _load_data(self):\n dataset = np.zeros((24 * 4 * 183, 64, 64, 3))\n all_files = glob.glob(\"cars3d/data/cars/*.mat\")\n for i, filename in enumerate(all_files):\n data_mesh = self._load_mesh(filename)\n factor1 = np.array(list(range(4)))\n factor2 = np.array(list(range(24)))\n all_factors = np.transpose([\n np.tile(factor1, len(factor2)),\n np.repeat(factor2, len(factor1)),\n np.tile(i,\n len(factor1) * len(factor2))\n ])\n dataset[np.arange(i, 24*4*183, 183)] = data_mesh\n return dataset\n\n def _load_mesh(self, filename):\n \"\"\"Parses a single source file and rescales contained images.\"\"\"\n mesh = np.einsum(\"abcde->deabc\", sio.loadmat(filename)[\"im\"])\n flattened_mesh = mesh.reshape((-1,) + mesh.shape[2:])\n rescaled_mesh = np.zeros((flattened_mesh.shape[0], 64, 64, 3))\n for i in range(flattened_mesh.shape[0]):\n pic = PIL.Image.fromarray(flattened_mesh[i, :, :, :])\n pic.thumbnail((64, 64), PIL.Image.ANTIALIAS)\n rescaled_mesh[i, :, :, :] = np.array(pic)\n return rescaled_mesh * 1. 
/ 255\n\n\nclass ImagenetteDataset(Dataset):\n def __init__(self, path_to_data):\n self.all_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.Resize(64),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )])\n self.imagenette_images = datasets.ImageFolder(path_to_data + '/imagenette2/train/', self.all_transforms) \n def __getitem__(self, index):\n data, target = self.imagenette_images[index] \n # Your transformations here (or set it in ImageFolder class instantiation) \n return data, target, index\n def __len__(self):\n return len(self.imagenette_images)\n\nclass GalaxyZooDataset(Dataset):\n def __init__(self, path_to_data):\n self.all_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(128),\n transforms.Resize(64),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()\n ])\n self.galaxy_images = datasets.ImageFolder(path_to_data + '/galaxy_zoo_images/', self.all_transforms) \n def __getitem__(self, index):\n data, target = self.galaxy_images[index] \n # Your transformations here (or set it in ImageFolder class instantiation) \n return data, target, index\n def __len__(self):\n return len(self.galaxy_images)\n \nclass CIFAR10Dataset(Dataset):\n def __init__(self, path_to_data):\n self.all_transforms = transforms.Compose([\n transforms.CenterCrop(32),\n #transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,),(0.5,))\n #transforms.Normalize(\n #mean=[0.4914, 0.4822, 0.4465],\n #std=[0.2023, 0.1994, 0.2010]\n #)\n ])\n self.cifar_images = datasets.CIFAR10(root=path_to_data + '/cifar10/train/', train=True , download=True, transform=self.all_transforms ) \n def __getitem__(self, index):\n data, target = self.cifar_images[index] \n # Your transformations here (or set it in ImageFolder class instantiation) \n return data, target, index\n def __len__(self):\n return len(self.cifar_images)\n ","sub_path":"code/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":17701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403684141","text":"#!/usr/bin/env python\nimport numpy as np\nimport sys\nimport os\nprint(os.getcwd())\nprint(os.environ['PYTHONPATH'])\n\nimport boost_python_catkin_example.examples as examples\n\n\n\nclass Animal():\n pass\n\nclass Mammal():\n pass\n\nclass Dog():\n def __init__(self):\n self.msg = 'Bark Bark'\n\n def test_wrap_tuple(self):\n mytuple = (3, 3.14)\n mytuple1 = examples.tupid1(mytuple)\n print(mytuple1)\n mytuple2 = examples.tupid2((5, 1.55, \"Hi\"))\n print(mytuple2)\n a = np.array([[1., 2., 3., 4.], [4., 3., 2., 1.]])\n mytuple3 = examples.tupid3((5, 1.55, a))\n print(mytuple3)\n\n def test_none_tuple(self):\n a = np.array([[1., 2., 3., 4.], [4., 3., 2., 1.]])\n print(examples.tupidCheckNone((5, 1.55, a)))\n print(examples.tupidCheckNone(None))\n\n def test_dict(self):\n a = np.array([[1., 2., 3., 4.], [4., 3., 2., 1.]])\n thisdict = { \"brand\": \"Ford\", \"model\": \"Mustang\", \"year\": 1964, \"size\" : a }\n print(examples.readDictStringOnly(thisdict))\n print(examples.readDict(thisdict))\n\n def test_vector_multiplication(self, input):\n a = np.array(input).reshape(2, 3)\n b = a.reshape(3, 2)\n res2 = a.dot(b)\n print(\"{}.{} = {}\".format(a, b, res2))\n res1 = examples.mul(a, b)\n print(\"res: {} == {}\".format(res1, res2))\n examples.printStr(str(np.equal(res1, 
res2).all()))\n \n def learn(self, message, mat):\n self.msg = message\n print(\"The dog learns to bark: {}\".format(message))\n self.test_wrap_tuple()\n self.test_none_tuple()\n self.test_dict()\n self.test_vector_multiplication(mat)\n\n \n def bark(self):\n print(\"The dog barks: {}\".format(self.msg))\n x = examples.Detector('pi')\n x.value = 3.14\n print('{} is around {}'.format(x.name, x.value))\n return x\n","sub_path":"src/boostpython/scripts/interopLoad.py","file_name":"interopLoad.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"651102953","text":"import os\nimport inspect\nimport logging\nimport numpy as np\nimport progressbar\nimport time\nimport sys\nfrom threading import Timer\nfrom collections import namedtuple, Mapping, OrderedDict\n\nfrom pybar.analysis.analyze_raw_data import AnalyzeRawData\nfrom pybar.fei4.register_utils import invert_pixel_mask, make_box_pixel_mask_from_col_row\nfrom pybar.fei4_run_base import Fei4RunBase\nfrom pybar.run_manager import RunManager\n\nfrom basil.utils.BitLogic import BitLogic\n\nfrom basil.dut import Dut\n\nclass M26TelescopeScan(Fei4RunBase):\n '''External trigger scan with FE-I4 and up to 6 Mimosa26 telescope planes.\n\n For use with external scintillator (user RX0), TLU (use RJ45), FE-I4 HitOR (USBpix self-trigger).\n\n Note:\n Set up trigger in DUT configuration file (e.g. dut_configuration_mio.yaml).\n '''\n _default_run_conf = {\n \"trig_count\": 0, # FE-I4 trigger count, number of consecutive BCs, 0 means 16, from 0 to 15\n \"trigger_latency\": 232, # FE-I4 trigger latency, in BCs, external scintillator / TLU / HitOR: 232, USBpix self-trigger: 220\n \"trigger_delay\": 8, # trigger delay, in BCs\n \"trigger_rate_limit\": 0, # artificially limiting the trigger rate, in BCs (25ns)\n \"col_span\": [1, 79], # defining active column interval, 2-tuple, from 1 to 80\n \"row_span\": [1, 336], # defining active row interval, 2-tuple, from 1 to 336\n \"overwrite_enable_mask\": False, # if True, use col_span and row_span to define an active region regardless of the Enable pixel register. 
If False, use col_span and row_span to define active region by also taking Enable pixel register into account.\n        \"use_enable_mask_for_imon\": True,  # if True, apply inverted Enable pixel mask to Imon pixel mask\n        \"no_data_timeout\": 120,  # no data timeout after which the scan will be aborted, in seconds\n        \"scan_timeout\": 60,  # timeout for scan after which the scan will be stopped, in seconds\n        \"max_triggers\": False,  # maximum number of triggers after which the scan will be stopped\n        \"enable_tdc\": False,  # if True, enables TDC (use RX2)\n        \"reset_rx_on_error\": True,  # long scans have a high probability of ESD-related data transmission errors; recover and continue here\n        \"remote\": True  # if True, Powersupply remote is enabled\n    }\n\n    def init_dut(self): \n        \n        if self.remote:\n            dut = Dut('agilent_e3644a_pyserial.yaml')\n            dut.init()\n            status = dut['Powersupply'].get_enable()\n            time.sleep(0.15) \n            status = status.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n            status = int(status)  # convert string to int in order to compare values\n            if status == 1:\n                logging.info(\"Output of powersupply is ON, status: %s\" % status)\n            else:\n                logging.info(\"Output of powersupply is OFF, status: %s\" % status)  # TODO: STOP READOUT!!!\n                #abort(msg='Scan timeout was reached')\n                #stop_current_run(msg='OFF')\n            current = dut['Powersupply'].get_current()\n            current = current.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n            logging.info('Current: %s A', current)\n            current = float(current)  # convert string to float in order to compare values\n        else:\n            logging.info('No remote enabled')\n\n        map(lambda channel: channel.reset(), self.dut.get_modules('m26_rx'))\n        self.dut['jtag'].reset()\n\n        if 'force_config_mimosa26' in self._conf and not self._conf['force_config_mimosa26'] and self.remote and current >= 3.3:  # check if force_config is False\n            logging.info('Skipping m26 configuration, m26 is already configured')\n        else: \n            if 'm26_configuration' in self._conf and self._conf['m26_configuration']:\n                m26_config_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))), self._conf['m26_configuration'])\n\n                logging.info('Loading m26 configuration file %s', m26_config_file)\n                self.dut.set_configuration(m26_config_file)\n\n                IR={\"BSR_ALL\":'00101',\"DEV_ID_ALL\":'01110',\"BIAS_DAC_ALL\":'01111',\"LINEPAT0_REG_ALL\":'10000',\n                    \"DIS_DISCRI_ALL\":'10001',\"SEQUENCER_PIX_REG_ALL\":'10010',\"CONTROL_PIX_REG_ALL\":'10011',\n                    \"LINEPAT1_REG_ALL\":'10100',\"SEQUENCER_SUZE_REG_ALL\":'10101',\"HEADER_REG_ALL\":'10110',\n                    \"CONTROL_SUZE_REG_ALL\":'10111',\n                    \"CTRL_8b10b_REG0_ALL\":'11000',\"CTRL_8b10b_REG1_ALL\":'11001',\"RO_MODE1_ALL\":'11101',\n                    \"RO_MODE0_ALL\":'11110',\n                    \"BYPASS_ALL\":'11111'}\n                ## write JTAG\n                irs = [\"BIAS_DAC_ALL\",\"BYPASS_ALL\",\"BSR_ALL\",\"RO_MODE0_ALL\",\"RO_MODE1_ALL\",\n                       \"DIS_DISCRI_ALL\",\"LINEPAT0_REG_ALL\",\"LINEPAT1_REG_ALL\",\"CONTROL_PIX_REG_ALL\",\"SEQUENCER_PIX_REG_ALL\",\n                       \"HEADER_REG_ALL\",\"CONTROL_SUZE_REG_ALL\",\"SEQUENCER_SUZE_REG_ALL\",\"CTRL_8b10b_REG0_ALL\",\n                       \"CTRL_8b10b_REG1_ALL\"]\n                for i,ir in enumerate(irs):\n                    logging.info('Programming M26 JTAG configuration reg %s', ir)\n                    logging.debug(self.dut[ir][:])\n                    self.dut['jtag'].scan_ir([BitLogic(IR[ir])]*6)\n                    ret = self.dut['jtag'].scan_dr([self.dut[ir][:]])[0]\n                \n                if self.remote: \n                    current = dut['Powersupply'].get_current()\n                    current = current.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n                    logging.info('Current: %s A', current) \n                ## read JTAG and check\n                
irs=[\"DEV_ID_ALL\",\"BSR_ALL\",\"BIAS_DAC_ALL\",\"RO_MODE1_ALL\",\"RO_MODE0_ALL\",\n \"DIS_DISCRI_ALL\",\"LINEPAT0_REG_ALL\",\"LINEPAT1_REG_ALL\",\"CONTROL_PIX_REG_ALL\",\n \"SEQUENCER_PIX_REG_ALL\",\n \"HEADER_REG_ALL\",\"CONTROL_SUZE_REG_ALL\",\"SEQUENCER_SUZE_REG_ALL\",\"CTRL_8b10b_REG0_ALL\",\n \"CTRL_8b10b_REG1_ALL\",\"BYPASS_ALL\"]\n ret={}\n for i,ir in enumerate(irs):\n logging.info('Reading M26 JATG configuration reg %s', ir)\n self.dut['jtag'].scan_ir([BitLogic(IR[ir])]*6)\n ret[ir]= self.dut['jtag'].scan_dr([self.dut[ir][:]])[0]\n \n if self.remote:\n current = dut['Powersupply'].get_current()\n current = current.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n logging.info('Current: %s A', current) \n ## check\n for k,v in ret.iteritems():\n if k==\"CTRL_8b10b_REG1_ALL\":\n pass\n elif k==\"BSR_ALL\":\n pass #TODO mask clock bits and check others\n elif self.dut[k][:]!=v:\n logging.error(\"JTAG data does not match %s get=%s set=%s\"%(k,v,self.dut[k][:]))\n else:\n logging.info(\"Checking M26 JTAG %s ok\"%k)\n \n if self.remote: \n current = dut['Powersupply'].get_current()\n current = current.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n logging.info('Current: %s A', current) \n #START procedure\n logging.info('Starting M26')\n temp=self.dut['RO_MODE0_ALL'][:]\n #disable extstart\n for reg in self.dut[\"RO_MODE0_ALL\"][\"RO_MODE0\"]:\n reg['En_ExtStart']=0\n reg['JTAG_Start']=0\n self.dut['jtag'].scan_ir([BitLogic(IR['RO_MODE0_ALL'])]*6)\n self.dut['jtag'].scan_dr([self.dut['RO_MODE0_ALL'][:]])\n #JTAG start\n for reg in self.dut[\"RO_MODE0_ALL\"][\"RO_MODE0\"]:\n reg['JTAG_Start']=1\n self.dut['jtag'].scan_ir([BitLogic(IR['RO_MODE0_ALL'])]*6)\n self.dut['jtag'].scan_dr([self.dut['RO_MODE0_ALL'][:]])\n for reg in self.dut[\"RO_MODE0_ALL\"][\"RO_MODE0\"]:\n reg['JTAG_Start']=0\n self.dut['jtag'].scan_ir([BitLogic(IR['RO_MODE0_ALL'])]*6)\n self.dut['jtag'].scan_dr([self.dut['RO_MODE0_ALL'][:]])\n #write original configuration\n self.dut['RO_MODE0_ALL'][:]=temp\n self.dut['jtag'].scan_ir([BitLogic(IR['RO_MODE0_ALL'])]*6)\n self.dut['jtag'].scan_dr([self.dut['RO_MODE0_ALL'][:]])\n #readback?\n self.dut['jtag'].scan_ir([BitLogic(IR['RO_MODE0_ALL'])]*6)\n self.dut['jtag'].scan_dr([self.dut['RO_MODE0_ALL'][:]]*6)\n \n if self.remote:\n current = dut['Powersupply'].get_current()\n current = current.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n logging.info('Current: %s A', current)\n else:\n logging.info('Skipping m26 configuration')\n \n def configure(self):\n commands = []\n commands.extend(self.register.get_commands(\"ConfMode\"))\n # Enable\n enable_pixel_mask = make_box_pixel_mask_from_col_row(column=self.col_span, row=self.row_span)\n if not self.overwrite_enable_mask:\n enable_pixel_mask = np.logical_and(enable_pixel_mask, self.register.get_pixel_register_value('Enable'))\n self.register.set_pixel_register_value('Enable', enable_pixel_mask)\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=False, name='Enable'))\n # Imon\n if self.use_enable_mask_for_imon:\n imon_pixel_mask = invert_pixel_mask(enable_pixel_mask)\n else:\n imon_pixel_mask = make_box_pixel_mask_from_col_row(column=self.col_span, row=self.row_span, default=1, value=0) # 0 for selected columns, else 1\n imon_pixel_mask = np.logical_or(imon_pixel_mask, self.register.get_pixel_register_value('Imon'))\n self.register.set_pixel_register_value('Imon', imon_pixel_mask)\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=False, name='Imon'))\n # C_High\n 
self.register.set_pixel_register_value('C_High', 0)\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=True, name='C_High'))\n # C_Low\n self.register.set_pixel_register_value('C_Low', 0)\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=True, name='C_Low'))\n # Registers\n self.register.set_global_register_value(\"Trig_Lat\", self.trigger_latency) # set trigger latency\n self.register.set_global_register_value(\"Trig_Count\", self.trig_count) # set number of consecutive triggers\n commands.extend(self.register.get_commands(\"WrRegister\", name=[\"Trig_Lat\", \"Trig_Count\"]))\n commands.extend(self.register.get_commands(\"RunMode\"))\n self.register_utils.send_commands(commands)\n self.dut['TLU']['RESET']=1\n for plane in range(1,7):\n self.dut['M26_RX%d'%plane].reset()\n self.dut['M26_RX%d'%plane]['TIMESTAMP_HEADER']=1\n\n def scan(self):\n # preload command\n lvl1_command = self.register.get_commands(\"zeros\", length=self.trigger_delay)[0] + self.register.get_commands(\"LV1\")[0] + self.register.get_commands(\"zeros\", length=self.trigger_rate_limit)[0]\n self.register_utils.set_command(lvl1_command)\n\n with self.readout(**self.scan_parameters._asdict()):\n got_data = False\n while not self.stop_run.wait(1.0):\n if not got_data:\n if self.fifo_readout.data_words_per_second() > 0:\n got_data = True\n logging.info('Taking data...')\n self.progressbar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=self.max_triggers, poll=10, term_width=80).start()\n else:\n triggers = self.dut['TLU']['TRIGGER_COUNTER']\n try:\n self.progressbar.update(triggers)\n except ValueError:\n pass\n if self.max_triggers and triggers >= self.max_triggers:\n self.progressbar.finish()\n self.stop(msg='Trigger limit was reached: %i' % self.max_triggers)\n# print self.fifo_readout.data_words_per_second()\n# if (current_trigger_number % show_trigger_message_at < last_trigger_number % show_trigger_message_at):\n# logging.info('Collected triggers: %d', current_trigger_number)\n\n logging.info('Total amount of triggers collected: %d', self.dut['TLU']['TRIGGER_COUNTER'])\n\n def analyze(self):\n pass\n #with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:\n # analyze_raw_data.create_source_scan_hist = True\n # analyze_raw_data.create_cluster_size_hist = True\n # analyze_raw_data.create_cluster_tot_hist = True\n # analyze_raw_data.align_at_trigger = True\n # if self.enable_tdc:\n # analyze_raw_data.create_tdc_counter_hist = True # histogram all TDC words\n # analyze_raw_data.create_tdc_hist = True # histogram the hit TDC information\n # analyze_raw_data.align_at_tdc = False # align events at the TDC word\n # analyze_raw_data.interpreter.set_warning_output(False)\n # analyze_raw_data.interpret_word_table()\n # analyze_raw_data.interpreter.print_summary()\n # analyze_raw_data.plot_histograms()\n\n def start_readout(self, **kwargs):\n if kwargs:\n self.set_scan_parameters(**kwargs)\n self.fifo_readout.start(reset_sram_fifo=False, clear_buffer=True, callback=self.handle_data, errback=self.handle_err, no_data_timeout=self.no_data_timeout)\n #self.dut['TDC']['ENABLE'] = self.enable_tdc\n self.dut['TLU']['RESET']=1\n self.dut['TLU']['TRIGGER_MODE']=3\n self.dut['TLU']['TRIGGER_LOW_TIMEOUT']=200\n self.dut['TLU']['TRIGGER_HANDSHAKE_ACCEPT_WAIT_CYCLES']=20\n self.dut['TLU']['DATA_FORMAT']=2\n 
self.dut['TLU']['TRIGGER_DATA_DELAY']=8\n self.dut['TLU']['TRIGGER_COUNTER'] = 0\n self.dut['TLU']['TRIGGER_VETO_SELECT'] = 0\n self.dut['TLU']['EN_TLU_VETO'] = 0\n\n self.dut['M26_RX1'].set_en(True)\n self.dut['M26_RX2'].set_en(True)\n self.dut['M26_RX3'].set_en(True)\n self.dut['M26_RX4'].set_en(True)\n self.dut['M26_RX5'].set_en(True)\n self.dut['M26_RX6'].set_en(True)\n\n if self.max_triggers:\n self.dut['TLU']['MAX_TRIGGERS'] = self.max_triggers\n else:\n self.dut['TLU']['MAX_TRIGGERS'] = 0 # infinity triggers\n # use this with FE-I4 connected\n self.dut['CMD']['EN_EXT_TRIGGER'] = True\n # use this if no FE-I4 is connected\n# self.dut['TLU']['TRIGGER_ENABLE'] = True\n \n\n def timeout():\n try:\n self.progressbar.finish()\n except AttributeError:\n pass\n self.stop(msg='Scan timeout was reached')\n\n self.scan_timeout_timer = Timer(self.scan_timeout, timeout)\n if self.scan_timeout:\n self.scan_timeout_timer.start()\n\n def stop_readout(self, timeout=10.0):\n self.scan_timeout_timer.cancel()\n self.dut['TLU']['TRIGGER_ENABLE'] = False\n self.dut['CMD']['EN_EXT_TRIGGER'] = False\n self.dut['M26_RX1'].set_en(False)\n self.dut['M26_RX2'].set_en(False)\n self.dut['M26_RX3'].set_en(False)\n self.dut['M26_RX4'].set_en(False)\n self.dut['M26_RX5'].set_en(False)\n self.dut['M26_RX6'].set_en(False)\n self.fifo_readout.stop(timeout=timeout)\n\n\nif __name__ == \"__main__\":\n RunManager('../configuration.yaml').run_run(M26TelescopeScan)\n","sub_path":"pybar/scans/scan_m26_telescope.py","file_name":"scan_m26_telescope.py","file_ext":"py","file_size_in_byte":16187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"289354893","text":"import utime\r\nimport pyb\r\nimport uasyncio as asyncio\r\nfrom pin_cb import PinCall\r\nt=0 \r\nmax_latency=0\r\npinout=pyb.Pin(pyb.Pin.board.X1,pyb.Pin.OUT)\r\ndef toggle(_):\r\n global t\r\n pinout.value(not pinout.value())\r\n t=utime.ticks_us()\r\ndef cb(pin,ud):\r\n print('Callback',pin.value(),ud)\r\ndef cbl(pinin):\r\n global max_latency\r\n dt=utime.ticks_diff(utime.ticks_us(),t)\r\n max_latency=max(max_latency,dt)\r\n print('Latency {:6d}μs {:6d}μs max'.format(dt,max_latency))\r\nasync def dummy():\r\n while True:\r\n await asyncio.sleep(0)\r\n utime.sleep_ms(5) \r\nasync def killer():\r\n await asyncio.sleep(20)\r\ndef test(fast_io=True,latency=False):\r\n loop=asyncio.get_event_loop(ioq_len=6 if fast_io else 0)\r\n pinin=pyb.Pin(pyb.Pin.board.X2,pyb.Pin.IN)\r\n pyb.Timer(4,freq=2.1,callback=toggle)\r\n for _ in range(5):\r\n loop.create_task(dummy())\r\n if latency:\r\n pin_cb=PinCall(pinin,cb_rise=cbl,cbr_args=(pinin,))\r\n else:\r\n pincall=PinCall(pinin,cb_rise=cb,cbr_args=(pinin,'rise'),cb_fall=cb,cbf_args=(pinin,'fall'))\r\n loop.run_until_complete(killer())\r\nprint('''Link Pyboard pins X1 and X2.\r\nThis test uses a timer to toggle pin X1, recording the time of each state change.\r\nThe basic test with latency False just demonstrates the callbacks.\r\nThe latency test measures the time between the leading edge of X1 output and the\r\ndriver detecting the state change. This is in the presence of five competing coros\r\neach of which blocks for 5ms. 
Latency is on the order of 5ms max under fast_io,\r\n50ms max under official V2.0.\r\nIssue ctrl-D between runs.\r\ntest(fast_io=True, latency=False)\r\nargs:\r\nfast_io test fast I/O mechanism.\r\nlatency test latency (delay between X1 and X2 leading edge).\r\nTests run for 20s.''')\r\n# Created by pyminifier (https://github.com/liftoff/pyminifier)\r\n","sub_path":"_lib/uasyncio/pin_cb_test.py","file_name":"pin_cb_test.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"165436691","text":"import django_filters\nfrom .models import Events\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit,Field\nfrom bootstrap_datepicker_plus import DatePickerInput\n\nclass EventsFilter(django_filters.FilterSet):\n #name = django_filters.CharFilter(lookup_expr='iexact')\n\n class Meta:\n model = Events\n # fields = '__all__'\n fields = ['start_date','event_type','event_doctor','end_date','branch_event','event_service','event_clinic']\n\n labels = {\n 'event_doctor': (''),\n 'event_type': (''),\n 'end_date': (''),\n 'event_type': (''),\n 'event_note': (''),\n \n \n \n } \n \n widgets = {\n 'start_date': DatePickerInput(),\n }\n \n \n\n # def __init__(self, *args, **kwargs):\n # super(Events, self).__init__(*args, **kwargs)\n # # at sturtup user doen't push Submit button, and QueryDict (in data) is empty\n # if self.data == {}:\n # self.queryset = self.queryset.none()\n # self.filters['Sireal'].label=\"برجاء ادخال السريال المدون بكارت الضمان\"\n # self.helper = FormHelper()\n # self.helper.layout = Layout(\n # Field('Sireal', placeholder='Search ...'),\n # )","sub_path":"event_manage/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"511824332","text":"class AppUtils:\r\n\r\n @staticmethod\r\n def make_search_query(email, name):\r\n if name:\r\n name_query = {'$regex': f'.*{name}.*'}\r\n else:\r\n name_query = {'$regex': f'.*.*'}\r\n if email:\r\n email_query = {'$regex': f'.*{email}.*'}\r\n else:\r\n email_query = {'$regex': f'.*.*'}\r\n query = {'name': name_query, 'email': email_query}\r\n return query\r\n\r\n @staticmethod\r\n def object_id_to_string(id_dict):\r\n id_dict['_id'] = str(id_dict['_id'])\r\n return id_dict\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75836207","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('Score2.png')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nedges = cv2.Canny(gray, 50, 150, apertureSize = 3)\nminLineLength = 50\nmaxLineGap = 10\n\nlines = cv2.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength, maxLineGap)\n\nif lines is not None:\n for line in lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)\n\ncv2.imshow('SCORE_EDGE', edges)\ncv2.imshow('RESULT', img)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"07_TEST/OPENCV/test/test07.py","file_name":"test07.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"470332464","text":"# update linux-stable-zfs-bin when bumping\npkgname = \"linux-stable\"\npkgver = \"6.4.11\"\npkgrel = 0\narchs = [\"aarch64\", \"ppc64le\", \"ppc64\", \"riscv64\", 
\"x86_64\"]\nmake_dir = \"build\"\nhostmakedepends = [\"base-kernel-devel\"]\ndepends = [\"base-kernel\"]\nprovides = [\"linux\"]\npkgdesc = f\"Linux kernel {pkgver[0:pkgver.rfind('.')]}.x\"\nmaintainer = \"q66 \"\nlicense = \"GPL-2.0-only\"\nurl = \"https://kernel.org\"\nsource = f\"https://cdn.kernel.org/pub/linux/kernel/v{pkgver[0]}.x/linux-{pkgver}.tar.xz\"\nsha256 = \"546b68b5097d3c0d74722de62aae217729d98e45fbb6bd458b490ac21ea40918\"\n# no meaningful checking to be done\noptions = [\n \"!check\",\n \"!debug\",\n \"!strip\",\n \"!scanrundeps\",\n \"!scanshlibs\",\n \"!linkparallel\",\n \"!lto\",\n \"textrels\",\n \"execstack\",\n \"foreignelf\", # vdso32\n]\n\n_flavor = \"generic\"\n# set to True to refresh kernel configs\n_conf = False\n\nif _conf:\n hostmakedepends += [\"base-cross\", \"ncurses-devel\"]\n\nif self.profile().cross:\n broken = \"linux-devel does not come out right\"\n\n\ndef init_configure(self):\n # generate scriptlets for packaging, just hooking to base-kernel helpers\n from cbuild.util import linux\n\n if not _conf:\n linux.generate_scriptlets(self, _flavor)\n\n\ndef do_configure(self):\n from cbuild.util import linux\n\n if _conf:\n linux.update_configs(self, archs, _flavor)\n else:\n linux.configure(self, _flavor)\n\n\ndef do_build(self):\n from cbuild.util import linux\n\n linux.build(self, _flavor)\n\n\ndef do_install(self):\n from cbuild.util import linux\n\n linux.install(self, _flavor)\n\n\n@subpackage(\"linux-stable-devel\")\ndef _devel(self):\n self.depends += [\"clang\"]\n self.options = [\"foreignelf\", \"execstack\", \"!scanshlibs\"]\n return [\"usr/src\", \"usr/lib/modules/*/build\"]\n\n\n@subpackage(\"linux-stable-dbg\")\ndef _dbg(self):\n self.pkgdesc += \" (debug files)\"\n self.options = [\n \"!scanrundeps\",\n \"!strip\",\n \"!scanshlibs\",\n \"foreignelf\",\n \"execstack\",\n \"textrels\",\n ]\n return [\"usr/lib/debug\", \"boot/System.map-*\"]\n","sub_path":"main/linux-stable/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"287785183","text":"\"\"\"Base socket logic and message parsing.\"\"\"\n\nimport re\nfrom .cluster_pb2 import Wrapper\nfrom .constants import APP, VERSION\n\n\nclass ClusterSocket:\n \"\"\"Base socket logic and message parsing.\"\"\"\n\n def __init__(self):\n self.message_type_pattern = re.compile(r'(? 
None:\n \"\"\"Starts the socket.\"\"\"\n await self.init()\n\n def stop(self) -> None:\n \"\"\"Stops the socket.\"\"\"\n self.running = False\n\n async def init(self) -> None:\n \"\"\"This method should be overwritten by the children class.\"\"\"\n\n def build_message(self): # pylint: disable=no-self-use\n \"\"\"Builds a new message object.\n\n :returns: Message wrapper\n :rtype: protocol.cluster_pb2.Wrapper\n \"\"\"\n wrapper = Wrapper()\n wrapper.app = APP\n wrapper.version = VERSION\n\n return wrapper\n\n async def receive_message(self, data: bytes, call_events: bool = True, address: str = '') -> \\\n (Wrapper, str):\n \"\"\"Waits until the next message and parses it.\n\n :param bytes data: Received bytes\n :param bool call_events: If true, the `on_` events will be called on the class.\n :param str address: Address of the sender if already known\n :returns: Message and the sending IP\n :rtype: (protocol.cluster_pb2.Wrapper, str)\n \"\"\"\n try:\n message = Wrapper()\n message.ParseFromString(data)\n\n # ignore message if it is not from real stereo\n if message.app != APP:\n return None, address\n\n if call_events:\n await self.call_events(message, address)\n\n return message, address\n except RuntimeError as error:\n print(error)\n return None, None\n\n async def call_events(self, message: Wrapper, address: str) -> None:\n \"\"\"Calls the `on_` events on the class for the given message.\n For example, when a ServiceAnnouncement has been received, the `on_service_announcement`\n method will be called with the two parameters `message: Wrapper` and `address: str`.\n\n :param protocol.cluster_pb2.Wrapper message: Received message\n :param str address: IP address of sender\n \"\"\"\n message_type = message.WhichOneof('message')\n\n # convert camelCase to snake_case\n message_type = self.message_type_pattern.sub('_', message_type).lower()\n\n # check if the event has been implemented\n event = 'on_' + message_type\n event_method = getattr(self, event, None)\n\n if event_method is not None:\n await event_method(message, address)\n","sub_path":"backend/src/protocol/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"542755196","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : wwong3\nDate : 2019-04-14\nPurpose: Rock the Casbah\n\"\"\"\n\nimport argparse\nimport sys\nimport os\nimport logging\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Hamm Distance Script',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'file', metavar='FILE',type=str, help='File inputs', nargs=2)\n\n parser.add_argument(\n '-d', '--debug', help='Debug', action='store_true', default=False)\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef warn(msg):\n \"\"\"Print a message to STDERR\"\"\"\n print(msg, file=sys.stderr)\n\n\n# --------------------------------------------------\ndef die(msg='Something bad happened'):\n \"\"\"warn() and exit with error\"\"\"\n warn(msg)\n sys.exit(1)\n\n# --------------------------------------------------\ndef dist(s1, s2):\t\n\tdiffs=0\n\t# need this is one word is longer than the other\n\tlen_diff=abs(len(s1)-len(s2))\n\tfor chr1, chr2 in zip(s1, s2):\n\t\tif chr1 != chr2:\n\t\t\tdiffs+=1\n\treturn diffs+len_diff\n\t\n\n# 
--------------------------------------------------\ndef main():\n\t\"\"\"Make a jazz noise here\"\"\"\n\targs = get_args()\n\tfiles=args.file\n\n\tlogging.basicConfig(\n\t\tfilename='.log',\n\t\tfilemode='w',\n\t\tlevel=logging.DEBUG if args.debug else logging.CRITICAL)\n\t\n\tfor txt in files:\n\t\tif not os.path.isfile(txt):\n\t\t\tdie(msg='\"{}\" is not a file'.format(txt))\n\n\t\n\tlogging.debug('file1={}, file2={}'.format(files[0], files[1]))\n\n# Can't use below because spaces screw up hamming distance, wonder if you could do it to skip space\n#\tstr1=''\n#\tstr2=''\n#\twith open(files[0]) as fh1:\n#\t\tstr1=fh1.read().replace('\\n', '')\n#\n#\twith open(files[1]) as fh2:\n#\t\tstr2=fh2.read().replace('\\n', '')\n\n#\tprint(str1)\n#\tprint(str2)\n\n\tword1=[]\n\tword2=[]\n\twith open(files[0]) as fh1:\n\t\tword1=[word for line in fh1 for word in line.split()]\n\n\twith open(files[1]) as fh2:\n\t\tword2=[word for line in fh2 for word in line.split()]\n\t\n\tcombo=list(zip(word1, word2))\n\thamm=0\n\tfor word1, word2 in combo:\n\t\td=dist(word1, word2)\n\t\thamm+=d\n\t\tlogging.debug(msg='s1= {}, s2= {}, d= {}'.format(word1, word2, d))\n\tprint(hamm)\n\n# --------------------------------------------------\n\t\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/13-hamm/hamm.py","file_name":"hamm.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315305580","text":"from PIL import Image\n\nclass GIFplayer():\n def __init__(self, filename):\n self.filename = filename\n self.image = Image.open(filename)\n self.frames = []\n\n def extractFrames(self):\n image = self.image\n pal = image.getpalette()\n base_palette = []\n for i in range(0, len(pal), 3):\n rgb = pal[i:i+3]\n base_palette.append(rgb)\n\n print(base_palette)\ntest = GIFplayer(\".\\\\opening.gif\")\ntest.extractFrames()","sub_path":"addons/GIFplayer.py","file_name":"GIFplayer.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396267965","text":"# Programming Exercise 2-4\r\n#\r\n# Program to compute a final price for five items with tax.\r\n# This program will prompt a user for a set of five prices,\r\n# sum them to a subtotal and calculate sales tax with tax rate stored in a constant,\r\n# then display the results on the screen.\r\n\r\n# Variables to hold the prices of five items, the subtotal, and the total.\r\n# All should be initialized as floats.\r\nitem1 = 0.0\r\nitem2 = 0.0\r\nitem3 = 0.0\r\nitem4 = 0.0\r\nitem5 = 0.0\r\n\r\nsubtotal = 0.0\r\nsales_tax = 0.0\r\ntotal = 0.0\r\n\r\n# Constant for the sales tax rate.\r\nTAX_RATE = 0.06\r\n\r\n# Get the price of each item by prompting the user.\r\n# You will need to convert each input to a float.\r\ndef get_price(for_item):\r\n val = input(\"Please enter the price for the \" + for_item + \" item: \")\r\n return float(val)\r\n\r\nitem1 = get_price(\"first\")\r\nitem2 = get_price(\"second\")\r\nitem3 = get_price(\"third\")\r\nitem4 = get_price(\"fourth\")\r\nitem5 = get_price(\"fifth\")\r\n\r\n# Calculate the subtotal by adding up the item prices.\r\nsubtotal = item1 + item2 + item3 + item4 + item5\r\n\r\n# Calculate the sales tax by multiplying the subtotal by the tax rate.\r\nsales_tax = subtotal * TAX_RATE\r\n\r\n# Calculate the total by adding the subtotal and tax.\r\ntotal = subtotal + sales_tax\r\n\r\n# Print 
the values for the subtotal, tax and total.\r\n# Label each value, and format them to display with two decimal places. \r\nprint(\"Subotal:\", format(subtotal, \">10.2f\"))\r\nprint(\"Tax: \", format(sales_tax, \">10.2f\"))\r\nprint(\"Total: \", format(total, \">10.2f\"))\r\n\r\n\r\n\r\n","sub_path":"chapter-2/ex-2-4.py","file_name":"ex-2-4.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588858159","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 29 10:21:43 2016\n\n@author: vleang\n\"\"\"\n\nimport os\nos.chdir('F:\\Projects\\Path dependent vol hedge backtest')\n\nimport Derivatives as deriv\nimport Models as mod\nimport Simulator as sim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time \nfrom scipy.integrate import simps \n\n# Test : PDV Market of calls\npast = np.zeros(301)\nsigma = 0.2\ndt = 1.0 / (365.*10)\npast[0] = 100\ndZ = np.sqrt(dt) * np.random.randn(300)\nfor i in range(300):\n past[i+1] = past[i] * np.exp(-0.5 * sigma * sigma * dt + sigma * dZ[i])\n\npast[::-1]\n\nPDV2 = mod.PDV(init=past[-1],\n past=past[:-1],\n vol_up=0.32,\n vol_down=0.08,\n style=21,\n delta=300)\n \nboson = sim.particle(length=30, n_step_day=10, div=0.1)\ncall = deriv.europ_call(K=past[-1], T=30, particle=boson)\nstart = time.time()\nmarche = sim.market(particle=boson, n_simu=200, model=PDV2, derivative=call, n_mc=10000, init_only=True)\nend = time.time()\n\nsimulation = sim.simulator(marche,'Market Delta', sim.time_based(1))\nsimulation2 = sim.simulator(marche,'Sticky strike', sim.time_based(1))\n\ndata = marche.data_ts\ncall = marche.derivative_ts\ndelta = marche.int_delta_ts\nimplied = marche.IV_ts\ndelta_BS = simulation2.delta_ts\nmoney = simulation.money_ts\nportfolio = simulation.portfolio_ts\nportfolio2 = simulation2.portfolio_ts\n\nplt.subplots(figsize=(12,7))\n\nplt.title('Hedging error distribution: Call strike 100 maturity 30 days model 2 smooth BS-delta')\nplt.hist((portfolio2[-1,:]), label='delta = 30 days')\nplt.legend()\n\nplt.subplots(figsize=(12,7))\nplt.title('Hedging error distribution: Call strike 100 maturity 30 days model 2 smooth PDV-delta')\nplt.hist((portfolio[-1,:]), label='delta = 30 days')\nplt.legend()\n \nplt.title('Hedging error distribution: Call strike 100 maturity 30 days model 2 smooth BS-delta')\nplt.hist((portfolio2[-1,:]), label='delta = 30 days')\nplt.legend()\n\n\nplt.subplots(figsize=(12,6))\nplt.title('Absolute Average Hedging Error in function of holding period')\nplt.plot(np.mean(abs(portfolio),1),label='BS delta')\nplt.plot(np.mean(abs(portfolio2),1), label='BS delta')\nplt.legend(loc=2)\n\nplt.subplots(figsize=(12,6))\nplt.title('Absolute Hedging Error Standard Deviation in function of holding period')\nplt.plot(np.std(abs(portfolio),1), label='BS delta')\nplt.plot(np.std(abs(portfolio2),1), label='BS delta')\nplt.legend(loc=2)\n\n#==============================================================================\nfinal_payoff = data[-1,:] - 100\nfinal_payoff[final_payoff<0] = 0\n\ninit_price = call_PRICE[0,0]\n\n\nd_delta = np.diff(delta[:-1,:], axis=0)\nstrat = np.zeros((len(data), len(data[0])))\nstrat[0,:] = delta[0,:] * data[len(past) - 1,:]\nfor p in range(len(delta[0])):\n for i in range(1, len(d_delta)):\n strat[i,p] = strat[i-1,p] + d_delta[i,p]*data[len(past) + i, p]\n \n\nerror = final_payoff - init_price - 
PNL_strat\n","sub_path":"backtestcall.py","file_name":"backtestcall.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227895835","text":"import attr\nimport time\nimport pathlib\nimport re\n\nfrom Repositories.Repository import Repository, RepositoryTypes\nfrom .Repository import RegisteredRepository\n\n@RegisteredRepository\n@attr.s\nclass FileSystem(Repository):\n\n # region Initialized attributes\n\n path : str = attr.ib(default=None)\n\n # endregion\n\n\n # region Attributes\n\n source : str = 'os'\n type = RepositoryTypes.FILE\n\n # endregion\n\n\n\n # region Constructors\n\n def __attrs_post_init__(self):\n self.name : str = None\n self.ext : str = None\n self.directory : str = None\n self.modified : str = None\n self.size : int = None\n\n self.get_path_info()\n\n # endregion\n\n\n\n # region Functions\n\n def get_path_info(self):\n if self.path is not None and len(str(self.path)) > 0:\n path_obj = pathlib.Path(self.path)\n\n self.name = path_obj.stem\n self.ext = path_obj.suffix[1:]\n self.all_ext = re.match(r'^.+?\\.(.+)$', path_obj.name).group(1) if path_obj.is_file() else ''\n self.directory = str(path_obj.parent)\n self.modified = time.ctime(path_obj.stat().st_mtime)\n self.size = path_obj.stat().st_size\n \n # endregion","sub_path":"src/Repositories/FileSystem.py","file_name":"FileSystem.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311180384","text":"import numpy as np \nimport random as rd \nimport math\nimport EKNN\nfrom DATAPOINT import data_point\nfrom POINTMAP import point_map\n\nclass c_means(EKNN.edited_knn):\n \n def __init__(self,in_k,data_set,alg):\n EKNN.edited_knn.__init__(self,in_k,data_set,alg)\n self.d_set = data_set\n self.c_clusters = ''\n self.k = in_k\n if alg == 1: #indicates wether or not to use eknn for the number of centroids\n self.numC = int(round(len(data_set)/4))\n elif alg == 0:\n self.numC = len(self.d_map.points)\n self.c_clusters = self.mini_gen(rd.sample(data_set,self.numC))\n self.data_points = self.mini_gen(data_set)\n for x in range(0,10):\n self.calculate_centroids()\n self.d_map = point_map(self.c_clusters)\n\n\n def mini_gen(self, data_in): # Makes a new list of points\n point_list = []\n for line in self.d_set:\n point_list.append(data_point(line[:-1], line[-1]))\n return point_list\n\n\n def calculate_centroids(self): #Calculates the centroids\n children = []\n for point in self.data_points: #Decides which centoid each data point belongs to\n point_belongs_to = ''\n shortest_dist = np.inf\n for i in self.c_clusters:\n temp_dist = self.euclidian(point.data,i.data)\n if (temp_dist < shortest_dist):\n shortest_dist = temp_dist\n point_belongs_to = i\n children.append([point,point_belongs_to])\n\n for cluster in self.c_clusters: #Decides which class each centoid belongs to \n points = []\n for point in children:\n if point[1] == cluster:\n points.append(point[0])\n ind = self.c_clusters.index(cluster)\n if len(points) > 0:\n self.c_clusters[ind] = self.find_average(points)\n \n def find_average(self,points): #finds the average position of data points that belong to a cluster and assigns the new position to the centoid\n new_data = np.full(len(points[0].data),0)\n new_class = 0\n for i in range(len(points[0].data)): #average the position\n for row in range(len(points)):\n new_data[i] += points[row].data[i]\n new_data[i] = 
int(round(new_data[i]/len(points)))\n for i in range(len(points)): #average the class\n new_class += points[i].class_type\n new_class = int(round(new_class/len(points)))\n return data_point(new_data,new_class) #return the new centroid","sub_path":"project_2/python/actual code/CMEANS.py","file_name":"CMEANS.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"190169632","text":"from django.http.response import HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom .forms import Mentee_requestForm, ResponseForm ,PointForm\nfrom .models import Mentee_request, Mname, Response #평점에 조건 username을 얻기위해 만든 모델\nfrom accounts.models import CustomUser\nfrom main.models import Mentor #멘토 선택에서 선택한 정보를 가저오기 위한 모델\nfrom django.core.mail import EmailMessage\nfrom django.conf import settings\n\n\n\ndef request_view(request):\n # for e in request.__dict__:\n # print(e)\n if request.method == 'GET':\n form = Mentee_requestForm()\n \n\n elif request.method == 'POST':\n form = Mentee_requestForm(request.POST)\n if form.is_valid():\n post = form.save(commit = False) \n post.mentor = Mentor.username #폼에 추가적으로 저장할 멘토의 아이디와 이메일 이부분은 metor모델에 저장된 값을 가져와서 이 폼에 저장한다\n post.mentor_email = Mentor.email\n post.university = Mentor.university\n post.mentee = request.user\n post.save()\n return redirect ('requestsuccess')\n\n return render (request, 'menteerequest/requestform.html',{'form':form})\n\n\ndef requests_list(request):\n #멘티에게 보여지는 리스트\n if request.user.studenttype == False:\n request_list = Mentee_request.objects.filter( mentee = request.user) \n return render (request, 'menteerequest/mentee_request_list.html',{'request_list':request_list})\n\n else:#멘토에게 들어온 요청을 보여줌\n request_list = Mentee_request.objects.filter( mentor = request.user) \n return render (request, 'menteerequest/mentor_request_list.html',{'request_list':request_list})\n\n\ndef success_request_view(request): # 요청을 성공적으로 작성하면 멘토의 유저정보에 있는 이메일에 메일이 보내진다\n email = EmailMessage(\n 'Menting의 요청이 들어왔습니다', # 제목\n 'Menting에서 당신에게 필요한 요청이 들어왔습니다 홈페이지에 접속해서 확인헤 주세요. 
http://127.0.0.1:8000', # 내용\n to=[Mentor.email], # 멘토 모델 사용받는 이메일 리스트\n )\n email.send()\n return redirect ('requestslist')\n\n# 기서 Mentor의 값에는 (1) (2) ....반복 할 수록 많이 들어가게 되는데 어떻게 마지막으로 저장된 부분을 가져오는 걸까 다른 부분에 사용하면 여러개의 값을 가지고 있어 특정해 줘야한다 아니면 오류가 나옴 \n\n\n\ndef request_detail(request,post_id): # 디테일 페이지\n \n post = Mentee_request.objects.get(id = post_id) #선택한 post를 가져온다\n Mname.username = post.mentor # 그 포스트에 저장된 mentor를 새로운 모델에 저장\n Mname.post_id = post.id \n return render (request, 'menteerequest/request_detail.html',{'post':post})\n\n\ndef request_response(request, post_id):\n post = get_object_or_404(Mentee_request, id=post_id) \n if request.method == \"POST\":\n form = ResponseForm(request.POST)\n if form.is_valid():\n response = form.save(commit=False)\n response.author = request.user #응답에 작성자 멘토아이디\n response.post = post\n response.save()\n return redirect('requestdetail', post_id=post.id)\n else:\n form = ResponseForm()\n return render(request, 'menteerequest/request_response.html', {'form': form})\n\n\ndef request_response_reject(request, post_id):\n post = get_object_or_404(Mentee_request, id=post_id) \n \n if request.method == \"POST\":\n form = ResponseForm(request.POST)\n if form.is_valid():\n post.finish_check = 1\n post.save()\n response = form.save(commit=False)\n response.author = request.user\n response.text = '거절 되었습니다 ㅠ'\n response.post = post\n response.save()\n return redirect('requestdetail', post_id=post.id)\n else:\n form = ResponseForm()\n return render(request, 'menteerequest/request_response_reject.html', {'form': form})\n\n\n\ndef grade_point(request): #평점 계산\n userr = CustomUser.objects.get(username = Mname.username) #커스텀 우유져 폼에 평점을 넣기 위해 가져옴\n k = get_object_or_404(Mentee_request, id=Mname.post_id) #멘토리스트를 가져와 완료를 체크 하기 위함\n\n if request.method == 'POST':\n form = PointForm(request.POST)\n if form.is_valid():\n k.finish_check = 1 #멘팅 완료 체크\n k.save()\n post = form.save(commit = False)\n userr.grade = int(post.grade) + userr.grade\n userr.count = userr.count + 1\n userr.avg = userr.grade / userr.count\n userr.save()\n return render (request, 'menteerequest/grade_success.html')\n \n return HttpResponse('fail')\n\n else:\n form = PointForm()\n\n return render (request, 'menteerequest/grade.html',{'form':form})\n\n","sub_path":"menteerequest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468228026","text":"from collections import Counter\nfrom gensim.models import KeyedVectors\nimport numpy as np\n\n\nclass Vocab(object):\n PAD, UNK = 0, 1\n\n def __init__(self, word_counter, label, min_occur_count=0):\n # self._id2word = ['', '']\n # self._wordid2freq = [10000, 10000]\n self._id2label = [k for k, v in label.most_common()]\n # for word, count in word_counter.most_common():\n # if count > min_occur_count:\n # self._id2word.append(word)\n # self._wordid2freq.append(count)\n\n reverse = lambda x: dict(zip(x, range(len(x))))\n # self._word2id = reverse(self._id2word)\n self._label2id = reverse(self._id2label)\n # if len(self._word2id) != len(self._id2word):\n # print(\"serious bug: words dumplicated, please check!\")\n #\n # print(\"Vocab info: #words {0}\".format(self.vocab_size))\n\n def create_pretrained_embs(self, config):\n word_vectors = KeyedVectors.load_word2vec_format(config.pretrained_embeddings_file, binary=False)\n wv_matrix = list()\n\n # one for UNK and one for zero padding\n 
wv_matrix.append(np.zeros(config.word_dims).astype(\"float32\")) # zero padding\n wv_matrix.append(np.random.uniform(-0.01, 0.01, config.word_dims).astype(\"float32\")) # UNK\n\n for word in self._id2word[2:]:\n if word in word_vectors.vocab:\n wv_matrix.append(word_vectors.word_vec(word))\n else:\n wv_matrix.append(wv_matrix[1])\n wv_matrix = np.array(wv_matrix)\n\n assert len(wv_matrix) == len(self._id2word)\n print('embedding size', wv_matrix.shape)\n return wv_matrix\n\n def word2id(self, xs):\n if isinstance(xs, list):\n return [self._word2id.get(x, self.UNK) for x in xs]\n return self._word2id.get(xs, self.UNK)\n\n def id2word(self, xs):\n if isinstance(xs, list):\n return [self._id2word[x] for x in xs]\n return self._id2word[xs]\n\n def id2label(self, xs):\n if isinstance(xs, list):\n return [self._id2label[x] for x in xs]\n return self._id2label[xs]\n\n def label2id(self, xs):\n if isinstance(xs, list):\n return [self._label2id.get(x, 0) for x in xs]\n return self._label2id.get(xs, 0)\n\n @property\n def vocab_size(self):\n return len(self._id2word)\n\n\ndef creatVocab(train_data, min_occur_count):\n word_counter = Counter()\n label = Counter()\n for instance in train_data:\n # for word in instance.seg_list:\n # word_counter[word] += 1\n label[instance.label] += 1\n\n return Vocab(word_counter, label, min_occur_count)\n","sub_path":"data/Vocab.py","file_name":"Vocab.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496772619","text":"#!/usr/bin/env python\n# -- encoding: utf-8 --\n#\n# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with opensource@tid.es\n#\nfrom unittest import TestCase\nfrom tests.unit.test_getnid import get_path\nimport os\n\n\nclass TestGlanceSyncNIDOperations(TestCase):\n relativepath = 'tests/unit/resources/nid'\n glancesynchome = '/Users/foo/Documents/workspace/python/fiware-glancesync'\n validresult = os.path.join(glancesynchome, relativepath)\n\n def test_simple(self):\n # Given\n path = self.glancesynchome\n\n # When\n result = get_path(path, self.relativepath)\n\n # Then\n self.assertEqual(self.validresult, result)\n\n def test_path_with_tests_unit(self):\n # Given\n path = os.path.join(self.glancesynchome, 'tests/unit')\n\n # When\n result = get_path(path, self.relativepath)\n\n # Then\n self.assertEqual(self.validresult, result)\n\n def test_path_with_tests(self):\n # Given\n path = os.path.join(self.glancesynchome, 'tests')\n\n # When\n result = get_path(path, self.relativepath)\n\n # Then\n self.assertEqual(self.validresult, result)\n\n def test_path_with_tests_unit_resources(self):\n # Given\n path = os.path.join(self.glancesynchome, 'tests/unit/resources')\n\n # When\n result = get_path(path, self.relativepath)\n\n # Then\n 
self.assertEqual(self.validresult, result)\n\n def test_path_with_some_mistake_folder1(self):\n # Given\n path = os.path.join(self.glancesynchome, 'tests/fake/resources')\n\n # When\n try:\n get_path(path, self.relativepath)\n # Then\n except ValueError as ex:\n self.assertEqual(ex.message, 'Error, the paths are not equivalent')\n\n def test_path_with_some_mistake_folder2(self):\n # Given\n path = os.path.join(self.glancesynchome, 'tests/unit/fake')\n\n # When\n try:\n get_path(path, self.relativepath)\n # Then\n except ValueError as ex:\n self.assertEqual(ex.message, 'Error, the paths are not equivalent')\n\n def test_path_with_some_mistake_folder3(self):\n # Given\n path = os.path.join(self.glancesynchome, 'fake/uni/resources')\n\n # When\n try:\n get_path(path, self.relativepath)\n # Then\n except ValueError as ex:\n self.assertEqual(ex.message, 'Error, the paths are not equivalent')\n","sub_path":"tests/unit/test_get_path.py","file_name":"test_get_path.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59594222","text":"import keras\nimport pyopenpose as op\nimport numpy as np\nimport cv2\nimport time\nimport asyncio\nfrom log import log\nfrom keras.models import load_model\n\nclass GestureRec(object):\n\n\tdef __init__(self):\n\t\tself.model = load_model('/home/cornell.edu/nst45/hanabi_project/Hanabi/vision/gesture/gesture_data/pointing.h5')\n\n\t\tself.params = dict()\n\t\tself.params[\"model_folder\"] = \"/home/cornell.edu/nst45/hanabi_project/openpose/models/\"\n\t\tself.params[\"hand\"] = True\n\n\t\t# Starting OpenPose\n\t\tself.opWrapper = op.WrapperPython()\n\t\tself.opWrapper.configure(self.params)\n\t\tself.opWrapper.start()\n\t\tself.datum = op.Datum()\n\n\t\tself.cap = cv2.VideoCapture(0)\n\n\t\tself.timing = False\n\t\tself.noposeTiming = False\n\t\tself.nopose = False\n\t\tself.noposeStart = time.time()\n\t\tself.curpose = -1\n\t\tself.start_time = time.time()\n\n\n\n\t\tself.indices = []\n\n\tdef indexFromKeypoint(self, x, y):\n\t\tif (y<250 or y>450):\n\t\t\treturn -1\n\t\telif x >= 190 and x <= 235:\n\t\t\treturn 0\n\t\telif x >= 241 and x <= 283:\n\t\t\treturn 1\n\t\telif x >= 288 and x <= 326:\n\t\t\treturn 2\n\t\telif x >= 328 and x <= 369:\n\t\t\treturn 3\n\t\telif x >= 378 and x <= 430:\n\t\t\treturn 4\n\t\telse: return -1\n\n\t# def getPointingIndices():\n\t# \ttemp = indices\n\t# \tindices = []\n\t# \ttiming = False\n\t# \tcurpose = -1\n\t# \treturn temp\n\n\tasync def main(self, buffer):\n\t\twhile True:\n\t\t\t#asyncio.sleep(0.05)\n\t\t\tret, frame = self.cap.read()\n\n\n\t\t\tself.datum.cvInputData = frame\n\t\t\tself.opWrapper.emplaceAndPop([self.datum])\n\n\t\t\t#image = datum.cvOutputData\n\t\t\t#cv2.imshow(\"OpenPose\", image)\n\n\t\t\tif self.datum.poseKeypoints.any():\n\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tp = np.append(self.datum.poseKeypoints, self.datum.handKeypoints[0])\n\t\t\t\t\tp = np.append(p, self.datum.handKeypoints[1])\n\t\t\t\t\tp = p.reshape(1, 67, 3)\n\t\t\t\t\tp = keras.utils.normalize(p, axis = 1)\n\t\t\t\t\topt = self.model.predict_classes(p, batch_size = 67)\n\n\t\t\t\t\tfor j in opt: \n\t\t\t\t\t\tif j == 0:\n\t\t\t\t\t\t\t# print(\"NO POSE\")\n\t\t\t\t\t\t\tself.curpose = -1\n\t\t\t\t\t\t\tself.timing = False\n\n\t\t\t\t\t\t\tif( not self.noposeTiming):\n\t\t\t\t\t\t\t\tself.noposeTiming = True\n\t\t\t\t\t\t\t\tself.noposeStart = time.time()\n\t\t\t\t\t\t\telif time.time() - self.noposeStart > 5:\n\t\t\t\t\t\t\t\t# 
self.noposeTimi\n\t\t\t\t\t\t\t\tbuffer.pointedIndices = self.indices \n\t\t\t\t\t\t\t\tself.indices = []\n\t\t\t\t\t\t\t\tself.noposeTiming = False\n\n\t\t\t\t\t\telif j == 1:\n\t\t\t\t\t\t\t# print(\"point_left\")\n\t\t\t\t\t\t\tx, y, _ = self.datum.handKeypoints[0][0][8]\n\t\t\t\t\t\t\tindex = self.indexFromKeypoint(x,y)\n\t\t\t\t\t\t\tself.timing = index != -1\n\t\t\t\t\t\t\tself.noposeTiming = False\n\t\t\t\t\t\t\tif (self.timing and self.curpose == index):\n\t\t\t\t\t\t\t\tif time.time() - self.start_time > 0.3 and index not in self.indices:\n\t\t\t\t\t\t\t\t\tself.timing = True\n\t\t\t\t\t\t\t\t\tself.indices.append(index)\n\t\t\t\t\t\t\telif (self.timing):\n\t\t\t\t\t\t\t\tself.curpose = index\n\t\t\t\t\t\t\t\tself.start_time = time.time()\n\t\t\t\t\t\t\telse: self.start_time = time.time()\n\t\t\t\t\t\telif j == 2:\n\t\t\t\t\t\t\t# print(\"point_right\")\n\t\t\t\t\t\t\tx, y, _ = self.datum.handKeypoints[1][0][8]\n\t\t\t\t\t\t\tindex = self.indexFromKeypoint(x,y)\n\t\t\t\t\t\t\tself.timing = index != -1\n\t\t\t\t\t\t\tself.noposeTiming = False\n\t\t\t\t\t\t\tif (self.timing and self.curpose == index):\n\t\t\t\t\t\t\t\tif time.time() - self.start_time > 0.3 and index not in self.indices:\n\t\t\t\t\t\t\t\t\tself.timing = True\n\t\t\t\t\t\t\t\t\tself.indices.append(index)\n\t\t\t\t\t\t\telif (self.timing):\n\t\t\t\t\t\t\t\tself.curpose = index\n\t\t\t\t\t\t\t\tself.start_time = time.time()\n\t\t\t\t\t\t\telse: self.start_time = time.time()\n\t\t\t\t\t#print (self.indices)\n\t\t\t\t\t# buffer.pointedIndices = self.indices \n\t\t\t\texcept ValueError:\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\tkey = cv2.waitKey(1)\n\t\t\tif key == ord('q'):\n\t\t\t\tbreak\n\n\n\tasync def run(self):\n\t\tx = asyncio.create_task(self.main())\n\t\tawait asyncio.gather(x)\n\nif __name__ == '__main__':\n\tg = GestureRec()\n\tasyncio.run(g.run())\n\n\tg.cap.release()\n\tcv2.destroyAllWindows()\n","sub_path":"vision/gesture/run_openpose.py","file_name":"run_openpose.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"388413477","text":"import sqlite3\nimport json\nimport logging\nfrom urllib.parse import urlparse, parse_qs\nfrom urllib.request import Request, urlopen\nimport numpy\nfrom flask import Flask, request\nimport functools\n\n\nclass Evaluator(Flask):\n def __init__(self, frontend_host, frontend_port):\n super(Evaluator, self).__init__(\"Evaluator\")\n self._metrics = {}\n self.frontend_host = frontend_host\n self.frontend_port = frontend_port\n @self.route(\"/\", methods=[\"GET\", \"POST\"])\n def handle():\n if request.method == \"GET\": \n return \"Evaluator server\"\n elif request.method == \"POST\": \n j = request.get_json()\n inputs = numpy.asarray([x[0] for x in j[\"data\"]])\n outputs = [x[1] for x in j[\"data\"]]\n for name, callback in self._metrics.items():\n retval = {\"metadata\" : {k : v for k, v in j[\"metadata\"].items()}}\n retval[\"metric_name\"] = name\n retval[\"metric_value\"] = callback(inputs, outputs, j[\"metadata\"])\n r = Request(\"http://{}:{}\".format(self.frontend_host, self.frontend_port),\n method=\"POST\",\n headers={\"Content-Type\" : \"application/json\"},\n data=json.dumps(retval).encode())\n urlopen(r)\n return \"OK\"\n\n def register_metric(self, name, callback):\n self._metrics[name] = callback\n\n\ndef average_absolute_activation(inputs, outputs, metadata):\n total = 0.0\n count = 0\n for batches in outputs:\n for batch in batches:\n np = numpy.abs(numpy.array(batch))\n 
total += np.sum()\n count += functools.reduce(lambda x, y : x * y, np.shape)\n return 0.0 if count == 0 else total / count\n\n\ndef average_absolute_value(inputs, outputs, metadata):\n total = 0.0\n count = 0\n for batches in outputs:\n for batch in batches:\n np = numpy.array(batch)\n total += np.sum()\n count += functools.reduce(lambda x, y : x * y, np.shape)\n return 0.0 if count == 0 else total / count\n\n\ndef standard_deviation(inputs, outputs, metadata):\n vals = []\n for batches in outputs:\n for batch in batches:\n s = numpy.std(batch)\n vals.append(s)\n return (0.0 if len(vals) == 0 else (sum(vals) / float(len(vals))))\n\n\ndef create_server(frontend_host, frontend_port):\n server = Evaluator(frontend_host, frontend_port)\n server.register_metric(\"Average absolute value\", average_absolute_value)\n server.register_metric(\"Standard deviation\", standard_deviation)\n return server\n","sub_path":"src/servers/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384628934","text":"\n\nfrom xai.brain.wordbase.nouns._parliamentarian import _PARLIAMENTARIAN\n\n#calss header\nclass _PARLIAMENTARIANS(_PARLIAMENTARIAN, ):\n\tdef __init__(self,): \n\t\t_PARLIAMENTARIAN.__init__(self)\n\t\tself.name = \"PARLIAMENTARIANS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"parliamentarian\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_parliamentarians.py","file_name":"_parliamentarians.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561445330","text":"import os,logging\nfrom . import metadata\nfrom .utils import run_metadata_reformatting\nfrom flask import render_template,flash,Response,request\nfrom flask_wtf import FlaskForm,RecaptchaField\nfrom wtforms.fields import FileField,SubmitField,MultipleFileField\nfrom flask_wtf.file import FileAllowed,FileRequired\nfrom werkzeug.utils import secure_filename\nfrom igf_data.utils.fileutils import get_temp_dir,remove_dir\n\n\nclass MetadataForm(FlaskForm):\n metadata_file = \\\n FileField(\\\n 'Metadata csv file',\n validators=[FileAllowed(['csv']),FileRequired()])\n recaptcha = RecaptchaField()\n submit = SubmitField('Reformat metadata')\n\n\n@metadata.route('/',methods=['GET','POST'])\ndef metadata_home():\n try:\n csv_data = ''\n form = MetadataForm()\n if form.validate_on_submit():\n temp_dir = get_temp_dir()\n metadata_filename = \\\n secure_filename(form.metadata_file.data.filename)\n form.metadata_file.\\\n data.save(\\\n os.path.join(\\\n temp_dir,\n metadata_filename))\n new_metadata_file = \\\n os.path.join(\\\n temp_dir,\n metadata_filename)\n try:\n csv_data = \\\n run_metadata_reformatting(\\\n metadata_file=new_metadata_file,\\\n output_dir=temp_dir)\n except Exception as e:\n flash('Failed metadata file reformatting')\n logging.warning(e)\n remove_dir(temp_dir)\n if csv_data != '':\n return \\\n Response(\\\n csv_data,\n mimetype=\"text/csv\",\n headers={\"Content-disposition\":\n \"attachment; filename=reformatted_metadata.csv\"})\n else:\n if request.method=='POST':\n flash('Failed file type validation check')\n except Exception as e:\n logging.warning('Failed metadata reformatting, error: {0}'.format(e))\n\n return 
render_template('metadata/metadata_reformat.html',form=form)","sub_path":"app/metadata/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"514339523","text":"# coding=utf-8\nimport arrow\nimport time\nimport requests\n\n\ndef format_con(con, lens):\n if len(con) >= lens:\n return con[:lens]\n return con + (' ' * (lens - len(con)))\n\n\ndef run():\n sites = [\n 'http://m.qipeilong.net',\n 'http://mall.qipeilong.net',\n 'http://api.qipeilong.net',\n 'http://auth.qipeilong.net',\n 'http://www.qipeilong.net',\n 'http://p.qipeilong.net',\n 'http://b.qipeilong.net',\n 'http://r.qipeilong.net',\n 'http://sso.qipeilong.net',\n 'http://service.qipeilong.net:9006/wsProduct/ProductSearch.svc',\n 'http://service.qipeilong.net:9006/wsUserService/UserService.svc',\n 'http://service.qipeilong.net:9006/wsTraderAccess/TraderAccess.svc',\n\n 'http://m.qipeilong.cn',\n 'http://mall.qipeilong.cn',\n 'http://mobileapi.qipeilong.cn',\n 'http://auth.qipeilong.cn',\n 'http://www.qipeilong.cn',\n 'http://p.qipeilong.cn',\n 'http://b.qipeilong.cn',\n 'http://r.qipeilong.cn',\n 'http://sso.qipeilong.cn',\n ]\n lens = max([len(x) for x in sites]) + 2\n for s in sites:\n try:\n with requests.get(s, timeout=30) as res:\n print(format_con(s, lens), '正常' if res.status_code == 200 else '状态码:{0}'.format(res.status_code))\n except Exception as e:\n print('请求地址异常', s)\n print(e)\n\n\nwhile True:\n run()\n print('检查时间', arrow.now(), '\\n')\n time.sleep(5)\n","sub_path":"test_/WakeSite.py","file_name":"WakeSite.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"274100992","text":"# -*- coding: utf8 -*-fr\n\n\"\"\"\nItopapiService is an abstraction of Service representation on iTop\n\"\"\"\n\nfrom itopapi.model.prototype import ItopapiPrototype\nfrom itopapi.model.features.hasOrganization import HasOrganization\nfrom itopapi.model.features.hasServiceFamily import HasServiceFamily\n\n__version__ = '1.0'\n__authors__ = ['Julien Nauroy ']\n\n\nclass ItopapiService(ItopapiPrototype, HasOrganization, HasServiceFamily):\n\n # Configuration specific to itop\n itop = {\n # Name of the class in Itop\n 'name': 'Service',\n # Define which fields to save when creating or updating from the python API\n 'save': ['name', 'description', 'status'],\n 'foreign_keys': [\n HasOrganization.foreign_key,\n HasServiceFamily.foreign_key,\n ],\n 'list_types': {\n 'functionalcis_list': 'functionalci_id_finalclass_recall',\n 'contacts_list': 'contact_id_finalclass_recall'\n },\n }\n\n @staticmethod\n def find(key):\n \"\"\" Retrieve one or more instance of PhysicalInterface with the given key or criteria \"\"\"\n return ItopapiPrototype.find(ItopapiService, key)\n\n @staticmethod\n def find_by_name(name):\n return ItopapiPrototype.find_by_name(ItopapiService, name)\n\n @staticmethod\n def find_all():\n \"\"\" Retrieve all instance of PhysicalInterface \"\"\"\n return ItopapiPrototype.find_all(ItopapiService)\n\n \"\"\"\n ItopapiService is an object that represents a Service from iTop\n \"\"\"\n def __init__(self, data=None):\n super(ItopapiService, self).__init__(data)\n # Description\n self.description = None\n # Service's status. 
Values within [implementation, obsolete, production]\n self.status = None\n # Lists\n self.servicesubcategories_list = None\n self.documents_list = None\n self.contacts_list = None\n self.customercontracts_list = None\n self.providercontracts_list = None\n self.functionalcis_list = None\n","sub_path":"itopapi/model/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506717056","text":"\"\"\"This module contains the function to perform principal components analysis.\"\"\"\nimport numpy as np\n\n\ndef pca(data):\n \"\"\"\n Perform principal components analysis to reduce dimensionality of data.\n\n :param data: d x n matrix of n d-dimensional data points. Each column is an example.\n :type data: ndarray\n :return: tuple containing three components: (new_data, variances, eigenvectors). The variable new_data is a d x n\n matrix containing the original data mapped to a new coordinate space. The variable variances is a length-d vector\n containing the variance captured by each new dimensions. The variable eigenvectors is a matrix where each column\n is one of the eigenvectors that the data has been projected onto.\n :rtype: tuple\n \"\"\"\n #####################################################################\n # Enter your code below for computing new_data and variances.\n # You may use built in np.linalg.eig or np.linalg.svd, but you are\n # not allowed to use a pre-built pca in your implementation\n #####################################################################\n (d, n) = data.shape\n #print (data)\n mean_data = np.mean(data, 1)\n #print (mean_data)\n mean_data = mean_data.reshape(d,1)\n mean_data_matrix = np.tile(mean_data, (1,n))\n #mean = np.tile(np.mean(data,1).reshape(d,1), (1, n))\n data -= mean_data_matrix\n #print (data)\n variances = np.zeros(n)\n co_variance = np.dot(data, data.T)/n\n print (co_variance)\n variances, eigenvectors = np.linalg.eig(co_variance)\n #ranks = variances.argsort()[::-1]\n #variances = variances[ranks]\n #eigenvectors = eigenvectors[:, ranks]\n new_data = np.dot(eigenvectors.T, data)\n \n #####################################################################\n # End of your contributed code\n #####################################################################\n\n return np.real(new_data), np.real(variances), np.real(eigenvectors)\n","sub_path":"hw4/hw4/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"202582438","text":"import logging\r\nimport pdb\r\nimport torch\r\nfrom glob import glob\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nimport os\r\nimport sys\r\n# from src.utils.bleu import compute_bleu\r\n# Ignore warnings\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nfrom src.utils.pre_data import *\r\n\r\ndef gpu_init_pytorch(gpu_num):\r\n\t'''\r\n\t\tInitialize GPU\r\n\t'''\r\n\ttorch.cuda.set_device(int(gpu_num))\r\n\tdevice = torch.device(\"cuda:{}\".format(\r\n\t\tgpu_num) if torch.cuda.is_available() else \"cpu\")\r\n\treturn device\r\n\r\ndef create_save_directories(path):\r\n\tif not os.path.exists(path):\r\n\t\tos.makedirs(path)\r\n\r\ndef stack_to_string(stack):\r\n\top = \"\"\r\n\tfor i in stack:\r\n\t\tif op == \"\":\r\n\t\t\top = op + i\r\n\t\telse:\r\n\t\t\top = op + ' ' + i\r\n\treturn op\r\n\r\ndef index_batch_to_words(input_batch, input_length, 
lang):\r\n\t'''\r\n\t\tArgs:\r\n\t\t\tinput_batch: List of BS x Max_len\r\n\t\t\tinput_length: List of BS\r\n\t\tReturn:\r\n\t\t\tcontextual_input: List of BS\r\n\t'''\r\n\tcontextual_input = []\r\n\tfor i in range(len(input_batch)):\r\n\t\tcontextual_input.append(stack_to_string(sentence_from_indexes(lang, input_batch[i][:input_length[i]])))\r\n\r\n\treturn contextual_input\r\n\r\ndef sort_by_len(seqs, input_len, device=None, dim=1):\r\n\torig_idx = list(range(seqs.size(dim)))\r\n\t# pdb.set_trace()\r\n\r\n\t# Index by which sorting needs to be done\r\n\tsorted_idx = sorted(orig_idx, key=lambda k: input_len[k], reverse=True)\r\n\tsorted_idx= torch.LongTensor(sorted_idx)\r\n\tif device:\r\n\t\tsorted_idx = sorted_idx.to(device)\r\n\r\n\tsorted_seqs = seqs.index_select(1, sorted_idx)\r\n\tsorted_lens= [input_len[i] for i in sorted_idx]\r\n\r\n\t# For restoring original order\r\n\torig_idx = sorted(orig_idx, key=lambda k: sorted_idx[k])\r\n\torig_idx = torch.LongTensor(orig_idx)\r\n\tif device:\r\n\t\torig_idx = orig_idx.to(device)\r\n\t\t# sorted_lens = torch.LongTensor(sorted_lens).to(device)\r\n\treturn sorted_seqs, sorted_lens, orig_idx\r\n\r\ndef save_checkpoint(state, epoch, logger, model_path, ckpt):\r\n\t'''\r\n\t\tSaves the model state along with epoch number. The name format is important for \r\n\t\tthe load functions. Don't mess with it.\r\n\r\n\t\tArgs:\r\n\t\t\tmodel state\r\n\t\t\tepoch number\r\n\t\t\tlogger variable\r\n\t\t\tdirectory to save models\r\n\t\t\tcheckpoint name\r\n\t'''\r\n\tckpt_path = os.path.join(model_path, '{}.pt'.format(ckpt))\r\n\tlogger.info('Saving Checkpoint at : {}'.format(ckpt_path))\r\n\ttorch.save(state, ckpt_path)\r\n\r\ndef load_checkpoint(config, embedding, encoder, predict, generate, merge, mode, ckpt_path, logger, device,\r\n\t\t\t\t\tembedding_optimizer = None, encoder_optimizer = None, predict_optimizer = None, generate_optimizer = None, merge_optimizer = None,\r\n\t\t\t\t\tembedding_scheduler = None, encoder_scheduler = None, predict_scheduler = None, generate_scheduler = None, merge_scheduler = None\r\n\t\t\t\t\t):\r\n\tcheckpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)\r\n\r\n\tembedding.load_state_dict(checkpoint['embedding_state_dict'])\r\n\tencoder.load_state_dict(checkpoint['encoder_state_dict'])\r\n\tpredict.load_state_dict(checkpoint['predict_state_dict'])\r\n\tgenerate.load_state_dict(checkpoint['generate_state_dict'])\r\n\tmerge.load_state_dict(checkpoint['merge_state_dict'])\r\n\r\n\tif mode == 'train':\r\n\t\tembedding_optimizer.load_state_dict(checkpoint['embedding_optimizer_state_dict'])\r\n\t\tencoder_optimizer.load_state_dict(checkpoint['encoder_optimizer_state_dict'])\r\n\t\tpredict_optimizer.load_state_dict(checkpoint['predict_optimizer_state_dict'])\r\n\t\tgenerate_optimizer.load_state_dict(checkpoint['generate_optimizer_state_dict'])\r\n\t\tmerge_optimizer.load_state_dict(checkpoint['merge_optimizer_state_dict'])\r\n\r\n\t\tembedding_scheduler.load_state_dict(checkpoint['embedding_scheduler_state_dict'])\r\n\t\tencoder_scheduler.load_state_dict(checkpoint['encoder_scheduler_state_dict'])\r\n\t\tpredict_scheduler.load_state_dict(checkpoint['predict_scheduler_state_dict'])\r\n\t\tgenerate_scheduler.load_state_dict(checkpoint['generate_scheduler_state_dict'])\r\n\t\tmerge_scheduler.load_state_dict(checkpoint['merge_scheduler_state_dict'])\r\n\r\n\tstart_epoch = checkpoint['epoch']\r\n\tmin_train_loss = checkpoint['min_train_loss']\r\n\tmax_train_acc = checkpoint['max_train_acc']\r\n\tmax_val_acc = 
checkpoint['max_val_acc']\r\n\tequation_acc = checkpoint['equation_acc']\r\n\tbest_epoch = checkpoint['best_epoch']\r\n\tgenerate_nums = checkpoint['generate_nums']\r\n\r\n\tembedding.to(device)\r\n\tencoder.to(device)\r\n\tpredict.to(device)\r\n\tgenerate.to(device)\r\n\tmerge.to(device)\r\n\r\n\tlogger.info('Successfully Loaded Checkpoint from {}, with epoch number: {} for {}'.format(ckpt_path, start_epoch, mode))\r\n\r\n\tif mode == 'train':\r\n\t\tembedding.train()\r\n\t\tencoder.train()\r\n\t\tpredict.train()\r\n\t\tgenerate.train()\r\n\t\tmerge.train()\r\n\telse:\r\n\t\tembedding.eval()\r\n\t\tencoder.eval()\r\n\t\tpredict.eval()\r\n\t\tgenerate.eval()\r\n\t\tmerge.eval()\t\t\r\n\r\n\treturn start_epoch, min_train_loss, max_train_acc, max_val_acc, equation_acc, best_epoch, generate_nums\r\n\r\ndef get_latest_checkpoint(model_path, logger):\r\n\t'''\r\n\t\tLooks for the checkpoint with highest epoch number in the directory \"model_path\" \r\n\r\n\t\tArgs:\r\n\t\t\tmodel_path: including the run_name\r\n\t\t\tlogger variable: to log messages\r\n\t\tReturns:\r\n\t\t\tcheckpoint: path to the latest checkpoint \r\n\t'''\r\n\r\n\tckpts = glob('{}/*.pt'.format(model_path))\r\n\tckpts = sorted(ckpts)\r\n\r\n\tif len(ckpts) == 0:\r\n\t\tlogger.warning('No Checkpoints Found')\r\n\r\n\t\treturn None\r\n\telse:\r\n\t\t#pdb.set_trace()\r\n\t\t#latest_epoch = max([int(x.split('_')[-1].split('.')[0]) for x in ckpts])\r\n\t\t#ckpts = sorted(ckpts, key= lambda x: int(x.split('_')[-1].split('.')[0]) , reverse=True )\r\n\t\tckpt_path = ckpts[0]\r\n\t\t#logger.info('Checkpoint found with epoch number : {}'.format(latest_epoch))\r\n\t\tlogger.debug('Checkpoint found at : {}'.format(ckpt_path))\r\n\r\n\t\treturn ckpt_path","sub_path":"code/gts/src/utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419602698","text":"\n\nfrom xai.brain.wordbase.nouns._blindfold import _BLINDFOLD\n\n#calss header\nclass _BLINDFOLDING(_BLINDFOLD, ):\n\tdef __init__(self,): \n\t\t_BLINDFOLD.__init__(self)\n\t\tself.name = \"BLINDFOLDING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"blindfold\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_blindfolding.py","file_name":"_blindfolding.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485435353","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom datetime import datetime\n\nfrom ..models import Student, Group\n\n# Views for Students\ndef students_list(request):\n '''\n students = (\n {'id': 1,\n 'first_name': u'Виталий',\n 'last_name': u'Подоба',\n 'ticket': 235,\n 'image': 'img/me.jpeg'},\n {'id': 2,\n 'first_name': u'Корост',\n 'last_name': u'Андрей',\n 'ticket': 2123,\n 'image': 'img/piv.png'},\n {'id': 3,\n 'first_name': u'Тарас',\n 'last_name': u'Притула',\n 'ticket': 5332,\n 'image': 'img/podoba3.jpg'},\n )\n '''\n #students = Student.objects.all()\n students = Student.objects.order_by('last_name')\n\n # try to order students list\n order_by = request.GET.get('order_by', '')\n if order_by in ('last_name', 'first_name', 'ticket'):\n students = students.order_by(order_by)\n if request.GET.get('reverse', '') == '1':\n 
students = students.reverse()\n\n # paginate students\n paginator = Paginator(students, 3)\n page = request.GET.get('page')\n try:\n students = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n students = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n students = paginator.page(paginator.num_pages)\n\n return render(request, 'students/students_list.html', {'students': students})\n\ndef students_add(request):\n # was form posted?\n if request.method == \"POST\":\n # was form add button clicked?\n if request.POST.get('add_button') is not None:\n\n # errors collection\n errors = {}\n # validate student data will go here\n data = {'middle_name': request.POST.get('middle_name'),\n 'notes': request.POST.get('notes')}\n\n # validate user input\n middle_name = request.POST.get('middle_name').strip()\n first_name = request.POST.get('first_name', '').strip()\n if not first_name:\n errors['first_name'] = u\"Имя должно быть обязательно\"\n else:\n data['first_name'] = first_name\n\n last_name = request.POST.get('last_name', '').strip()\n if not last_name:\n errors['last_name'] = u\"Фамилия должна быть обязательно\"\n else:\n data['last_name'] = last_name\n\n birthday = request.POST.get('birthday', '').strip()\n if not birthday:\n errors['birthday'] = u\"Дата рождения должна быть обязательно\"\n else:\n try:\n datetime.strptime(birthday, '%Y-%m-%d')\n except Exception:\n errors['birthday'] = u\"Введите корректный формат даты (напр. 1984-12-30)\"\n else:\n data['birthday'] = birthday\n\n ticket = request.POST.get('ticket', '').strip()\n if not ticket:\n errors['ticket'] = u\"Номер билета должен быть обязательно\"\n else:\n data['ticket'] = ticket\n\n student_group = request.POST.get('student_group', '').strip()\n if not student_group:\n errors['student_group'] = u\"Выберите группу для студента\"\n else:\n groups = Group.objects.filter(pk=student_group)\n if len(groups) != 1:\n errors['student_group'] = u\"Выберите корректную группу\"\n else:\n data['student_group'] = groups[0]\n\n photo = request.FILES.get('photo')\n if photo:\n data['photo'] = photo\n\n # save student\n if not errors:\n # create student object\n student = Student(**data)\n # save it to database\n student.save()\n\n # redirect user to students list\n return HttpResponseRedirect(u'%s?status_message=Студент %s %s %s успешно добавлен!' % (reverse('home'), first_name, middle_name, last_name))\n\n else:\n # render form with errors and previous user input\n return render(request, 'students/students_add.html',\n {'groups': Group.objects.all().order_by('title'),\n 'errors': errors})\n elif request.POST.get('cancel_button') is not None:\n # redirect to home page on cancel button\n return HttpResponseRedirect(u'%s?status_message=Добавление студента отменено!' %reverse('home'))\n else:\n # initial form render\n return render(request, 'students/students_add.html',\n {'groups': Group.objects.all().order_by('title')})\n\n\n\ndef students_edit(request, sid):\n return HttpResponse('
Edit Students %s' % sid)\n\ndef students_delete(request, sid):\n    return HttpResponse('Delete Students %s
    ' % sid)","sub_path":"students/views/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"100459248","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# 加载mnist_inference.py中定义的常量和前向传播的函数\nimport test_numpy_6_4_1_mnist_inference\n\n# 配置神经网络的参数\nBATCH_SIZE = 1000\t\t\t\t# 后注:一个训练batch中的训练数据个数\nLEARNING_RATE_BASE = 0.01\t\t# 后注:训练的学习率\n\t\t\t\t\t\t\t\t# 后注:注意!!原书内容出错!这里应该是0.01,书上是0.8,但是书上指定的源代码是正确的为0.01\n\t\t\t\t\t\t\t\t# 后注:学习率不设置好对训练会有很大影响。学习率为0.8的时候,虽然5.5节的代码的损失值立刻就收敛到1以内了\n\t\t\t\t\t\t\t\t# 后注:但是6.4的代码的损失值会收敛得特别慢\nLEARNING_RATE_DECAY = 0.99\t\t# 后注:学习率的衰减率\nREGULARIZATION_RATE = 0.0001\t# 后注:描述训练模型复杂度的正则化项在损失函数中的系数,即“J(θ)+λR(w)”中的λ\nTRAINING_STEPS = 30000\t\t\t# 后注:训练轮数\nMOVING_AVERAGE_DECAY = 0.99\t\t# 后注:滑动平均衰减率\n\n# 模型保存的路径和文件名\n\n#MODEL_SAVE_PATH = \"/path/to/model\"\nMODEL_SAVE_PATH = \"/home/work/tmp/tttt/TensorFlowBoy/model\"\nMODEL_NAME = \"model.ckpt\"\n\ndef train(mnist):\n\t#定义输入输出placeholder\n\t# x = tf.placeholder(\n\t# \ttf.float32, [None, test_numpy_5_5_mnist_inference.INPUT_NODE], name = 'x-input')\n\n\t# 后注:输入节点矩阵为[1000, 28, 28, 1]\n\tx = tf.placeholder(tf.float32, [\n\t\t\t\tBATCH_SIZE, \t\t\t\t\t\t\t\t\t\t# 第一维表示一个batch中样例的个数。\n\t\t\t\ttest_numpy_6_4_1_mnist_inference.IMAGE_SIZE,\t\t# 第二维和第三维表示图片的尺寸。\n\t\t\t\ttest_numpy_6_4_1_mnist_inference.IMAGE_SIZE,\t\t# \n\t\t\t\ttest_numpy_6_4_1_mnist_inference.NUM_CHANNELS], \t# 第四维表示图片的深度,对于RBG格式的图片,深度为3。\n\t\t\t\tname = 'x-input')\n\n\ty_ = tf.placeholder(\n\t\ttf.float32, [None, test_numpy_6_4_1_mnist_inference.OUTPUT_NODE], name = 'y-input')\n\n\t# 后注:计算模型的正则化损失\n\tregularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n\n\t# 直接使用mnist_inference.py中定义的前向传播过程\n\ty = test_numpy_6_4_1_mnist_inference.inference(x, 1, regularizer)\n\n\t# 后注:注意!!global_step传入反向传播算法函数后将被自动更新,这里只给初始值即可,见P86\n\tglobal_step = tf.Variable(0, trainable = False)\n\n\t# 和5.2.1小节样例中类似地定义损失函数、学习率、滑动平均操作以及训练过程\n\t# 后注:定义滑动平均的类\n\tvariable_averages = tf.train.ExponentialMovingAverage(\n\t\tMOVING_AVERAGE_DECAY, global_step)\n\t# 后注:定义一个更新变量滑动平均的操作\n\tvariables_averages_op = variable_averages.apply(\n\t\ttf.trainable_variables())\n\t# 后注:计算交叉熵\n\tcross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n\t\tlogits = y, labels = tf.argmax(y_, 1))\n\t# 后注:计算交叉熵平均值\n\tcross_entropy_mean = tf.reduce_mean(cross_entropy)\n\t# 后注:总损失\n\tloss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))\n\t# 后注:学习率\n\tlearning_rate = tf.train.exponential_decay(\n\t\tLEARNING_RATE_BASE, \n\t\tglobal_step, \n\t\tmnist.train.num_examples / BATCH_SIZE, \n\t\tLEARNING_RATE_DECAY)\n\t# 后注:优化损失函数\n\ttrain_step = tf.train.GradientDescentOptimizer(learning_rate) \\\n\t\t\t\t\t\t.minimize(loss, global_step = global_step)\n\n\twith tf.control_dependencies([train_step, variables_averages_op]) :\n\t\ttrain_op = tf.no_op(name = 'train')\n\n\t# 初始化TensorFlow持久化类\n\tsaver = tf.train.Saver()\n\n\twith tf.Session() as sess: \n\t\ttf.initialize_all_variables().run()\n\n\t\t# 在训练过程中不再测试模型在验证数据上的表现,验证和测试的过程将会有一个独立的程序来完成。\n\t\tfor i in range(TRAINING_STEPS):\n\t\t\txs, ys = mnist.train.next_batch(BATCH_SIZE)\n\n\t\t\t# 类似地将输入的训练数据格式调整为一个四维矩阵,并将这个调整后的数据传入sess.run过程。\n\t\t\treshaped_xs = np.reshape(xs, (BATCH_SIZE, \n\t\t\t\t\t\t\t\t\t\t\ttest_numpy_6_4_1_mnist_inference.IMAGE_SIZE, 
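# The np.reshape call at this point in the MNIST training script packs a flat
# (batch, 784) batch into the 4-D NHWC layout that conv layers expect. A
# self-contained sketch with the same sizes (BATCH_SIZE=1000, IMAGE_SIZE=28,
# NUM_CHANNELS=1 from the script); the zero-filled array stands in for pixels:
import numpy as np

flat_batch = np.zeros((1000, 784), dtype=np.float32)
nhwc = flat_batch.reshape((1000, 28, 28, 1))  # (batch, height, width, channels)
assert nhwc.shape == (1000, 28, 28, 1)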
\n\t\t\t\t\t\t\t\t\t\t\ttest_numpy_6_4_1_mnist_inference.IMAGE_SIZE,\n\t\t\t\t\t\t\t\t\t\t\ttest_numpy_6_4_1_mnist_inference.NUM_CHANNELS))\n\n\t\t\t# _, loss_value, step = sess.run([train_op, loss, global_step], \n\t\t\t#\t\t\t\t\t\t\t\tfeed_dict = {x: xs, y_ : ys})\n\t\t\t#print(reshaped_xs[0])\n\t\t\taa, loss_value, step = sess.run([train_step, loss, global_step], \n\t\t\t\t\t\t\t\t\t\t\tfeed_dict = {x: reshaped_xs, y_ : ys})\n\t\t\t# 每1000轮保存一次模型\n\t\t\tif i % 100 == 0:\n\t\t\t\t# 输出当前模型的情况。这里只输出了模型在当前训练batch上的损失函数大小。通过损失\n\t\t\t\t# 函数的大小可以大概了解训练的情况。在验证数据集上的正确率信息会有一个单独的程序\n\t\t\t\t# 来生成。\n\t\t\t\t#print(\"After %d training step(s), loss on training \"\n\t\t\t\t#\t\t\"batch is %g.aa=%g\" % (step, loss_value, aa))\n\t\t\t\tprint(\"After %d training step(s), loss on training \"\n\t\t\t\t\t\t\"batch is %g\" % (step, loss_value))\n\n\t\t\t\t# 保存当前的模型。注意这里给出了global_step参数,这样可以让每个被保存模型的文件名\n\t\t\t\t# 末尾加上训练的轮数,比如“model.ckpt-1000”表示训练1000轮之后得到的模型\n\t\t\t\tsaver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), \n\t\t\t\t\t\t\tglobal_step = global_step)\n\ndef main(argv = None):\n\t# 声明处理MNIST数据集的类,这个类在初始化时会自动下载数据。\n\tmnist = input_data.read_data_sets(\"/tmp/data\", one_hot = True)\n\t#print(\"Example training data: \", mnist.train.images[0]) \n\t#print(\"Example training data label: \", mnist.train.labels[0])\n\ttrain(mnist)\n\nif __name__ == '__main__':\n\ttf.app.run()","sub_path":"test_numpy_6_4_1_mnist_train.py","file_name":"test_numpy_6_4_1_mnist_train.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485183233","text":"#!/usr/bin/env python\n\nimport re\nimport os\n\n# r'^herodetails/(?P.*)'\n\npattern = \"\"\"r'^(.*?)/\\(?P<(.*?)>\"\"\"\n\n\npattern = \"\"\"path\\(\"\"\"\n\nfor curr_dir, subs, files in os.walk('.'):\n for file_name in files:\n if file_name == 'urls.py':\n file_path = os.path.join(curr_dir, file_name)\n print(file_path)\n with open(file_path) as file_in:\n for line in file_in:\n m = re.search(pattern, line)\n if m:\n print(m.group(0)) # , m.group(2))\n \n","sub_path":"EXAMPLES/django2.0/fix_urls.py","file_name":"fix_urls.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231302226","text":"# -*- coding: utf-8 -*-\nimport re\nimport datetime\nimport json\nimport scrapy\nfrom DataItem.redis_pool import RedisPool\nfrom bs4 import BeautifulSoup\nfrom DataItem.myLog import Log\nfrom scrapy.selector import Selector\nimport random\n\nlogger = Log(log_name='DetailSpider')\n\n\nclass ProductSpider(scrapy.Spider):\n name = 'ProductSpider'\n\n allowed_domains = ['*']\n # 开启scrapyhub代理 备用\n crawlera_enabled = True\n crawlera_apikey = ''\n\n # todo 自定义设置,调优\n custom_settings = {\n 'CONCURRENT_REQUESTS': 5,\n 'CONCURRENT_REQUESTS_PER_DOMAIN': 5, # 单个ip最大并发量\n 'AUTOTHROTTLE_ENABLED': False, # 限流器\n 'DOWNLOADER_MIDDLEWARES': {\n # 'DataItem.middlewares.ProxyDownloaderMiddleware': 610, #开启阿布云\n 'scrapy_crawlera.CrawleraMiddleware': 610\n }\n }\n\n def __init__(self, **kwargs):\n logger.info(f'----------------开始采集,{datetime.datetime.now()}----------------')\n\n self.reds_conn = RedisPool().redis_con()\n\n self.product_detail_api = r'https://h5api.m.taobao.com/h5/mtop.taobao.detail.getdetail/6.0/?data=%7B%22itemNumId%22%3A%22{0}%22%7D'\n\n # 初始化详情正则对象\n self.img_cpl = re.compile(r'.*?\"img\":\"(https://.*?\\.jpg)', re.DOTALL) # 匹配详情图\n self.img_cpl2 = 
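# The Taobao spider here precompiles its scraping regexes once in __init__. A
# runnable sketch of the first one, simplified from img_cpl (the leading .*? is
# dropped and the sample JSON string is invented for illustration):
import re

img_re = re.compile(r'"img":"(https://.*?\.jpg)', re.DOTALL)
m = img_re.search('{"img":"https://img.example.com/a.jpg"}')
assert m and m.group(1) == "https://img.example.com/a.jpg"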
re.compile(r'.*?image\":{\"itemImages\":\\[(.*?)\\].*', re.DOTALL) # 匹配详情图\n self.desc_cpl = re.compile(r'.*?descUrl\".*?\"(.*?)\"', re.DOTALL) # 匹配详情图入口url\n self.all_imgs = [] # 所有图片的详情\n\n self.job_id = kwargs.get('_job', '1') # 此爬虫的job_id\n\n # 店铺名称\n # self.shopName = kwargs.get('shop_name')\n # self.shopid = kwargs.get('shop_id')\n\n self.productIdList = kwargs.get('productId_list')\n\n self.result = {}\n\n super().__init__(**kwargs)\n\n # 开始解析\n def start_requests(self):\n\n product_list = self.productIdList\n\n n = 0\n for product_id in product_list:\n n += 1\n logger.info(f'开始采集第{n}个商品,id为:{product_id}')\n\n product_detail_api = r'https://h5api.m.taobao.com/h5/mtop.taobao.detail.getdetail/6.0/?data=%7B%22itemNumId%22%3A%22{}%22%7D'\n\n # 构建商品详情接口\n url = product_detail_api.format(product_id)\n # 增加cookies\n cookies_dict = self.reds_conn.hgetall('queue_cookie')\n cookies_list = list(cookies_dict.values())\n\n cookie = cookies_list[-1]\n\n unb_random = ''.join(str(random.choice(range(9))) for _ in range(13))\n\n pattern = r'unb=(\\d+)'\n temp = re.sub(pattern=pattern, repl='unb=' + unb_random, string=cookie)\n ls = temp.split(';')\n cookies = {}\n for i in ls:\n key, value = i.split('=', 1)\n cookies[key] = value\n\n yield scrapy.Request(url=url, callback=self.parse_tb, dont_filter=True, cookies=cookies,\n meta={'url': url, 'slug': product_id, 'request_tp': 'store'})\n\n # 解析淘宝网页\n def parse_tb(self, response):\n try:\n # 参考:商品详情json\n product_detail_json = json.loads(response.text)\n\n data = product_detail_json['data']\n\n slug = response.meta.get('slug') # 淘宝的商品ID\n\n # 响应失败标志\n\n status = product_detail_json['ret'][0]\n if '成功' not in status:\n logger.warning(f'{response.request.url},商品列表采集失败!响应内容:{product_detail_json.get(\"ret\")}')\n return\n\n # 商品信息\n product_info = {}\n\n # 商品名称\n product_info['product_name'] = data['item'].get('title')\n # 商品id\n product_info['product_id'] = data['item'].get('itemId')\n # 商品类别id\n product_info['product_categoryId'] = data['item'].get('categoryId')\n\n site_type = data['params']['trackParams']['BC_type'] # B为天猫C为淘宝\n # 商品源url,区分B-淘宝,C-天猫\n if site_type == 'B':\n source_url = \"https://detail.tmall.com/item.htm?id={0}\".format(slug)\n else:\n source_url = \"https://item.taobao.com/item.htm?id={0}\".format(slug)\n\n product_info['product_url'] = source_url\n\n price_info = json.loads(data['apiStack'][0]['value']) # 价格信息\n\n purchase_price = price_info['price']['price']['priceText'].split('-')[-1] # 实际价格\n\n # 销量\n sell_count = price_info.get('item').get('sellCount') if price_info.get('item', dict()).get(\n 'sellCount') else 0\n\n purchase_list_price = price_info['price'].get('extraPrices')\n\n purchase_list_price = purchase_list_price[0]['priceText'].split('-')[\n -1] if purchase_list_price else purchase_price # 市场价\n\n item = data['item'] # 商品信息\n\n skuBase = data['skuBase']\n option_values = skuBase.get('props', {})\n\n tmall_descurl = f\"https:{item['tmallDescUrl']}\" # 商品图片详情页 origin\n\n images = item['images'] # 缩略图信息\n feature_image_list = [f'https:{u}' for u in images]\n\n props = data.get('props').get('groupProps')[0].get('基本信息')\n\n # todo 重写\n opt = [{'name': i['name'], 'values': [{'name': j['name'],\n 'thumb': (f\"http:{j.get('image')}\" if not j.get('image').startswith(\n 'http') else j.get('image')) if j.get('image') else ''} for j in\n i['values']]} for i in option_values][::-1] if option_values else {}\n\n product_info['product_price'] = purchase_price # 淘宝价\n product_info['product_sale_price'] = purchase_list_price # 定价\n\n 
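# parse_tb in this spider guards nested lookups such as
# price_info.get('item').get('sellCount') by hand. A hypothetical helper doing
# the same walk safely; the sample dict mirrors the sellCount shape but the
# values are made up:
def deep_get(d, *keys, default=None):
    # walk the key path, bailing out with the default on any missing level
    for k in keys:
        if not isinstance(d, dict):
            return default
        d = d.get(k, default)
    return d

assert deep_get({'item': {'sellCount': 42}}, 'item', 'sellCount', default=0) == 42
assert deep_get({}, 'item', 'sellCount', default=0) == 0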
product_info['product_props'] = props # 商品属性集合\n\n product_info['product_options'] = opt # 商品选项集合\n product_info['product_sell_count'] = sell_count # 商品销量\n\n product_info['product_images'] = feature_image_list # 图片集合\n\n sku_price_info = price_info['skuCore']['sku2info'] # sku价格\n\n product_info['product_sku_prices'] = sku_price_info\n\n # 抓取页面图片详情\n yield scrapy.Request(tmall_descurl,\n callback=self.parse_detail,\n meta={'product_info': product_info, 'slug': slug},\n dont_filter=True)\n except Exception as e:\n logger.warning(f'错误信息{e}')\n\n # 解析详情接口,判断是否有图片接口存在,如果存在则进一步抓取图片\n def parse_detail(self, response):\n\n product_info = response.meta.get('product_info')\n\n product_id = response.meta.get('slug')\n # 详情html\n product_describe = response.text\n\n product_info['product_describe'] = product_describe\n\n # 详情接口\n descurl = 'https://h5api.m.taobao.com/h5/mtop.taobao.detail.getdesc/6.0/?jsv=2.4.11&data={\"id\":\"%s\",\"type\":\"1\"}' % (\n product_id)\n\n yield scrapy.Request(url=descurl, callback=self.parse_img_new,\n meta={'product_info': product_info},\n dont_filter=True)\n\n # 获取图片\n def parse_img_new(self, response):\n product_info = response.meta.get('product_info')\n text = json.loads(response.text).get('data').get('pcDescContent')\n selector = Selector(text=text)\n image_urls = selector.css('img::attr(src)').getall()\n\n if not image_urls:\n soup = BeautifulSoup(text, 'lxml')\n if soup.find_all('img'):\n for link in soup.find_all('img'):\n image_urls.append(link.get(\"src\"))\n else:\n logger.warning(f'{response.request.url}:没有图片!')\n\n image_urls = [f'https:{img}' for img in image_urls]\n\n product_info['product_images_urls'] = image_urls\n\n # 参考:商品详情2.json\n data = json.dumps(product_info)\n\n # 存储到数据库\n self.reds_conn.hset('product_info', product_info['product_id'], data)\n\n def close(self, spider, reason):\n logger.info(f'----------------全部商品采集完成,{datetime.datetime.now()}----------------')\n","sub_path":"DataItem/DataItem/spiders/ProductSpider.py","file_name":"ProductSpider.py","file_ext":"py","file_size_in_byte":8657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"616327276","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nTrain or evaluate a single classifier with its given set of hyperparameters.\n\nCreated on Wed Sep 29 14:23:48 2021\n\n@author: lbechberger\n\"\"\"\n\nimport argparse, pickle\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.metrics import accuracy_score, cohen_kappa_score, f1_score, balanced_accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.pipeline import make_pipeline\nfrom mlflow import log_metric, log_param, set_tracking_uri\n\n# setting up CLI\nparser = argparse.ArgumentParser(description = \"Classifier\")\nparser.add_argument(\"input_file\", help = \"path to the input pickle file\")\nparser.add_argument(\"-s\", '--seed', type = int, help = \"seed for the random number generator\", default = None)\nparser.add_argument(\"-e\", \"--export_file\", help = \"export the trained classifier to the given location\", default = None)\nparser.add_argument(\"-i\", \"--import_file\", help = \"import a trained classifier from the given location\", default = None)\n\n# <--- 
Classifier --->\nparser.add_argument(\"-m\", \"--majority\", action = \"store_true\", help = \"majority class classifier\")\nparser.add_argument(\"-f\", \"--frequency\", action = \"store_true\", help = \"label frequency classifier\")\nparser.add_argument(\"-u\", \"--uniform\", action = \"store_true\", help = \"uniform (random) classifier\")\nparser.add_argument(\"--knn\", type = int, help = \"k nearest neighbor classifier with the specified value of k\", default = None)\nparser.add_argument(\"--knn_weights\", type = str, help = \"weight function of knn, uniform or distance\", default = \"uniform\")\nparser.add_argument(\"--tree\", action = \"store_true\", help = \"decision tree classifier\", default = None)\nparser.add_argument(\"--tree_depth\", type = int, help = \"max depth of decision tree\", default = None)\nparser.add_argument(\"--tree_criterion\", type = str, help = \"criterion to measure split quality, gini or entropy\", default = \"gini\")\nparser.add_argument(\"--svm\", type = str, help = \"support vector machine with specified kernel: linear, polynomial, rbf, or sigmoid\", default = None)\nparser.add_argument(\"--randforest\", type = int, help = \"random forest classifier with specified value as # of trees in forest\", default = None)\nparser.add_argument(\"--forest_criterion\", type = str, help = \"criterion to measure split quality, gini or entropy\", default = \"gini\")\nparser.add_argument(\"--forest_max_depth\", type = int, help = \"max depth of trees in forest\", default = None)\nparser.add_argument(\"--mlp\", nargs = \"+\", type = int, help = \"multilayer perceptron classifier, values resemble hidden layer sizes (1 value per layer)\", default = None)\nparser.add_argument(\"--bayes\", action = \"store_true\", help = \"complement naive bayes classifier\")\n\n# <--- Evaluation metrics --->\nparser.add_argument(\"-a\", \"--accuracy\", action = \"store_true\", help = \"evaluate using accuracy\")\nparser.add_argument(\"-k\", \"--kappa\", action = \"store_true\", help = \"evaluate using Cohen's kappa\")\nparser.add_argument(\"-f1\", \"--f1_score\", action = \"store_true\", help = \"evaluate using F1 score\")\nparser.add_argument(\"-ba\", \"--balanced_accuracy\", action = \"store_true\", help = \"evaluate using balanced accuracy score\")\n\n# <--- Param optimization --->\nparser.add_argument(\"--log_folder\", help = \"where to log the mlflow results\", default = \"data/classification/mlflow\")\n\nargs = parser.parse_args()\n\n# load data\nwith open(args.input_file, 'rb') as f_in:\n data = pickle.load(f_in)\n\nset_tracking_uri(args.log_folder)\n\nif args.import_file is not None:\n # import a pre-trained classifier\n with open(args.import_file, 'rb') as f_in:\n input_dict = pickle.load(f_in)\n \n classifier = input_dict[\"classifier\"]\n for param, value in input_dict[\"params\"].items():\n log_param(param, value)\n \n log_param(\"dataset\", \"validation\")\n\nelse: # manually set up a classifier\n \n if args.majority:\n # majority vote classifier\n print(\" majority vote classifier\")\n log_param(\"classifier\", \"majority\")\n params = {\"classifier\": \"majority\"}\n classifier = DummyClassifier(strategy = \"most_frequent\", random_state = args.seed)\n \n elif args.frequency:\n # label frequency classifier\n print(\" label frequency classifier\")\n log_param(\"classifier\", \"stratified\")\n params = {\"classifier\": \"stratified\"}\n classifier = DummyClassifier(strategy = \"stratified\", random_state = args.seed)\n \n elif args.uniform:\n # uniform classifier\n print(\" uniform 
classifier\")\n log_param(\"classifier\", \"uniform\")\n params = {\"classifier\": \"uniform\"}\n classifier = DummyClassifier(strategy = \"uniform\", random_state = args.seed)\n \n elif args.knn is not None:\n # k nearest neighbour classifier\n print(\" {0} nearest neighbor classifier, {1} weights\".format(args.knn, args.knn_weights))\n \n log_param(\"classifier\", \"knn\")\n log_param(\"k\", args.knn)\n log_param(\"weights\", args.knn_weights)\n params = {\"classifier\": \"knn\", \n \"k\": args.knn, \n \"weights\": args.knn_weights}\n \n standardizer = StandardScaler()\n knn_classifier = KNeighborsClassifier(n_neighbors = args.knn, weights = args.knn_weights, n_jobs = -1)\n classifier = make_pipeline(standardizer, knn_classifier)\n \n elif args.tree is not None:\n # decision tree classifier\n print(\" decision tree with max depth {0}, {1} split criterion\".format(args.tree_depth, args.tree_criterion))\n \n log_param(\"classifier\", \"tree\")\n log_param(\"criterion\", args.tree_criterion)\n log_param(\"max_depth\", args.tree_depth)\n params = {\"classifier\": \"tree\", \n \"criterion\": args.tree_criterion, \n \"max_depth\": args.tree_depth}\n \n #standardizer = StandardScaler()\n classifier = DecisionTreeClassifier(criterion = args.tree_criterion, max_depth = args.tree_depth)\n #classifier = make_pipeline(standardizer, decision_tree)\n \n elif args.svm is not None:\n # support vector machine\n print(\" svm classifier, kernel: {0}\".format(args.svm))\n \n log_param(\"classifier\", \"svm\")\n log_param(\"kernel\", args.svm)\n params = {\"classifier\": \"svm\", \n \"kernel\": args.svm}\n \n standardizer = StandardScaler()\n svm_classifier = SVC(kernel = args.svm, gamma = \"auto\")\n classifier = make_pipeline(standardizer, svm_classifier)\n \n elif args.randforest is not None:\n # random forest classifier\n print(\" random forest classifier with {0} trees, max depth {1}, {2} criterion\".format(args.randforest, args.forest_max_depth, args.forest_criterion))\n \n log_param(\"classifier\", \"random forest\")\n log_param(\"nr trees\", args.randforest)\n log_param(\"max depth\", args.forest_max_depth)\n log_param(\"criterion\", args.forest_criterion)\n params = {\"classifier\": \"random forest\", \n \"nr trees\": args.randforest,\n \"max depth\": args.forest_max_depth,\n \"criterion\": args.forest_criterion}\n \n classifier = RandomForestClassifier(n_estimators = args.randforest, criterion = args.forest_criterion, max_depth = args.forest_max_depth, n_jobs = -1)\n \n elif args.mlp is not None:\n # multilayer perceptron\n print(\" multilayer perceptron with hidden layer size {0}\".format(args.mlp))\n \n log_param(\"classifier\", \"mlp\")\n log_param(\"hidden layer sizes\", args.mlp)\n params = {\"classifier\": \"mlp\",\n \"hidden layer sizes\": args.mlp}\n \n standardizer = StandardScaler()\n mlp_classifier = MLPClassifier(hidden_layer_sizes = tuple(args.mlp))\n classifier = make_pipeline(standardizer, mlp_classifier)\n \n elif args.bayes:\n # gaussian naive bayes\n print(\" complement NB classifier\")\n \n log_param(\"classifier\", \"complementNB\")\n params = {\"classifier\": \"complementNB\"}\n \n #standardizer = StandardScaler()\n classifier = ComplementNB()\n #classifier = make_pipeline(standardizer, nb_classifer)\n \n classifier.fit(data[\"features\"], data[\"labels\"].ravel())\n log_param(\"dataset\", \"training\")\n\n# now classify the given data\nprediction = classifier.predict(data[\"features\"])\n\n# collect all evaluation metrics\nevaluation_metrics = []\nif args.accuracy:\n 
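# Several branches of this classifier script wrap the estimator in
# make_pipeline(StandardScaler(), ...), so scaling parameters are learned from
# the training data only. A runnable sketch of the knn branch on made-up data
# (n_neighbors=5 is just an example value, not a recommended setting):
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = np.random.rand(20, 3), np.random.randint(0, 2, 20)
clf = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=5))
clf.fit(X, y)
print(clf.predict(X[:3]))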
evaluation_metrics.append((\"Accuracy\", accuracy_score))\nif args.kappa:\n evaluation_metrics.append((\"Cohens_kappa\", cohen_kappa_score))\nif args.f1_score:\n evaluation_metrics.append((\"F1_score\", f1_score))\nif args.balanced_accuracy:\n evaluation_metrics.append((\"Balanced_accuracy\", balanced_accuracy_score))\n\n# compute and print them\nfor metric_name, metric in evaluation_metrics:\n metric_value = metric(data[\"labels\"], prediction)\n print(\" {0}: {1}\".format(metric_name, metric_value))\n log_metric(metric_name, metric_value)\n \n# export the trained classifier if the user wants us to do so\nif args.export_file is not None:\n output_dict = {\"classifier\": classifier, \"params\": params}\n with open(args.export_file, 'wb') as f_out:\n pickle.dump(output_dict, f_out)","sub_path":"code/classification/run_classifier.py","file_name":"run_classifier.py","file_ext":"py","file_size_in_byte":9605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"215590253","text":"import os\n\nWTF_CSRF_ENABLED = True\n#SECRET_KEY = '1BCDEFGHIJKLMNOPQRSTUVWXYZ12345'\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\n\n##########################\nclass Config(object):\n SECRET_KEY = 'a9087FFJFF9nnvc2@#$%FSD'","sub_path":"soft-eng/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"358146103","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 24 15:50:30 2020\n\n@author: alisha\n\"\"\"\nimport pandas as pd\n\nclass count:\n \n def __init__(self, data):\n \n '''count correct recall and recog trials'''\n \n #all trials together per condition\n self.recall = data.count('recallCorrect')\n self.recog = data.count('recogCorrect')\n \n #in session 1, we had 2 rounds of each condition\n #here we have the counts divided into counts per round\n self.recall1 = data[0:18].count('recallCorrect')\n self.recall2 = data[18:36].count('recallCorrect')\n \n self.recog1 = data[0:18].count('recogCorrect')\n self.recog2 = data[18:36].count('recogCorrect')\n\ndef rearange(data, ids):\n '''transform data into df, add ids, and sort\n '''\n data = pd.DataFrame(data) #df\n data['sub_id'] = ids #add id column\n data = data.sort_values('sub_id') #sort according id\n data.index = [x for x in range(len(data))] #reset indeces\n\n return data\n\n\ndef splitndcount(lt_ans, conds, nuRou, matchind, ids):\n \n counts, counts_sep1, counts_sep2 = ([] for i in range(3))\n\n for i in range(len(lt_ans)):\n \n #get index of subject from sess1\n num1 = matchind[i][1]\n \n #initialize some lists\n closed, video, game = ([] for i in range(3))\n \n #assign trials to individual conditions\n for j in range(nuRou):\n \n #select individual data\n ans = lt_ans[i][j]\n \n if conds.loc[num1][j] == 'closed':\n closed += ans\n \n elif conds.loc[num1][j] == 'video':\n video += ans\n \n else:\n game += ans\n \n #count correctly answered trials\n count_r = count(closed)\n count_v = count(video)\n count_g = count(game)\n \n #append to one big list\n counts.append([count_r.recall, count_v.recall, count_g.recall,\n count_r.recog, count_v.recog, count_g.recog]) \n \n counts_sep1.append(\n [count_r.recall1, count_v.recall1, count_g.recall1, \n count_r.recog1, 
count_v.recog1, count_g.recog1])\n counts_sep2.append(\n [count_r.recall2, count_v.recall2, count_g.recall2, \n count_r.recog2, count_v.recog2, count_g.recog2])\n\n #transform into df, add ids, and sort according to ids\n counts = rearange(counts, ids)\n counts_sep1 = rearange(counts_sep1, ids)\n counts_sep2 = rearange(counts_sep2, ids)\n\n return counts, counts_sep1, counts_sep2\n \n \n","sub_path":"lt_count.py","file_name":"lt_count.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"164908391","text":"#remove the logging info\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n#import tensorflow library\nimport tensorflow as tf\n\n# Construct a `Session` to execute the graph, but in compatibility mode\nwith tf.compat.v1.Session() as sess:\n # Build a graph.\n x = tf.constant(1,name='x')\n y = tf.Variable(x+9,name='y')\n\n\t#initialize variables but in compatibility mode\n model=tf.compat.v1.global_variables_initializer()\n\n\t#run the variabels initialization\n sess.run(model)\n\n #run the variables\n print(sess.run(y))","sub_path":"Getting-Started-with-TensorFlow-master/Chapter 1/2. first_session_only_tensorflow.py","file_name":"2. first_session_only_tensorflow.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"25223873","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# linkedin_jog_scraping.py\n\nimport os\nimport pandas as pd\nfrom parsel import Selector\nfrom time import sleep\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nif config.get('HEADLESS', 'headless') == 'Yes':\n # install webdrive when needed runing headless\n opts=webdriver.ChromeOptions()\n opts.headless=True\n driver = webdriver.Chrome(ChromeDriverManager().install() ,options=opts)\nelse:\n # install webdrive when needed runing browser\n driver = webdriver.Chrome(ChromeDriverManager().install())\n\nprint('\\nExecuting Linkedin Login...')\n# driver.get method() will navigate to a page given by the URL address\ndriver.get('https://www.linkedin.com/login')\n\n# locate email form by element_by_id\nusername = driver.find_element_by_id('username')\n\n# send_keys() to simulate key strokes\nusername.send_keys(config.get('LINKEDIN_LOGIN', 'email'))\n\n# locate password form by_class_name\npassword = driver.find_element_by_id('password')\n\n# send_keys() to simulate key strokes\npassword.send_keys(config.get('LINKEDIN_LOGIN', 'password'))\n\n# locate submit button by_class_name\nlog_in_button = driver.find_element_by_class_name('btn__primary--large')\n\n# locate submit button by_xpath\nlog_in_button = driver.find_element_by_xpath('//*[@type=\"submit\"]')\nlog_in_button.click()\n\nprint('\\nStarting Posting Search...')\n# driver goest to the jobs page\ndriver.get('https://www.linkedin.com/jobs/')\nsleep(2)\n\n# Start search term\nsearch_job = driver.find_element_by_xpath('//*[@type=\"text\"]')\nsearch_job.send_keys(config.get('LINKEDIN_LOGIN', 
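# The LinkedIn scraper here pulls credentials and search terms from config.ini
# via configparser. A self-contained sketch of the same lookup; the section
# name matches the script, the key values are placeholders:
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[LINKEDIN_LOGIN]\nemail = user@example.com\nsearch_term = data engineer\n")
assert cfg.get('LINKEDIN_LOGIN', 'email') == 'user@example.com'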
'search_term'))\nsleep(1)\n#search.send_keys(Keys.RETURN)\n\n# location\nsearch_location = driver.find_element_by_xpath('//input[starts-with(@id,\"jobs-search-box-location\")]')\nsearch_location.send_keys(Keys.COMMAND, 'a') #COMMAND is the mac keyboard control\nsearch_location.send_keys(Keys.BACKSPACE)\nsearch_location.send_keys(config.get('LINKEDIN_LOGIN', 'country'))\nsearch_location.send_keys(Keys.RETURN)\nsleep(3)\n\n# Gets the URL from the search result\nlinkedin_result = driver.current_url\n\n# Scroll job list to the end of first page\nrecentList = driver.find_elements_by_class_name('jobs-search-results__list-item')\nfor list in recentList :\n driver.execute_script(\"arguments[0].scrollIntoView();\", list)\n sleep(0.1)\n\n# Get full list of positions name\nposition_name = driver.find_elements_by_class_name('job-card-list__title')\nposition_name = [url.text for url in position_name]\nposition_name\nlen(position_name)\n\n# Get listing Company Name\ncompany_name = driver.find_elements_by_css_selector('.job-card-container__company-name')\ncompany_name = [url.text for url in company_name]\ncompany_name\nlen(company_name)\n\n# Get listing location\njob_location = driver.find_elements_by_xpath('//div[starts-with(@class,\"artdeco-entity-lockup__caption\")]')\njob_location = [url.text for url in job_location]\njob_location\nlen(job_location)\n\n# Get full list of links positions\nposition_link = driver.find_elements_by_css_selector(\"div.artdeco-entity-lockup__title > a\")\nposition_link = [link.get_attribute(\"href\") for link in position_link]\nposition_link\nlen(position_link)\n\nurls_linkedin = []\nfor lin in position_link:\n terminator = lin.index('?')\n urls_linkedin.append(lin[:terminator])\n\n\n\nif os.path.isfile('opportunities.csv') is True:\n opportunities = pd.read_csv('opportunities.csv')\nelse:\n dict = {'Job Title': [], 'Company Name': [], 'Location': [], 'Direct URL': [], 'TrimmedLinkedin' : [],'LinkedinLink': []}\n df = pd.DataFrame(dict)\n df.to_csv('opportunities.csv',mode = 'a', header = True, index = False)\n opportunities = pd.read_csv('opportunities.csv')\n\nprint('\\nTotal posts: ',len(position_link))\nprint('\\nStart buinding direct links list ...')\nmain_window_name = driver.window_handles[0]\n\ndef write_to_csv(posname,compname,joblocation,direct,link):\n dict = {'Job Title': [posname], 'Company Name': [compname], 'Location': [joblocation], 'Direct URL': [direct],'TrimmedLinkedin' : [urlslin], 'LinkedinLink': [link]}\n df = pd.DataFrame(dict)\n df.to_csv('opportunities.csv',mode = 'a', header = False, index = False)\n\ndef apply_position():\n apply_btn = driver.find_element_by_xpath(\"//button[contains(@class,'jobs-apply-button')]\")\n apply_btn.click()\n #driver.execute_script(\"window.open('http://google.com', 'new_window')\")\n sleep(5)\n #print(driver.window_handles[counter])\n window_name = driver.window_handles[1]\n driver.switch_to.window(window_name=window_name)\n direct_url.append(driver.current_url)\n driver.close()\n sleep(5)\n driver.switch_to.window(window_name=main_window_name)\n #counter += 1\n #print('Current counter = ', counter)\n\ndirect_url = []\nfor link in position_link :\n driver.get(link)\n sleep(3)\n # status = 'not applied'\n try:\n try:\n driver.find_element_by_xpath(\"//a//li-icon[contains(@type,'document-icon')]\")\n direct_url.append('Applied')\n #counter += 1\n #print('Current counter = ', counter)\n\n except NoSuchElementException:\n driver.find_element_by_xpath(\"//button//li-icon[contains(@type,'linkedin-bug')]\")\n 
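# The nested try/except just above uses NoSuchElementException as a presence
# probe to tell "already applied" from "Easy Apply". A hypothetical helper with
# the same Selenium-3 style calls this script uses (the xpath argument is
# whatever marker element you are probing for):
from selenium.common.exceptions import NoSuchElementException

def element_exists(driver, xpath):
    # True when the element is present on the current page, False otherwise
    try:
        driver.find_element_by_xpath(xpath)
        return True
    except NoSuchElementException:
        return False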
direct_url.append('Easy Apply')\n sleep(5)\n # window_name = driver.window_handles[counter]\n driver.switch_to.window(window_name=main_window_name)\n #counter += 1\n #print('Current counter = ', counter)\n\n except NoSuchElementException:\n apply_position()\n\ndef validate_url(urlslin):\n emp_df = pd.read_csv('opportunities.csv',usecols=[4])\n # print(emp_df)\n # f2 = ['https://www.linkedin.com/jobs/view/2257024918/?eBP=JOB_SEARCH_ORGANIC&recommendedFlavor=COMPANY_RECRUIT&refId=3051f9a6-115e-47c3-a266-fe1fc163d1b3&trackingId=FteGSeadtXOUrgJHqXbVxw%3D%3D&trk=flagship3_search_srp_jobs']\n f2 = [urlslin]\n if f2 in emp_df.values:\n print('TRUE')\n return 'TRUE'\n else:\n print('FALSE')\n return 'FALSE'\n\nprint('\\nWriting data to CSV...')\ncount_exist = 0\ncount_inexist = 0\nfor posname,compname,joblocation,direct,urlslin,link in zip(position_name,company_name,job_location,direct_url,urls_linkedin,position_link):\n print(urlslin)\n x = validate_url(urlslin)\n if x == 'TRUE':\n print('Position exists: ',count_exist)\n break\n else:\n count_inexist += 1\n print('Positions being added: ',count_inexist)\n write_to_csv(posname,compname,joblocation,direct,link)\n\nprint('\\nBUILDING REPORT --------')\nsleep(3)\nprint('Total positions found: ',len(position_name))\nprint('Total new positions added: ',count_inexist)\nprint('Total repeated positions: ',len(position_name)-count_inexist)\n","sub_path":"linkedin_job_scraping.py","file_name":"linkedin_job_scraping.py","file_ext":"py","file_size_in_byte":7142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"570004288","text":"#!/usr/bin/env python2\n\n\n###########################################\n# #\n# This is a super simple environment to #\n# use in testing of basic pygame behav- #\n# ior. 
#\n# #\n###########################################\n\nimport pygame, sys\n\npygame.init()\nscreen = pygame.display.set_mode((640,480))\nclock = pygame.time.Clock()\n\nclass Circe(pygame.sprite.Sprite):\n image = pygame.Surface((100,100))\n image.set_colorkey((0,0,0))\n pygame.draw.circle(image, (255,0,0), (50,50), 50, 2)\n image.convert_alpha()\n\n\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = Circe.image\n self.rect = self.image.get_rect()\n self.radius = 50\n def update(self):\n self.rect.center = pygame.mouse.get_pos()\nsprites = pygame.sprite.Group()\nc = Circe()\nsprites.add(c)\nwhile 1:\n screen.fill((240,240,240))\n\n sprites.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n sprites.draw(screen)\n pygame.display.flip()\n clock.tick(60)\n","sub_path":"sub-squares/workscreen/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186670796","text":"# -*- coding: utf-8 -*-\n# Problem 2: calculate the minimum fixed montly payment needed in order pay off\n# a credit card balance within 12 months\n\nbalance = 999999\nannualInterestRate = 0.18\nfixed_pay = 10\nnum_guess = 0\n# fixed_pay = round(balance // 12, -1) # another initial fixed payment to start\ndef remaining_balance_fixed_pay(balance, fixed_pay, annualInterestRate, n):\n \"\"\"\n balance: current balance\n fixed_pay: fixed montly payment(multiple of $10, int)\n annualInterestRate: annual interest rate as a decimal\n n: number of months remaining(n>=0, int)\n\n returns: remaining balance after n months by a fixed monthly payment\n \"\"\"\n if n == 0:\n return balance\n else:\n unpaid_balance = balance - fixed_pay\n interest = annualInterestRate / 12.0 * unpaid_balance\n balance = unpaid_balance + interest\n return remaining_balance_fixed_pay(balance, fixed_pay, annualInterestRate, n-1)\n\nwhile remaining_balance_fixed_pay(balance, fixed_pay, annualInterestRate, 12) > 0:\n num_guess += 1\n fixed_pay += 10\nprint('Number of guess =', num_guess)\nprint(\"Lowest Payment:\", round(fixed_pay, -1))\n","sub_path":"PS2/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445911682","text":"from django.db import models\nfrom django.core.exceptions import ValidationError, NON_FIELD_ERRORS\n\nclass Subscriber(models.Model):\n subscriber = models.CharField(\"phone number\", unique=True, max_length=40)\n join_date = models.DateTimeField(\"join date\", auto_now_add=True)\n score = models.IntegerField(\"user total score\", default=0)\n serviceid = models.IntegerField(\"ServiceId\") #step-1 ashkan\n\n class Meta():\n db_table = 'Subscribers'\n\n def __unicode__(self):\n return self.subscriber\n\n @classmethod\n def add_score(cls, subscriber, new_score):\n subscriber = cls.objects.filter(subscriber=subscriber).update(score=models.F(\"score\")+new_score)\n\n\nclass History(models.Model):\n subscriber = models.CharField(\"phone number\", max_length=40)\n last_node = models.CharField(\"last node\", max_length=40, null=True )\n current_node = models.CharField(\"current node\", max_length=40, null=True)\n number = models.CharField(max_length=2, null=True,blank=True)\n score = models.SmallIntegerField('score', default=0)\n new_call = models.BooleanField(default=False)\n is_error = models.BooleanField(default=False)\n created_on = 
models.DateTimeField(auto_now_add=True)\n serviceid = models.IntegerField(\"ServiceId\") #step-2 ashkan\n\n class Meta():\n db_table= 'history'\n verbose_name_plural = \"histories\"\n\n def __unicode__(self):\n return str(self.subscriber)\n\nclass Node(models.Model):\n name = models.CharField(max_length=40, unique=True, primary_key=True)\n command = models.CharField(\"command which will be run\", max_length=250, blank=True, default='')\n related_file = models.CharField(\"voice file\", max_length=250)\n error_file = models.CharField(\"error file related to this node\", max_length=250, blank=True, default='')\n is_end = models.BooleanField('end node', default=False)\n serviceid = models.IntegerField(\"ServiceId\") #step-2 ashkan\n\n\n class Meta():\n db_table = 'Nodes'\n\n\n def __unicode__(self):\n return self.name\n\n\nclass Link(models.Model):\n node = models.ForeignKey(Node, verbose_name=\"target\")\n upper_node = models.ForeignKey(Node, related_name='upper_node', verbose_name=\"source\")\n number = models.PositiveSmallIntegerField(\"number associated to this link\")\n logic = models.CharField(\"logic for this number\", max_length=250, blank=True, default='')\n priority = models.SmallIntegerField(\"link priority\", blank=True, default=0)\n score = models.SmallIntegerField(default=0)\n serviceid = models.IntegerField(\"ServiceId\") #step-3 ashkan\n\n class Meta():\n db_table = 'Links'\n #unique_together = ((\"node\", \"upper_node\", \"number\"),)\n\n def __unicode__(self):\n return \"%s --%s--> %s\"%(self.upper_node, self.number, self.node)\n\n def validate_unique(self, *args, **kwargs):\n super(Link, self).validate_unique(*args, **kwargs)\n\n if self.logic: self.logic = self.logic.lower().strip()\n query = self.__class__.objects.filter(upper_node=self.upper_node, number= self.number, logic = self.logic, priority=self.priority)\n\n if query.exists():\n raise ValidationError({NON_FIELD_ERRORS: [\n \"tone number '%s' already assigned to '%s' to '%s' link\"%(self.number, query.first().node, self.upper_node.name)\n ]}\n )\n\n if self.upper_node != Node.objects.get(name='welcome'):\n query = self.__class__.objects.filter(node=self.upper_node)\n if not query.exists():\n raise ValidationError({NON_FIELD_ERRORS:[\n \"this will create orphaned link\"\n ]})\n\n\nclass Game(models.Model):\n subscriber = models.CharField(\"phone number\", max_length=40)\n start = models.DateTimeField(\"start time\")\n end = models.DateTimeField(\"end time\")\n duration = models.IntegerField(\"Duration\")\n score = models.SmallIntegerField()\n serviceid = models.IntegerField(\"ServiceId\")\n\n class Meta():\n db_table = 'games'\n\n def __unicode__(self):\n return self.subscriber\n\n def save(self, *args, **kwargs):\n self.duration = (self.end - self.start).seconds\n super(Game,self).save(*args, **kwargs)\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197929819","text":"# title: lowest-common-ancestor-of-a-binary-tree\n# detail: https://leetcode.com/submissions/detail/275049560/\n# datetime: Fri Nov 1 17:04:00 2019\n# runtime: 88 ms\n# memory: 26.1 MB\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n if root is None or root == p or root == q:\n return root\n left 
= self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n \n if left is None:\n if right is None:\n return None\n return right\n if right is None:\n return left\n return root\n \n ","sub_path":"leetcode/lowest-common-ancestor-of-a-binary-tree/275049560.py","file_name":"275049560.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528537631","text":"from rest_framework.permissions import BasePermission\nfrom rest_framework import exceptions\n\n\nallowed_params = {'api_key': 'required, case-sensitive',\n 'category': 'integer, category id in database',\n 'category__name': 'string, category name, case-sensitive',\n 'api_id': 'integer, Hacker News API story unique id',\n 'api_id__gte': 'description', 'api_id__lte': 'description',\n 'record__by': 'record, which was created by a specific user, case-sensitive',\n 'record__title': 'story title, case-sensitive',\n 'record__has_key': 'record keys, see all list on Hacker News API, case-sensitive',\n 'record__url': 'url associated with the story, case-sensitive'}\n\n\nclass APIPermission(BasePermission):\n\n def has_permission(self, request, view):\n\n if request.user.is_superuser:\n return True\n if not request.query_params.get('api_key') == '123':\n raise exceptions.PermissionDenied(\n detail='Required parameter api_key=YOUR_API_KEY is missing or incorrect.')\n else:\n if not all([p in allowed_params.keys() for p in request.query_params.keys()]):\n raise exceptions.PermissionDenied(\n detail='Not recognized parameters in request. Allowed parameters {}.'.format(list(allowed_params)))\n else:\n return True\n","sub_path":"search/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"39291186","text":"# edinbustrack V0.05 - get all of the upcoming services at a particular stop\n# (c) Mark Pentler 2017\n#\n# This uses screen scraping as you appear to have to be a proper developer to\n# get access to the Edinburgh City Council's BusTracker API\n\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef get_bus_times(stop_id): # returns a list of expected buses at the chosen stop\n\turl = \"http://www.mybustracker.co.uk/?module=mobile&mode=1&busStopCode=\" + stop_id + \"&subBusStop=Display+Departures\"\n\tr = requests.get(url) # make our request\n\tdata = r.text\n\tsoup = BeautifulSoup(data, \"html.parser\") # bs4 doing its work\n\tstop_data = soup.find_all(\"tr\", attrs={\"style\": None, \"class\": None}) # grab every single bus entry in the table - this ID changes!\n\tservices = [] # service list goes in here\n\n\tfor row_num, row in enumerate(stop_data):\n\t\tcols = row.find_all(\"td\") # this will grab every column per bus\n\t\tservices.append ([row_num, cols[0].get_text(strip=True).encode(\"ascii\"), cols[2].get_text(strip=True).encode(\"ascii\")]) # extract service and time remaining\n\treturn services\n\ndef get_stop_name(stop_id):\n\turl = \"http://www.mybustracker.co.uk/?module=mobile&mode=1&busStopCode=\" + stop_id + \"&subBusStop=Display+Departures\"\n\tr = requests.get(url) # make our request\n\tdata = r.text\n\tsoup = BeautifulSoup(data, \"html.parser\")\n\tstop_name = soup.find(\"span\", attrs={\"class\": \"resultTitleText\"}).get_text(strip=True).encode(\"ascii\").split(\"Next departures from \") # find the stop name and chop it out of the string\n\treturn 
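# get_stop_name in the edinbustrack record finds the span with class
# resultTitleText and splits off the stop name. A runnable sketch on a canned
# HTML snippet (the stop name "Princes Street" is invented for the example):
from bs4 import BeautifulSoup

html = '<span class="resultTitleText">Next departures from Princes Street</span>'
soup = BeautifulSoup(html, "html.parser")
text = soup.find("span", attrs={"class": "resultTitleText"}).get_text(strip=True)
assert text.split("Next departures from ")[1] == "Princes Street"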
stop_name[1]\n","sub_path":"edinbustrack.py","file_name":"edinbustrack.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"551472327","text":"from flask import Blueprint\nfrom flask import render_template\n\nroot_api = Blueprint(\n 'root',\n __name__,\n template_folder='templates',\n static_folder='static',\n static_url_path='/static/root')\n\n@root_api.route('/')\ndef root_html():\n return render_template('index.html')\n","sub_path":"test_code/python/python_flask/blueprint/root/root_route.py","file_name":"root_route.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"78731926","text":"\nimport yaml\nimport pandas as pd\nimport os\nimport os.path\nimport shutil\nimport pickle\nimport datetime\nimport time\nimport numpy as np\n\n__version__ = '0.1'\n\n\n\n##############################################################\n# #\n# CUSTOMIZE FOR KAGGLE COMPETITION #\n# #\n##############################################################\n\nfrom sklearn.metrics import log_loss\n###\n#\n# function to calculate Kaggle performance metric during CV \n# Must be customized for each competition\n#\n###\ndef calculateKaggleMetric(y=None,y_hat=None):\n return log_loss(y,y_hat)\n\n\n########### END OF KAGGLE COMPETITION CUSTOMIZATION #########\n\n###\n#\n# Class for generating feature sets\n#\n###\nclass FeatureGenerator():\n \"\"\"\n Methods useful for building feature sets for modeling. \n\n Expected usage:\n \n fs = FeatureGenrator()\n \n X_train, y_train, X_test = fs.getRawData()\n \n # user specifed code to create feature set and perform tasks such as\n # imputing missing values\n # encoding categorical values\n # standardize values\n # create synthetic variables\n X_train_fs = ...\n y_train_fs = ...\n X_test_fs =\n \n fs.saveFeatureSet(X_train_fs, y_train_fs, X_test_fs)\n \n Arguments:\n in_dir: directory containing the input data set to transform to a \n feature set.\n out_dir: directory to contain the new feature set.\n\n \"\"\" \n \n def __init__(self,\n in_dir=None, # directory containing input training/test data sets\n out_dir=None # directory to contain generated feature set\n ):\n\n self.in_dir = in_dir \n self.out_dir = out_dir \n self.__version__ = __version__\n \n #\n # get parameters \n #\n with open('./config.yml') as f:\n self.CONFIG = yaml.load(f.read())\n \n self.root_dir = self.CONFIG['ROOT_DIR']\n self.id_vars = self.CONFIG['ID_VAR'] \n self.target_var = self.CONFIG['TARGET_VAR'] \n \n self._makeOutputDirectory()\n \n def _makeOutputDirectory(self):\n #create directory to hold feature set\n # clean out out_dir\n try:\n shutil.rmtree(os.path.join(self.root_dir,'data',self.out_dir))\n except:\n pass\n \n os.makedirs(os.path.join(self.root_dir,'data',self.out_dir))\n \n \n def getRawData(self):\n \"\"\"\n default behaviour - can be overridden for different raw data structures\n\n Assumes existence of train.csv and test.csv in in_dir location\n Expected function: create raw_id_df, raw_target_df, raw_train_features_df\n and raw_test_features_df data frames.\n \n Returns tuple containing the following elements\n raw_train_features_df: dataframe containing only train predictors to transform\n raw_train_target_df: dataframe containing target variable transform\n raw_test_features_df: dataframe containing test predictors to transform\n \"\"\"\n df = 
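# FeatureGenerator.getRawData below separates id columns, the target, and the
# predictors by set arithmetic on df.columns. The same idea on a toy frame;
# the column names are made up here, while ID_VAR and TARGET_VAR come from
# config.yml in the real class:
import pandas as pd

df = pd.DataFrame({'id': [1, 2], 'target': [0, 1], 'f1': [3, 4], 'f2': [5, 6]})
predictors = sorted(set(df.columns) - set(['id']) - set(['target']))
assert predictors == ['f1', 'f2']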
pd.read_csv(os.path.join(self.root_dir,'data',self.in_dir,'train.csv'))\n \n # split data into identifiers, predictors and target data frames\n self.raw_train_id_df = df.loc[:,self.id_vars]\n raw_train_target_df = df.loc[:,[self.target_var]]\n \n # isolate predictor variables\n predictors = sorted(set(df.columns) - set(self.id_vars) - set([self.target_var]))\n \n # isoloate training predictiors\n raw_train_features_df = df.loc[:,predictors]\n \n # get test data set\n df = pd.read_csv(os.path.join(self.root_dir,'data',self.in_dir,'test.csv'))\n self.raw_test_id_df = df.loc[:,self.id_vars]\n raw_test_features_df = df.loc[:,predictors]\n \n return raw_train_features_df, raw_train_target_df, raw_test_features_df\n \n \n def saveFeatureSet(self,new_train_features_df=None,\n new_train_target_df=None,\n new_test_features_df=None):\n \"\"\"\n default behaviour - can be overriddent for different new feature storage\n \n append id_vars and target_var save new_train_features_df and \n new_test_features_df in self.out_dir\n \n append id_vars and target to new feature set and save as csv\n \n Arguments:\n new_train_features_df: dataframe containg transformed train predictors\n new_train_target_df: dataframe containing transformed target variable\n new_test_features_df: dataframe containing tarnsformed test predictors\n \"\"\"\n\n self.raw_train_id_df.join(new_train_target_df)\\\n .join(new_train_features_df)\\\n .sort_values(self.id_vars)\\\n .to_csv(os.path.join(self.root_dir,'data',self.out_dir,'train.csv'),index=False)\n \n self.raw_test_id_df\\\n .join(new_test_features_df)\\\n .sort_values(self.id_vars)\\\n .to_csv(os.path.join(self.root_dir,'data',self.out_dir,'test.csv'),index=False)\n \n###\n#\n# Class for training models\n#\n### \nclass ModelTrainer():\n \"\"\"\n Methods to train model, convert predictions from the model into features\n for the next level.\n \n Arguments:\n ModelClass: Python class implementation of the model algorithm. 
Currently\n this class is assumed to following class structure of \n scikit-learn.\n model_params: Python dictionary specifying parameters for the model.\n model_id: character string used to identify the model\n test_data_method: method for generating test data predictions\n None - default, retrieve value from config.yml\n all_data_model - train model on all data to generate test data prediction\n k-fold_average_model - average test data predictions from k-fold training\n feature_set: Feature identifier used in FeatureGenerator\n train_ds: training data set\n test_ds: test data set\n \n \"\"\"\n \n def __init__(self,\n ModelClass=None, #Model Algorithm\n model_params={}, # Model hyper-parameters\n model_id=None, # model identifier\n test_prediction_method=None, #training method\n feature_set=None, # feature set to use\n train_ds='train.csv', # feature set training data set\n test_ds='test.csv' # feature set test data set\n ): \n \n self.ModelClass = ModelClass\n self.model_params = model_params\n self.model_id = model_id\n self.test_prediction_method = test_prediction_method\n self.feature_set = feature_set\n self.train_ds = train_ds\n self.test_ds = test_ds\n self.out_dir = \"M\"+model_id\n self.__version__ = __version__\n \n # this implment fix to Issue #1\n self.max_bytes = 2**31 - 1\n \n #\n # get global parameters \n #\n with open('./config.yml') as f:\n self.CONFIG = yaml.load(f.read())\n \n self.root_dir = self.CONFIG['ROOT_DIR']\n \n if self.test_prediction_method == None:\n self.test_prediction_method = self.CONFIG['TEST_PREDICTION_METHOD']\n elif test_prediction_method == 'all_data_model'\\\n or test_prediction_method == 'k-fold_average_model':\n self.test_prediction_method = test_prediction_method\n else:\n raise ValueError(\"test_prediction_method=\" + test_prediction_method \n + \", valid vaules are 'all_data_model' or 'k-fold_average_model'\") \n \n print('Model training starting for {} with feature set {} at {:%Y-%m-%d %H:%M:%S}'\\\n .format(self.model_id,self.feature_set,datetime.datetime.now()))\n print('test_prediction_method: {}'.format(self.test_prediction_method))\n \n \n # added to fix Issue #1\n def _saveModelToDisk(self,model=None):\n # convert to byte stream\n bytes_out = pickle.dumps(model)\n \n # write out byte stream in chunks of size self.max_bytes\n with open(os.path.join(self.CONFIG['ROOT_DIR'],'models',\n self.model_id,self.model_id+'_model.pkl'),'wb') as f:\n for idx in range(0, len(bytes_out), self.max_bytes):\n f.write(bytes_out[idx:idx+self.max_bytes])\n\n # added to fix Issue #1\n def _loadModelFromDisk(self):\n \n # initailize area to recieve file chunks\n bytes_in = bytearray(0)\n \n # get total size of saved model file\n model_file_name = os.path.join(self.CONFIG['ROOT_DIR'],'models',self.model_id,\n self.model_id+'_model.pkl')\n \n input_size = os.path.getsize(model_file_name)\n \n # read in saved model in max_byte chunks\n with open(model_file_name,'rb') as f:\n for _ in range(0, input_size, self.max_bytes):\n bytes_in += f.read(self.max_bytes) \n \n # recreate model object\n model = pickle.loads(bytes_in) \n \n return model\n \n \n def cleanPriorResults(self):\n \n # remove old \n try:\n shutil.rmtree(os.path.join(self.root_dir,'data',self.out_dir))\n except:\n pass\n \n os.makedirs(os.path.join(self.root_dir,'data',self.out_dir))\n \n try:\n os.remove(os.path.join(self.CONFIG['ROOT_DIR'],'models',\n self.model_id,\n self.model_id+'_model.pkl'))\n except:\n pass\n \n try:\n os.remove(os.path.join(self.CONFIG['ROOT_DIR'],'models',\n 
self.model_id,\n self.model_id+'_submission.csv'))\n except:\n pass\n \n \n\n def trainModel(self):\n \n print('Starting model training: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n start_training = time.time()\n \n #\n # retrieve KFold specifiction\n #\n with open(os.path.join(self.CONFIG['ROOT_DIR'],'data','k-fold_specification.pkl'),'rb') as f:\n k_folds = pickle.load(f)\n \n \n #\n # generate features for next level\n #\n \n # retrieve training data\n train_df = pd.read_csv(os.path.join(self.CONFIG['ROOT_DIR'],'data',\n self.feature_set,self.train_ds))\n \n predictors = sorted(list(set(train_df.columns) - \n set(self.CONFIG['ID_VAR']) - set([self.CONFIG['TARGET_VAR']])))\n \n\n \n \n #\n # create features for next level using the hold out set\n #\n self.cv_performance_metric = []\n \n models_list = []\n next_level = []\n i = 0\n for fold in k_folds:\n i += 1\n print('running fold: {:d} at {:%Y-%m-%d %H:%M:%S}'.format(i,datetime.datetime.now()))\n train_idx = fold[0]\n X_train = train_df.iloc[train_idx,:]\n X_train = X_train.loc[:,predictors]\n y_train = train_df[self.CONFIG['TARGET_VAR']].iloc[train_idx]\n \n model = self.ModelClass(**self.model_params)\n \n model.fit(X_train,y_train)\n \n #generate feature for next level\n # get indices for hold out set\n holdout_idx = fold[1]\n \n # set up predictors and target for hold out set\n X_holdout = train_df.iloc[holdout_idx,:]\n id_holdout = X_holdout.loc[:,self.CONFIG['ID_VAR']]\n X_holdout = X_holdout.loc[:,predictors]\n y_holdout = train_df[self.CONFIG['TARGET_VAR']].iloc[holdout_idx]\n \n # make preduction on hold out set to calculate metric and generate\n # features for next level of stack\n y_hat = model.predict_proba(X_holdout)\n self.cv_performance_metric.append(calculateKaggleMetric(y_holdout,y_hat))\n \n # geneate features for next level\n y_hat = pd.DataFrame(y_hat,index=id_holdout.index)\n y_hat.columns = [self.model_id+'_'+str(col) for col in y_hat.columns]\n y_hat = id_holdout.join(y_holdout).join(y_hat)\n \n next_level.append(y_hat)\n \n if self.test_prediction_method == 'k-fold_average_model':\n models_list.append(model)\n \n \n #\n # combine the generated features into single dataframe & save to disk\n #\n pd.concat(next_level).sort_values(self.CONFIG['ID_VAR'])\\\n .to_csv(os.path.join(self.CONFIG['ROOT_DIR'],'data',\n self.out_dir,\n 'train.csv'),\n index=False)\n \n # method for handling test data\n if self.test_prediction_method == 'all_data_model':\n #\n # train model on complete training data set\n #\n \n X_train = train_df[predictors]\n y_train = train_df[self.CONFIG['TARGET_VAR']]\n \n self.training_rows = X_train.shape[0]\n self.training_columns = X_train.shape[1]\n \n model = self.ModelClass(**self.model_params)\n \n\n model.fit(X_train,y_train)\n\n \n self._saveModelToDisk(model)\n \n else:\n self.training_rows = train_df.shape[0]\n self.training_columns = len(predictors)\n self._saveModelToDisk(models_list) \n \n self.training_time = time.time() - start_training\n\n def createTestPredictions(self,test_ds='test.csv'):\n #\n # create Kaggle Submission\n #\n # Assumes: trained model has been saved under \"`model_id`_model.pkl\"\n #\n \n print('Starting createTestPredictions: {:%Y-%m-%d %H:%M:%S}'\\\n .format(datetime.datetime.now()))\n \n model = self._loadModelFromDisk()\n\n # create data set to make predictions\n test_df = pd.read_csv(os.path.join(self.CONFIG['ROOT_DIR'],'data',\n self.feature_set,self.test_ds))\n \n predictors = sorted(list(set(test_df.columns) - \n set(self.CONFIG['ID_VAR']) - 
set([self.CONFIG['TARGET_VAR']])))\n \n test_id = test_df[self.CONFIG['ID_VAR']]\n \n # if single model, then generate predictions for test data\n # if list then generate predictions for each model in list and average test data prediction\n if isinstance(model,(list)):\n pred_list = []\n for m in model:\n y_hat = m.predict_proba(test_df[predictors])\n pred_list.append(y_hat)\n \n preds = np.dstack(pred_list).mean(axis=2)\n predictions = pd.DataFrame(preds,index=test_df.index)\n \n else:\n predictions = pd.DataFrame(model.predict_proba(test_df[predictors]),index=test_df.index)\n \n predictions.columns = [self.model_id+'_'+str(x) for x in list(predictions.columns)]\n \n\n # save test predictions for next level\n pred_df = test_id.join(predictions).sort_values(self.CONFIG['ID_VAR'])\n pred_df.to_csv(os.path.join(self.CONFIG['ROOT_DIR'],'data',\n self.out_dir,\n 'test.csv'), index=False)\n \n\n\n def createKaggleSubmission(self):\n print('Starting createKaggleSubmission: {:%Y-%m-%d %H:%M:%S}'\\\n .format(datetime.datetime.now()))\n \n # retrieve test predictions\n predictions = pd.read_csv(os.path.join(self.CONFIG['ROOT_DIR'],'data',\n self.out_dir,\n 'test.csv'))\n \n ##############################################################\n # #\n # CUSTOMIZE FOR KAGGLE COMPETITION #\n # #\n ##############################################################\n \n # save Kaggle submission\n submission = predictions[self.CONFIG['ID_VAR']].join(predictions[self.model_id+'_1'])\n submission.columns = self.CONFIG['KAGGLE_SUBMISSION_HEADERS']\n \n ########### END OF KAGGLE COMPETITION CUSTOMIZATION #########\n \n submission.to_csv(os.path.join(self.CONFIG['ROOT_DIR'],'models',\n self.model_id,\n self.model_id+'_submission.csv'),index=False)\n \n print('Completed createKaggleSubmission: {:%Y-%m-%d %H:%M:%S}'\\\n .format(datetime.datetime.now()))\n \n###\n#\n# Model Performance Tracker\n#\n###\nclass ModelPerformanceTracker():\n \n tracking_file = None\n \n def __init__(self,model_trainer=None):\n self.model_trainer = model_trainer\n self.__version__ = __version__\n \n #\n # get global parameters \n #\n with open('./config.yml') as f:\n self.CONFIG = yaml.load(f.read())\n \n self.tracking_file = os.path.join(self.CONFIG['ROOT_DIR'],'results','model_performance_data.csv')\n \n \n \n def recordModelPerformance(self,\n cv_metric_list=None # list of cv performance metrics\n ):\n # retrieve basic model information from model trainer\n model_params = \"{'model_params': \" + str(self.model_trainer.model_params) \\\n + \", 'test_prediction_method': '\" + self.model_trainer.test_prediction_method \\\n + \"'}\"\n \n model_id = self.model_trainer.model_id\n feature_set = self.model_trainer.feature_set\n date_time = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())\n \n #create a row\n df = pd.DataFrame([date_time,\n model_id,\n feature_set,\n self.model_trainer.training_rows,\n self.model_trainer.training_columns,\n self.model_trainer.training_time,\n np.min(self.model_trainer.cv_performance_metric), #cv_min_metric\n np.max(self.model_trainer.cv_performance_metric), #cv_max_metric\n np.mean(self.model_trainer.cv_performance_metric), #cv_avg_metric\n \"\", #public_leaderboard\n model_params]).T\n df.columns = ['date_time',\n 'model_id',\n 'feature_set',\n 'number_of_rows',\n 'number_of_columns',\n 'training_time',\n 'cv_min_metric',\n 'cv_max_metric',\n 'cv_avg_metric',\n 'public_leaderboard',\n 'model_params']\n \n \n # write out model performance metric\n if not os.path.isfile(self.tracking_file):\n \n 
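# recordModelPerformance below appends one row per run and writes the CSV
# header only when the tracking file does not yet exist. The same pattern as a
# small stand-alone helper (the path and row layout are illustrative):
import os
import pandas as pd

def append_row(path, row_df):
    # write the header on first creation only, then append without it
    first_write = not os.path.isfile(path)
    row_df.to_csv(path, mode='a', header=first_write, index=False)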
df.to_csv(self.tracking_file, header=True, index=False)\n \n else: # else it exists so append without writing the header\n \n df.to_csv(self.tracking_file, mode='a', header=False, index=False)\n \n\n","sub_path":"framework/model_stacking.py","file_name":"model_stacking.py","file_ext":"py","file_size_in_byte":19976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"235439847","text":"import praw\nimport operator\nimport sys\nimport urllib2\nimport argparse\n\nuser_agent = (\"Creates html page with all of the youtube videos embeded like a playlist by u/datjeff\")\nr=praw.Reddit(user_agent=user_agent)\n\nsubs = [\"music\",\"hiphopheads\",\"Dubstep\",\"Jazz\"]\n\nlinkpage = open(\"SubredditPlaylists.html\",'w')\npagecontent = \"Reddit Music Subs Auto playlist
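The model_stacking.py record above builds its next-level training features from out-of-fold predictions: each fold's model predicts only the rows it never saw during fitting. A minimal sketch of that pattern, assuming scikit-learn; the toy arrays and RandomForestClassifier are illustrative and not part of the record:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold

X = np.random.rand(100, 5)           # toy predictors
y = (X[:, 0] > 0.5).astype(int)      # toy binary target
oof = np.zeros((100, 2))             # out-of-fold probabilities become next-level features

for train_idx, holdout_idx in KFold(n_splits=5, shuffle=True, random_state=0).split(X):
    model = RandomForestClassifier(n_estimators=50, random_state=0)
    model.fit(X[train_idx], y[train_idx])
    oof[holdout_idx] = model.predict_proba(X[holdout_idx])  # predict only on unseen rows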

    You'll find links to the following music related subs providing top youtube links from each.

    \"\n\n\n\n\n\nfor sub in subs:\n\n\n#sub = \"music\"\n\tsubreddit = r.get_subreddit(sub).get_top(limit = 50)\n\n\t#w is for writing only, it overwrites every time. a is for appending r for read.\n\tf = open(sub+\"SubredditPlaylist.html\",'w')\n\tpagecontent += \"
  • r/\"+sub + \"
  • \"\n\t#beginner html encoding \n\thtml = \"r/\"+sub+\" Auto playlist

    This Page was auto-generated by u/datjeff. It contains all the youtube videos in the top 50 submissions on r/\" + sub +\".

    \"\n\tcount = 1\n\tfor submit in subreddit:\n\t\t\n\t\turl_string = submit.url\n\t\tyoutube_id = url_string[-11:]\n\t\tembed_link = \"http://www.youtube.com/embed/\"+youtube_id\n\n\n\t\tif(url_string.find(\"youtube.com\") != -1):\n\t\t\t#print url_string\n\t\t\t#use this for just printing a page with the links titled by their appearance. \n\t\t\t#l += \"
  • Video\" + `count` + \"
  • \"\n\t\t\thtml+= \"
  • \"\n\t\t\tcount+=1\n\t#close our tags\n\thtml += \"\"\n\t#write to file.\n\tf.write(html)\n\tf.close()\npagecontent+=\"\"\nlinkpage.write(pagecontent)\n\n\n\n\n\n\n\n\n","sub_path":"Scripts/Playlist/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144307632","text":"import numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nimport csv\nimport sys\nimport math\nfrom scipy.stats import ttest_ind # Package needed for statistical T-test\n\nclass run():\n def __init__(self,islandAmount,islandSize,migrationSize,crossoverRate,seed,epochAmount,fitnessMax,genotypeBest):\n self.islandAmount = int(islandAmount)\n self.islandSize = int(islandSize)\n self.migrationSize = (float(migrationSize)/float(islandSize))\n self.crossoverRate = float(crossoverRate)\n self.seed = [int(x) for x in seed.split(\";\")]\n self.epochAmount = [int(x) for x in epochAmount.split(\";\")]\n self.fitnessMax = [float(x) for x in fitnessMax.split(\";\")]\n self.genotypeBest = [[float(y) for y in x.split(\";\")] for x in genotypeBest.split(\"|\")]\n self.bestFitness = max(self.fitnessMax)\n self.meanBestFitness = sum(self.fitnessMax)/len(self.fitnessMax)\ndef read_files(filenames):\n filedata = []\n for file in filenames:\n with open(file,'r') as f:\n reader = csv.reader(f)\n for rows in reader:\n if not \"islandAmount\" in rows:\n filedata.append(run(rows[0],rows[1],rows[2],rows[3],rows[4],rows[5],rows[6],rows[7]))\n params = filedata[0].__dict__.keys()\n param2s = [\"bestFitness\",\"meanBestFitness\"]\n for param1 in params:\n for param2 in param2s:\n if param1 not in param2s:\n plot_2D(filedata,param1,param2)\n\"\"\"\n filedatas.append(filedata)\n for filedata in filedatas:\n for key,value in filedata.items():\n if key in data.keys():\n data[key].append(value)\n else:\n data[key] = [value]\n #print(data.keys())\n\"\"\"\ndef plot_2D(data,param1,param2):\n p1s, p2s = [],[]\n for run in data:\n p1s.append(getattr(run,param1))\n p2s.append(10-getattr(run,param2))\n try:\n plt.scatter(p1s,p2s)\n plt.xlabel(param1)\n plt.ylabel(\"10-{}\".format(param2))\n if not param1 in [\"crossoverRate\",\"migrationSize\"]:\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.savefig(\"{}_{}_plot.png\".format(param1,param2))\n # If this doesn't happen it keeps saving plots over itself\n plt.show()\n except:\n print(\"cant make plot\")\nif __name__ == \"__main__\":\n read_files([\"silvanNew.csv\",\"AdriaanNew.csv\",\"maartenNew.csv\"])\n","sub_path":"tuning/new_data/boonEval.py","file_name":"boonEval.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115942828","text":"\"\"\"\nPow(x, n)\n\nImplement pow(x, n).\n\nSubscribe to see which companies asked this question\n\nHide Tags Binary Search Math\nHide Similar Problems (M) Sqrt(x) (M) Super Pow\n\n\n\"\"\"\n\nimport unittest, sys\n\n\nclass Solution:\n # @param {double} x the base number\n # @param {int} n the power number\n # @return {double} the result\n def myPow(self, x, n):\n # Write your code here\n if n == 0 or x == 1:\n return 1\n if x == 0:\n return 0\n if x == -1:\n return -1 if n % 2 else 1\n if n == 1 :\n return x\n\n result = 1\n revert = False\n if n < 0:\n n = -n\n revert = True\n\n while n > 0:\n if n % 2:\n result *= x\n x *= x\n n /= 2\n #result *= x # should not have this, NOTE 2^4, even number power will always come to n%2=1\n if 
revert:\n result = 1.0 / result\n return result\n\nclass Solution1(object):\n \"\"\"\n The purpose is to use multiply, divide and mod, etc?? only to calculate the result.\n # to do this in a bottom-up way for the power number n is not a good idea, cause\n # at the last step ( power * 2 < n) , power could already be (less than half of a big nubmer)\n # if n is a very big number.\n # To tackle in a reasonable way, think like this. Since n is int, it can only be even or odd.\n # So it is better to do recursively in a top-down way -- divide the n by 2 each recursion.\n # <---- the above made some sense, for the recursive version.\n But does not show the key point for the iterative version.\n x^7 = x^1 * (x^2)^3 = x^1 * (x^2 * (x^2)^2)\n x^8 = x^8 = (x^2)^4 = ((x^2)^2)^2 = (((x^2)^2)^2)^1\n x^15 = x^1 * x^14 = x^1 * (x^2)^7 = x^1 * ((x^2)*(x^2)^3)\n ...\n so think about the power n as binary code, 7 = 111, 8 = 1000, 15 = 1111\n \"\"\"\n def myPow_recursive(self, x, n):\n \"\"\"\n -- recursive version\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n if x == 1 or x == 0:\n return x\n elif n == 0:\n return 1.0\n elif n < 0:\n return 1 / self.myPow(x, 0-n)\n elif n % 2:\n return x * self.myPow(x*x, n/2)\n else:\n return self.myPow(x*x, n/2)\n\n def myPow(self, x, n):\n \"\"\"\n -- iterative way\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n if x == 1 or x == 0 or n == 1:\n return x\n elif n == 0:\n return 1.0\n elif n == -1:\n return 1.0/x # make sure result is float\n power = n\n reverse = False\n\n if n < 0:\n power = 0-n\n reverse = True\n\n result = 1\n multiplier = x\n while power > 0:\n if power % 2: # or power & 1\n result *= multiplier\n power /= 2 # or power = (power >> 1)\n multiplier *= multiplier\n #result *= multiplier\n if reverse:\n result = 1.0 / result\n return result\n\n\n def myPow_ref_iterative(self, x, n):\n if x == 0:\n if n == 0:\n return 1.0\n else:\n return 0\n if n == 0:\n return 1.0\n pos = True\n if n < 0:\n pos = False\n n = abs(n)\n np = x\n res = 1\n while n > 0:\n if n % 2:\n res *= np\n np *= np\n n /= 2\n return res if pos else 1.0 / res\n\n\n def myPow_tle(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n if x == 0 and n == -1: # ?? 
should be n<0\n return sys.float_info.max\n if n == 0:\n return 1.0 # should be float, so can't not return 1\n reverse = False\n if n < 0:\n reverse = True\n n = 0 - n\n if n == 1:\n result = x\n else:\n power = 1\n result = x\n while power * 2 <= n:\n result *= result\n power *= 2\n while power < n:\n result *= x\n power += 1\n if reverse:\n result = float(1) / float(result)\n return result\n\n def myPow_ref_recursive(self, x, n):\n if n == 0:\n return 1.0\n elif n < 0:\n return 1 / self.myPow(x, -n)\n elif n % 2:\n return self.myPow(x * x, n / 2) * x\n else:\n return self.myPow(x * x, n / 2)\n\n\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case1(self):\n a = 2\n b = 1\n answer = 2.0\n result = self.sol.myPow(a, b)\n self.assertEqual(answer, result)\n\n def test_case2(self):\n a = 2\n b = -1\n answer = 0.5\n result = self.sol.myPow(a, b)\n self.assertEqual(answer, result)\n\n def test_case3(self):\n a = 2.0\n b = 3\n answer = 8.0\n result = self.sol.myPow(a, b)\n self.assertEqual(answer, result)\n\n def test_case4(self): ######=====> TLE\n a = 0.00001\n b = 2147483647\n answer = 0.0\n result = self.sol.myPow(a, b)\n self.assertEqual(answer, result)\n\n\n def test_case5(self): ######=====> wrong anser for iterative version\n a = 3.89707\n b = 2\n answer = 15.187154584899998\n result = self.sol.myPow(a, b)\n self.assertEqual(answer, result)\n\n def test_case6(self): ######=====> wrong anser for iterative version\n a = 4.70975\n b = -6\n answer = 0.00009162476446700508\n result = self.sol.myPow(a, b)\n self.assertEqual(answer, result)\n\n def test_case07(self):\n a = 2\n b = 4\n answer = 16\n result = self.sol.myPow(a, b)\n self.assertEqual(answer, result)\n\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\nif __name__ == \"__main__\":\n main()","sub_path":"misc/pow_x_n.py","file_name":"pow_x_n.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27096710","text":"import logging\nfrom common.forms import PageData\nfrom django.http import HttpResponse, JsonResponse\nimport traceback\nimport logging\nfrom django.db import models\nimport datetime\nfrom django.conf import settings\nimport pytz\nimport os\nimport configparser\n# Create your views here.\nlog = logging.getLogger('django')\n\n\ndef page_handler(f):\n \"\"\"\n 使用闭包形式处理分页请求,做预处理操作和结束操作,日志打印等。\n 视图层函数只需注解该函数,代码中处理简单的搜索功能,并返回QuerySet对象。\n 例见service.views.page函数\n :param f: 视图函数,注解注入\n :return: response\n \"\"\"\n def hander(*args, **kwargs):\n dp = PageData(args[0].GET)\n if dp.is_valid():\n page_info = dp.get_page_info()\n try:\n query = f(*args, **kwargs)\n order = dp.get_sort_rule()\n order_col = order[0]\n if order[1] != 'asc':\n order_col = \"-\" + order[0]\n data_q = query.order_by(order_col)[page_info[0]:page_info[0] + page_info[1]]\n data = [x.to_dict() for x in data_q]\n length = query.count()\n mp = dp.get_data(data, length)\n return JsonResponse(mp)\n except Exception:\n log.error('search got ERROR: ' + traceback.format_exc())\n return JsonResponse({'result': False, 'message': 'search get the ERROR: \\n' + traceback.format_exc()})\n return JsonResponse({'result': False, 'message': 'the Request params probably lose something.'})\n return hander\n\n\ndef handle_uploaded_file(path, upload_file):\n \"\"\"\n 上传文件保存到指定路径下。返回保存后文件的全路径\n :param path: 要保存的路径\n :param upload_file: 
+{"seq_id":"27096710","text":"import logging\nfrom common.forms import PageData\nfrom django.http import HttpResponse, JsonResponse\nimport traceback\nfrom django.db import models\nimport datetime\nfrom django.conf import settings\nimport pytz\nimport os\nimport configparser\n# Create your views here.\nlog = logging.getLogger('django')\n\n\ndef page_handler(f):\n \"\"\"\n Handle paging requests via a closure: pre-processing, post-processing, logging and so on.\n The view function only needs to be decorated with this; simple search handling is done here,\n and the view returns a QuerySet object.\n See the service.views.page function for an example.\n :param f: view function, injected through the decorator\n :return: response\n \"\"\"\n def handler(*args, **kwargs):\n dp = PageData(args[0].GET)\n if dp.is_valid():\n page_info = dp.get_page_info()\n try:\n query = f(*args, **kwargs)\n order = dp.get_sort_rule()\n order_col = order[0]\n if order[1] != 'asc':\n order_col = \"-\" + order[0]\n data_q = query.order_by(order_col)[page_info[0]:page_info[0] + page_info[1]]\n data = [x.to_dict() for x in data_q]\n length = query.count()\n mp = dp.get_data(data, length)\n return JsonResponse(mp)\n except Exception:\n log.error('search got ERROR: ' + traceback.format_exc())\n return JsonResponse({'result': False, 'message': 'search got the ERROR: \\n' + traceback.format_exc()})\n return JsonResponse({'result': False, 'message': 'the request params are probably missing something.'})\n return handler\n\n\ndef handle_uploaded_file(path, upload_file):\n \"\"\"\n Save an uploaded file under the given path and return the full path of the saved file.\n :param path: directory to save the file in\n :param upload_file: InMemoryUploadedFile object\n :return: full path of the saved file\n \"\"\"\n file_path = os.path.join(path, upload_file.name)\n print(\"file_path2\",file_path)\n print(\"upload_file.name\",upload_file.name)\n # make sure the target directory exists\n if not os.path.exists(path):\n os.makedirs(path)\n\n try:\n with open(file_path, 'wb+') as f:\n for chunk in upload_file.chunks():\n f.write(chunk) # store the file on the OA server\n return file_path\n except Exception as e: # log and re-raise the error\n log.error('File upload got Error: \\n' + traceback.format_exc())\n raise e","sub_path":"common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"381800527","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom .views import *\n\n\nurlpatterns = [\n\n #list of all the urls for navigation\n\n url(r'login',login_view),\n url(r'signup',sign_up),\n url(r'feed',feed),\n url(r'post',post_view),\n url(r'like',like_view),\n url(r'comment',comment_view),\n url(r'log_out',logout_view),\n url(r'success',success),\n]","sub_path":"Swacch Bharat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
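The page_handler decorator in the common/views.py record above expects the wrapped view to return only a QuerySet; paging, sorting and the JSON envelope happen inside the closure. A hypothetical usage sketch; the Service model and the 'name' filter are assumptions for illustration, not part of the record:

from common.views import page_handler
from service.models import Service  # hypothetical model, for illustration only

@page_handler
def page(request):
    # return a plain QuerySet; page_handler slices, orders and wraps it as a JsonResponse
    name = request.GET.get('name', '')
    return Service.objects.filter(name__icontains=name)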
+{"seq_id":"187363152","text":"# -*- coding: utf-8 -*-\n\"\"\"\nYOLO_V4\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom yolov4 import utils\nfrom yolov4.config import cfg\n\n\ndef mish(inputs):\n \"\"\"Mish activation function\"\"\"\n return inputs * tf.tanh(tf.nn.softplus(inputs))\n\n\ndef conv(input_data, filters_shape, trainable, name, downsample=False,\n activate=True, bn=True, act_fun='leaky_relu'):\n \"\"\"Define Conv layer\"\"\"\n with tf.variable_scope(name):\n if downsample:\n pad_h, pad_w = (filters_shape[0] - 2) // 2 + 1, (filters_shape[1] - 2) // 2 + 1\n paddings = tf.constant([[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]])\n input_data = tf.pad(input_data, paddings, 'CONSTANT')\n strides = (1, 2, 2, 1)\n padding = 'VALID'\n else:\n strides = (1, 1, 1, 1)\n padding = 'SAME'\n\n weight = tf.get_variable(name='weight', dtype=tf.float32, trainable=True,\n shape=filters_shape, initializer=tf.random_normal_initializer(stddev=0.01))\n conv = tf.nn.conv2d(input=input_data, filter=weight, strides=strides, padding=padding)\n\n if bn:\n conv = tf.layers.batch_normalization(conv, beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=trainable)\n else:\n bias = tf.get_variable(name='bias', shape=filters_shape[-1], trainable=True,\n dtype=tf.float32, initializer=tf.constant_initializer(0.0))\n conv = tf.nn.bias_add(conv, bias)\n # conv = tf.concat(conv, bias)\n\n # if activate == True: conv = tf.nn.leaky_relu(conv, alpha=0.1)\n if activate:\n if act_fun == 'mish':\n conv = mish(conv)\n else:\n conv = tf.nn.leaky_relu(conv, alpha=0.1)\n\n return conv\n\n\ndef res_block(input_data, input_channel, filter_num1, filter_num2, trainable, name):\n \"\"\"Define ResBlock\"\"\"\n short_cut = input_data\n with tf.variable_scope(name):\n input_data = conv(input_data, filters_shape=(1, 1, input_channel, filter_num1),\n trainable=trainable, name='conv1', act_fun='mish')\n input_data = conv(input_data, filters_shape=(3, 3, filter_num1, filter_num2),\n trainable=trainable, name='conv2', act_fun='mish')\n residual_ouput = input_data + short_cut\n\n return residual_ouput\n\n\ndef upsample(input_data, name, method='deconv'):\n \"\"\"Define Upsample layer\"\"\"\n assert method in ['resize', 'deconv']\n\n if method == 'resize':\n with tf.variable_scope(name):\n input_shape = tf.shape(input_data)\n output = tf.image.resize_nearest_neighbor(input_data, (input_shape[1]*2, input_shape[2]*2))\n\n if method == 'deconv':\n # replace resize_nearest_neighbor with conv2d_transpose to support TensorRT optimization\n num_filter = input_data.shape.as_list()[-1]\n output = tf.layers.conv2d_transpose(input_data, num_filter, kernel_size=2, padding='same',\n strides=(2, 2), kernel_initializer=tf.random_normal_initializer())\n return output\n\n\ndef cspfirst_stage(input_data, trainable, filters):\n \"\"\"\n First csp stage.\n :param input_data: The input tensor\n :param trainable: A bool parameter, True ==> training, False ==> not train.\n :param filters: Filter nums\n :return: Output tensors and the last Conv layer counter of this stage\n \"\"\"\n c = filters\n route = input_data\n route = conv(route, (1, 1, c, c), trainable=trainable, name='conv2', act_fun='mish')\n input_data = conv(input_data, (1, 1, c, c), trainable=trainable, name='conv3', act_fun='mish')\n for i in range(1):\n input_data = res_block(input_data, c, c/2, c, trainable=trainable, name='residual%d' % (i + 0))\n input_data = conv(input_data, (1, 1, c, c), trainable=trainable, name='conv6', act_fun='mish')\n input_data = tf.concat([input_data, route], axis=-1)\n layer_nums = 6\n return input_data, layer_nums\n\n\ndef cspstage(input_data, trainable, filters, loop, layer_nums, route_nums, res_nums):\n \"\"\"\n CSPNets stage\n :param input_data: The input tensor\n :param trainable: A bool parameter, True ==> training, False ==> not train.\n :param filters: Filter nums\n :param loop: ResBlock loop nums\n :param layer_nums: Counter of Conv layers\n :param route_nums: Counter of route nums\n :param res_nums: Counter of ResBlock nums\n :return: Output tensors and the last Conv layer counter of this stage\n \"\"\"\n c = filters\n out_layer = layer_nums + 1 + loop + 1\n route = input_data\n route = conv(route, (1, 1, c, c/2), trainable=trainable, name='conv_route%d' % route_nums, act_fun='mish')\n input_data = conv(input_data, (1, 1, c, c/2), trainable=trainable, name='conv%d' % (layer_nums + 1), act_fun='mish')\n for i in range(loop):\n input_data = res_block(input_data, c/2, c/2, c/2, trainable=trainable, name='residual%d' % (i + res_nums))\n input_data = conv(input_data, (1, 1, c/2, c/2), trainable=trainable, name='conv%d' % out_layer, act_fun='mish')\n input_data = tf.concat([input_data, route], axis=-1)\n\n return input_data, out_layer\n\n\ndef cspdarknet53(input_data, trainable):\n \"\"\"\n CSPDarknet53 body; source: https://arxiv.org/pdf/1911.11929.pdf\n :param input_data: Input tensor\n :param trainable: A bool parameter, True ==> training, False ==> not train.\n :return: Three stage tensors\n \"\"\"\n input_data = conv(input_data, (3, 3, 3, 32), trainable=trainable, name='conv0', act_fun='mish')\n input_data = conv(input_data, (3, 3, 32, 64), trainable=trainable, name='conv1', downsample=True, act_fun='mish')\n\n input_data, layer_num = cspfirst_stage(input_data, trainable, 64)\n input_data = conv(input_data, (1, 1, 128, 64), trainable=trainable, name='conv%d' % (layer_num+1), act_fun='mish')\n input_data = conv(input_data, (3, 3, 64, 128), trainable=trainable, name='conv%d' % (layer_num+2), downsample=True,\n act_fun='mish')\n layer_num = layer_num+2\n\n input_data, layer_num = cspstage(input_data, trainable, 128, 2, layer_num, 1, 1)\n input_data = conv(input_data, (1, 1, 128, 128), trainable=trainable, name='conv%d' % (layer_num+1), act_fun='mish')\n input_data = conv(input_data, (3, 3, 128, 256), trainable=trainable, name='conv%d' % (layer_num+2), downsample=True,\n act_fun='mish')\n layer_num = layer_num + 2\n\n input_data, layer_num = cspstage(input_data, trainable, 256, 8, layer_num, 2, 3)\n input_data = conv(input_data, (1, 1, 256, 256), trainable=trainable, name='conv%d' % (layer_num+1), act_fun='mish')\n route_1 = input_data # 256 x 256\n input_data = conv(input_data, (3, 3, 256, 512), trainable=trainable, name='conv%d' % (layer_num+2), downsample=True,\n act_fun='mish')\n layer_num = layer_num + 2\n\n input_data, layer_num = cspstage(input_data, trainable, 512, 8, layer_num, 3, 11)\n input_data = conv(input_data, (1, 1, 512, 512), trainable=trainable, name='conv%d' % (layer_num+1), act_fun='mish')\n route_2 = input_data # 512 x 512\n input_data = conv(input_data, (3, 3, 512, 1024), trainable=trainable, name='conv%d' % (layer_num+2), downsample=True,\n act_fun='mish')\n layer_num = layer_num + 2\n\n input_data, layer_num = cspstage(input_data, trainable, 1024, 4, layer_num, 4, 19)\n input_data = conv(input_data, (1, 1, 1024, 1024), trainable=trainable, name='conv%d' % (layer_num+1), act_fun='mish')\n\n return route_1, route_2, input_data\n\n\nclass YOLOV4(object):\n \"\"\"\n YOLOV4 Model\n \"\"\"\n\n def __init__(self, input_data, trainable):\n self.trainable = trainable\n self.classes = utils.read_class_names(cfg.YOLO.CLASSES)\n self.num_class = len(self.classes)\n self.strides = np.array(cfg.YOLO.STRIDES)\n self.anchors = utils.get_anchors(cfg.YOLO.ANCHORS)\n self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE\n self.iou_loss_thresh = cfg.YOLO.IOU_LOSS_THRESH\n self.upsample_method = cfg.YOLO.UPSAMPLE_METHOD\n\n try:\n self.conv_lbbox, self.conv_mbbox, self.conv_sbbox = self.__build_network(input_data)\n except:\n raise NotImplementedError('Cannot build up yolov4 network')\n\n with tf.variable_scope('pred_sbbox'):\n self.pred_sbbox = self.decode(self.conv_sbbox, self.anchors[0], self.strides[0])\n\n with tf.variable_scope('pred_mbbox'):\n self.pred_mbbox = self.decode(self.conv_mbbox, self.anchors[1], self.strides[1])\n\n with tf.variable_scope('pred_lbbox'):\n self.pred_lbbox = self.decode(self.conv_lbbox, self.anchors[2], self.strides[2])\n\n def __build_network(self, input_data):\n \"\"\"\n Build yolov4 body, including SPP, PAN, Yolov3.\n :param input_data: Input tensor\n :return: Three stage outputs\n \"\"\"\n route_1, route_2, input_data = cspdarknet53(input_data, self.trainable)\n\n # 19 x 19 head\n y19 = conv(input_data, (1, 1, 1024, 512), self.trainable, 'conv68')\n y19 = conv(y19, (3, 3, 512, 1024), self.trainable, 'conv69')\n y19 = conv(y19, (1, 1, 1024, 512), self.trainable, 'conv70')\n maxpool1 = tf.nn.max_pool(y19, [1, 13, 13, 1], [1, 1, 1, 1], 'SAME')\n maxpool2 = tf.nn.max_pool(y19, [1, 9, 9, 1], [1, 1, 1, 1], 'SAME')\n maxpool3 = tf.nn.max_pool(y19, [1, 5, 5, 1], [1, 1, 1, 1], 'SAME')\n y19 = tf.concat([maxpool1, maxpool2, maxpool3, y19], axis=-1) # SPP\n y19 = conv(y19, (1, 1, 2048, 512), self.trainable, 'conv71')\n y19 = conv(y19, (3, 3, 512, 1024), self.trainable, 'conv72')\n y19 = conv(y19, (1, 1, 1024, 512), self.trainable, 'conv73')\n\n y19_1 = conv(y19, (1, 1, 512, 256), self.trainable, 'conv73_1')\n y19_upsample = upsample(y19_1, name='upsample0', method=self.upsample_method)\n\n # 38 x 38 head\n with tf.variable_scope('route_1'):\n y38 = conv(route_2, (1, 1, 512, 256), self.trainable, 'conv_route_1')\n y38 = tf.concat([y38, y19_upsample], axis=-1)\n y38 = conv(y38, (1, 1, 512, 256), self.trainable, 'conv74')\n y38 = conv(y38, (3, 3, 256, 512), self.trainable, 'conv75')\n y38 = conv(y38, (1, 1, 512, 256), self.trainable, 'conv76')\n y38 = conv(y38, (3, 3, 256, 512), self.trainable, 'conv77')\n y38 = conv(y38, (1, 1, 512, 256), self.trainable, 'conv78')\n\n y38_1 = conv(y38, (1, 1, 256, 128), self.trainable, 'conv78_1')\n y38_upsample = upsample(y38_1, name='upsample1', method=self.upsample_method)\n\n # 76 x 76 head\n with tf.variable_scope('route_2'):\n y76 = conv(route_1, (1, 1, 256, 128), self.trainable, 'conv_route_2')\n y76 = tf.concat([y76, y38_upsample], axis=-1)\n y76 = conv(y76, (1, 1, 256, 128), self.trainable, 'conv79')\n y76 = conv(y76, (3, 3, 128, 256), self.trainable, 'conv80')\n y76 = conv(y76, (1, 1, 256, 128), self.trainable, 'conv81')\n y76 = conv(y76, (3, 3, 128, 256), self.trainable, 'conv82')\n y76 = conv(y76, (1, 1, 256, 128), self.trainable, 'conv83')\n\n # 76 x 76 output\n y76_output = conv(y76, (3, 3, 128, 256), self.trainable, 'conv_sobj_branch')\n y76_output = conv(y76_output, (1, 1, 256, 3*(self.num_class + 5)),\n trainable=self.trainable, name='conv_sbbox', activate=False, bn=False)\n\n # 38 x 38 output\n y76_downsample = conv(y76, (3, 3, 128, 256), self.trainable, 'conv_downsample0', downsample=True)\n y38 = tf.concat([y76_downsample, y38], axis=-1)\n y38 = conv(y38, (1, 1, 512, 256), self.trainable, 'conv84')\n y38 = conv(y38, (3, 3, 256, 512), self.trainable, 'conv85')\n y38 = conv(y38, (1, 1, 512, 256), self.trainable, 'conv86')\n y38 = conv(y38, (3, 3, 256, 512), self.trainable, 'conv87')\n y38 = conv(y38, (1, 1, 512, 256), self.trainable, 'conv88')\n\n y38_output = conv(y38, (3, 3, 256, 512), self.trainable, name='conv_mobj_branch')\n y38_output = conv(y38_output, (1, 1, 512, 3*(self.num_class + 5)),\n trainable=self.trainable, name='conv_mbbox', activate=False, bn=False)\n\n # 19 x 19 output\n y38_downsample = conv(y38, (3, 3, 256, 512), self.trainable, 'conv_downsample1', downsample=True)\n y19 = tf.concat([y38_downsample, y19], axis=-1)\n y19 = conv(y19, (1, 1, 1024, 512), self.trainable, 'conv89')\n y19 = conv(y19, (3, 3, 512, 1024), self.trainable, 'conv90')\n y19 = conv(y19, (1, 1, 1024, 512), self.trainable, 'conv91')\n y19 = conv(y19, (3, 3, 512, 1024), self.trainable, 'conv92')\n y19 = conv(y19, (1, 1, 1024, 512), self.trainable, 'conv93')\n\n y19_output = conv(y19, (3, 3, 512, 1024), self.trainable, name='conv_lobj_branch')\n y19_output = conv(y19_output, (1, 1, 1024, 3*(self.num_class + 5)),\n trainable=self.trainable, name='conv_lbbox', activate=False, bn=False)\n\n return y19_output, y38_output, y76_output\n\n def decode(self, conv_ouput, anchors, strides):\n \"\"\"\n Decode the yolov4 output; sigmoid is applied to xy offsets, confidence and class probabilities.\n :param conv_ouput: The output of yolov4 body.\n :param anchors: The anchors\n :param strides: Three dimensions, default [8, 16, 32]\n :return: The predict of conv_ouput.\n \"\"\"\n conv_shape = tf.shape(conv_ouput)\n batch_size = conv_shape[0]\n output_size = conv_shape[1]\n anchor_per_scale = len(anchors)\n\n conv_ouput = tf.reshape(conv_ouput, (batch_size, output_size, output_size, anchor_per_scale, 5 + self.num_class))\n conv_raw_xy = conv_ouput[:, :, :, :, 0:2]\n conv_raw_wh = conv_ouput[:, :, :, :, 2:4]\n conv_raw_conf = conv_ouput[:, :, :, :, 4:5]\n conv_raw_prob = conv_ouput[:, :, :, :, 5:]\n\n y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size])\n x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1])\n\n xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)\n xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, anchor_per_scale, 1])\n xy_grid = tf.cast(xy_grid, tf.float32)\n\n bbox_xy = (tf.sigmoid(conv_raw_xy) + xy_grid) * strides\n # bbox_wh = (tf.sigmoid(conv_raw_wh) * anchors) * strides\n bbox_wh = (tf.exp(conv_raw_wh) * anchors)\n pred_xywh = tf.concat([bbox_xy, bbox_wh], axis=-1)\n\n pred_box_confidence = tf.sigmoid(conv_raw_conf)\n pred_box_class_prob = tf.sigmoid(conv_raw_prob)\n\n return tf.concat([pred_xywh, pred_box_confidence, pred_box_class_prob], axis=-1)\n\n @staticmethod\n def bbox_iou(boxes1, boxes2):\n \"\"\"\n Calculate bbox iou.\n :param boxes1: Tensor, shape=(i1,...,iN, 4), xywh\n :param boxes2: Tensor, shape=(j, 4), xywh\n :return: Tensor, shape=(i1,...,iN, j)\n \"\"\"\n boxes1_area = boxes1[..., 2] * boxes1[..., 3]\n boxes2_area = boxes2[..., 2] * boxes2[..., 3]\n\n boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,\n boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)\n boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,\n boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)\n left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])\n right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n union_area = boxes1_area + boxes2_area - inter_area\n iou = 1.0 * tf.compat.v1.div_no_nan(inter_area, union_area)\n\n return iou\n\n @staticmethod\n def bbox_giou(boxes1, boxes2):\n \"\"\"\n Calculate giou loss; source: https://arxiv.org/abs/1902.09630\n :param boxes1: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n :param boxes2: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n :return: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n \"\"\"\n boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,\n boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)\n boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,\n boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)\n\n boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),\n tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)\n boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),\n tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)\n\n boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])\n boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])\n\n left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])\n right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n union_area = boxes1_area + boxes2_area - inter_area\n iou = tf.compat.v1.div_no_nan(inter_area, union_area)\n\n enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])\n enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])\n enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)\n enclose_area = enclose[..., 0] * enclose[..., 1]\n giou = iou - 1.0 * tf.compat.v1.div_no_nan(enclose_area - union_area, enclose_area)\n\n return giou\n\n @staticmethod\n def bbox_diou(boxes1, boxes2):\n \"\"\"\n Calculate diou; source: https://arxiv.org/pdf/1911.08287v1.pdf\n :param boxes1: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n :param boxes2: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n :return: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n \"\"\"\n boxes1_center, boxes2_center = boxes1[..., :2], boxes2[..., :2]\n boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,\n boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)\n boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,\n boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)\n\n boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),\n tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)\n boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),\n tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)\n\n boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])\n boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])\n\n left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])\n right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n union_area = boxes1_area + boxes2_area - inter_area\n iou = 1.0 * tf.compat.v1.div_no_nan(inter_area, union_area)\n\n center_distance = tf.reduce_sum(tf.square(boxes1_center - boxes2_center), axis=-1)\n enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])\n enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])\n enclose_wh = tf.maximum(enclose_right_down - enclose_left_up, 0.0)\n enclose_diagonal = tf.reduce_sum(tf.square(enclose_wh), axis=-1)\n diou = iou - 1.0 * tf.compat.v1.div_no_nan(center_distance, enclose_diagonal)\n\n return diou\n\n @staticmethod\n def bbox_ciou(boxes1, boxes2):\n \"\"\"\n Calculate ciou; source: https://arxiv.org/pdf/1911.08287v1.pdf\n :param boxes1: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n :param boxes2: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n :return: Tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n \"\"\"\n boxes1_1, boxes2_1 = boxes1, boxes2\n boxes1_center, boxes2_center = boxes1[..., :2], boxes2[..., :2]\n\n boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,\n boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)\n boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,\n boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)\n\n boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),\n tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)\n boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),\n tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)\n\n boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])\n boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])\n\n left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])\n right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])\n\n inter_section = tf.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n union_area = boxes1_area + boxes2_area - inter_area\n iou = 1.0 * tf.compat.v1.div_no_nan(inter_area, union_area)\n\n center_distance = tf.reduce_sum(tf.square(boxes1_center - boxes2_center), axis=-1)\n enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])\n enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])\n enclose_wh = tf.maximum(enclose_right_down - enclose_left_up, 0.0)\n enclose_diagonal = tf.reduce_sum(tf.square(enclose_wh), axis=-1)\n diou = iou - 1.0 * tf.compat.v1.div_no_nan(center_distance, enclose_diagonal)\n\n v = 4 / (np.pi * np.pi) * (tf.square(tf.math.atan2(boxes1_1[..., 2], boxes1_1[..., 3]) -\n tf.math.atan2(boxes2_1[..., 2], boxes2_1[..., 3])))\n alp = tf.compat.v1.div_no_nan(v, 1.0 - iou + v)\n ciou = diou - alp * v\n\n return ciou\n\n @staticmethod\n def focal_loss(y_true, y_pred, gamma=2.0, alpha=1):\n \"\"\"\n Compute focal loss; source: https://arxiv.org/abs/1708.02002\n :param y_true: Ground truth targets, tensor of shape (?, num_boxes, num_classes).\n :param y_pred: Predicted logits, tensor of shape (?, num_boxes, num_classes).\n :param gamma: Exponent of the modulating factor (1 - p_t) ^ gamma.\n :param alpha: Optional alpha weighting factor to balance positives vs negatives.\n :return: Focal factor.\n \"\"\"\n focal_loss = alpha * tf.pow(tf.abs(y_true - y_pred), gamma)\n return focal_loss\n\n @staticmethod\n def _label_smoothing(y_true, label_smoothing):\n \"\"\"Label smoothing. source: https://arxiv.org/pdf/1906.02629.pdf\"\"\"\n label_smoothing = tf.constant(label_smoothing, dtype=tf.float32)\n return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing\n\n def yolov4_loss(self, conv, pred, label, bboxes, stride, iou_use=1, focal_use=False, label_smoothing=0):\n \"\"\"\n Return the yolov4 loss tensors.\n :param conv: The outputs of yolov4 body, conv_sbbox, conv_mbbox, conv_lbbox\n :param pred: The outputs of decode, pred_sbbox, pred_mbbox, pred_lbbox\n :param label: The input label boxes\n :param bboxes: The input boxes\n :param stride: Num of [8, 16, 32]\n :param iou_use: The iou loss (0, 1, 2) ==> (giou, diou, ciou)\n :param focal_use: The focal loss (0, 1, 2) ==> (normal, sigmoid_focal, focal)\n :param label_smoothing: The label smoothing\n :return: Tensor, shape=(1, )\n \"\"\"\n conv_shape = tf.shape(conv)\n batch_size = conv_shape[0]\n output_size = conv_shape[1]\n input_size = stride * output_size\n conv = tf.reshape(conv, (batch_size, output_size, output_size,\n self.anchor_per_scale, 5 + self.num_class))\n\n conv_raw_conf = conv[:, :, :, :, 4:5]\n conv_raw_prob = conv[:, :, :, :, 5:]\n\n pred_xywh = pred[:, :, :, :, 0:4]\n pred_conf = pred[:, :, :, :, 4:5]\n\n label_xywh = label[:, :, :, :, 0:4]\n respond_bbox = label[:, :, :, :, 4:5]\n label_prob = label[:, :, :, :, 5:]\n if label_smoothing != 0:\n label_prob = self._label_smoothing(label_prob, label_smoothing)\n\n iou = self.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])\n max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)\n respond_backgd = (1.0 - respond_bbox) * tf.cast(max_iou < self.iou_loss_thresh, tf.float32)\n\n input_size = tf.cast(input_size, tf.float32)\n bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)\n\n if iou_use == 1:\n diou = tf.expand_dims(self.bbox_diou(pred_xywh, label_xywh), axis=-1)\n iou_loss = respond_bbox * bbox_loss_scale * (1 - diou)\n elif iou_use == 2:\n ciou = tf.expand_dims(self.bbox_ciou(pred_xywh, label_xywh), axis=-1)\n iou_loss = respond_bbox * bbox_loss_scale * (1 - ciou)\n else:\n giou = tf.expand_dims(self.bbox_giou(pred_xywh, label_xywh), axis=-1)\n iou_loss = respond_bbox * bbox_loss_scale * (1 - giou)\n\n if focal_use:\n focal = self.focal_loss(respond_bbox, pred_conf)\n conf_loss = focal * (respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox,\n logits=conv_raw_conf) +\n respond_backgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox,\n logits=conv_raw_conf))\n class_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)\n else:\n conf_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox,\n logits=conv_raw_conf) + \\\n respond_backgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox,\n logits=conv_raw_conf)\n class_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)\n\n iou_loss = tf.reduce_mean(tf.reduce_sum(iou_loss, axis=[1, 2, 3, 4]))\n conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1, 2, 3, 4]))\n class_loss = tf.reduce_mean(tf.reduce_sum(class_loss, axis=[1, 2, 3, 4]))\n\n return iou_loss, conf_loss, class_loss\n\n def compute_loss(self, label_sbbox, label_mbbox, label_lbbox, true_sbbox, true_mbbox, true_lbbox, iou_use, focal_use, label_smoothing):\n \"\"\"Compute loss; location loss, confidence loss, class prob loss \"\"\"\n with tf.name_scope('smaller_box_loss'):\n loss_sbbox = self.yolov4_loss(self.conv_sbbox, self.pred_sbbox, label_sbbox, true_sbbox,\n stride=self.strides[0], iou_use=iou_use, focal_use=focal_use,\n label_smoothing=label_smoothing)\n with tf.name_scope('medium_box_loss'):\n loss_mbbox = self.yolov4_loss(self.conv_mbbox, self.pred_mbbox, label_mbbox, true_mbbox,\n stride=self.strides[1], iou_use=iou_use, focal_use=focal_use,\n label_smoothing=label_smoothing)\n\n with tf.name_scope('larger_box_loss'):\n loss_lbbox = self.yolov4_loss(self.conv_lbbox, self.pred_lbbox, label_lbbox, true_lbbox,\n stride=self.strides[2], iou_use=iou_use, focal_use=focal_use,\n label_smoothing=label_smoothing)\n\n with tf.name_scope('iou_loss'):\n iou_loss = loss_sbbox[0] + loss_mbbox[0] + loss_lbbox[0]\n\n with tf.name_scope('conf_loss'):\n conf_loss = loss_sbbox[1] + loss_mbbox[1] + loss_lbbox[1]\n\n with tf.name_scope('class_loss'):\n class_loss = loss_sbbox[2] + loss_mbbox[2] + loss_lbbox[2]\n\n return iou_loss, conf_loss, class_loss\n\n\n","sub_path":"yolov4/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":29365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
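bbox_giou in the yolov4 record above implements GIoU = IoU - (enclose_area - union_area) / enclose_area. A small NumPy check of that formula on two corner-format (x1, y1, x2, y2) boxes; the helper name and sample boxes are illustrative, not taken from the record:

import numpy as np

def giou_xyxy(b1, b2):
    # intersection and union of the two boxes
    inter_wh = np.maximum(np.minimum(b1[2:], b2[2:]) - np.maximum(b1[:2], b2[:2]), 0.0)
    inter = inter_wh[0] * inter_wh[1]
    area1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
    area2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
    union = area1 + area2 - inter
    # smallest enclosing box supplies the GIoU penalty term
    enc_wh = np.maximum(b1[2:], b2[2:]) - np.minimum(b1[:2], b2[:2])
    enclose = enc_wh[0] * enc_wh[1]
    return inter / union - (enclose - union) / enclose

print(giou_xyxy(np.array([0., 0., 2., 2.]), np.array([1., 1., 3., 3.])))  # about -0.079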
+{"seq_id":"388002017","text":"import os\nimport random\nimport subprocess\nfrom types import SimpleNamespace\nfrom statistics import mean, median\nfrom collections import defaultdict\n\nimport pysam\nimport numpy as np\nfrom scipy.stats import sem\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom baeklab.base import *\nfrom baeklab.genomics.coor import Coor\nfrom baeklab.genomics.htslib import cmdl_tabix_bed\nfrom baeklab.genomics.ucsckent import cmdl_bedsort, cmdl_bed2bb_np\n\n\nclass eCLIP(object):\n\n ENCODE_HEADERS = (\n ('accid', 'File accession'), # ENCFF990XRM\n ('format', 'File format'), # fastq, bam, bed narrowPeak, bigbed narrowPeak\n ('output', 'Output type'), # (plus/minus) strand signal of (all/unique) reads\n ('assay', 'Assay'), # eCLIP, RNA-seq\n ('sample', 'Biosample term name'), # K562, HepG2\n # ('subfrac', 'Biosample subcellular fraction term name'), # nucleus, cytosol\n ('target', 'Experiment target'), # DDX55-human, DDX55 eCLIP mock input-human\n # ('rbnscon', 'RBNS protein concentration'),\n ('biorep', 'Biological replicate(s)'), # 1, 2\n ('techrep', 'Technical replicate'), # 1, 2, 3, 4, 5\n ('readlen', 'Read length'), # 20, 40, 45\n ('paired', 'Paired end'), # 1, 2\n ('derived', 'Derived from'), # ENCFF044HZO, ENCFF087BBT, ...\n ('assembly', 'Assembly'), # hg19, GRCh38\n ('md5sum', 'md5sum'), # md5sum\n ('status', 'File Status'), # released, archived\n )\n ALLOWED_TERMS = ('accid', 'format', 'assembly', 'sample', 'target', 'biorep')\n ALLOWED_TYPES = (str, tuple, list)\n\n def __init__(self, name):\n self.name = name\n self.base_dir = opj('data/encode/eclip', self.name)\n in_file_pkl = opj(self.base_dir, 'metadata.pkl')\n if os.path.exists(in_file_pkl):\n self.files = pickle_load(in_file_pkl)\n\n def dump_metadata(self):\n \"\"\"Dump files from ``metadata.tsv``. (for ENCODE data)\"\"\"\n\n in_file_tsv = opj(self.base_dir, 'metadata.tsv')\n df = pd.read_table(in_file_tsv)\n\n namepairs = [namepair for namepair in self.ENCODE_HEADERS if namepair[1] in df.columns]\n fullnames = [name for _, name in namepairs]\n shortnames = [name for name, _ in namepairs]\n df = df[fullnames]\n df.columns = shortnames\n # pd.set_option('display.width', 240)\n\n files = []\n for row in df.itertuples():\n\n # file\n file = SimpleNamespace()\n files.append(file)\n\n # setattr\n for attrname in shortnames:\n value = getattr(row, attrname)\n if not pd.isnull(value):\n setattr(file, attrname, value)\n assert file.accid.startswith('ENCFF')\n\n # file.assembly\n if hasattr(file, 'assembly'):\n assert file.format in ('bam', 'bed narrowPeak')\n file.assembly = {'hg19': 'hg19', 'GRCh38': 'hg38'}[file.assembly]\n\n # file.target and file.biorep\n target_old = file.target # 'DDX55-human', 'DDX55 eCLIP mock input-human'\n target_new = target_old.rsplit('-', 1)[0].split()[0] # DDX55\n is_mock = ('eCLIP mock input' in target_old)\n biorep_new = 'mock' if is_mock else ('rep' + str(file.biorep))\n file.target = target_new\n file.biorep = biorep_new\n\n # file.path\n file_format = getattr(file, 'format', None)\n if file_format == 'bam':\n file.path = opj(self.base_dir, 'bam', file.accid+'.bam')\n elif file_format == 'bed narrowPeak':\n file.path = opj(self.base_dir, 'bed.tabix', file.accid+'.bed.gz')\n\n out_file_pkl = opj(self.base_dir, 'metadata.pkl')\n pickle_dump(files, out_file_pkl)\n\n return files\n\n def dump_metadata_inh(self):\n \"\"\"\n Dump metadata from directory listing. (for in-house called peaks)\n\n >>> eCLIP('inh180319').dump_metadata_inh()\n >>> eCLIP('enc180620cits').dump_metadata_inh()\n \"\"\"\n\n # files\n in_dir = opj(self.base_dir, 'bed')\n in_files_bed = [opj(in_dir, filename) for filename in os.listdir(in_dir)\n if os.path.splitext(filename)[1] == '.bed']\n\n # files\n files = []\n for in_file_bed in in_files_bed:\n\n # file\n file = SimpleNamespace()\n files.append(file)\n\n # setattr\n names = ('target', 'sample', 'biorep')\n values = os.path.splitext(os.path.basename(in_file_bed))[0].split('_')\n for name, value in zip(names, values):\n setattr(file, name, value)\n file.format = 'bed narrowPeak'\n file.assembly = 'hg38'\n file.source = in_file_bed\n\n # out_file_bb = opj(out_dir2, file.accid+'.bb')\n # chrom_sizes = opj('genome', 'hg38', 'chrom.sizes')\n # cmdl = cmdl_bed2bb_np(out_file_bed, chrom_sizes, out_file_bb)\n # os.system(cmdl)\n # print(cmdl)\n\n # file.accid, file.path\n out_dir = opj(self.base_dir, 'bed.tabix')\n files = sorted(files, key=lambda file: (file.sample, file.target, file.biorep))\n for i, file in enumerate(files):\n number = i + 1\n file.accid = f'ENCFF{number:06d}'\n file.path = opj(out_dir, f'{file.accid}.bed.gz')\n # print(file.accid, file.sample, file.target, sep='\\t')\n\n # bedsort, tabix\n omd(out_dir)\n for file in files:\n in_file_bed = file.source\n out_file_bed = opj(out_dir, f'{file.accid}.bed')\n cmdl = cmdl_bedsort(in_file_bed, out_file_bed)\n cmdl += cmdl_tabix_bed(out_file_bed)\n print(cmdl)\n subprocess.run(cmdl, shell=True)\n\n # pickle_dump\n out_file_pkl = opj(self.base_dir, 'metadata.pkl')\n pickle_dump(files, out_file_pkl)\n\n def get_files(self, **terms):\n \"\"\"\n Search eCLIP-seq files by search terms.\n\n >>> eclip = eCLIP('enc180620')\n >>> files = eclip.get_files(format='bed narrowPeak', sample='HepG2', target='IGF2BP1')\n \"\"\"\n # refine search terms with allowed terms and value types.\n terms = {k: (v,) if isinstance(v, str) else v\n for k, v in terms.items() if k in self.ALLOWED_TERMS\n and any(isinstance(v, t) for t in self.ALLOWED_TYPES)}\n files = [file for file in self.files if all(getattr(file, k, None) in v for k, v in terms.items())]\n return files\n\n def get_file(self, **terms):\n \"\"\"Return unique file which meets the criteria perfectly.\"\"\"\n files = self.get_files(**terms)\n assert len(files) == 1\n return files[0]\n\n def get_targets(self, assembly, sample=None):\n \"\"\"Return list of target objects\"\"\"\n terms = dict(sample=sample)\n terms = {k: v for k, v in terms.items() if v is not None}\n files = self.get_files(format='bed narrowPeak', assembly=assembly, **terms)\n targets = defaultdict(SimpleNamespace)\n for file in files:\n target = targets[file.target]\n if not hasattr(target, 'name'):\n target.name = file.target\n if not hasattr(target, 'files_byrep'):\n target.files_byrep = defaultdict(list)\n target.files_byrep[file.biorep].append(file)\n\n targets = sorted(targets.values(), key=lambda target: target.name)\n\n return targets\n\n def fetch_readdepth(self, gene, sample, target, biorep, normalize=True):\n\n \"\"\"Return read depth across the gene body\"\"\"\n terms = dict(format='bam', assembly=gene.genome.name, sample=sample, target=target, biorep=biorep)\n file = self.get_file(**terms)\n bam = pysam.AlignmentFile(file.path)\n\n readdepth = np.zeros(gene.exonsize)\n for read in bam.fetch(gene.chrom, gene.tx_start, gene.tx_end):\n\n # keep reads overlapped with exons\n coor = Coor(read.reference_name, read.reference_start, read.reference_end)\n if not any(exon.overlap(coor) for exon in gene.exons):\n continue\n\n # assertions\n nh = read.get_tag('NH')\n assert nh == 1\n\n # site\n for i in read.get_reference_positions():\n site1 = Coor(gene.chrom, i + 0, i + 1)\n site2 = site1.transform(gene.nmid, gene.exons, gene.strand)\n if site2 is None:\n continue\n readdepth[site2.start] += 1\n\n if normalize:\n readdepth = readdepth * (1_000_000 / bam.mapped)\n\n return readdepth\n\n def fetch_citsdepth(self, gene, sample, target, biorep, normalize=True):\n \"\"\"Return CITS depth across the gene body\"\"\"\n\n terms = dict(format='bam', assembly=gene.genome.name,\n sample=sample, target=target, biorep=biorep)\n file = self.get_file(**terms)\n bam = pysam.AlignmentFile(file.path)\n\n citsdepth = np.zeros(gene.exonsize)\n for read in bam.fetch(gene.chrom, gene.tx_start, gene.tx_end):\n\n # keep read 2 only\n if not read.is_read2:\n continue\n\n # keep reads on same strand\n strand = '-' if read.is_reverse else '+'\n if not (gene.strand == strand):\n continue\n\n # keep reads overlapped with exons\n coor = Coor(read.reference_name, read.reference_start, read.reference_end)\n if not any(exon.overlap(coor) for exon in gene.exons):\n continue\n\n # 5' end of read 2\n read5e = {'+': coor.start, '-': coor.end - 1}[strand]\n\n # assertions\n nh = read.get_tag('NH')\n assert nh == 1\n\n # site\n site1 = Coor(gene.chrom, read5e + 0, read5e + 1)\n site2 = site1.transform(gene.nmid, gene.exons, gene.strand)\n if site2 is None:\n continue\n citsdepth[site2.start] += 1\n\n if normalize:\n citsdepth = citsdepth * (1_000_000 / bam.mapped)\n\n return citsdepth\n\n def fetch_citssignal(self, gene, sample, target):\n \"\"\"Return CITS signal: (rep1 + rep2) / 2 - mock\"\"\"\n citsdepth1 = self.fetch_citsdepth(gene, sample, target, 'rep1')\n citsdepth2 = self.fetch_citsdepth(gene, sample, target, 'rep2')\n citsdepthm = self.fetch_citsdepth(gene, sample, target, 'mock')\n citssignal = (citsdepth1 + citsdepth2) / 2 - citsdepthm\n return citssignal\n\n def fetch_peaks(self, gene, sample=None, target=None, biorep=None):\n \"\"\"Return peaks(RBP binding sites) using tabix\"\"\"\n\n # gene regions (5'UTR, ORF, 3'UTR)\n regions = tuple(Coor(gene.nmid, s, e) if s < e else None for s, e in pairwise(gene.pos))\n\n # ENCODE narrowPeak: Narrow (or Point-Source) Peaks format\n # https://genome.ucsc.edu/FAQ/FAQformat#format12\n\n peaks = []\n terms = dict(sample=sample, target=target, biorep=biorep)\n terms = {k: v for k, v in terms.items() if v is not None}\n files = self.get_files(format='bed narrowPeak', assembly=gene.genome.name, **terms)\n\n for file in files:\n # print(file.accid, file.sample, file.target, file.biorep, sep='\\t')\n tabix = pysam.TabixFile(file.path)\n if gene.chrom not in tabix.contigs:\n continue\n\n for line in tabix.fetch(gene.chrom, gene.tx_start, gene.tx_end):\n\n # feature: peak as genomic position\n feature = parse_bed_narrow(line)\n if not (gene.strand == feature.strand and\n any(exon.overlap(feature) for exon in gene.exons)):\n continue\n\n # peak: peak as transcript-based position\n peak = feature.transform(gene.nmid, gene.exons, gene.strand)\n\n peak.feature = feature\n peak.score = feature.score\n peak.sigval = feature.sigval\n peak.logp = feature.logp\n\n if feature.peak is not None:\n peak_start = feature.start + feature.peak + 0\n peak_end = feature.start + feature.peak + 1\n peak_coor = Coor(feature.chrom, peak_start, peak_end)\n peak.peak = peak_coor.transform(gene.nmid, gene.exons, gene.strand)\n\n # file-derived attributes\n peak.sample = file.sample # 'K562', 'HepG2'\n peak.target = file.target # 'PCBP2', 'LARP4'\n peak.biorep = file.biorep # 'mock', 'rep1', 'rep2'\n\n # peak.u (0: 5'UTR, 1: ORF, 2: 3'UTR)\n peak_center = Coor(gene.nmid, peak.pos + 0, peak.pos + 1)\n b = tuple(region.include(peak_center) if region else False for region in regions)\n # b = tuple(between(p[i], peak.pos, p[i+1]) for i in range(3))\n assert b.count(True) == 1\n peak.u = b.index(True)\n\n # append\n peak.flag = 0\n peaks.append(peak)\n\n # sort by transcript-based position (5'end to 3'end)\n peaks = sorted(peaks)\n\n return tuple(peaks)\n\n def fetch_called(self, gene, sample, target, minscore=None):\n \"\"\"\n Fetch nucleotide-resolution array:\n Whether the position is called as a eCLIP peak.\n \"\"\"\n is_called = np.zeros(gene.exonsize, dtype=int)\n peaks = self.fetch_peaks(gene, sample=sample, target=target)\n if isinstance(minscore, int):\n peaks = [peak for peak in peaks if peak.score >= minscore]\n for peak in peaks:\n is_called[peak.start:peak.end] = 1\n return is_called\n\n def fetch_calldepth(self, gene, sample=None, minscore=None):\n \"\"\"\n Fetch nucleotide-resolution array:\n The number of RBPs binding at same position\n\n >>> calldepthL = eclip.fetch_calldepth(gene, sample='HepG2', minscore=200)\n >>> calldepthS = eclip.fetch_calldepth(gene, sample='HepG2', minscore=1000)\n \"\"\"\n\n calldepth = np.zeros(gene.exonsize, dtype=int)\n\n targets = self.get_targets(gene.genome.name, sample=sample)\n for target in targets:\n is_called = self.fetch_called(gene, sample, target.name, minscore)\n calldepth += is_called\n\n return calldepth\n\n def fetch_callscore(self, gene, sample, target=None):\n \"\"\"\n Fetch nucleotide-resolution array:\n >>> eclip.fetch_callscore(gene, 'HepG2', 'IGF2BP1')\n \"\"\"\n callscore = np.zeros(gene.exonsize, dtype=int)\n peaks = self.fetch_peaks(gene, sample=sample, target=target)\n\n for peak in peaks:\n # print(peak.start, peak.end, peak.sample, peak.target, peak.biorep, peak.score, sep='\\t')\n sliced = callscore[peak.start:peak.end]\n npfull = np.full(peak.len, peak.score)\n scores = np.max(np.stack((sliced, npfull)), axis=0)\n callscore[peak.start:peak.end] = scores\n return callscore\n\n\ndef bedsort_and_tabix(ecpname):\n eclip = eCLIP(ecpname)\n files = eclip.get_files(format='bed narrowPeak')\n for file in files:\n in_file_gz = opj(eclip.base_dir, 'bed', file.accid + '.bed.gz')\n out_file_bed = opj(eclip.base_dir, 'bed.tabix', file.accid + '.bed')\n cmdl = cmdl_bedsort(in_file_gz, out_file_bed)\n cmdl += cmdl_tabix_bed(out_file_bed)\n print(cmdl)\n os.system(cmdl)\n\n\ndef encode_file_check():\n\n ecpname = 'enc180620'\n samples = ('K562', 'HepG2')\n formats = ('bam', 'bed narrowPeak')\n assemblies = ('hg19', 'GRCh38')\n bioreps = ('mock', 'rep1', 'rep2')\n\n # | bam ----------------------------------- | bed narrowPeak ---------- |\n # | hg19 ------------- | GRCh38 ----------- | hg19 ------ | GRCh38 ---- |\n # | mock | rep1 | rep2 | mock | rep1 | rep2 | rep1 | rep2 | rep1 | rep2 |\n # | o | o | o | o | o | o | o | o | o | o |\n\n keys = [','.join((format, assembly, biorep)) for format in formats\n for assembly in assemblies for biorep in (bioreps if format == 'bam' else bioreps[1:])]\n\n eclip = eCLIP(ecpname)\n\n for sample in samples:\n\n files_bytarget = defaultdict(list)\n files = eclip.get_files(sample=sample)\n for file in files:\n files_bytarget[file.target].append(file)\n targets = sorted(list(files_bytarget.keys()))\n print(sample, len(targets)) # 123 RBPs\n\n for target in targets:\n _files = {key: None for key in keys}\n for file in eclip.get_files(target=target, sample=sample):\n key = ','.join((file.format, file.assembly, file.biorep))\n if _files[key] is not None:\n file1 = _files[key]\n file2 = file\n print('exist:', target,\n file1.accid, file1.status,\n file2.accid, file2.status,\n sep='\\t')\n _files[key] = file\n\n is_exist = ''.join(tuple(('.' if _files[key] is None else 'o') for key in keys))\n if not (is_exist == 'oooooooooo'):\n print(is_exist, sample, target, sep='\\t')\n\n\ndef check_pysam_assertions(read):\n # read name\n assert read.query_name == read.qname\n\n # read sequence\n assert read.seq == read.query\n assert read.seq == read.query_sequence\n assert read.seq == read.query_alignment_sequence\n\n # read phred quality score\n assert read.qqual == read.qual\n\n # end position of aligned read\n assert read.reference_end == read.aend\n\n # reference length of aligned read\n assert read.reference_length == read.alen\n\n # aligned pairs\n assert read.aligned_pairs == read.get_aligned_pairs()\n\n # mapping quality\n assert read.mapping_quality == read.mapq\n\n\n# def bam_fetch_depth_gene(bam, gene):\n# \"\"\"\n# fetch read depth on transcript\n# \"\"\"\n#\n# qstart = gene.exons[ 0].start\n# qend = gene.exons[-1].end\n#\n# assert qstart == gene.tx_start\n# assert qend == gene.tx_end\n#\n# depth = [0 for _ in range(gene.exonsize)]\n#\n# for read in bam.fetch(gene.chrom, qstart, qend):\n#\n# # keep read 2 only\n# # if not read.is_read2:\n# # continue\n#\n# # keep reads on same strand\n# # strand = '-' if read.is_reverse else '+'\n# # if not (gene.strand == strand):\n# # continue\n#\n# # keep reads overlapped with exons\n# coor = Coor(read.reference_name, read.reference_start, read.reference_end)\n# if not any(exon.overlap(coor) for exon in gene.exons):\n# continue\n#\n# nh = read.get_tag('NH')\n# assert nh == 1\n#\n# # print(read.reference_start, read.reference_end)\n#\n# # site\n# for i in read.get_reference_positions():\n# site1 = Coor(gene.chrom, i+0, i+1)\n# site2 = site1.transform(gene.nmid, gene.exons, gene.strand)\n# if site2 is None:\n# continue\n# depth[site2.start] += 1\n#\n# return depth\n\n\n# def bam_fetch_r2s_gene(bam, gene):\n# \"\"\"\n# fetch r2s(read 2 start) positions on transcript\n# \"\"\"\n#\n# qstart = gene.exons[ 0].start\n# qend = gene.exons[-1].end\n#\n# assert qstart == gene.tx_start\n# assert qend == gene.tx_end\n#\n# r2s = [0 for _ in range(gene.exonsize)]\n#\n# for read in bam.fetch(gene.chrom, qstart, qend):\n#\n# # keep read 2 only\n# if not read.is_read2:\n# continue\n#\n# # keep reads on same strand\n# strand = '-' if read.is_reverse else '+'\n# if not (gene.strand == strand):\n# continue\n#\n# # keep reads overlapped with exons\n# coor = Coor(read.reference_name, read.reference_start, read.reference_end)\n# if not any(exon.overlap(coor) for exon in gene.exons):\n# continue\n#\n# # 5' end of read 2\n# read5e = {'+': coor.start, '-': coor.end - 1}[strand]\n#\n# # assertions\n# # check_pysam_assertions(read)\n# nh = read.get_tag('NH')\n# assert nh == 1\n#\n# # site\n# site1 = Coor(gene.chrom, read5e + 0, read5e + 1)\n# site2 = site1.transform(gene.nmid, gene.exons, gene.strand)\n# if site2 is None:\n# continue\n# r2s[site2.start] += 1\n#\n# return r2s\n\n\n# def fetch_eclip_signal(ecpname, sn, target, gene):\n# \"\"\"\n# difference of normalized r2s(read 2 starts) values\n# = (norm_r2s_rep1 + norm_r2s_rep2) / 2 - norm_r2s_mock\n# \"\"\"\n#\n# assembly = {'hg19': 'hg19', 'hg38': 'GRCh38'}[gene.geneset.genome]\n# targets = None if target == 'agg' else (target,)\n# terms = dict(format='bam', assembly=assembly, sample=sn2samples(sn), target=targets)\n# eclip = eCLIP(ecpname)\n# files = eclip.get_files(**terms) # bam files\n#\n# # check files\n# assert len(files) == 3\n# assert set(file.biorep for file in files) == {'rep1', 'rep2', 'mock'}\n#\n# # signal\n# signal = [0.0 for _ in range(gene.exonsize)]\n# weight = {'rep1': 0.5, 'rep2': 0.5, 'mock': -1}\n#\n# for file in files:\n# bam = pysam.AlignmentFile(file.path)\n# coef = (10 ** 7) / bam.mapped\n# w = weight[file.biorep]\n# r2s = bam_fetch_r2s_gene(bam, gene)\n# for i, x in enumerate(r2s):\n# signal[i] += (x * coef * w)\n#\n# # print(file.sample, file.target, file.biorep, sum(r2s), max(r2s), sep='\\t')\n#\n# return signal\n\n\ndef parse_bed_narrow(line):\n \"\"\"\n Parse bed ENCODE narrow peaks.\n\n https://genome.ucsc.edu/FAQ/FAQformat.html#format12\n\n 1. chrom - Name of the chromosome (or contig, scaffold, etc.).\n 2. chromStart - The starting position of the feature in the chromosome or scaffold.\n 3. chromEnd - The ending position of the feature in the chromosome or scaffold.\n 4. name - Name given to a region (preferably unique). Use \".\" if no name is assigned.\n 5. score - Indicates how dark the peak will be displayed in the browser (0-1000).\n 6. strand - +/- to denote strand or orientation (whenever applicable). Use \".\" if no orientation is assigned.\n 7. signalValue - Measurement of overall (usually, average) enrichment for the region.\n 8. pValue - Measurement of statistical significance (-log10). Use -1 if no pValue is assigned.\n 9. qValue - Measurement of statistical significance using false discovery rate (-log10).\n 10. peak - Point-source called for this peak; 0-based offset from chromStart.\n \"\"\"\n\n col = line.strip('\\n').split('\\t')\n assert len(col) == 10\n\n feature = Coor(col[0], int(col[1]), int(col[2]))\n feature.name = col[3]\n feature.score = float(col[4]) # 200, 1000\n feature.strand = col[5]\n feature.sigval = float(col[6])\n feature.logp = float(col[7]) if col[7] != '-1' else None\n feature.logq = float(col[8]) if col[8] != '-1' else None\n feature.peak = int(col[9]) if col[9] != '-1' else None\n\n return feature\n\n\ndef get_merged_peaks(gene, peaks, ecpclps):\n\n if len(peaks) == 0:\n return list()\n\n length = gene.exonsize\n # gene regions (5'UTR, ORF, 3'UTR)\n regions = tuple(Coor(gene.nmid, s, e) if s < e else None\n for s, e in pairwise(gene.pos))\n\n # is_called\n is_called = np.zeros((2, length), dtype=bool)\n for peak in peaks:\n i = {'rep1': 0, 'rep2': 1}[peak.biorep]\n is_called[i][peak.start:peak.end] = True\n\n # u12: union of rep1 and rep2 (OR function)\n # i12: intersection of rep1 and rep2 (AND function)\n np_func = {'u12': np.any, 'i12': np.all}[ecpclps]\n arr = np_func(is_called, axis=0).astype(int)\n\n # np.pad\n arr = np.pad(arr, 1, 'constant', constant_values=0)\n diff = arr[1:] - arr[:-1]\n\n # collapsed\n collapsed = []\n starts = np.where(diff == 1)[0]\n ends = np.where(diff == -1)[0]\n assert starts.size == ends.size\n for start, end in zip(starts, ends):\n peak = Coor(gene.nmid, start, end)\n # peak.u (0: 5'UTR, 1: ORF, 2: 3'UTR)\n peak_center = Coor(gene.nmid, peak.pos + 0, peak.pos + 1)\n b = tuple(region.include(peak_center) if region else False for region in regions)\n assert b.count(True) == 1\n peak.u = b.index(True)\n collapsed.append(peak)\n\n return collapsed\n\n\ndef fetch_all_peaks(file):\n\n in_file_gz = file.path\n tabix = pysam.Tabixfile(in_file_gz)\n try:\n lines = tabix.fetch()\n except ValueError:\n lines = ()\n\n peaks = []\n\n for line in lines:\n data = parse_bed_narrow(line)\n peak = Coor(data.chrom, data.start, data.end)\n peak.strand = data.strand\n\n peak.score = data.score\n peak.sigval = data.sigval\n peak.logp = data.logp\n\n peak.sample = file.sample\n peak.target = file.target\n peak.biorep = file.biorep\n\n peaks.append(peak)\n\n return tuple(peaks)\n\n\ndef setattr_peaks(genes, ecpname, ecpcell, target, minscore, mrg, attrname):\n \"\"\"Set gene.peaks, gene.peaks_in\"\"\"\n\n eclip = eCLIP(ecpname)\n sample = {'k': 'K562', 'h': 'HepG2', 'b': ('K562', 'HepG2')}[ecpcell]\n target = {'agg': None}.get(target, target)\n\n for gene in genes:\n\n peaks = eclip.fetch_peaks(gene, sample=sample, target=target)\n peaks = [peak for peak in peaks if peak.score >= minscore]\n # setattr(gene, attr_name + '_raw', peaks)\n\n if len(peaks):\n gene.intensity = max(peak.sigval for peak in peaks)\n\n if mrg in ('u12', 'i12'):\n peaks = get_merged_peaks(gene, peaks, mrg)\n\n peaks_in = tuple(tuple(filter(lambda peak: peak.u == u, peaks)) for u in (0, 1, 2))\n\n setattr(gene, attrname, peaks)\n setattr(gene, attrname + '_in', peaks_in)\n\n\n# def get_encode_normr2s(gene, sample, biorep):\n# \"\"\"Normalized CITS\"\"\"\n#\n# nbioreps = len(biorep)\n# r2s_norm = [0.0 for _ in range(gene.exonsize)]\n#\n# assert gene.geneset.genome == 'hg38'\n# eclip = eCLIP('enc180620')\n# files = eclip.get_files(format='bam', assembly='GRCh38', sample=sample, biorep=biorep)\n#\n# for file in files:\n# bam = pysam.AlignmentFile(file.path)\n# coef = (10 ** 7) / (bam.mapped * nbioreps)\n# r2s = bam_fetch_r2s_gene(bam, gene)\n# for i, x in enumerate(r2s):\n# if x == 0:\n# continue\n# r2s_norm[i] += (x * coef)\n#\n# # print(file._target, file._biorep, bam.mapped, coef, sep='\\t')\n#\n# return r2s_norm\n\n\ndef get_encid():\n numbers = '0123456789'\n alphabets = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n id1 = ''.join(random.choices(numbers, k=3))\n id2 = ''.join(random.choices(alphabets, k=3))\n rv = 'ENCFF' + id1 + id2\n return rv\n\n\ndef filter_byp3u(genes, p3u):\n # p3u: peaks in 3'UTR\n\n if p3u == 'one3utr':\n def _filter(gene):\n npeaks_3utr = len(gene.peaks_in[2])\n return npeaks_3utr == 1\n\n elif p3u == 'omr3utr':\n def _filter(gene):\n npeaks_3utr = len(gene.peaks_in[2])\n return npeaks_3utr > 1\n\n elif p3u == 'all3utr':\n def _filter(gene):\n return True\n\n elif p3u == 'onenearmts':\n def _filter(gene):\n edge = (0, gene.exonsize)\n site_flanking = gene.site.flanking(50, edge)\n npeaks_nearmts = len([peak for peak in gene.peaks_in[2] if site_flanking.overlap(peak)])\n return npeaks_nearmts == 1\n\n else:\n raise ValueError(f\"Unknown p3u value: {p3u}\")\n\n genes = [gene for gene in genes if _filter(gene)]\n\n return genes\n\n\n### STATS\n\n\ndef stats_peaks_bytarget(ecpname, genome, sn):\n \"\"\"get stats by RBPs\"\"\"\n\n # f180116_dump(ecpname, genome, sn)\n # f180116_table(ecpname, genome, sn)\n f180116_plot(ecpname, genome, sn)\n\n\ndef f180116():\n\n # stats_peaks_bytarget('encode', 'hg38', 'ho')\n # stats_peaks_bytarget('inhouse1', 'hg38', 'ho')\n # stats_peaks_bytarget('inhouse05', 'hg38', 'ho')\n # stats_peaks_bytarget('inhouse01', 'hg38', 'ho')\n\n pass\n\n\ndef f180116_dump(ecpname, genome, sn):\n stats = []\n\n eclip = eCLIP(ecpname)\n targets = get_eclip_targets(ecpname, sn)\n samples = sn2samples(sn)\n\n for i, target in enumerate(targets):\n\n # fetch peaks\n files = eclip.get_files(format='bed narrowPeak', assembly=genome, sample=samples, target=target)\n peaks = [peak for file 
in files for peak in fetch_all_peaks(file)]\n        assert len(files) == 2\n\n        # peak number\n        stat = SimpleNamespace()\n        stat.target = target\n        stat.npeaks = len(peaks) # the number of all peaks\n        stat.npeaks2 = [0, 0, 0, 0] # the number of peaks by rep, score\n\n        for peak in peaks:\n            i = {'rep1': 0, 'rep2': 1}[peak.biorep]\n            j = {200: 0, 1000: 1}[peak.score]\n            k = i * 2 + j\n            stat.npeaks2[k] += 1\n\n        assert stat.npeaks == sum(stat.npeaks2)\n\n        # peak length\n        stat.plen = [peak.len for peak in peaks]\n        stat.plen_min = min(stat.plen)\n        stat.plen_max = max(stat.plen)\n        stat.plen_mean = mean(stat.plen)\n        stat.plen_med = median(stat.plen)\n        stat.plen_ci95 = sem(stat.plen) * 1.96\n\n        stats.append(stat)\n        print(stat.target, stat.npeaks, stat.npeaks2,\n              stat.plen_min, stat.plen_med, stat.plen_max, sep='\\t')\n\n    out_dir = opj('data/encode/eclip', ecpname, 'stats')\n    omd(out_dir)\n\n    # .dat\n    filename = f'bytarget.{genome}.{sn}'\n    out_file_dat = opj(out_dir, filename + '.dat')\n    pickle_dump(stats, out_file_dat)\n\n\ndef f180116_table(ecpname, genome, sn):\n\n    # stats\n    basedir = opj('data/encode/eclip', ecpname, 'stats')\n    filename = f'bytarget.{genome}.{sn}'\n    in_file_dat = opj(basedir, filename + '.dat')\n    stats = pickle_load(in_file_dat)\n\n    # df\n    rows = []\n    columns = ('target', 'npeaks', 'npeaks1L', 'npeaks1S', 'npeaks2L', 'npeaks2S',\n               'plen_min', 'plen_mean', 'plen_med', 'plen_max', 'plen_ci95')\n    for stat in stats:\n        data = [stat.target, stat.npeaks, *stat.npeaks2,\n                stat.plen_min, stat.plen_mean, stat.plen_med, stat.plen_max, stat.plen_ci95]\n        row = {column: value for column, value in zip(columns, data)}\n        rows.append(row)\n    df = pd.DataFrame(rows)\n\n    # df.to_csv\n    out_file_tsv = opj(basedir, filename + '.tsv')\n    df.to_csv(out_file_tsv, sep='\\t', index=False)\n\n\ndef f180116_plot(ecpname, genome, sn):\n    basedir = opj('data/encode/eclip', ecpname, 'stats')\n    filename = 'bytarget.{}.{}'.format(genome, sn)\n    in_file_dat = opj(basedir, filename + '.dat')\n    stats = pickle_load(in_file_dat)\n\n    # plt.subplots\n    ntarget = len(stats)\n    w = ntarget * 0.25\n    h = 3 * 3\n    f, axes = plt.subplots(3, figsize=(w, h))\n\n    # x-axis\n    x = [(i + 1) for i in range(ntarget)]\n    xlab = [stat.target for stat in stats]\n    xmin = 0\n    xmax = ntarget + 1\n\n    # axes[0]: the number of peaks\n    ax = axes[0]\n    colors = ('pink', 'red', 'skyblue', 'blue')\n    labels = ('rep1L', 'rep1S', 'rep2L', 'rep2S')\n\n    for i in range(4):\n        y = [stat.npeaks2[i] for stat in stats]\n        yb = [sum(stat.npeaks2[:i]) for stat in stats]\n        ax.bar(x, y, bottom=yb, color=colors[i], ecolor='black', align='center', label=labels[i])\n\n    ax.legend(loc='upper right', fontsize='small')\n    ax.set_xticks(x)\n    ax.set_xticklabels(xlab, rotation=90)\n    ax.set_xlim(xmin, xmax)\n    ax.set_ylabel('npeaks')\n\n    tx = 0.0\n    ty = 1.0\n    text = 'stats_peaks_bytarget: {}.{}.{}'.format(ecpname, genome, sn)\n    ax.text(tx, ty, text, fontsize='small', va='bottom', transform=ax.transAxes)\n\n    # axes[1]: median peak length\n    ax = axes[1]\n    y = [stat.plen_med for stat in stats]\n    y_err = [stat.plen_ci95 for stat in stats]\n\n    ax.bar(x, y, yerr=y_err, color='gray', ecolor='black', align='center')\n    ax.set_xticks(x)\n    ax.set_xticklabels(xlab, rotation=90)\n    ax.set_xlim(xmin, xmax)\n    ax.set_ylabel('median peak length')\n\n    # axes[2]: max peak length\n    ax = axes[2]\n    y = [stat.plen_max for stat in stats]\n    ax.bar(x, y, color='gray', align='center')\n    ax.set_xticks(x)\n    ax.set_xticklabels(xlab, rotation=90)\n    ax.set_xlim(xmin, xmax)\n    ax.set_ylabel('max peak length')\n\n    # savefig\n    
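# note: savefig() below is assumed to be a project helper that wraps\n    # plt.savefig (matplotlib itself exposes this as plt.savefig)\n    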
out_file_png = opj(basedir, filename + '.png')\n savefig(out_file_png)\n\n\ndef stats_ploc_plot(sn):\n\n targets = eclip_targets(sn)\n stats = []\n for target in targets:\n in_file_dat = 'data/eclip/stats/ploc/%s/%s.dat' % (sn, target)\n stat = pickle_load(in_file_dat)\n stats.append(stat)\n stats = sorted(stats, key=lambda stat: stat.npeaks_pct[2], reverse=True) # 3'UTR fraction\n nstats = len(stats)\n\n x = [i + 1 for i in range(nstats)]\n xlabel = [stat.target for stat in stats]\n\n # ORF 5'UTR 3'UTR intronic intergenic multiple\n # red blue green yellow gray dark gray\n colors = ('#eb2b14', '#019be3', '#97cf46', '#f8f254', '#b2c7cc', '#444444')\n\n plt.figure(1, figsize=(15, 8))\n\n ax = plt.subplot(2, 1, 1)\n\n for i in range(6):\n y = [stat.npeaks[i] / 1000000 for stat in stats]\n y_ = [sum(stat.npeaks[0:i]) / 1000000 for stat in stats]\n plt.bar(x, y, 0.7, color=colors[i], align='center', edgecolor='black', bottom=y_, label=stats[0].header[i])\n\n plt.legend(loc='upper right', fontsize='small')\n\n plt.bar(x, y, color='gray', align='center', edgecolor='black')\n\n plt.xlim(0, nstats + 1)\n\n plt.xticks(x, xlabel, rotation=90)\n plt.ylabel('npeaks (Million)')\n\n cell = {'ko': 'K562', 'ho': 'HepG2'}[sn]\n text = 'Cell: %s (%d RBPs)' % (cell, nstats)\n plt.text(1, 1, text, verticalalignment='center')\n\n ax = plt.subplot(2, 1, 2)\n\n for i in range(6):\n y = [stat.npeaks_pct[i] for stat in stats]\n y_ = [sum(stat.npeaks_pct[0:i]) for stat in stats]\n plt.bar(x, y, 0.7, color=colors[i], align='center', edgecolor='black', bottom=y_, label=stats[0].header[i])\n\n plt.xlim(0, nstats + 1)\n plt.ylim(0, 1.0)\n\n plt.xticks(x, xlabel, rotation=90)\n plt.ylabel('Fraction of npeaks')\n text = 'Sorted by fraction of the number of peaks located on 3\\'UTR'\n plt.text(1.0, 1.02, text, fontsize='small')\n\n out_file_png = 'data/eclip/stats/stats_ploc.%s.png' % (sn)\n savefig(out_file_png)\n\n\ndef stats_ploc_plot2(sn):\n # pie chart\n\n targets = eclip_targets(sn)\n stats = []\n for target in targets:\n in_file_dat = 'data/eclip/stats/ploc/%s/%s.dat' % (sn, target)\n stat = pickle_load(in_file_dat)\n stats.append(stat)\n stats = sorted(stats, key=lambda stat: stat.npeaks_pct[2], reverse=True) # 3'UTR fraction\n nstats = len(stats)\n\n x = [i + 1 for i in range(nstats)]\n xlabel = [stat.target for stat in stats]\n\n # ORF 5'UTR 3'UTR intronic intergenic multiple\n # orangered blue green yellow gray dark gray\n colors = ('#ff4500', '#019be3', '#97cf46', '#f8f254', '#b2c7cc', '#444444')\n\n ncol = 10\n q, r = divmod(len(stats), ncol)\n nrow = q + (0 if r == 0 else 1)\n\n w = ncol * 3\n h = nrow * 3\n\n plt.figure(1, figsize=(w, h))\n\n for i, stat in enumerate(stats):\n n = i + 1\n plt.subplot(nrow, ncol, n)\n\n plt.title('%s (%.2fM)' % (stat.target, sum(stat.npeaks) / 10 ** 6))\n sizes = [pct for pct in stat.npeaks_pct]\n plt.pie(sizes, colors=colors, autopct='%.1f%%', startangle=90)\n plt.axis('equal')\n\n out_file_png = 'stats_ploc.%s.png' % (sn)\n savefig(out_file_png)\n\n\n# f170105a()\n# qsub_stats_ploc('ko')\n# qsub_stats_ploc('ho')\n# stats_ploc_plot('ko')\n# stats_ploc_plot('ho')\n# stats_ploc_plot2('ho')\n# stats_ploc_plot2('ko')\n","sub_path":"baeklab/genomics/encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":35987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1900940","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, print_function\nfrom flask import Flask\nfrom flask import 
request\nfrom spacy.en import English\nfrom io import BytesIO\nfrom dragnet import content_extractor, content_comments_extractor\nimport requests\nimport json\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfpage import PDFPage\nfrom cStringIO import StringIO\nfrom bs4 import BeautifulSoup\nimport re\nimport Iscore\n\nnlp = English()\n\napp = Flask(__name__)\n\n@app.route('/get_text')\ndef get_text():\n    url = request.args.get('target')\n    page = requests.get(url)\n    page = content_extractor.analyze(page.content)\n    text = tokenize_texts(page.decode('utf-8'))\n    return json.dumps(text)\n\n@app.route('/generate_kb')\ndef generate_kb():\n    url = request.args.get('target')\n    kb_name = request.args.get('kb_name')\n    page = requests.get(url)\n    page = BeautifulSoup(page.content, 'html.parser')\n    for script in page.find_all(\"script\"):\n        script.decompose()\n    text = tokenize_texts(page.get_text())\n    tt = []\n    s = 'menu'\n    for t in text:\n        if t.endswith('.') and s not in t:\n            t = t.replace('\\n', ' ')\n            t = t.replace(':', ' ')\n            t = t.replace('.', '')\n            t = t.replace(',', '')\n            t = t.replace('(', '')\n            t = t.replace(')', '')\n            t = t.replace('-', ' ')\n            t = re.sub('\\s+', ' ', t).strip()\n            t = re.sub('[^A-Za-z0-9]+', ' ', t)\n            #t.sub(r'\\.([a-zA-Z])', r'. \\1', t)\n            if len(t) > 1:\n                tt.append(t)\n    thefile = open(\"%s.txt\" % kb_name, 'w')\n    for item in tt:\n        thefile.write(item.encode('utf-8').strip())\n        thefile.write('\\n')\n    thefile.close()\n    return json.dumps(tt)\n\n@app.route('/get_text_from_pdf')\ndef get_from_pdf():\n    url = request.args.get('target')\n    pdf = requests.get(url)\n    memoryFile = BytesIO(pdf.content)\n    tt = convert_pdf_to_txt(memoryFile)\n    tt = clean_string(tt)\n    tt = tokenize_texts(tt)\n    return json.dumps(tt)\n\n@app.route('/tokenize')\ndef tokenize_texts(texts):\n    raw_text = texts\n    doc = nlp(raw_text)\n    sentences = [sent.string.strip() for sent in doc.sents]\n    return sentences\n\n@app.route('/answers')\ndef get_answers():\n    question = request.args.get('question')\n    file = request.args.get('file')\n    answers = Iscore.dynamic_score(file, question)\n    return json.dumps(answers)\n\n\ndef clean_string(t):\n    t = t.decode(\"utf-8\")\n    t = t.replace('\\n', ' ')\n    t = \" \".join(t.split())\n    return t\n\ndef convert_pdf_to_txt(m_file):\n    rsrcmgr = PDFResourceManager()\n    retstr = StringIO()\n    codec = 'utf-8'\n    laparams = LAParams()\n    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n    interpreter = PDFPageInterpreter(rsrcmgr, device)\n    password = \"\"\n    maxpages = 0\n    caching = True\n    pagenos = set()\n\n    for page in PDFPage.get_pages(m_file, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True):\n        interpreter.process_page(page)\n\n    text = retstr.getvalue()\n\n    device.close()\n    retstr.close()\n    return text\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080, threaded=True)\n","sub_path":"nlp_utils.py","file_name":"nlp_utils.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"386699036","text":"from item import Item\n\n\nclass Compra:\n    def __init__(self):\n        self.itens = []\n\n    def add_item(self, item):\n        if isinstance(item, Item):\n            self.itens.append(item)\n            return 'Valid item'\n        else:\n            return 'Invalid item'\n\n    def get_valor_compra(self):\n        return sum(list(map(lambda item: item.get_valor_item(), 
self.itens)))\n\n    def listar_itens(self):\n        resultado = \"\"\n        contador = 1\n\n        if len(self.itens) > 0:\n            for item in self.itens:\n                resultado += '%4d:%30s|%16.2f R$|%8d|%24.2f R$\\n' % (contador, item.get_produto_nome(),\n                                                                     item.get_produto_preco(), item.get_quantidade(),\n                                                                     item.get_valor_item())\n                contador += 1\n        else:\n            resultado += 'No items in the purchase\\n'\n\n        return resultado\n\n    def listar_itens_separados(self, dado):\n        resultado = \"\"\n        contador = 1\n        if len(self.itens) > 0:\n            for item in self.itens:\n                if dado == 'id':\n                    resultado += '%d\\n' % contador\n                elif dado == 'nome':\n                    resultado += '%s\\n' % item.get_produto_nome()\n                elif dado == 'preco':\n                    resultado += '%.2f R$\\n' % item.get_produto_preco()\n                elif dado == 'quantidade':\n                    resultado += '%d\\n' % item.get_quantidade()\n                elif dado == 'total':\n                    resultado += '%.2f R$\\n' % item.get_valor_item()\n                contador += 1\n        return resultado\n\n    def remover_item(self, id_item):\n        self.itens.pop(id_item)\n\n    def set_item(self, lista):\n        self.itens = lista\n\n    def tamanho_lista(self):\n        return len(self.itens)\n","sub_path":"Trabalho Loja Online - Aplicação Multi-Thread/compra.py","file_name":"compra.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"109966403","text":"from incl import *\n\nclass Dev(Incluid):\n    def __init__(self, name: str, surname: str, age: int, posada: str, price: int, skills: str):\n        Incluid.__init__(self, name, surname, age, posada, price)\n        self.skills = skills\n\n    def show_person_info(self):\n        print(\"Name: \", self.name, \"\\nSurname: \", self.surname, \"\\nAge: \",\n              self.age, \"\\nPosition: \", self.posada, \"\\nSkills: \", self.skills)\n\n\ndev = Dev(\"Adam\", \"Dobson\", 23, \"Posada\", 13000, \"Skills\")\n\ndev.show_person_info()\ndev.name = \"Adamus\"\ndev.surname = \"Dobsunos\"\ndev.age = 24\ndev.posada = \"Posada 1\"\ndev.price = 10000\ndev.skills = \"Skills 1\"\ndev.show_person_info()\n","sub_path":"dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357671404","text":"#pylint: disable=too-many-locals,too-many-statements, missing-docstring, pointless-string-statement\nfrom array import array\nimport yaml\n# pylint: disable=import-error, no-name-in-module, unused-import\nfrom ROOT import TH1F, TH2F, TCanvas, TGraph, TLatex, gPad, TFile, TF1\nfrom ROOT import gStyle, gROOT, TStyle, TLegendEntry, TLegend\n\n\"\"\"\nread predictions from arXiv.1907.12786\n\"\"\"\ndef scale(hadron=\"Omega_ccc\", model=\"SHMC_2021\", collision=\"PbPb\", \\\n          brmode=\"central\"):\n    with open(r'prediction.yaml') as fileparam:\n        param = yaml.load(fileparam, Loader=yaml.FullLoader)\n    yieldmid = param[\"models\"][model][collision][hadron]\n    sigma_aa_b = param[\"statistics\"][collision][\"sigmaAA_b\"]\n    lumiaa_monthi_invnb = param[\"statistics\"][collision][\"lumiAA_monthi_invnb\"]\n    nevt = sigma_aa_b * lumiaa_monthi_invnb * 1e9\n    bratio = param[\"branchingratio\"][hadron][brmode]\n    enhanc = 1\n    eff = 1.\n    legendtext = '%s N_{ev}(%s) = %.1f B, BR=%.5f%%, #varepsilon=%.2f' \\\n        % (model, collision, nevt/1e9, bratio*100, eff)\n    scale_factor = bratio * enhanc * eff * nevt * yieldmid\n    return scale_factor, legendtext\n\ndef analysis(hadron=\"Omega_ccc\"):\n    gStyle.SetOptStat(0)\n    with open(r'prediction.yaml') as fileparam:\n        param = yaml.load(fileparam, Loader=yaml.FullLoader)\n    models = 
param[\"comparison_models\"][hadron][\"models\"]\n collisions = param[\"comparison_models\"][hadron][\"collisions\"]\n brmode = param[\"comparison_models\"][hadron][\"brmode\"]\n colors = param[\"comparison_models\"][hadron][\"colors\"]\n useshape = param[\"comparison_models\"][hadron][\"useshape\"]\n ymin = param[\"comparison_models\"][hadron][\"ymin\"]\n ymax = param[\"comparison_models\"][hadron][\"ymax\"]\n binanal = array('d', param[\"pt_binning\"][\"hadron\"][hadron])\n dorebin = param[\"do_corr\"][\"dorebin\"]\n\n fin = TFile(\"../Inputs/\" + useshape +\".root\")\n histo_norm = fin.Get(\"hpred_norm\")\n\n canvas = TCanvas(\"canvas\", \"A Simple Graph Example\", 881, 176, 668, 616)\n gStyle.SetOptStat(0)\n canvas.SetHighLightColor(2)\n canvas.Range(-1.25, -4.625, 11.25, 11.625)\n canvas.SetFillColor(0)\n canvas.SetBorderMode(0)\n canvas.SetBorderSize(2)\n canvas.SetLogy()\n canvas.SetFrameBorderMode(0)\n canvas.SetFrameBorderMode(0)\n canvas.cd()\n gPad.SetLogy()\n\n hempty = TH2F(\"hempty\", \";p_{T};Yields\", 100, 0., 10., 100, ymin, ymax)\n hempty.GetXaxis().SetTitle(\"p_{T}\")\n hempty.GetXaxis().SetLabelFont(42)\n hempty.GetXaxis().SetTitleOffset(1)\n hempty.GetXaxis().SetTitleFont(42)\n hempty.GetYaxis().SetLabelFont(42)\n hempty.GetYaxis().SetTitleOffset(1.35)\n hempty.GetYaxis().SetTitleFont(42)\n hempty.GetZaxis().SetLabelFont(42)\n hempty.GetZaxis().SetTitleOffset(1)\n hempty.GetZaxis().SetTitleFont(42)\n hempty.Draw()\n histolist = [None]*len(models)\n\n leg = TLegend(0.1471471, 0.6108291, 0.3018018, 0.8747885, \"\", \"brNDC\")\n leg.SetBorderSize(1)\n leg.SetLineColor(0)\n leg.SetLineStyle(1)\n leg.SetLineWidth(1)\n leg.SetFillColor(0)\n leg.SetTextSize(0.022)\n leg.SetFillStyle(1001)\n\n for icase, _ in enumerate(models):\n histolist[icase] = histo_norm.Clone(\"histo_pred%d\" % icase)\n scalef, text = scale(hadron, models[icase], collisions[icase], brmode[icase])\n for ibin in range(histolist[icase].GetNbinsX()-1):\n binwdith = histolist[icase].GetBinWidth(ibin+1)\n yvalue = histolist[icase].GetBinContent(ibin+1)\n histolist[icase].SetBinContent(ibin+1, binwdith*scalef*yvalue)\n if dorebin is True:\n histolist[icase] = histolist[icase].Rebin(len(binanal)-1, \\\n \"histo_pred%d\" % icase, binanal)\n histolist[icase].SetLineColor(colors[icase])\n histolist[icase].SetMarkerColor(colors[icase])\n histolist[icase].SetLineWidth(2)\n histolist[icase].Draw(\"same\")\n text = text + \" Yield(tot)=%.2f\" % histolist[icase].Integral()\n leg.AddEntry(histolist[icase], text, \"pF\")\n leg.Draw()\n canvas.SaveAs(hadron+\"_results.pdf\")\n canvas.SaveAs(hadron+\"_results.C\")\n\nanalysis(\"Omega_ccc\")\nanalysis(\"Omega_cc\")\nanalysis(\"Xi_cc\")\nanalysis(\"X3872\")\n","sub_path":"analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"643758974","text":"import json\n\nfrom django.conf import settings\n\nfrom django_webtest import WebTest\n\nfrom .factories import BadgeFactory\nfrom .models import Badge, UserBadge\nfrom accounts.factories import AdminUserFactory, UserFactory\n\n\nclass APITests(WebTest):\n csrf_checks = False\n\n def setUp(self):\n self.api_root = settings.REST_FRAMEWORK['API_ROOT']\n self.user = UserFactory.create()\n self.admin_user = AdminUserFactory.create()\n self.badge = BadgeFactory.create()\n self.user_badge = UserBadge.objects.create(\n user=self.user, \n badge=self.badge\n )\n\n self.badge_url = self.api_root + 'badge/'\n 
self.user_badge_url = self.api_root + 'userbadge/'\n\n # data used to make requests\n self.badge_data = {\n 'title': self.badge.title,\n 'description': self.badge.description,\n }\n self.user_badge_data = {\n 'user': self.admin_user.id,\n 'badge': self.badge.id,\n }\n\n \"\"\"\n section for testing the badge model\n \"\"\"\n def test_create_badge(self):\n \"\"\"\n Create a new badge\n \"\"\"\n url = self.badge_url\n \n response = self.app.post(url, self.badge_data, \n user=self.admin_user)\n self.assertEqual(response.status_code, 201)\n\n def test_get_badges(self):\n \"\"\"\n Get all of the badge objects\n \"\"\"\n url = self.badge_url\n response = self.app.get(url)\n obj = json.loads(response.content.decode())\n\n # check that the response is good, and that it is not empty\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(obj, None)\n\n def test_get_badge(self):\n \"\"\"\n Get a specific badge\n \"\"\"\n url = self.badge_url + str(self.badge.id) + '/'\n response = self.app.get(url)\n obj = json.loads(response.content.decode())\n\n # check that the response is good, and that it is not empty\n self.assertEqual(response.status_code, 200)\n self.assertEqual(obj['id'], self.badge.id)\n\n\n \"\"\"\n section for testing the UserBadge model\n \"\"\"\n def test_create_user_badge(self):\n \"\"\"\n Create a new user_badge\n \"\"\"\n url = self.user_badge_url\n \n response = self.app.post(url, self.user_badge_data, \n user=self.admin_user)\n self.assertEqual(response.status_code, 201)\n\n def test_get_user_badges(self):\n \"\"\"\n Get all of the user_badge objects\n \"\"\"\n url = self.user_badge_url\n response = self.app.get(url)\n obj = json.loads(response.content.decode())\n\n # check that the response is good, and that it is not empty\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(obj, None)\n\n def test_unique_user_badge(self):\n \"\"\"\n Get a specific user_badge\n \"\"\"\n url = self.user_badge_url + str(self.user_badge.id) + '/'\n response = self.app.get(url)\n obj = json.loads(response.content.decode())\n\n # check that the response is good, and that it is not empty\n self.assertEqual(response.status_code, 200)\n self.assertEqual(obj['id'], self.user_badge.id)\n\n\n","sub_path":"badges/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"172724239","text":"import sys\nimport math\nfrom operator import itemgetter\nfrom django.contrib.auth import get_user_model\n\nfrom .models import UserProductRating\n\n\nclass ItemBasedCF(object):\n \"\"\" \n TopN recommendation - Item Based Collaborative Filtering \n \"\"\"\n def __init__(self):\n self.train_set = {}\n\n self.n_sim_product = 20\n self.n_rec_product = 10\n\n self.product_sim_mat = {}\n self.product_popular = {}\n self.product_count = 0\n\n def generate_dataset(self):\n train_set_len = 0\n qs = UserProductRating.objects.all()\n for instance in qs:\n user, product, rating = instance.user.id, instance.product.id, instance.rating\n self.train_set.setdefault(user, {})\n self.train_set[user][product] = float(rating if rating else 5)\n train_set_len += 1\n\n print('train set = %s' % train_set_len, file=sys.stderr)\n\n def calc_product_sim(self):\n \"\"\"\n calculate product similarity matrix\n \"\"\"\n for user, products in self.train_set.items():\n for product in products:\n if product not in self.product_popular:\n self.product_popular[product] = 0\n self.product_popular[product] += 
1\n\n        self.product_count = len(self.product_popular)\n        print('total product number = %d' % self.product_count, file=sys.stderr)\n        print('counting product popularity succeeded', file=sys.stderr)\n\n        # count co-rated users between items\n        itemsim_mat = self.product_sim_mat\n        for user, products in self.train_set.items():\n            for m1 in products:\n                for m2 in products:\n                    if m1 == m2:\n                        continue\n                    itemsim_mat.setdefault(m1, {})\n                    itemsim_mat[m1].setdefault(m2, 0)\n                    itemsim_mat[m1][m2] += 1\n\n        print('building co-rated users matrix succeeded', file=sys.stderr)\n\n        simfactor_count = 0\n        for m1, related_products in itemsim_mat.items():\n            for m2, count in related_products.items():\n                itemsim_mat[m1][m2] = count / math.sqrt(\n                    self.product_popular[m1] * self.product_popular[m2])\n                simfactor_count += 1\n\n        print('calculating product similarity matrix (similarity factor) succeeded', file=sys.stderr)\n        print('Total similarity factor number = %d' % simfactor_count, file=sys.stderr)\n\n    def recommend(self, user):\n        \"\"\"\n        Find K similar products and recommend N products.\n        \"\"\"\n        self.generate_dataset()\n        self.calc_product_sim()\n\n        K = self.n_sim_product\n        N = self.n_rec_product\n\n        if user.id in self.train_set:\n            rank = {}\n            purchased_products = self.train_set[user.id]\n\n            for product, rating in purchased_products.items():\n                for related_product, similarity_factor in sorted(self.product_sim_mat[product].items(),\n                                                                 key=itemgetter(1), reverse=True)[:K]:\n                    if related_product in purchased_products:\n                        continue\n                    rank.setdefault(related_product, 0)\n                    rank[related_product] += similarity_factor * rating\n\n            return sorted(rank.items(), key=itemgetter(1), reverse=True)[:N]\n        else:\n            return sorted(self.product_popular.items(), key=itemgetter(1), reverse=True)[0:N]\n\n\nif __name__ == '__main__':\n    User = get_user_model()\n    user = User.objects.all().first()\n    item_based_cf = ItemBasedCF()\n    recommendations = item_based_cf.recommend(user)\n","sub_path":"src/recommendations/item_cf.py","file_name":"item_cf.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"472151008","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import logout\nfrom django.contrib.auth import authenticate, login\nfrom .forms import *\n \n\ndef loginView(request):\n    if request.user.is_authenticated():\n        return redirect('indexDashboard')\n    else:\n        if 'login_form' in request.POST:\n            login_form = LoginForm(request.POST)\n            \n            if login_form.is_valid():\n                user = authenticate(username=login_form.cleaned_data['username'], password=login_form.cleaned_data['password'])\n                if user is not None:\n                    try:\n                        if user.is_active:\n                            login(request, user)\n                            return redirect('indexDashboard')\n                    except:\n                        login_form = LoginForm()\n                        dataErrorLogin = \"Sorry, your account is not enabled to access the system\"\n                        return render(request, 'loginUser.html', {'login_form': login_form, 'dataErrorLogin': dataErrorLogin})\n                else:\n                    login_form = LoginForm()\n                    dataErrorLogin = \"Username and/or password are not valid\"\n                    return render(request, 'loginUser.html', {'login_form': login_form, 'dataErrorLogin': dataErrorLogin})\n            else:\n                raise ValueError('Error Login: Form Invalid')\n        else:\n            login_form = LoginForm()\n            return render(request, 'loginUser.html', {'login_form': login_form})\n\n\n\ndef logoutView(request):\n    logout(request)\n    return redirect('loginView')\n\n\n 
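# compatibility note: request.user.is_authenticated became a property in\n# Django 1.10; calling it as a method, as above, targets older releases.\n# A version-agnostic check (illustrative sketch):\n#     is_auth = request.user.is_authenticated\n#     if callable(is_auth):\n#         is_auth = is_auth()\n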
","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536619499","text":"# Sample taken from pyStrich GitHub repository\n# https://github.com/mmulqueen/pyStrich\nfrom pystrich.datamatrix import DataMatrixEncoder\n\nencoder = DataMatrixEncoder('This is a DataMatrix.')\nencoder.save('./datamatrix_test.png')\nprint(encoder.get_ascii())\nprint(\"Hello World\")\n\n#User Input If Else Example\nwhile True:\n prompt1=input('What is 10 divided by 2?').lower()\n\n if prompt1 == '5':\n print('Correct, good job!')\n else:\n print('No, the correct answer is 5.')","sub_path":"src/my_script.py","file_name":"my_script.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"355810373","text":"#! /usr/bin/env python3\r\n#Second assignment - convert txt to a dictionary and post too a webpage.\r\n#Using requests and post method.\r\n\r\nimport os\r\nimport requests\r\n\r\nfiles = os.listdir(\"/data/feedback\")\r\nurl = 'http://34.122.55.140/feedback/'\r\n\r\nfor f in files:\r\n feedback = {}\r\n name = \"/data/feedback/\" + f\r\n with open(name) as file:\r\n lines = file.readlines()\r\n feedback[\"title\"] = lines[0]\r\n feedback[\"name\"] = lines[1]\r\n feedback[\"date\"] = lines[2]\r\n feedback[\"feedback\"] = lines[3]\r\n\r\n post = requests.post(url, json=feedback)\r\n if post.status_code == 201:\r\n print('Post ok for file: ' + name)\r\n else:\r\n print('Error in posting file: ' + name)\r\n print(post.status_code)\r\n","sub_path":"C6-W2-post_reviews.py","file_name":"C6-W2-post_reviews.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"353427587","text":"# method 1: dp, p[i] means the length of the longest valid parentheses ending in s[i]\n\n\nclass Solution1(object):\n def longestValidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if len(s) < 2:\n return 0\n dp = [0] * len(s)\n\n if s[0] == \"(\" and s[1] == \")\":\n dp[1] = 2\n\n for i in range(2, len(s)):\n if i - 1 - dp[i - 1] >= 0 and s[i] == \")\" and s[i - 1 - dp[i - 1]] == \"(\":\n dp[i] = 2 + dp[i - 1]\n if i - 2 - dp[i - 1] >= 0: # this if clause must be inside the first if clause!\n dp[i] += dp[i - 2 - dp[i - 1]]\n\n return max(dp)\n\n\n# dp, time/space O(n)\n# method 3: dp[i] means the length of the longest valid parentheses ending in s[i]\n# method 3: dp, simplified from method 1 by using padding\n\n\nclass Solution3(object):\n def longestValidParentheses(self, s):\n s = \"##\" + s # use padding, consumes extra O(n) space \n dp = [0]*len(s)\n \n res = 0\n for i in range(2, len(s)):\n if s[i] == \")\" and s[i-1-dp[i-1]] == \"(\":\n # easy to forget: + dp[i-2-dp[i-1]]\n dp[i] = 2 + dp[i-1] + dp[i-2-dp[i-1]] \n res = max(res, dp[i])\n \n return res\n\n\n# stack, time/space O(n)\n\n\nclass Solution2(object):\n def longestValidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n stack = []\n last_bad_right = -1\n max_len = 0\n for i, c in enumerate(s):\n if c == \"(\":\n stack.append(i)\n else:\n if not stack:\n last_bad_right = i\n else:\n stack.pop()\n if stack:\n max_len = max(max_len, i - stack[-1])\n else:\n max_len = max(max_len, i - last_bad_right)\n return max_len\n\n\n\n\"\"\"\nGiven a string containing just the characters '(' and ')', \nfind the length of the 
longest valid (well-formed) parentheses \nsubstring.\n\nExample 1:\n\nInput: \"(()\"\nOutput: 2\nExplanation: The longest valid parentheses substring is \"()\"\nExample 2:\n\nInput: \")()())\"\nOutput: 4\nExplanation: The longest valid parentheses substring is \"()()\"\n\"\"\"\n","sub_path":"Templates/0032. Longest Valid Parentheses.py","file_name":"0032. Longest Valid Parentheses.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"466952332","text":"# winner prize = 50000000\n# bank rate = 12%\n# apartment now = 1100000000\n# future = 2016\n# past = 1988\n\nprize = 50000000\nAPART = 110000000\nRATE = 0.12\n\nyear = 1\nnow = 2016\npast = 1988\nwhile year < (now - past):\n    prize = prize * (1 + RATE)\n    year += 1\n\nif prize > APART:\n    print(\"Mr. Dong-il is right, by a difference of %.0f won.\" % (prize - APART))\nelse:\n    print(\"Ms. Mi-ran is right, by a difference of %.0f won.\" % (APART - prize))\n\n# This could also be solved with the compound interest formula\n","sub_path":"5-19 택이우승상금.py","file_name":"5-19 택이우승상금.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"161140772","text":"import random\nimport copy\nimport time\nimport math\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nG_APMS = nx.read_edgelist('yeast_AP-MS.txt')\nG = G_APMS.copy()\n\nnk = list(G.degree())\nnk.sort(key = lambda x: x[1], reverse = True)\n\nt0 = time.time()\nT = len(max(nx.connected_component_subgraphs(G_APMS), key=len))\nX,Y = [],[]\nN = len(G)\nfor i in range(1,N):\n    x = i/N\n    G.remove_node(nk[i][0])\n    g = max(nx.connected_component_subgraphs(G), key=len)\n    y = len(g)/T\n    X.append(x)\n    Y.append(y)\n\nt = time.time()\nprint(t-t0)\n\nX_degree = X\nY_degree = Y","sub_path":"Degree_centrality.py","file_name":"Degree_centrality.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"87257313","text":"__author__ = 'entsw team'\n\n__config__ = {\n    'bu_space': 'entsw',\n    'project_space': 'cat3',\n    'includes': {\n        'bu_configs': 'configs/trunk',\n        'bu_tools': 'tools/trunk',\n        'bu_externals': ['libs/trunk'],\n        'cisco_externals': ['libs/chamber/trunk'],\n    }\n} # __config__end","sub_path":"cat3/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106947866","text":"from Crypto.Cipher import AES\nfrom Crypto.Random import random\nfrom hashlib import sha256\n\n# Quick function to append spaces to the end of the JSON string\n# before it is encrypted. Spaces are used because they will not \n# invalidate the JSON once added, so no extra steps to remove \n# padding are needed after the decryption process at the other \n# end. This is a best practice, as this exact system need not be\n# used, and the JSON would still be valid if the spaces were\n# located in other areas.\npad = lambda s: s + (16 - len(s) % 16) * ' '\n\nclass LocalStorageCipher(object):\n    MODE = AES.MODE_CBC\n    \n    # Key MUST be length 16, 24 or 32. 32 is preferable for security.\n    # Key will be the result of a function which returns the user's key.\n    @classmethod\n    def key(cls):\n        raise NotImplementedError\n        return function_which_gets_user_key()\n\n    @classmethod\n    def encrypt(cls, string):\n        \"\"\"Encrypt the given string using AES and the user's key. 
\n        The string *must* be a JSON dump, otherwise the value will\n        change during the encryption process due to padding.\n        \"\"\"\n        # Ensure that the string is a multiple of 16 in length.\n        # This is a requirement of the AES cipher.\n        string = pad(string)\n        \n        # An initialization vector (IV) is used in the AES cipher:\n        # As each block of data is encrypted, one of the parameters in\n        # this encryption is generated during the encryption of the \n        # previous block. There is no block previous to the first block\n        # encrypted, so an IV is used instead. The IV is a random string\n        # of length 16, to be used in both encryption and decryption.\n        # It is transparent: it can be stored in plain, along with the\n        # encrypted file.\n        iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))\n        \n        # The hash is used to ensure that the file hasn't been tampered \n        # with between encryption time and decryption time. \n        # It is 32 characters in length. \n        hash = sha256(string).digest()\n        \n        # Create a cipher object. This must be done every time something\n        # is encrypted, as internal variables are used in the encryption \n        # process, so different results would be produced if the same \n        # cipher object were used without being reinitialised. \n        cipher = AES.new(cls.key(), cls.MODE, iv)\n        \n        # Return the hash and IV concatenated to the ciphertext. The IV is used in\n        # the decryption process, and is generated randomly for each\n        # encryption, thus must be stored alongside the ciphertext.\n        return hash + iv + cipher.encrypt(string)\n\n    @classmethod\n    def decrypt(cls, string):\n        \"\"\"Decrypt the given string using AES and the user's key.\"\"\"\n        # Separate the hash and the initialisation vector from the ciphertext.\n        # Thankfully, they are of fixed length. (See above for details)\n        oldhash, iv, string = string[0:32], string[32:48], string[48:]\n        \n        # Generate a new cipher object. See above. \n        cipher = AES.new(cls.key(), cls.MODE, iv)\n        \n        # Decrypt the string\n        plaintext = cipher.decrypt(string)\n        \n        # Verify the decrypted string is the same as the one that was \n        # initially encrypted, using the hash stored.\n        # If it isn't, raise an exception.\n        newhash = sha256(plaintext).digest() \n        if newhash != oldhash:\n            raise Exception('file has been tampered with')\n        \n        # As the decrypted string has been verified as the one we would like,\n        # we return it. 
\n else: \n return plaintext\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"util/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"200255975","text":"import os\n\nDEBUG = True\n\n# secret key\nSECRET_KEY = os.environ.get('SECRET_KEY', 'the secret key')\n\n# db\nfiledir = os.path.abspath(os.path.dirname(__file__))\ndefault_db = os.path.join(filedir, '..', 'bosphorus.db')\n\n# env vars\nDB_LOC = os.environ.get('DB_LOC', default_db)\nORTHANC_HOST = os.environ.get('ORTHANC_HOST', 'localhost')\nREDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')\n\n# settings\nSQLALCHEMY_DATABASE_URI = 'sqlite:///%s' % DB_LOC\nSQLALCHEMY_ECHO = True\n\nORTHANC_URI = 'http://%s:8042' % ORTHANC_HOST\n\nCELERY_BROKER_URL = 'redis://%s:6379' % REDIS_HOST\nCELERY_RESULT_BACKEND = CELERY_BROKER_URL\n\nCACHE_TYPE = 'simple'\n\n# This allows us to test the forms from WTForm\nWTF_CSRF_ENABLED = True\n","sub_path":"bosphorus/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"514092448","text":"# Andy K\n# Lesson02\n# Generators & Closures\n\n\nimport pandas as pd\nmusic = pd.read_csv(\"featuresdf.csv\")\n\n\n# Generator by artist\n\"\"\"\nWrite a generator to find and print all of your favorite artist’s tracks from the data set\n\"\"\"\n\ndef artist_generator(name = \"\"):\n\n for artist_name_combo in ([artist, name] for artist, name in zip(music.artists, music.name)):\n if artist_name_combo[0] == name:\n yield artist_name_combo\n\nx = artist_generator(\"Ed Sheeran\")\nprint(list(x))\n\n\"\"\"\nResults:\n[['Ed Sheeran', 'Shape of You'],\n ['Ed Sheeran', 'Castle on the Hill'],\n ['Ed Sheeran', 'Galway Girl'],\n ['Ed Sheeran', 'Perfect']]\n\"\"\"\n\n\n# Closure\n\"\"\"\nUsing the same data set, write a closure to capture high energy tracks\n\"\"\"\n\ndef make_high_energy():\n def high_energy(val = 0):\n return sorted([(artist, track, energy) for artist, track, energy in zip(music.artists, music.name, music.energy)\n if energy > val], key = lambda x: x[1], reverse = True)\n return high_energy\n\nx = make_high_energy()\n\nx(0.8)\n\n\n\"\"\"\nResults:\n ('Jason Derulo', 'Swalla (feat. 
Nicki Minaj & Ty Dolla $ign)', 0.8170000000000001),\n ('Martin Jensen', 'Solo Dance', 0.836),\n ('Enrique Iglesias', 'SUBEME LA RADIO', 0.823),\n ('CNCO', 'Reggaetón Lento (Bailemos)', 0.838),\n ('Maggie Lindemann', 'Pretty Girl - Cheat Codes X CADE Remix', 0.868),\n ('Danny Ocean', 'Me Rehúso', 0.804),\n ('Steve Aoki', 'Just Hold On', 0.932),\n ('The Weeknd', 'I Feel It Coming', 0.813),\n ('Ed Sheeran', 'Galway Girl', 0.8759999999999999),\n ('Wisin', 'Escápate Conmigo', 0.8640000000000001),\n ('The Chainsmokers', \"Don't Let Me Down\", 0.8590000000000001),\n ('Luis Fonsi', 'Despacito - Remix', 0.815),\n ('Post Malone', 'Congratulations', 0.812),\n ('Katy Perry', 'Chained To The Rhythm', 0.8009999999999999),\n ('Ed Sheeran', 'Castle on the Hill', 0.8340000000000001),\n ('Starley', 'Call On Me - Ryan Riback Extended Remix', 0.843),\n ('The Vamps', 'All Night', 0.809),\n ('Bruno Mars', '24K Magic', 0.8029999999999999)]\n \"\"\"","sub_path":"Student/AndyKwon/Lesson02/generators_closures.py","file_name":"generators_closures.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14332306","text":"import re\nimport logging\n\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom tenant_extras.utils import TenantLanguage\n\nfrom bluebottle.clients import properties\nfrom bluebottle.clients.utils import LocalTenant\nfrom .tasks import queue_analytics_record\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _multi_getattr(obj, attr, **kw):\n attributes = attr.split(\".\")\n for i in attributes:\n try:\n obj = getattr(obj, i)\n if callable(obj):\n obj = obj()\n except AttributeError:\n if 'default' in kw:\n return kw['default']\n else:\n raise\n return obj\n\n\ndef process(instance, created):\n instance_name = instance.__class__.__name__\n\n # _merge_attrs combines the base and instance tag or field values with\n # the class values. It also handles translateable attrs.\n def _merge_attrs(data, attrs):\n try:\n items = attrs.iteritems()\n except AttributeError:\n logger.exception('analytics_merge_attrs')\n return\n\n for label, attr in items:\n options = {}\n # If a dict is passed then the key is the dotted\n # property string and the value is options.\n try:\n new_attr = attr.keys()[0]\n options = attr[new_attr]\n attr = new_attr\n except AttributeError:\n # TODO: Logging\n pass\n\n value = _multi_getattr(instance, attr, default='')\n\n if options.get('translate', False):\n with LocalTenant():\n # Translate using the default tenant language\n with TenantLanguage(getattr(properties, 'LANGUAGE_CODE', 'en')):\n # If attr is a string then try to translate\n # Note: tag values should always be strings.\n value = _(value)\n\n data[label] = value\n\n def snakecase(name):\n return re.sub(\"([A-Z])\", \"_\\\\1\", name).lower().lstrip(\"_\")\n\n if not getattr(settings, 'ANALYTICS_ENABLED', False):\n logger.debug('analytics_disabled')\n return\n\n # Return early if instance is a migration.\n if instance_name == 'Migration':\n return\n\n # Check if the instance has an _original_status and whether the status\n # has changed. If not then skip recording this save event. 
This can be\n # skipped if the record has been created as we will always record metrics\n # for a newly created record.\n try:\n if not created and instance._original_status == instance.status:\n return\n except AttributeError:\n pass\n\n # Return early if the instance doesn't have an Analytics class\n # or there is no tenant schema set.\n try:\n analytics_cls = instance.Analytics\n tenant_name = connection.schema_name\n except AttributeError:\n return\n\n analytics = analytics_cls()\n\n # Check if the analytics class for the instance has a skip\n # method and return if skip return true, otherwise continue\n try:\n if analytics.skip(instance, created):\n return\n except AttributeError:\n pass\n\n try:\n timestamp = analytics.timestamp(instance, created)\n except AttributeError:\n timestamp = timezone.now()\n\n # Check for instance specific tags\n try:\n tags = analytics.extra_tags(instance, created)\n except AttributeError:\n tags = {}\n\n tags['type'] = getattr(analytics, 'type', snakecase(instance_name))\n tags['tenant'] = tenant_name\n\n # Process tags\n _merge_attrs(tags, analytics.tags)\n\n # Check for instance specific fields\n try:\n fields = analytics.extra_fields(instance, created)\n except AttributeError:\n fields = {}\n\n # Process fields\n _merge_attrs(fields, analytics.fields)\n\n # If enabled, use celery to queue task\n if getattr(properties, 'CELERY_RESULT_BACKEND', None):\n queue_analytics_record.delay(timestamp=timestamp, tags=tags, fields=fields)\n else:\n queue_analytics_record(timestamp=timestamp, tags=tags, fields=fields)\n","sub_path":"bluebottle/analytics/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586388848","text":"'''\nFile: \t\t\tfetchretro.py \nAuthor: \t\tDBurks\n\nDescription:\tThis script will allow the user\n \t\t\t\tto download retrosheet event zip files\n \t\t\t\tto their local system. You can download\n \t\t\t\tany year between 1950 and 2014. 
Default is to\n \t\t\t\tdownload all years between 1950 and 2014.\n \t\t\t\tThe files will be downloaded to directory \n \t\t\t\tthe script was called from.\n\nUsage:\t\t\tpython fetchretro.py -f -t \n\nNote: \t\t\tThis script was modified from the Baseball\n \t\t\t\tHacks Perl script by Joseph Adler.\t\t\n'''\nimport requests\nimport sys\nimport getopt\nimport os\nimport zipfile\nfrom io import BytesIO\n\nretro_base_url = \"http://www.retrosheet.org/\"\nevents_url = retro_base_url + \"events/\"\n\nversion = '1.0'\n\nglobal verbose\nglobal from_year\nglobal to_year\nglobal download_dir\n\n\n# set the default values\nverbose = False\nfrom_year = 1950\nto_year = 2014\ndownload_dir = '.'\n\ndef usage():\n print('python fetchretro.py [-f from_year][-t to_year][-d download_dir][-v][-b]')\n print('')\n print('options: ')\n print('\\t-f from_year, --from from_year : sets the year to start from')\n print('\\t-t to_year, --to to_year : sets the year to stop at')\n print('\\t-d download_dir, --dir download_dir : sets the directory to put files in')\n print('\\t-v, --version : prints out the version of this script')\n print('\\t-b, --verbose : prints out details during execution')\n print('\\t-h, --help : prints out this usage message')\n\ndef print_version_message(version):\n print('fetchretro.py Version: ', version)\n\ndef print_working_message(f, t):\n print(\"Downloading Regular Season Retrosheet Event Files\")\n print(\"--------------------------------------------------\")\n print(\"From Year: %d\\nTo Year: %d\\nDir: %s\" % (f, t, os.getcwd()))\n print(\"--------------------------------------------------\")\n\ndef print_retrieval_message(filename, download_url):\n\tprint(\"Downloading and extracting \", filename, \" from \", download_url)\n\n# parse the options\ntry:\n options, remainder = getopt.getopt(sys.argv[1:], 'f:t:vbd:h',['from=',\n\t\t\t\t\t\t\t\t\t\t\t\t 'to=',\n\t\t\t\t\t\t\t\t\t\t\t\t 'version',\n\t\t\t\t\t\t\t\t\t\t\t\t 'verbose',\n\t\t\t\t\t\t\t\t\t\t\t\t 'dir=',\n\t\t\t\t\t\t\t\t\t\t\t\t 'help'])\nexcept getopt.GetoptError as e:\n print(\"Can't understand one of the options given.\")\n usage()\n\ndef process_options(options):\n # function to overwrite options\n # if necessary\n global verbose\n global from_year\n global to_year\n global download_dir\n\n for opt, arg in options:\n if opt in ('-f', '--from'):\n from_year = int(arg)\n elif opt in ('-t', '--to'):\n to_year = int(arg)\n elif opt in ('-v', '--version'):\n print_version_message(version)\n sys.exit(0)\n elif opt in ('-b', '--verbose'):\n verbose = True\n elif opt in ('-d', '--dir'):\n download_dir = arg\n elif opt in ('-h', '--help'):\n usage()\n sys.exit(0)\n\n\n# process the options\nprocess_options(options)\n\n# change the download dir if necessary\nos.chdir(download_dir)\n\nprint_working_message(from_year, to_year)\n\nfor year in range(from_year, to_year+1):\n\n filename = str(year) + 'eve.zip'\n download_url = events_url + filename\n\n if verbose:\n print_retrieval_message(filename, download_url)\n\n # get the data\n r = requests.get(download_url)\n\n # extract and write the data\n if r.ok:\n z = zipfile.ZipFile(BytesIO(r.content))\n z.extractall()\n\n\n","sub_path":"fetchretro.py","file_name":"fetchretro.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615549370","text":"import sys\nimport qdarkstyle\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QHeaderView\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui 
import *\nfrom main_window import Ui_MainWindow\nimport csv\nimport datetime\nimport os\nfrom pdfrw import PdfReader\nfrom models import *\nimport re\n\nclass App(QMainWindow, Ui_MainWindow):\n def __init__(self):\n QMainWindow.__init__(self)\n Ui_MainWindow.__init__(self)\n self.setupUi(self)\n self.title = 'ECE Instrument Room Inventory Tool'\n self.label_puid.setText(\"XXXXX-XXXXX\")\n self.label_puid.setMinimumWidth(500)\n self.initTables()\n self.text_search.textChanged.connect(self.searchTextChanged)\n self.initUI()\n self.initText()\n self.button_inductoradd.clicked.connect(self.buttonClicked)\n self.button_resistoradd.clicked.connect(self.buttonClicked)\n self.button_capadd.clicked.connect(self.buttonClicked)\n self.button_icadd.clicked.connect(self.buttonClicked)\n\n def initText(self):\n self.text_icquantity.setText(\"0\")\n self.text_icname.setText(\"\")\n\n self.text_resistorvalue.setText(\"\")\n self.text_resistorquantity.setText(\"0\")\n\n self.text_inductorquantity.setText(\"0\")\n self.text_inductorvalue.setText(\"\")\n\n self.text_capquantity.setText(\"0\")\n self.text_capvalue.setText(\"\")\n\n def getPrefix(self):\n if self.sender() == self.button_inductoradd:\n preText = re.search(r'\\((.)\\)', self.combo_inductor.currentText())\n\n if self.sender() == self.button_capadd:\n preText = re.search(r'\\((.)\\)', self.combo_cap.currentText())\n\n if self.sender() == self.button_resistoradd:\n preText = re.search(r'\\((.)\\)', self.combo_resistor.currentText())\n\n if preText == None:\n return \"\"\n return preText.group(1)\n\n def buttonClicked(self):\n if self.sender() == self.button_inductoradd:\n valueText = self.text_inductorvalue.toPlainText() + \" \" + self.getPrefix() + \"H\"\n self.model_quick.insertCompRow(\"Inductor\", valueText, self.text_inductorquantity.toPlainText())\n self.text_inductorquantity.setText(\"0\")\n self.text_inductorvalue.setText(\"\")\n\n if self.sender() == self.button_capadd:\n valueText = self.text_capvalue.toPlainText() + \" \" + self.getPrefix() + \"F\"\n self.model_quick.insertCompRow(\"Capacitor\", valueText, self.text_capquantity.toPlainText())\n self.text_capquantity.setText(\"0\")\n self.text_capvalue.setText(\"\")\n\n if self.sender() == self.button_resistoradd:\n valueText = self.text_resistorvalue.toPlainText() + \" \" + self.getPrefix() + \"Ω\"\n self.model_quick.insertCompRow(\"Resistor\", valueText, self.text_resistorquantity.toPlainText())\n self.text_resistorvalue.setText(\"\")\n self.text_resistorquantity.setText(\"0\")\n\n if self.sender() == self.button_icadd:\n self.model_quick.insertCompRow(\"IC\", self.text_icname.toPlainText(), self.text_icquantity.toPlainText())\n self.text_icname.setText(\"\")\n self.text_icquantity.setText(\"0\")\n\n def searchTextChanged(self):\n print(self.text_search.toPlainText())\n self.compList.searchText(self.text_search.toPlainText())\n\n def compListChanged(self, current, previous):\n if (current.column() == 2):\n self.compList.launchDatasheet(current.row(), current.column())\n\n\n def initTables(self):\n current_data = []\n with open(\"ICs_7400d.csv\", 'r') as f:\n reader = csv.reader(f, delimiter=',')\n header_row = next(reader)\n for line in reader:\n current_data.append(line)\n self.compList = CurrentModel(current_data, header_row)\n #self.table_current.horizontalHeader().setStretchLastSection(True)\n self.table_current.verticalHeader().setVisible(False)\n self.table_current.setModel(self.compList)\n self.compListSelection = QItemSelectionModel()\n 
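# QItemSelectionModel can also be constructed with the model directly;\n        # here it is created empty and bound via setModel() on the next line.\n        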
self.compListSelection.setModel(self.compList)\n        self.compListSelection.currentChanged.connect(self.compListChanged)\n        self.table_current.setSelectionModel(self.compListSelection)\n        self.table_current.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)\n        self.table_current.setColumnWidth(0, 120)\n        self.table_current.setColumnWidth(2, 160)\n        #self.table_current.setRowHeight(0, self.table_current.fontMetrics().height() * 3)\n\n        now = datetime.datetime.now()\n        current_data = [[\" \", \"{}-{}-{}\".format(now.year, now.month, now.day), \"0\"]]\n        header_row = ['Item', 'Date Issued', 'Quantity']\n        self.model_borrowed = BorrowedModel(current_data, header_row)\n        self.table_borrowed.horizontalHeader().setStretchLastSection(True)\n        self.table_borrowed.verticalHeader().setVisible(False)\n        self.table_borrowed.setModel(self.model_borrowed)\n        self.table_borrowed.setColumnWidth(0, 600)\n\n        current_data = [[\" \", \" \", \"{}-{}-{}\".format(now.year, now.month, now.day), \"0\"]]\n        header_row = ['Item', 'Value / Part No.', 'Date Issued', 'Quantity']\n        self.model_quick = QuickaddModel(current_data, header_row)\n        self.table_quick.horizontalHeader().setStretchLastSection(True)\n        self.table_quick.verticalHeader().setVisible(False)\n        self.table_quick.setModel(self.model_quick)\n        self.table_quick.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n        self.table_current.setColumnWidth(0, 60)\n\n        now = datetime.datetime.now()\n        current_data = [[\" \", \"{}-{}-{}\".format(now.year, now.month, now.day), \"0\"]]\n        header_row = ['Item', 'Date Issued', 'Quantity']\n        self.model_requested = BorrowedModel(current_data, header_row)\n        self.table_requested.horizontalHeader().setStretchLastSection(True)\n        self.table_requested.verticalHeader().setVisible(False)\n        self.table_requested.setModel(self.model_requested)\n        self.table_requested.setColumnWidth(0, 600)\n\n    def initUI(self):\n        self.setWindowTitle(self.title)\n        # self.setGeometry(self.left, self.top, self.width, self.height)\n        self.show()\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = App()\n    app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n    ex.show()\n    sys.exit(app.exec_())","sub_path":"SwipeMain.py","file_name":"SwipeMain.py","file_ext":"py","file_size_in_byte":6433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374901659","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 31 15:56:08 2020\n\n@author: SParkhonyuk\n\"\"\"\n# this file will require the following package installation:\n# conda install -c conda-forge clickhouse-driver\nimport os\nfrom dotenv import load_dotenv, find_dotenv\nfrom clickhouse_driver import Client\nimport pandas as pd\nimport logging\nimport logging.config\nfrom datetime import datetime, timedelta\nimport time\n\n# in-house\nimport utilities_timers\n\n# find .env automagically by walking up directories until it's found\ndotenv_path = find_dotenv()\n# load up the entries as environment variables\nload_dotenv(dotenv_path)\nusr = os.environ.get(\"CLICKHOUSE_USER\")\npwd = os.environ.get(\"CLICKHOUSE_PWD\")\napiname = \"Clickhouse::\"\n# -----------------------------------------------------------------------------\ndef connect(instance_name=\"localhost\"):\n    \"\"\"\n    \n\n    Parameters\n    ----------\n    instance_name : string\n        ip address of the Clickhouse instance (e.g. 192.168.1.128); localhost by default.\n        Username and password taken from .env file\n\n    Returns\n    -------\n    Connector. 
Client object of clickhouse_driver.client module\n    ping (list of tuples): response from the server\n\n    \"\"\"\n    logger = logging.getLogger(apiname + connect.__name__)\n\n    logger.info(f\"connection to {instance_name} is in progress ...\")\n    client = Client(instance_name, user=usr, password=pwd)\n\n    try:\n        ping = client.execute(\"SELECT 1\")\n    # except InterfaceError: # not tested.\n    # print(\"InterfaceError error is caught.\")\n    except Exception as exc:\n        # should you see this kind of error:\n        # InterfaceError: (pyodbc.InterfaceError) ('IM002', '[IM002] [Microsoft]\n        # [ODBC Driver Manager] Data source name not found and\n        # no default driver specified (0) (SQLDriverConnect)')\n        # (Background on this error at: http://sqlalche.me/e/rvf5)\n        #\n        # then you need to download and install:\n        # Microsoft ODBC Driver 17 for SQL Server\n        #\n        logger.error(f\"generated an exception: {exc}\")\n        client = None\n        ping = str(exc)\n    else:\n        logger.info(\"connected to database.\")\n\n    return client, ping\n\n\n# -----------------------------------------------------------------------------\ndef close_connection(client):\n    \"\"\"\n    \n    Parameters\n    ----------\n    client : clickhouse connector. Client object of clickhouse_driver.client module\n    Check if connection exists. If yes, closes it.\n\n    Returns\n    -------\n    None.\n\n    \"\"\"\n\n    if len(client.execute(\"SELECT 1\")) > 0:\n        client.disconnect()\n\n\n# -----------------------------------------------------------------------------\ndef get_databases(client):\n    \"\"\"\n    Parameters\n    ----------\n    client : clickhouse connector. Client object of clickhouse_driver.client module\n\n    Returns\n    -------\n    df: Pandas dataframe that contains names of the databases.\n    \"\"\"\n    result = client.execute(\"SHOW DATABASES\")\n    df = pd.DataFrame(result)\n    return df\n\n\n# -----------------------------------------------------------------------------\ndef get_tables_in_databases(client, database=\"default\"):\n    \"\"\"\n    \n\n    Parameters\n    ----------\n    client : clickhouse connector. Client object of clickhouse_driver.client module\n    database: (str). Name of database to connect. Default name is 'default'\n\n    Returns\n    -------\n    df: Pandas dataframe that contains names of tables that exist in database.\n\n    \"\"\"\n    result = client.execute(f\"SHOW TABLES FROM {database}\")\n    df = pd.DataFrame(result)\n    return df\n\n\n# -----------------------------------------------------------------------------\ndef get_column_names_in_table(client, table_name):\n    \"\"\"Get names of columns of given table stored on Clickhouse server.\n\n    Args:\n        client : clickhouse connector. Client object of clickhouse_driver.client module\n        \n        table_name (string) : table name in SQL database. If it does not exist, this will crash.\n\n    Returns:\n        col_names (list) : list of column names\n        col_dtypes (list) : list of column data types\n    \"\"\"\n\n    # to get the columns. Replace 'name' by * to get 34 parameters of the table.\n    # https://stackoverflow.com/questions/1054984/how-can-i-get-column-names-from-a-table-in-sql-server\n    result = client.execute(f\"DESCRIBE TABLE {table_name}\")\n    df = pd.DataFrame(result)\n    col_names = df[0].to_list()\n    col_dtypes = df[1].to_list()\n\n    return col_names, col_dtypes\n\n\n# -----------------------------------------------------------------------------\ndef get_SQL_table(connection, table_name):\n    \"\"\"Get data from given table stored on SQL server.\n    Warning: can take an extreme amount of memory if you query the whole minutes table!\n\n    Args:\n        connection : clickhouse connector. 
Client object of clickhouse_driver.client module\n        table_name (string) : table name in the Clickhouse database. If the table does not exist, this will crash.\n\n    Returns:\n        df (Pandas.DataFrame) : table with data\n    \"\"\"\n\n    start = time.time()\n    logger = logging.getLogger(apiname + get_SQL_table.__name__)\n    logger.info(f\"read table {table_name} from Clickhouse\")\n\n    result, columns = connection.execute(\n        f\"SELECT * FROM {table_name}\", with_column_types=True\n    )\n    df = pd.DataFrame(result, columns=[tuple[0] for tuple in columns])\n\n    timer_string = utilities_timers.format_timer_string(time.time() - start)\n    logger.info(timer_string)\n\n    return df\n\n\n# -------------------------------\n##-------------------------------------------------------------------------------------------------\ndef query_data_by_time(\n    channels_list,\n    startTime=None,\n    endTime=None,\n    days_span=90,\n    server_ip=\"localhost\",\n    table_name=\"minutes\",\n    instrument_type=\"Etf\",\n    data_freq=\"min\",\n):\n    \"\"\"\n    Request data from the SQL database in the time range [startTime, endTime].\n\n    Args:\n        channels_list (list) : list of channels to be queried from the database\n        startTime (datetime) : data will be queried AFTER this time moment\n            If not set: 1 year period from endTime.\n        endTime (datetime) : data will be queried BEFORE this time moment\n            If not set: datetime.now() is used.\n        days_span (int) : number of days before the endTime, if startTime is not set.\n        server_ip : string\n            IP address of the Clickhouse instance (e.g. 192.168.1.128). localhost by default.\n            Username and password are taken from the .env file\n        table_name (string) : table name in the SQL database. 'minutes' by default\n\n    Returns\n        (pd.DataFrame) : Table with the requested channels in the given time range.\n    \"\"\"\n    start = time.time()\n    logger = logging.getLogger(apiname + query_data_by_time.__name__)\n    logger.info(\"data query in progress ...\")\n\n    if channels_list is None:\n        channels_list = [\"*\"]\n    if len(channels_list) == 0:\n        channels_list = [\"*\"]\n    if endTime is None:\n        endTime = datetime.now()\n\n    if startTime is None:\n        startTime = endTime - timedelta(days=days_span)\n    # Check for a proper instrument type\n    if instrument_type not in [\"Etf\", \"Bond\", \"Stock\"]:\n        logger.error(\"Unsupported instrument type. Only Etf, Bond, Stock are supported\")\n        logger.error(\"Does not query anything\")\n        return None\n    if data_freq not in [\"min\", \"day\", \"week\"]:\n        logger.error(\"Unsupported data frequency. 
Only min, day, week are supported\")\n logger.error(\"Does not querry anything\")\n return None\n\n con, _ = connect(server_ip)\n channel_string = (\" ,\").join(channels_list)\n # converting values to strings to get clickhouse-compatible time format\n startTime = startTime.strftime(\"%Y-%m-%d %H:%M:%S\")\n endTime = endTime.strftime(\"%Y-%m-%d %H:%M:%S\")\n if data_freq == \"min\":\n\n msg1 = f\"select * from {table_name} \"\n msg2 = f\"where time BETWEEN '{startTime}' AND '{endTime}' \"\n msg3 = f\"AND type='{instrument_type}' AND name={channel_string}\"\n query = msg1 + msg2 + msg3\n elif data_freq == \"day\":\n\n logger.info(f\"startTime is {startTime}\")\n logger.info(f\"endTIme is {endTime}\")\n msg1 = f\"select uniq(time), count(), ticker, type,currency,name, day, argMin(o, time) as o,max(h) as h, min(l) as l, argMax(c, time) as c, sum(v) as v \"\n msg2 = f\"from minutes \"\n\n msg3 = f\"where time BETWEEN '{startTime}' AND '{endTime}' AND type='{instrument_type}' \"\n msg4 = \"GROUP BY day, ticker, type, currency, name ORDER BY day desc\"\n query = msg1 + msg2 + msg3 + msg4\n elif data_freq == \"week\":\n\n msg1 = \"SELECT uniq(time), count(),ticker,currency, name, toMonday(day) as monday, argMin(o, time) as o, max(h) as h, min(l) as l, argMax(c, time) as c, sum(v) as v \"\n msg2 = f\"from minutes \"\n msg3 = f\"where time BETWEEN '{startTime}' AND '{endTime}' AND type='{instrument_type}' \"\n msg4 = \"GROUP BY monday, ticker, type, currency, name ORDER BY monday desc\"\n query = msg1 + msg2 + msg3 + msg4\n\n logger.info(f\"query string: {query}\")\n\n result, columns = con.execute(query, with_column_types=True)\n df = pd.DataFrame(result, columns=[tuple[0] for tuple in columns])\n\n close_connection(con)\n\n timer_string = utilities_timers.format_timer_string(time.time() - start)\n logger.info(timer_string)\n logger.info(f\"data query complete. Dataframe has shape : {df.shape}.\")\n logger.info(\"--------------------------\")\n\n return df\n\n\n##-------------------------------------------------------------------------------------------------\ndef append_df_to_SQL_table(\n df=None, table_name=\"minutes\", server_ip=\"localhost\", is_tmp_table_to_delete=True,\n):\n \"\"\"write dataframe to SQL server. Only values with unique columns will be append.\n\n Args:\n df (Pandas.DataFrame) : data to be written to SQL\n table_name (string) : table name in Clickhouse database.\"minutes\" by default.\n server_ip (string) : Server IP. 'localhost' by default\n is_tmp_table_to_delete (bool) :\n True (default) to delete temporary table,\n which was used to keep new data when merging with the existing table.\n Set False if you want to keep tmp table for any reason (e.g. merge this table to multiple tables).\n\n Returns:\n nothing\n\n Raises:\n nothing\n\n \"\"\"\n start = time.time()\n\n logger = logging.getLogger(apiname + append_df_to_SQL_table.__name__)\n\n ##---------------------------------------------------------------------------------------------\n n_rows, n_cols = df.shape\n\n logger.info(f\"write (row x col) : ({n_rows} x {n_cols})\")\n expected_min = 1 * n_rows * n_cols / 130000 / 34 # experimental formula for ru0138\n logger.info(f\"expected time ~{int(expected_min)} minutes\")\n\n if n_rows == 0:\n logger.info(f\"DataFrame has 0 rows. Target table will not be modified. Exit.\")\n return\n\n logger.info(f\"list of columns in df: {df.columns}.\")\n\n if table_name[0].isdigit():\n table_name = \"_\" + table_name\n logger.warning(f\"table name started from digit. 
Rename as {table_name}.\")\n\n con, ping = connect(server_ip)\n logger.info(f\"data uploading to a temp table ...\")\n # dropping table tmp\n con.execute(\"DROP TABLE IF EXISTS tmp\")\n # creating new table\n con.execute(\n \"CREATE TABLE tmp (\"\n \"figi String, \"\n \"interval String, o Float64, \"\n \"c Float64, h Float64, \"\n \"l Float64, v Int64, \"\n \"time DateTime, ticker String, \"\n \"isin String, min_price_increment Float64,\"\n \"lot Int64, currency String,\"\n \"name String, type String) ENGINE = Log \"\n )\n con.execute(\"INSERT INTO tmp VALUES\", [tuple(x) for x in df.values])\n\n inserted_rows_count = con.execute(\"SELECT count(*) FROM tmp\")[0][0]\n logger.info(\n f\"Inserted {inserted_rows_count} rows to temporary table. Moving to 'minutes' table\"\n )\n initial_rows_count = con.execute(\"SELECT count(*) FROM minutes\")[0][0]\n logger.info(f\"initially minutes table has {initial_rows_count} rows\")\n con.execute(\n \"INSERT INTO minutes \"\n \"SELECT DISTINCT \"\n \"toDate(time) AS day,\"\n \"figi, \"\n \"interval, o, \"\n \"c , h, \"\n \"l , v, \"\n \"time, ticker, \"\n \"isin, min_price_increment,\"\n \"lot, currency,\"\n \"name, type FROM tmp WHERE (ticker, time) NOT IN (SELECT (ticker, time) FROM minutes)\"\n )\n\n logger.info(f\"merge complete\")\n new_rows_count = con.execute(\"SELECT count(*) FROM minutes\")[0][0]\n inserted_row_count = new_rows_count - initial_rows_count\n logger.info(f\"Inserted {inserted_row_count} unique rows to minutes table.\")\n logger.info(f\"now minutes table has {new_rows_count} rows\")\n if is_tmp_table_to_delete:\n con.execute(\"DROP TABLE IF EXISTS tmp\")\n\n timer_string = utilities_timers.format_timer_string(time.time() - start)\n logger.info(timer_string)\n\n logger.info(\"new data written to table 'minutes'\")\n\n\n##-----------------------------------------------------------------------------\n##-------------------------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\n logging.config.fileConfig(fname=\"logger.conf\", disable_existing_loggers=False)\n logger = logging.getLogger(__name__)\n\n logger.info(\"MySQLHelper main\")\n\n # testing the functions.\n # 1. 
Connection to database:\n dbname = \"192.168.1.128\"\n testcon, _ = connect(dbname)\n logger.info(f\"test connect: {testcon}\")\n logger.info(\n f\"expected answer: \"\n )\n close_connection(testcon)\n\n # 2.testing the data querying.\n # should return df with shape (3848, 17)\n server_adress = \"192.168.1.128\"\n db_name = \"default\"\n table_name = \"minutes\"\n con, ping = connect(server_adress)\n available_cols, available_cols_dtypes = get_column_names_in_table(\n client=con, table_name=table_name\n )\n cols = [\n \"day\",\n \"figi\",\n \"interval\",\n \"o\",\n \"c\",\n \"h\",\n \"l\",\n \"v\",\n \"time\",\n \"ticker\",\n \"isin\",\n \"min_price_increment\",\n \"lot\",\n \"currency\",\n \"name\",\n \"type\",\n ]\n\n startTime = datetime(2020, 1, 10, 11, 19, 9)\n frequency = [\"min\", \"day\", \"week\"]\n for freq in frequency:\n logger.info(f\"querrying with {freq} frequency\")\n df = query_data_by_time(\n channels_list=[], days_span=7, server_ip=\"192.168.1.128\", data_freq=freq\n )\n logger.info(f\"query returns table (rows, columns)={df.shape}\")\n\n close_connection(con)\n\n ##---------------------------------------------------------------------------------------------\n is_test_read_SQL_table = False\n if is_test_read_SQL_table:\n server_adress = \"192.168.1.128\"\n db_name = \"default\"\n table_name = \"minutes\"\n con, ping = connect(server_adress)\n logger.info(\"Testing function get_SQL_table\")\n df = get_SQL_table(connection=con, table_name=table_name)\n close_connection(con)\n\n ##---------------------------------------------------------------------------------------------\n is_test_append_table = False\n if is_test_append_table:\n server_adress = \"192.168.1.128\"\n db_name = \"default\"\n table_name = \"minutes\"\n con, ping = connect(server_adress)\n df.drop(\"day\", axis=1, inplace=True)\n logger.info(\"Testing function append_df_to_SQL_table\")\n append_df_to_SQL_table(\n df=df,\n table_name=table_name,\n server_ip=server_adress,\n is_tmp_table_to_delete=True,\n )\n\n close_connection(con)\n","sub_path":"src/data/ClickhouseHelper.py","file_name":"ClickhouseHelper.py","file_ext":"py","file_size_in_byte":15727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487976981","text":"import wk\n\n\nclass NodeMetaClass(type):\n def __new__(cls, name, bases, attrs):\n debug=False\n if name == None:\n debug=True\n dict_attrs=['_attrs','environment']\n for name in dict_attrs:\n if name in attrs.keys():\n _attrs = attrs[name]\n tmp_dict = {}\n assert isinstance(_attrs, dict)\n for base in bases:\n if hasattr(base, name):\n base_attrs = getattr(base, name)\n tmp_dict.update(**base_attrs)\n tmp_dict.update(**_attrs)\n attrs[name] = tmp_dict\n return type.__new__(cls, name, bases, attrs)\n\n\nclass Node(metaclass=NodeMetaClass):\n tag = 'Node'\n self_closing = False\n _attrs = {}\n _children = []\n environment={}\n def __init__(self, **kwargs):\n def preprocess(kwargs):\n cls_attr = '_class'\n if cls_attr in kwargs.keys():\n kwargs['class'] = kwargs.pop(cls_attr)\n return kwargs\n\n self.attrs = {}\n self.attrs.update(**self._attrs)\n self.attrs = preprocess(self.attrs)\n self.children = self._children\n kwargs = preprocess(kwargs)\n children_name = 'children'\n if children_name in kwargs.keys():\n self.children = kwargs.pop(children_name)\n self.attrs.update(**kwargs)\n\n def to_string(self, indent=0, indent_step=2):\n tag_and_attrs_string = ' '.join([self.tag] + ['%s=\"%s\"' % (name, value) for name, value in 
self.attrs.items()])\n if self.self_closing:\n return '<{tag_and_attrs}>'.format(indent=' ' * indent, tag_and_attrs=tag_and_attrs_string)\n else:\n if len(self.children) == 1:\n \"Handle such case that the child is text or Var with type of text\"\n child = self.children[0]\n if isinstance(child, (str,)):\n children_string = str(child)\n return '<{tag_and_attrs}>{children_string}'.format(tag_and_attrs=tag_and_attrs_string,\n children_string=children_string,\n tag=self.tag)\n elif isinstance(child, Var) and child.attrs['type'] == 'text':\n children_string = child.to_string(indent=indent, indent_step=indent_step)\n return '<{tag_and_attrs}>{children_string}'.format(tag_and_attrs=tag_and_attrs_string,\n children_string=children_string,\n tag=self.tag)\n\n children_string = '\\n{indent}'.format(indent=' ' * (indent + indent_step)).join([child.to_string(\n indent=indent + indent_step, indent_step=indent_step) if isinstance(child, (Node,)) else ' ' * (\n indent + indent_step) + str(child) + '\\n' for child in self.children])\n if children_string:\n return '<{tag_and_attrs}>\\n{next_indent}' \\\n '{children_string}' \\\n '\\n{indent}'.format(next_indent=' ' * (indent + indent_step), indent=' ' * indent,\n tag_and_attrs=tag_and_attrs_string, children_string=children_string,\n tag=self.tag)\n else:\n return '<{tag_and_attrs}>'.format(tag_and_attrs=tag_and_attrs_string, tag=self.tag)\n def to_structure(self,indent=0,indent_step=2):\n tag_and_attrs_string = ' '.join([self.tag] + ['%s=\"%s\"' % (name, value) for name, value in self.attrs.items()])\n if self.self_closing:\n return '<{tag_and_attrs}>'.format(indent=' ' * indent, tag_and_attrs=tag_and_attrs_string)\n else:\n if len(self.children) == 1:\n \"Handle such case that the child is text or Var with type of text\"\n child = self.children[0]\n if isinstance(child, (str,)):\n children_string = str(child)\n return '<{tag_and_attrs}>{children_string}'.format(tag_and_attrs=tag_and_attrs_string,\n children_string=children_string,\n tag=self.tag)\n elif isinstance(child, Var) and child.attrs['type'] == 'text':\n children_string = child.to_structure(indent=indent, indent_step=indent_step)\n return '<{tag_and_attrs}>{children_string}'.format(tag_and_attrs=tag_and_attrs_string,\n children_string=children_string,\n tag=self.tag)\n\n children_string = '\\n{indent}'.format(indent=' ' * (indent + indent_step)).join([child.to_structure(\n indent=indent + indent_step, indent_step=indent_step) if isinstance(child, (Node,)) else ' ' * (\n indent + indent_step) + str(child) + '\\n' for child in self.children])\n if children_string:\n return '<{tag_and_attrs}>\\n{next_indent}' \\\n '{children_string}' \\\n '\\n{indent}'.format(next_indent=' ' * (indent + indent_step), indent=' ' * indent,\n tag_and_attrs=tag_and_attrs_string, children_string=children_string,\n tag=self.tag)\n else:\n return '<{tag_and_attrs}>'.format(tag_and_attrs=tag_and_attrs_string, tag=self.tag)\n def __str__(self):\n return self.to_string()\n # return self.to_structure()\n def __repr__(self):\n return self.to_structure()\n def __len__(self):\n return len(self.children)\n\n def __call__(self, children: list = []):\n if not isinstance(children, (list,)):\n assert isinstance(children, (Node, str, Var))\n children = [children]\n self.children = children\n return self\n\n def to_file(self, filepath):\n with open(filepath, 'w', encoding='utf-8') as f:\n f.write(self.render())\n\n def compile(self, **kwargs):\n ''' replace var with specific object , if the object is a list . 
the insert every element in this list into self.children'''\n if not len(self.children):\n return self\n index=0\n for i in range(len(self.children)):\n child=self.children[index]\n if isinstance(child, str):\n index+=1\n continue\n if isinstance(child, Var):\n name = child.attrs['name']\n if name in kwargs.keys():\n self.children.pop(index)\n new_nodes=kwargs[name]\n if not isinstance(new_nodes,(tuple,list)):\n new_nodes=[new_nodes]\n for new_node in new_nodes:\n self.children.insert(index,new_node)\n index+=1\n else:\n self.children[index] = child.compile(**kwargs)\n index+=1\n else:\n self.children[index] = child.compile(**kwargs)\n index+=1\n return self\n\n def render(self, **kwargs):\n render_kwargs={}\n render_kwargs.update(**self.environment)\n render_kwargs.update(**kwargs)\n from jinja2 import Environment,Template\n\n tem = Environment().from_string(self.to_string())\n return tem.render(**render_kwargs)\n\nclass Text(Node):\n tag = 'text'\n def to_string(self, indent=0, indent_step=2):\n return self.children[0]\n def __call__(self, children: list = []):\n if not isinstance(children, (list,)):\n assert isinstance(children, ( str, ))\n children = [children]\n self.children = children\n return self\n\nclass Var(Node):\n tag = 'var'\n _attrs = dict(type='node')\n\n def __init__(self, name, **kwargs):\n super().__init__(name=name, **kwargs)\n\n def to_string(self, indent=0, indent_step=2):\n if len(self.children) == 1:\n child = self.children[0]\n if isinstance(child, (str,)):\n return str(child)\n elif isinstance(child, Var) and child.attrs['type'] == 'text':\n return child.to_string(indent=indent, indent_step=indent_step)\n\n children_string = '\\n{indent}'.format(indent=' ' * (indent)).join([child.to_string(\n indent=indent, indent_step=indent_step) if isinstance(child, (Node,)) else ' ' * (\n indent) + str(child) + '\\n' for child in self.children])\n if children_string:\n return children_string\n else:\n return ''\n\n\nclass Html(Node):\n tag = 'html'\n\n\nclass Head(Node):\n tag = 'head'\n\n\nclass Body(Node):\n tag = 'body'\n\n\nclass Header(Node):\n tag = 'header'\n\n\nclass Footer(Node):\n tag = 'footer'\n\n\nclass Link(Node):\n tag = 'link'\n self_closing = True\n\n\nclass Meta(Node):\n tag = 'meta'\n self_closing = True\n\n\nclass Title(Node):\n tag = 'title'\n\n\nclass Script(Node):\n tag = 'script'\n\n\nclass Style(Node):\n tag = 'style'\n\n\nclass Nav(Node):\n tag = 'nav'\n\n\nclass Div(Node):\n tag = 'div'\n\n\nclass Span(Node):\n tag = 'span'\n\n\nclass H1(Node):\n tag = 'h1'\n\n\nclass H2(Node):\n tag = 'h2'\n\n\nclass H3(Node):\n tag = 'h3'\n\n\nclass H4(Node):\n tag = 'h4'\n\n\nclass H5(Node):\n tag = 'h5'\n\n\nclass H6(Node):\n tag = 'h6'\n\n\nclass P(Node):\n tag = 'p'\n\n\nclass Table(Node):\n tag = 'table'\n\n\nclass Caption(Node):\n tag = 'caption'\n\n\nclass Thead(Node):\n tag = 'thead'\n\n\nclass tbody(Node):\n tag = 'tbody'\n\n\nclass Tr(Node):\n tag = 'tr'\n\n\nclass Td(Node):\n tag = 'td'\n\n\nclass Th(Node):\n tag = 'th'\n\n\nclass Ul(Node):\n tag = 'ul'\n\n\nclass Ol(Node):\n tag = 'ol'\n\n\nclass Li(Node):\n tag = 'li'\n\n\nclass Form(Node):\n tag = 'form'\n\n\nclass Textarea(Node):\n tag = 'textarea'\n\n\nclass Input(Node):\n tag = 'input'\n self_closing = True\n\n\nclass Label(Node):\n tag = 'label'\n\n\nclass Select(Node):\n tag = 'select'\n\n\nclass A(Node):\n tag = 'a'\n\n\nclass B(Node):\n tag = 'b'\n\n\nclass Strong(Node):\n tag = 'strong'\n\n\nclass I(Node):\n tag = 'i'\n\n\nclass Em(Node):\n tag = 'em'\n\n\nclass Strike(Node):\n tag = 
'strike'\n\n\nclass Del(Node):\n tag = 'del'\n\n\nclass Hr(Node):\n tag = 'hr'\n self_closing = True\n\n\nclass Br(Node):\n tag = 'br'\n self_closing = True\n\n\nclass U(Node):\n tag = 'u'\n\n\nclass Img(Node):\n tag = 'img'\n\n\nclass Sub(Node):\n tag = 'sub'\n\n\nclass Sup(Node):\n tag = 'sup'\n\n\nclass Big(Node):\n tag = 'big'\n\n\nclass Small(Node):\n tag = 'small'\n\n\nclass Button(Node):\n tag = 'button'\n\n\ndef smart_update_dict(dic1={}, dic2={}):\n '''if the some-value is also a dict , then try to update only the smaller dict'''\n for k, v in dic2.items():\n if not k in dic1.keys():\n dic1[k] = v\n else:\n if isinstance(dic1[k], dict) and isinstance(dic2[k], dict):\n smart_update_dict(dic1[k], dic2[k])\n else:\n dic1[k] = v\n return dic1\n","sub_path":"build/lib/wk/extra/node/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292810691","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 30 15:19:03 2015\nUsage:\n./pick_seq_by_list.py dataset_path list_path outpath\n@author: CHEN\n\"\"\"\nimport sys\n\ndef readData(path):\n\thandle = open(path, 'r')\n\tlines = handle.readlines()\n\thandle.close()\n\tres = []\n\ttmp_seq = []\n\ttitle = ''\n\tfor line in lines:\n\t\tif line[0] != '>':\n\t\t\ttmp_seq = tmp_seq + line[:-1]\n\t\telse:\n\t\t\tres.append([title, tmp_seq])\n\t\t\ttitle = line.split(' ')[0][:-1]\n\t\t\ttmp_seq = ''\n\tres.append([title, tmp_seq])\n\treturn res[1:]\n\t\ndef loadList(path):\n\thandle = open(path, 'r')\n\tlines = handle.readlines()\n\thandle.close()\n\tlst = []\n\tfor line in lines:\n\t\tlst.append(line[:-1])\n\treturn(lst)\n\ndef filterData(data, lst):\n\tres = [[item for item in data if item[0][1:] == name] for name in lst]\n\treturn res\n\t\ndef writeRes(res, path):\n\thandle=open(path, 'w')\n\tfor item in res:\n\t\tfor group in item:\n\t\t\tfor term in group:\n\t\t\t\thandle.write(term + '\\n')\n\thandle.close()\n\treturn False\n\ndataset_path = sys.argv[1]\nlist_path = sys.argv[2]\nout_path = sys.argv[3]\n\ndata = readData(dataset_path)\nlst = loadList(list_path)\nres = filterData(data, lst)\nwriteRes(res, out_path)\n\n","sub_path":"pick_seq_by_list.py","file_name":"pick_seq_by_list.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"380366604","text":"# import StemmerFactory class\r\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\r\n\r\n# create stemmer\r\nfactory = StemmerFactory()\r\nstemmer = factory.create_stemmer()\r\n\r\njawab = 'ya'\r\n# stemming process\r\nwhile (True):\r\n kalimat = input(\"Masukan Kata atau kalimat yang akan distemming : \")\r\n\r\n output = stemmer.stem(kalimat)\r\n# output = stemmer.stem(sentence)\r\n\r\n print(\"Kata Sebelum di Stemming: \" + kalimat) \r\n print(\"Kata Setelah di Stemming: \" + output)\r\n\r\n jawab = input(\"Coba Kalimat / Kata Lain??? 
[ya/tidak] \")\r\n if jawab == 'tidak':\r\n print(\"Terima Kasih Telah Mencoba\")\r\n break\r\n\r\n\r\n","sub_path":"stemming.py","file_name":"stemming.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45262078","text":"from .models import Cliente,Tarifa,Orden,Pedido,DomCliente, Archivo,Archivodos\nfrom .serializers import ClienteSerializer,OrdenSerializer,PedidoArticulosSerializer,ContadoTarifaserializer,OrdenClienteSerializer, DomicilioSerializer, ArchivoSerializer,ArchivodosSerializer\n\nfrom rest_framework import viewsets,permissions\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import ModelViewSet\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.models import Permission, User\nfrom django.urls import reverse_lazy\nfrom rest_framework.response import Response\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import (\n ListView,\n UpdateView,\n DetailView,\n View\n)\nfrom django.db.models import Q\nfrom .forms import FormAprobar, FormValidar, FormSurtir, FormCliente\nfrom django.shortcuts import render\nfrom nfh.utileria import Render\nfrom django.http import Http404\n\napp_name='pedidos'\n\n################################################### PDF #############\n\nclass Pdf(LoginRequiredMixin, DetailView):\n def get(self,request,id):\n queryorden =Orden.objects.get(id=id)\n query=Pedido.objects.select_related().filter(orden_id=id)\n template_name = 'pedidos/orden/pdf.html'\n contexto={\n 'pedido':query,\n 'object':queryorden,\n 'request':request\n }\n return Render.render(template_name,contexto)\n\n#############################Views genericas para los paneles######\n\nclass TarifaListView(LoginRequiredMixin, ListView):\n template_name = 'pedidos/tarifas/tarifa_list.html'\n model = Tarifa\n\n\nclass ClienteListView(LoginRequiredMixin, ListView):\n template_name = 'pedidos/clientes/cliente_list.html'\n model = Cliente\n\n\nclass OrdenListView(LoginRequiredMixin, ListView):\n template_name = 'pedidos/orden/orden_list.html'\n model = Orden\n\n#class ClienteCreateView(LoginRequiredMixin, CreateView):\n # template_name = 'pedidos/clientes/cliente_form.html'\n # model = Cliente\n # fields = '__all__'\n #success_url = reverse_lazy('pedidos:clientes')\n\n\n\n#########################CREAR USUARIO#############################\nclass ClienteCreateView(LoginRequiredMixin, View):\n template_name = 'pedidos/clientes/cliente_form.html'\n\n def get(self, request):\n form = FormCliente\n context = {\n 'form': form,\n }\n return render(request, self.template_name, context)\n\n\n def post(self, request):\n form = FormCliente(request.POST)\n if form.is_valid():\n form.save()\n return redirect('pedidos:clientes')\n else:\n return render(request, self.template_name, {'form': form} )\n\n\n\n\nclass OrdendesDetail(LoginRequiredMixin, DetailView):\n def get(self,request,id):\n query=Orden.objects.get(id=id)\n pedido= Pedido.objects.select_related().filter(orden_id=id)\n archivos=Archivo.objects.all().filter(orden_id=id)\n archivodos=Archivodos.objects.all().filter(orden_id=id)\n template_name='pedidos/orden/orden_detail.html'\n context={\n 'object':query,\n 'pedido':pedido,\n 'archivos':archivos,\n 'archivodos':archivodos\n }\n return render(request,template_name,context)\n\n#######################Vistas para validar, aprovar, surtir y modificar##########\nclass OrdenAprobar(LoginRequiredMixin,View):\n def 
get(self, request, id):\n template_name=\"pedidos/orden/orden_aprobar.html\"\n query=Orden.objects.get(id=id)\n setquery = Orden.objects.get(id=id)\n archivos = Archivo.objects.all().filter(orden_id=id)\n archivodos = Archivodos.objects.all().filter(orden_id=id)\n form=FormAprobar(instance=query)\n context={\n 'form':form,\n 'setquery':setquery,\n 'archivos': archivos,\n 'archivodos': archivodos\n }\n return render(request,template_name,context)\n def post(self,request,id):\n query=Orden.objects.get(id=id)\n form=FormAprobar(request.POST, instance=query)\n if form.is_valid:\n form.save()\n return redirect('pedidos:vergenerada')\n else:\n return render(request, 'pedidos/orden/orden_aprobar.html', {'form':form})\n\nclass ClienteUpdate(LoginRequiredMixin,UpdateView):\n model = Cliente\n fields = ['NOMBRE','DIRECCIÓN','COLONIA','POBLACIÓN','MUNICIPIO','CP','RFC','TELÉFONO','CORREO']\n template_name = 'pedidos/clientes/cliente_form.html'\n\nclass ClienteDetail(DetailView):\n model = Cliente\n template_name = 'pedidos/clientes/cliente_detail.html'\n\n#######################Vistas para validar##########\nclass OrdenValidar(LoginRequiredMixin,View):\n def get(self, request, id):\n template_name=\"pedidos/orden/orden_validar.html\"\n query=Orden.objects.get(id=id)\n setquery = Orden.objects.get(id=id)\n archivos = Archivo.objects.all().filter(orden_id=id)\n archivodos=Archivodos.objects.all().filter(orden_id=id)\n form=FormValidar(instance=query)\n context={\n 'form':form,\n 'setquery':setquery,\n 'archivos':archivos,\n 'archivodos':archivodos\n }\n return render(request,template_name,context)\n\n def post(self,request,id):\n query=Orden.objects.get(id=id)\n form=FormValidar(request.POST, instance=query)\n if form.is_valid:\n form.save()\n return redirect('accounts:profile')\n\n\n#######################Vistas para surtir##########\nclass OrdenSurtir(LoginRequiredMixin,View):\n def get(self, request, id):\n template_name=\"pedidos/orden/orden_surtir.html\"\n query=Orden.objects.get(id=id)\n setquery = Orden.objects.get(id=id)\n form=FormSurtir(instance=query)\n context={\n 'form':form,\n 'setquery':setquery\n }\n return render(request,template_name,context)\n\n def post(self,request,id):\n query=Orden.objects.get(id=id)\n form=FormSurtir(request.POST, instance=query)\n if form.is_valid:\n form.save()\n return redirect('accounts:profile')\n\n\n\n\n\nclass AprobarListOrdenView(LoginRequiredMixin, View):\n def get(self,request):\n template_name = 'pedidos/orden/orden_list_aprobar.html'\n object_list=Orden.objects.all().filter(status='g')\n context={\n 'object_list':object_list\n }\n return render(request,template_name,context)\n#######################serializadores################################\n\n\n\nclass ClienteViewSet(viewsets.ModelViewSet):\n queryset =Cliente.objects.all()\n serializer_class=ClienteSerializer\n def get_queryset(self, *args, **kwargs):\n search = self.request.GET.get('s')\n queryset_list = super(ClienteViewSet, self).get_queryset()\n if search:\n queryset_list = queryset_list.filter(\n Q(NOMBRE__icontains=search)\n ).distinct()\n\n return queryset_list\n\n\n\nclass OrdenViewSet(ModelViewSet):\n queryset = Orden.objects.all()\n serializer_class = OrdenSerializer\n\n\nclass OrdenDetail(APIView):\n \"\"\"\n Obtenemos, el detalle de la orden\n \"\"\"\n def get_object(self, pk):\n try:\n return Orden.objects.get(pk=pk)\n except Orden.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n orden=self.get_object(pk)\n serializer=OrdenClienteSerializer(orden)\n 
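        # Note (illustrative, not from the original project): serializer.data is the\n        # dict built by OrdenClienteSerializer; DRF's Response renders it with the\n        # negotiated content type, so a client GET against this endpoint receives the\n        # serialized Orden as e.g. JSON. Route and field names here are hypothetical:\n        #   GET /ordenes/1/  ->  {'id': 1, ...}\n        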
return Response(serializer.data)\n\n\n\n\nclass PedidoViewSet(viewsets.ModelViewSet):\n queryset = Pedido.objects.all()\n serializer_class = PedidoArticulosSerializer\n\n\n\n############Serializer para lalo#######\n\nclass ContadoTarifaView(viewsets.ModelViewSet):\n queryset = Tarifa.objects.all()\n serializer_class = ContadoTarifaserializer\n\n\n\n\n############Serializador ORDENES DE CADA VENDEDOR#####\nclass OrdenesVendedorView(viewsets.ModelViewSet):\n queryset = Orden.objects.all()\n serializer_class = OrdenClienteSerializer\n\n\n\n\n\nclass MisOrdenesView(APIView):\n def get(self,request, format=None):\n mis_ordenes=Orden.objects.all().filter(vendedor=request.user.id)\n serializer=OrdenClienteSerializer\n\n def get_queryset(self, *args, **kwargs):\n search = self.request.GET.get('s')\n queryset_list = super(MisOrdenesView, self).get_queryset()\n if search:\n queryset_list = queryset_list.filter(\n Q(numerodeorden__icontains=search)\n ).distinct()\n return queryset_list\n\n\n\n\nclass DomiciliosViewSet(viewsets.ModelViewSet):\n queryset = DomCliente.objects.all()\n serializer_class =DomicilioSerializer\n\n def get_queryset(self, *args, **kwargs):\n cliente = self.request.GET.get('cli')\n queryset_list = super(DomiciliosViewSet, self).get_queryset()\n if cliente:\n queryset_list = queryset_list.filter(cliente=cliente)\n\n return queryset_list\n\n\nclass UploadedImagesViewSet(viewsets.ModelViewSet):\n queryset = Archivo.objects.all()\n serializer_class = ArchivoSerializer\n\nclass UploadedImagesdosViewSet(viewsets.ModelViewSet):\n queryset = Archivodos.objects.all()\n serializer_class = ArchivodosSerializer","sub_path":"pedidos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415632226","text":"from collections import Counter\nimport datetime\nimport decimal\nfrom typing import Dict, List, Optional\n\nfrom pydantic import BaseModel, validator, conlist\nfrom sqlalchemy import MetaData, Table\nfrom sqlalchemy.sql import sqltypes\n\nfrom ..core.errors import DBError, Exc, Ok\n\nfrom ..utils.misc import group_list\n\n\nclass DDL(BaseModel):\n class Column(BaseModel):\n name: str\n type: Optional[str]\n primary: Optional[bool] = False\n not_null: Optional[bool] = False\n unique: Optional[bool] = False\n\n class Index(BaseModel):\n columns: conlist(str, min_items=1)\n\n columns: Optional[List[Column]] = list()\n indexes: Optional[Dict[str, Index]] = dict()\n primary_key: Optional[\n List[str]\n ] = [] # logic field - i.e. 
not added by the user in the ddl definition\n permissions: Optional[Dict[str, str]] = dict()\n\n @validator(\"columns\", pre=True)\n def transform_str_cols(cls, v, values):\n if v is not None and isinstance(v, List):\n return [{\"name\": c} if isinstance(c, str) else c for c in v]\n else:\n return v\n\n @validator(\"columns\")\n def columns_unique(cls, v, values):\n dupes = {k for k, v in Counter([e.name for e in v]).items() if v > 1}\n if len(dupes) > 0:\n raise ValueError(f\"Duplicate columns: {','.join(dupes)}\")\n else:\n return v\n\n @validator(\"indexes\")\n def index_columns_exists(cls, v, values):\n cols = [c.name for c in values.get(\"columns\", list())]\n if len(cols) > 0:\n missing_cols = group_list(\n [\n (index_name, index_column)\n for index_name, index in v.items()\n for index_column in index.columns\n if index_column not in cols\n ]\n )\n if len(missing_cols) > 0:\n cols_msg = \";\".join(\n [f\"On {i}: {','.join(c)}\" for i, c in missing_cols.items()]\n )\n raise ValueError(f\"Some indexes refer to missing columns: {cols_msg}\")\n\n return v\n\n @validator(\"primary_key\", always=True)\n def set_pk(cls, v, values):\n columns_pk = [c.name for c in values.get(\"columns\", []) if c.primary]\n\n indexes_pk = list()\n if values.get(\"indexes\", {}).get(\"primary_key\") is not None:\n indexes_pk = values.get(\"indexes\").get(\"primary_key\").columns\n\n if len(columns_pk) > 0 and len(indexes_pk) > 0:\n if set(columns_pk) != set(indexes_pk):\n columns_pk_str = \" ,\".join(columns_pk)\n indexes_pk_str = \" ,\".join(indexes_pk)\n raise ValueError(\n f\"Primary key defined in indexes ({indexes_pk_str}) does not match primary key defined in columns ({columns_pk_str}).\"\n )\n\n pk = columns_pk if len(columns_pk) > 0 else indexes_pk\n\n return pk\n\n def get_ddl(self):\n return {\n \"columns\": [c.dict() for c in self.columns],\n \"indexes\": {\n k: v.dict() for k, v in self.indexes.items() if k != \"primary_key\"\n },\n \"permissions\": self.permissions,\n \"primary_key\": self.primary_key,\n }\n\n\nclass Database:\n \"\"\"\n Base class for databases in SAYN.\n\n Databases are implemented using sqlalchemy, and the `engine` attribute is available\n when working in python tasks without the need for calling create_engine.\n\n Attributes:\n engine (sqlalchemy.Engine): A sqlalchemy engine referencing the database.\n name (str): Name of the db as defined in `required_credentials` in `project.yaml`.\n name_in_yaml (str): Name of db under `credentials` in `settings.yaml`.\n db_type (str): Type of the database.\n metadata (sqlalchemy.MetaData): A metadata object associated with the engine.\n \"\"\"\n\n ddl_validation_class = DDL\n sql_features = []\n # Supported sql_features\n # - CREATE IF NOT EXISTS\n # - CREATE TABLE NO PARENTHESES\n # - INSERT TABLE NO PARENTHESES\n # - DROP CASCADE\n # - NO SET SCHEMA\n # - NO ALTER INDEXES\n\n def __init__(self, name, name_in_settings, db_type, common_params):\n self.name = name\n self.name_in_settings = name_in_settings\n self.db_type = db_type\n self.max_batch_rows = common_params.get(\"max_batch_rows\", 50000)\n\n def _set_engine(self, engine):\n self.engine = engine\n self.metadata = MetaData(self.engine)\n\n # Force a query to test the connection\n engine.execute(\"select 1\")\n\n def _validate_ddl(self, ddl):\n if ddl is None:\n return Ok(self.ddl_validation_class().get_ddl())\n else:\n try:\n return Ok(self.ddl_validation_class(**ddl).get_ddl())\n except Exception as e:\n return Exc(e, db=self.name, type=self.db_type)\n\n def 
_transform_column_type(self, column_type, dialect):\n return self._py2sqa(column_type.python_type, dialect=dialect)\n\n def _refresh_metadata(self, only=None, schema=None):\n \"\"\"Refreshes the sqlalchemy metadata object.\n\n Args:\n only (list): A list of object names to filter the refresh on\n schema (str): The schema name to filter on the refresh\n \"\"\"\n self.metadata.reflect(only=only, schema=schema, extend_existing=True)\n\n def _py2sqa(self, from_type, dialect=None):\n python_types = {\n int: sqltypes.BigInteger,\n str: sqltypes.Unicode,\n float: sqltypes.Float,\n decimal.Decimal: sqltypes.Numeric,\n datetime.datetime: sqltypes.DateTime,\n bytes: sqltypes.LargeBinary,\n bool: sqltypes.Boolean,\n datetime.date: sqltypes.Date,\n datetime.time: sqltypes.Time,\n datetime.timedelta: sqltypes.Interval,\n list: sqltypes.ARRAY,\n dict: sqltypes.JSON,\n }\n\n if from_type not in python_types:\n raise ValueError(f'Type not supported \"{from_type}\"')\n elif dialect is not None:\n return python_types[from_type]().compile(dialect=dialect)\n else:\n return python_types[from_type]\n\n # API\n\n def execute(self, script):\n \"\"\"Executes a script in the database. Multiple statements are supported.\n\n Args:\n script (sql): The SQL script to execute\n \"\"\"\n with self.engine.connect().execution_options(autocommit=True) as connection:\n connection.execute(script)\n\n def read_data(self, query, **params):\n \"\"\"Executes the query and returns a list of dictionaries with the data.\n\n Args:\n query (str): The SELECT query to execute\n params (dict): sqlalchemy parameters to use when building the final query as per\n [sqlalchemy.engine.Connection.execute](https://docs.sqlalchemy.org/en/13/core/connections.html#sqlalchemy.engine.Connection.execute)\n\n Returns:\n list: A list of dictionaries with the results of the query\n\n \"\"\"\n if params is not None:\n res = self.engine.execute(query, **params)\n else:\n res = self.engine.execute(query)\n\n return [dict(zip(res.keys(), r)) for r in res.fetchall()]\n\n def _read_data_stream(self, query, **params):\n \"\"\"Executes the query and returns an iterator dictionaries with the data.\n\n The main difference with read_data() is that this method executes the query with a server-side\n cursor (sqlalchemy stream_results = True).\n\n Args:\n query (str): The SELECT query to execute\n params (dict): sqlalchemy parameters to use when building the final query as per\n [sqlalchemy.engine.Connection.execute](https://docs.sqlalchemy.org/en/13/core/connections.html#sqlalchemy.engine.Connection.execute)\n\n Returns:\n list: A list of dictionaries with the results of the query\n\n \"\"\"\n with self.engine.connect().execution_options(stream_results=True) as connection:\n res = connection.execute(query, **params)\n\n for record in res:\n yield dict(zip(res.keys(), record))\n\n def _load_data_batch(self, table, data, schema):\n \"\"\"Implements the load of a single data batch for `load_data`.\n\n Defaults to an insert many statement, but it's overloaded for specific\n database connector for more efficient methods.\n\n Args:\n table (str): The name of the target table\n data (list): A list of dictionaries to load\n schema (str): An optional schema to reference the table\n \"\"\"\n table_def = self._get_table(table, schema)\n if table_def is None:\n raise DBError(\n self.name,\n self.db_type,\n f\"Table {schema + '.' 
if schema is not None else ''}{table} does not exists\",\n )\n\n with self.engine.connect().execution_options(autocommit=True) as connection:\n connection.execute(table_def.insert().values(data))\n\n def load_data(\n self, table, data, schema=None, batch_size=None, replace=False, ddl=None\n ):\n \"\"\"Loads a list of values into the database\n\n The default loading mechanism is an INSERT...VALUES, but database drivers\n will implement more appropriate methods.\n\n Args:\n table (str): The name of the target table\n data (list): A list of dictionaries to load\n schema (str): An optional schema to reference the table\n batch_size (int): The max size of each load batch. Defaults to\n `max_batch_rows` in the credentials configuration (settings.yaml)\n replace (bool): Indicates whether the target table is to be replaced\n (True) or new records are to be appended to the existing table (default)\n ddl (dict): An optional ddl specification in the same format as used\n in autosql and copy tasks\n \"\"\"\n batch_size = batch_size or self.max_batch_rows\n buffer = list()\n if replace:\n self._drop_table(table, schema, execute=True)\n\n result = self._validate_ddl(ddl)\n if result.is_err:\n raise DBError(\n self.name,\n self.db_type,\n \"Incorrect ddl provided\",\n errors=result.error[\"errors\"],\n )\n else:\n ddl = result.value\n\n check_create = True\n table_exists_prior_load = self._table_exists(table, schema)\n\n for i, record in enumerate(data):\n if check_create and not table_exists_prior_load:\n # Create the table if required\n if len(ddl.get(\"columns\", list())) == 0:\n # If no columns are specified in the ddl, figure that out\n # based on the python types of the first record\n columns = [\n {\n \"name\": col,\n \"type\": self._py2sqa(type(val), self.engine.dialect),\n }\n for col, val in record.items()\n ]\n ddl = dict(ddl, columns=columns)\n\n self._create_table_ddl(table, schema, ddl, execute=True)\n check_create = False\n\n if i % batch_size == 0 and len(buffer) > 0:\n self._load_data_batch(table, buffer, schema)\n buffer = list()\n\n buffer.append(record)\n\n if len(buffer) > 0:\n self._load_data_batch(table, buffer, schema)\n\n def _get_table(self, table, schema):\n \"\"\"Create a SQLAlchemy Table object.\n\n Args:\n table (str): The table name\n schema (str): The schema or None\n\n Returns:\n sqlalchemy.Table: A table object from sqlalchemy\n \"\"\"\n table_def = Table(table, self.metadata, schema=schema, extend_existing=True)\n\n if table_def.exists():\n table_def = Table(\n table, self.metadata, schema=schema, extend_existing=True, autoload=True\n )\n return table_def\n\n def _table_exists(self, table, schema):\n return self._get_table(table, schema) is not None\n\n # ETL steps\n # =========\n # Methods that build sql used in autosql and\n # copy tasks. Can optionally also execute the\n # sql if `execute=True`\n\n def _create_table_select(\n self, table, schema, select, view=False, ddl=dict(), execute=False\n ):\n \"\"\"Returns SQL code for a create table from a select statement.\n\n Args:\n table (str): The target table name\n schema (str): The target schema or None\n select (str): A SQL SELECT query to build the table with\n view (bool): Indicates if the object to create is a view. Defaults to creating a table\n ddl (dict): Optionally specify a ddl dict. If provided, a `CREATE` with column specification\n followed by an `INSERT` rather than a `CREATE ... 
AS SELECT ...` will be issued\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for the CREATE...AS\n \"\"\"\n table = f\"{schema+'.' if schema else ''}{table}\"\n table_or_view = \"VIEW\" if view else \"TABLE\"\n\n q = \"\"\n if_not_exists = (\n \" IF NOT EXISTS\" if \"CREATE IF NOT EXISTS\" in self.sql_features else \"\"\n )\n if \"CREATE TABLE NO PARENTHESES\" in self.sql_features:\n q += f\"CREATE {table_or_view}{if_not_exists} {table} AS \\n{select}\\n;\"\n else:\n q += f\"CREATE {table_or_view}{if_not_exists} {table} AS (\\n{select}\\n);\"\n\n if execute:\n self.execute(q)\n\n return q\n\n def _create_table_ddl(self, table, schema, ddl, execute=False):\n \"\"\"Returns SQL code for a create table from a select statement.\n\n Args:\n table (str): The target table name\n schema (str): The target schema or None\n ddl (dict): A ddl task definition\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for the CREATE TABLE statement\n \"\"\"\n if len(ddl[\"columns\"]) == 0:\n raise DBError(\n self.name, self.db_type, \"DDL is missing columns specification\"\n )\n table_name = table\n table = f\"{schema+'.' if schema else ''}{table_name}\"\n\n # List of reserved keywords so columns are quoted\n # TODO find a better way\n reserved = (\"from\", \"to\", \"primary\")\n columns = [\n {k: f'\"{v}\"' if k == \"name\" and v in reserved else v for k, v in c.items()}\n for c in ddl[\"columns\"]\n ]\n\n columns = \"\\n , \".join(\n [\n (\n f'{c[\"name\"]} {c[\"type\"]}'\n f'{\" NOT NULL\" if c.get(\"not_null\", False) else \"\"}'\n )\n for c in columns\n ]\n )\n\n if len(ddl[\"primary_key\"]) > 0:\n pk = \" ,\".join(ddl[\"primary_key\"])\n pk = f\" , PRIMARY KEY ({pk})\"\n else:\n pk = \"\"\n\n q = \"\"\n if_not_exists = (\n \" IF NOT EXISTS\" if \"CREATE IF NOT EXISTS\" in self.sql_features else \"\"\n )\n q += f\"CREATE TABLE{if_not_exists} {table} (\\n {columns}\\n{pk}\\n);\"\n\n if execute:\n self.execute(q)\n\n return q\n\n def _create_indexes(self, table, schema, ddl, execute=False):\n \"\"\"Returns SQL to create indexes from ddl.\n\n Args:\n table (str): The target table name\n schema (str): The target schema or None\n ddl (dict): A ddl task definition\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for the CREATE INDEX statements\n \"\"\"\n table_name = table\n table = f\"{schema+'.' if schema else ''}{table}\"\n\n indexes = {\n idx: idx_def[\"columns\"]\n for idx, idx_def in ddl.get(\"indexes\", dict()).items()\n }\n\n q = \"\"\n if len(ddl[\"primary_key\"]) > 0:\n pk_cols = \", \".join(ddl[\"primary_key\"])\n q += f\"ALTER TABLE {table} ADD PRIMARY KEY ({pk_cols});\"\n\n q += \"\\n\".join(\n [\n f\"CREATE INDEX {table_name}_{name} ON {table}({', '.join(cols)});\"\n for name, cols in indexes.items()\n ]\n )\n\n if execute:\n self.execute(q)\n\n return q\n\n def grant_permissions(self, table, schema, ddl, execute=False):\n \"\"\"Returns a set of GRANT statements.\n\n Args:\n table (str): The target table name\n schema (str): The target schema or None\n ddl (dict): A ddl task definition\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for the GRANT statements\n \"\"\"\n q = \"\\n\".join(\n [\n f\"GRANT {priv} ON {schema+'.' 
if schema else ''}{table} TO \\\"{role}\\\";\"\n for role, priv in ddl.items()\n ]\n )\n\n if execute:\n self.execute(q)\n\n return q\n\n def _drop_table(self, table, schema, view=False, execute=False):\n \"\"\"Returns a DROP statement.\n\n Args:\n table (str): The target table name\n schema (str): The target schema or None\n view (bool): Indicates if the object to drop is a view. Defaults to dropping a table\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for the DROP statements\n \"\"\"\n table = f\"{schema+'.' if schema else ''}{table}\"\n table_or_view = \"VIEW\" if view else \"TABLE\"\n\n q = f\"DROP {table_or_view} IF EXISTS {table}\"\n\n if \"DROP CASCADE\" in self.sql_features:\n q += \" CASCADE;\"\n else:\n q += \";\"\n\n if execute:\n self.execute(q)\n\n return q\n\n def _insert(self, table, schema, select, columns=None, execute=False):\n \"\"\"Returns an INSERT statement from a SELECT query.\n\n Args:\n table (str): The target table name\n schema (str): The target schema or None\n select (str): The SELECT statement to issue\n columns (list): The list of column names specified in DDL. If provided, the insert will be reordered based on this order\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for the INSERT statement\n \"\"\"\n table = f\"{schema+'.' if schema else ''}{table}\"\n\n # we reshape the insert statement to avoid conflict if columns are not specified in same order between query and task group file\n if columns is not None:\n select = \"SELECT i.\" + \"\\n, i.\".join(columns) + f\"\\n\\nFROM ({select}) AS i\"\n columns = \"(\" + \", \".join(columns) + \")\"\n else:\n columns = \"\"\n\n if \"INSERT TABLE NO PARENTHESES\" in self.sql_features:\n q = f\"INSERT INTO {table} {columns} \\n{select}\\n;\"\n else:\n q = f\"INSERT INTO {table} {columns} (\\n{select}\\n);\"\n\n if execute:\n self.execute(q)\n\n return q\n\n def _move_table(\n self, src_table, src_schema, dst_table, dst_schema, ddl, execute=False\n ):\n \"\"\"Returns SQL code to rename a table and change schema.\n\n Note:\n Table movement is performed as a series of ALTER statements:\n\n * ALTER TABLE RENAME\n * ALTER TABLE SET SCHEMA (if the database supports it)\n * ALTER INDEX RENAME (to ensure consistency in the naming). Index names\n are taken from the ddl field\n\n Args:\n src_table (str): The source table name\n src_schema (str): The source schema or None\n dst_table (str): The target table name\n dst_schema (str): The target schema or None\n ddl (dict): A ddl task definition\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for moving the table\n \"\"\"\n rename = f\"ALTER TABLE {src_schema+'.' if src_schema else ''}{src_table} RENAME TO {dst_table};\"\n if dst_schema is not None and dst_schema != src_schema:\n change_schema = f\"ALTER TABLE {src_schema+'.' if src_schema else ''}{dst_table} SET SCHEMA {dst_schema};\"\n else:\n change_schema = \"\"\n\n pk_alter = []\n if \"NO ALTER INDEXES\" not in self.sql_features and len(ddl[\"primary_key\"]) > 0:\n # Change primary key name\n pk_alter.append(\n f\"ALTER INDEX {dst_schema+'.' if dst_schema else ''}{src_table}_pkey RENAME TO {dst_table}_pkey;\"\n )\n\n idx_alter = []\n if len(ddl[\"indexes\"]) > 0:\n # Change index names\n for idx in ddl[\"indexes\"].keys():\n if \"NO ALTER INDEXES\" in self.sql_features:\n idx_cols = \" ,\".join(ddl[\"indexes\"][idx][\"columns\"])\n idx_alter.append(\n f\"DROP INDEX {dst_schema+'.' 
if dst_schema else ''}{src_table}_{idx};\\n\"\n f\"CREATE INDEX {dst_table}_{idx} ON {dst_table}({idx_cols});\"\n )\n else:\n idx_alter.append(\n f\"ALTER INDEX {dst_schema+'.' if dst_schema else ''}{src_table}_{idx} \"\n f\"RENAME TO {dst_table}_{idx};\"\n )\n\n q = \"\\n\".join([rename, change_schema] + pk_alter + idx_alter)\n\n if execute:\n self.execute(q)\n\n return q\n\n def _merge_tables(\n self,\n src_table,\n src_schema,\n dst_table,\n dst_schema,\n delete_key,\n columns=None,\n execute=False,\n ):\n \"\"\"Returns SQL to merge data in incremental loads.\n\n Note:\n Data merge is performed by issuing these statements:\n\n * DELETE from target WHERE data exists in source\n * INSERT into target SELECT * from source\n\n Args:\n src_table (str): The source table name\n src_schema (str): The source schema or None\n dst_table (str): The target table name\n dst_schema (str): The target schema or None\n delete_key (str): The column name to use for deleting records from the target table\n columns (list): The list of column names specified in DDL. If provided, the insert will be reordered based on this order\n execute (bool): Execute the query before returning it\n\n Returns:\n str: A SQL script for moving the table\n \"\"\"\n dst = f\"{dst_schema+'.' if dst_schema else ''}{dst_table}\"\n src = f\"{src_schema+'.' if src_schema else ''}{src_table}\"\n\n delete = (\n f\"DELETE FROM {dst}\\n\"\n f\" WHERE EXISTS (SELECT *\\n\"\n f\" FROM {src}\\n\"\n f\" WHERE {src}.{delete_key} = {dst}.{delete_key});\"\n )\n\n select = f\"SELECT * FROM {src}\"\n insert = self._insert(dst_table, dst_schema, select, columns=columns)\n q = \"\\n\".join((delete, insert))\n\n if execute:\n self.execute(q)\n\n return q\n","sub_path":"sayn/database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":23560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114050490","text":"intervals = [[3, 5], [8, 10], [1, 2], [6, 7], [12, 16]]\r\nnewInterval = [4, 8]\r\n# intervals = [[6, 9], [1, 3]]\r\n# newInterval = [2, 5]\r\nintervals.append(newInterval)\r\nintervals.sort()\r\n\r\nresult = []\r\ni = 0\r\nstep = 1\r\nwhile i < len(intervals):\r\n current = intervals[i].copy()\r\n right = current[1]\r\n while i + step < len(intervals) and right >= intervals[i + step][0]:\r\n right = max(right, intervals[i + step][1])\r\n current.extend(intervals[i + step])\r\n step += 1\r\n result.append([current[0], right])\r\n i += step\r\n step = 1\r\n\r\nprint(intervals)\r\nprint(result)","sub_path":"main7.py","file_name":"main7.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650965498","text":"import random\r\n\r\ndef removeDuplicates (lista):\r\n\tlista = set(lista)\r\n\tlista = list(lista)\r\n\treturn lista\r\n\r\n\r\na = [random.randint(0, 20) for i in range(0, 20)]\r\n\r\nprint(\"Before:\\t\", sorted(a))\r\n\r\na = removeDuplicates(a)\r\n\r\nprint(\"After:\\t\", a)\r\n\r\n","sub_path":"Python/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338513402","text":"\nimport xlwings as xw\nfrom datetime import datetime\nimport os\nimport time\n\n\ncombo_data={}\nallfiles=os.listdir()\nfor filesname in allfiles:\n# 通过循环目录下所有的excel文件,每个文件的每个表格,形成一个字典包含\n# combo_data['文件名字']['sheet1']=range()\n# combo_data['文件名字']['sheet2']=range()\n# 
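# [Translation of the Chinese comment above; its final example line follows just\n# below:] loop over all the Excel files in the directory and over every sheet of\n# each file, forming a dict with one entry per sheet, i.e.\n# combo_data['<filename>']['sheetN'] = range()\n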
combo_data['<filename>']['sheet3']=range()\n#.......\n    try:\n        if filesname.split(\".\")[1]=='xlsx' or filesname.split(\".\")[1]=='xls':\n            \n            wb=xw.Book(filesname)\n            combo_data[filesname]=[]\n            wbsheets=wb.sheets\n            for sheet in wbsheets:\n                # print(sheet.name)\n                used_range_rows = (sheet.api.UsedRange.Row,\n                                   sheet.api.UsedRange.Row + sheet.api.UsedRange.Rows.Count)\n                used_range_cols = (sheet.api.UsedRange.Column,\n                                   sheet.api.UsedRange.Column + sheet.api.UsedRange.Columns.Count)\n                # print(used_range_rows,used_range_cols)\n                used_range = sheet.range(*zip(used_range_rows, used_range_cols))\n                # print(sheet.name)\n                print(used_range)\n                combo_data[filesname].append(used_range.value)\n                # print(combo_data[filesname])\n        wb.close()\n        for app in xw.apps:\n            app.quit()\n        \n    except Exception as e:\n        # print(e)\n        pass\ntime.sleep(3)\ncombo_wb = xw.Book()\nfor filesname in combo_data:\n    for sheets_num in range(len(combo_data[filesname])):\n        try:\n            used_rows = combo_wb.sheets[sheets_num].api.UsedRange.Rows.Count + 1\n            combo_wb.sheets[sheets_num].range(\"A\"+str(used_rows)).value = combo_data[filesname][sheets_num]\n        except IndexError:\n            combo_wb.sheets.add(after=combo_wb.sheets[sheets_num-1])\n            used_rows = combo_wb.sheets[sheets_num].api.UsedRange.Rows.Count + 1\n            combo_wb.sheets[sheets_num].range(\"A\"+str(used_rows)).value = combo_data[filesname][sheets_num]\n\n\n","sub_path":"merge_excel.py","file_name":"merge_excel.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96734830","text":"import random\nimport pygame\nfrom pygame.locals import *\nimport time\n\n\n#use a list of (x,y) to represent snake\nclass snake():\n    # +x (r), -x (l), +y (u), -y (d)\n    __facing_options=[(1,0),(-1,0),(0,1),(0,-1)]\n\n    def __init__(self,x_range,y_range):\n        self.snake_list = [(random.randint(0,x_range),random.randint(0,y_range))] #initializes starting position\n        ran = random.randint(0, 3)#(1,0) means facing +x direction\n        self.facing = self.__facing_options[ran] #initialize random facing\n        self.got_apple = False\n\n    ## WILL CONTROL HEAD OF SNAKE (REST OF BODY IS CONTROLLED BELOW)\n    def apply_action(self,*args):\n        '''find out what direction the snake will be facing after the action and move snake once'''\n        #for *immediate movement\n        if args:\n            #print(\"HERE\")\n            if args[0] =='left':\n                if self.facing[0] == 0: #originally moving in +/-y direction\n                    self.facing = self.facing[::-1]\n                    self.facing = (-self.facing[0],self.facing[1])\n                else: #originally moving in +/-x direction\n                    self.facing = self.facing[::-1]\n\n            elif args[0] =='right':\n                if self.facing[0] == 0:\n                    self.facing = self.facing[::-1]\n                else:\n                    self.facing = self.facing[::-1]\n                    self.facing = (-self.facing[0],self.facing[1])\n            \n        #detecting movement when snake is constantly moving forward\n        else:\n            for event in pygame.event.get():\n                #cole added (7/31/19)\n                if event.type == pygame.QUIT:\n                    pygame.quit()\n                # \n                #keys = pygame.key.get_pressed()\n                if event.type == KEYDOWN:\n                    '''\n                    if event.key == pygame.K_RIGHT:#keys[pygame.K_LEFT]:\n                        if self.facing[0] == 0: #originally moving in +/-y direction\n                            self.facing = self.facing[::-1]\n                            self.facing = (-self.facing[0],self.facing[1])\n                        else: #originally moving in +/-x direction\n                            self.facing = self.facing[::-1]\n\n                    elif event.key == pygame.K_LEFT:#keys[pygame.K_RIGHT]:\n                        if self.facing[0] == 0:\n                            self.facing = self.facing[::-1]\n                        else:\n                            self.facing = self.facing[::-1]\n                            self.facing = (self.facing[0],-self.facing[1])\n                    '''\n\n                    #Cole added (7/31)\n                    
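                    # Note (illustrative, not from the original code): the two tuple\n                    # idioms used below are self.facing[::-1], which swaps the x/y\n                    # components, e.g. (0, 1) -> (1, 0), and negating one component,\n                    # which reverses direction along that axis, e.g. (1, 0) -> (-1, 0);\n                    # together they implement 90-degree turns without a lookup table.\n                    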
#facing = (1,0), (-1,0), (0,1), (0,-1)\n if event.key == pygame.K_LEFT: \n print('left')\n #checing if facing vertical direction (if not nothing happens) \n if self.facing[1] == 1: \n self.facing = self.facing[::-1] #flips tuple\n self.facing = (-self.facing[0],self.facing[1])\n elif self.facing[1] == -1:\n self.facing = self.facing[::-1]\n else:\n pygame.quit()\n \n elif event.key == pygame.K_RIGHT:\n print('right')\n if self.facing[1] == 1: \n self.facing = self.facing[::-1]\n elif self.facing[1] == -1:\n self.facing = self.facing[::-1]\n self.facing = (-self.facing[0],self.facing[1])\n else:\n pygame.quit()\n elif event.key == pygame.K_DOWN:\n print('down')\n if self.facing[0] == 1: \n self.facing = self.facing[::-1]\n elif self.facing[0] == -1:\n self.facing = self.facing[::-1]\n self.facing = (self.facing[0],-self.facing[1])\n else:\n pygame.quit()\n\n elif event.key == pygame.K_UP:\n print('up')\n if self.facing[0] == 1: \n self.facing = self.facing[::-1]\n self.facing = (self.facing[0],-self.facing[1])\n elif self.facing[0] == -1:\n self.facing = self.facing[::-1]\n else:\n pygame.quit()\n \n self.move_snake()\n\n ## COLE ADDED (7/31) - INITIAL AGENT\n def agent0(self):\n t_end = time.time() + .1\n while time.time() < t_end:\n if self.facing[0] == 1:\n print('up')\n self.facing = self.facing[::-1]\n #self.facing = (self.facing[0],-self.facing[1])\n else: \n self.facing = self.facing[::-1]\n self.facing = (self.facing[0],-self.facing[1])\n\n self.move_snake()\n\n '''\n def agent1(self):\n move_to_be_chosen = 0\n \n while move_to_be_chosen:\n self.move_snake()\n '''\n\n ## WILL CONTROL REST OF BODY \n def move_snake(self):\n '''add new cube(rect) in the direction the snake is facing, and *(IF NO APPLE) chops off the tail'''\n if self.got_apple:\n self.snake_list.insert(0,((self.snake_list[0][0]+int(self.facing[0]))%20,(self.snake_list[0][1]+int(self.facing[1]))%20))\n self.got_apple = False\n else:\n self.snake_list.insert(0,((self.snake_list[0][0]+int(self.facing[0]))%20,(self.snake_list[0][1]+int(self.facing[1]))%20))\n del self.snake_list[-1]\n\nclass apple():\n def __init__(self,x_range,y_range,snake_object):\n while True:\n (x,y) = (random.randint(0,x_range)%20,random.randint(0,y_range)%20)\n if (x,y) not in snake_object.snake_list:\n break\n self.position = (x,y)\n\n def change_apple_position(self,x_range,y_range,snake_object):\n while True:\n (x,y) = (random.randint(0,x_range)%20,random.randint(0,y_range)%20)\n #make sure apple not in snake position\n if (x,y) not in snake_object.snake_list:\n break\n self.position = (x,y)\n\n#added another parameter so snake defaults at a size of 4\ndef detect_collision(apple_object,snake_object, helper):\n '''detect if snake ran into self/got apple'''\n snake_set = set(snake_object.snake_list) #use set to eliminate duplicate,duplicate means collision\n if (helper < 5):\n snake_object.got_apple = True\n if len(snake_set) != len(snake_object.snake_list):\n '''game over, return score'''\n return len(snake_set)\n if apple_object.position in snake_object.snake_list:\n '''got an apple, keep tail next time snake moves,and generate new apple'''\n snake_object.got_apple = True\n apple_object.change_apple_position(19,19,snake_object)\n\ndef draw_grid(w, rows, surface):\n '''draw grid for game window, takes in width, rows, surface(pygame object)'''\n sizeBtwn = w // rows #e.g. 
with w=500 and rows=20, sizeBtwn is 25 \n x = 0\n y = 0\n\n for l in range(rows):\n x = x + sizeBtwn\n y = y + sizeBtwn\n #draws 2 line - surface, dimensions, start pos, end pos\n pygame.draw.line(surface, (255,255,255), (x,0),(x,w)) #ypos wont change\n pygame.draw.line(surface, (255,255,255), (0,y),(w,y)) #xpos wont change\n\ndef draw_snake_apple(surface,snake_object,apple_object,dimension,rows):\n '''draw snake and apple on game window'''\n dis = dimension // rows\n #draw apple\n pygame.draw.rect(surface, (255,255,0), (apple_object.position[0]*dis+1,apple_object.position[1]*dis+1, dis-2, dis-2))\n #draw snake\n for tup in snake_object.snake_list:\n pygame.draw.rect(surface, (255,255,255), (tup[0]*dis+1,tup[1]*dis+1, dis-2, dis-2))\n\ndef draw_all(surface,snake_object,apple_object,dimension,rows):\n surface.fill((0,0,0))\n draw_snake_apple(surface,snake_object,apple_object,dimension,rows)\n draw_grid(dimension,rows,surface)\n\ndef get_env(game_win, fac, snake_list):\n '''return env,facing,reward'''\n #data = list(pygame.image.tostring(self.game_window, 'RGB'))\n #return data,self.s.facing,len(self.s.snake_list)+1\n data = list(pygame.image.tostring(game_win, 'RGB'))\n return data, fac, snake_list\n\ndef main():\n GAME_GRID_DIMENSION = 500\n GAME_GRID_ROWS = 20\n #create game window\n game_window = pygame.display.set_mode((GAME_GRID_DIMENSION, GAME_GRID_DIMENSION))\n #create snake and apple\n s = snake(GAME_GRID_ROWS,GAME_GRID_ROWS)\n a = apple(GAME_GRID_ROWS,GAME_GRID_ROWS,s)\n #clock\n clock = pygame.time.Clock()\n while True:\n pygame.time.delay(50)\n clock.tick(10)\n s.apply_action()\n #s.agent1()\n score = detect_collision(a,s,len(s.snake_list)) #pass current length so the helper can grow a fresh snake to its starting size\n\n #terminate condition\n if score:\n print(\"Score: \" + str(score))\n s = snake(9,9)\n\n #print(s.snake_list)\n\n draw_all(game_window,s,a,GAME_GRID_DIMENSION,GAME_GRID_ROWS)\n \n pygame.display.update()\n\nif __name__ == \"__main__\":\n main()\n ","sub_path":"my_snake.py","file_name":"my_snake.py","file_ext":"py","file_size_in_byte":8093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559188670","text":"# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase\n\nfrom church import Church\n\nfrom tests.test_data import DummyCase\n\n# all locale dependent cases\nfrom tests.test_data.test_address import AddressTestCase\nfrom tests.test_data.test_business import BusinessTestCase\nfrom tests.test_data.test_datetime import DatetimeTestCase\nfrom tests.test_data.test_food import FoodTestCase\nfrom tests.test_data.test_personal import PersonalTestCase\nfrom tests.test_data.test_sciense import ScienceTestCase\nfrom tests.test_data.test_text import TextTestCase\n\n\nclass ChurchBase(DummyCase):\n\n def setUp(self):\n self.church = Church(self.LANG)\n\n def test_base_personal(self):\n result = self.church.personal.username()\n self.assertIsNotNone(result)\n\n def test_base_text(self):\n result = self.church.text.words()\n self.assertIsNotNone(result)\n\n def test_base_address(self):\n result = self.church.address.address()\n self.assertIsNotNone(result)\n\n def test_base_food(self):\n result = self.church.food.fruit_or_berry()\n self.assertIsNotNone(result)\n\n def test_base_science(self):\n result = self.church.science.scientist()\n self.assertIsNotNone(result)\n\n def test_base_business(self):\n result = self.church.business.copyright()\n self.assertIsNotNone(result)\n\n\nclass ChurchLocaleBase(ChurchBase, AddressTestCase, BusinessTestCase,\n DatetimeTestCase, FoodTestCase, PersonalTestCase,\n ScienceTestCase, TextTestCase):\n 
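# aggregates ChurchBase and every locale-dependent suite; concrete subclasses only set LANG\n 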
pass\n\n\nclass ChurchEnglishTestCase(ChurchLocaleBase, TestCase):\n LANG = 'en'\n\n\nclass ChurchGermanTestCase(ChurchLocaleBase, TestCase):\n LANG = 'de'\n\n\nclass ChurchRussianTestCase(ChurchLocaleBase, TestCase):\n LANG = 'ru'\n\n\nclass ChurchDanishTestCase(ChurchLocaleBase, TestCase):\n LANG = 'da'\n\n\nclass ChurchFrenchTestCase(ChurchLocaleBase, TestCase):\n LANG = 'fr'\n\n\nclass ChurchSpanishTestCase(ChurchLocaleBase, TestCase):\n LANG = 'es'\n\n\nclass ChurchItalianTestCase(ChurchLocaleBase, TestCase):\n LANG = 'it'\n\n\nclass ChurchPortugueseTestCase(ChurchLocaleBase, TestCase):\n LANG = 'pt-br'\n\n\nclass ChurchNorwegianTestCase(ChurchLocaleBase, TestCase):\n LANG = 'no'\n\n\nclass ChurchSwedishTestCase(ChurchLocaleBase, TestCase):\n LANG = 'sv'\n","sub_path":"tests/test_church.py","file_name":"test_church.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135939633","text":"import base64\n\nfrom chargebee import APIError, compat\nfrom chargebee.main import ChargeBee\nfrom chargebee.main import Environment\nfrom chargebee.version import VERSION\n\ndef _basic_auth_str(username):\n return 'Basic ' + base64.b64encode(('%s:' % username).encode('latin1')).strip().decode('latin1')\n\n\ndef request(method, url, env, params=None):\n if not env:\n raise APIError('No environment configured.')\n\n headers = {}\n\n url = env.api_url(url)\n params = utf8_encode_dict(params)\n if method.lower() in ('get', 'head', 'delete'):\n url = '%s?%s' % (url, compat.urlencode(params))\n payload = None\n else:\n payload = compat.urlencode(params)\n headers['Content-type'] = 'application/x-www-form-urlencoded'\n\n headers.update({\n 'User-Agent': 'ChargeBee-Python-Client v%s' % VERSION,\n 'Accept': 'application/json',\n 'Authorization': _basic_auth_str(env.api_key),\n })\n\n meta = compat.urlparse(url)\n if ChargeBee.verify_ca_certs:\n connection = compat.VerifiedHTTPSConnection(meta.netloc)\n connection.set_cert(ca_certs=ChargeBee.ca_cert_path)\n else:\n if Environment.chargebee_domain is None:\n connection = compat.HTTPSConnection(meta.netloc)\n else:\n connection = compat.HTTPConnection(meta.netloc) \n \n connection.request(method.upper(), meta.path + '?' + meta.query, payload, headers)\n\n try:\n response = connection.getresponse()\n data = response.read()\n if compat.is_py3:\n data = data.decode('utf-8')\n\n return process_response(data, response.status)\n except compat.HTTPException:\n raise APIError('Error while connecting to chargebee. 
If you see this repeatedly, contact us at support@chargebee.com')\n finally:\n connection.close()\n\n\ndef process_response(response, http_code):\n try:\n resp_json = compat.json.loads(response)\n except ValueError:\n raise APIError('Invalid response object from API', http_code, response)\n\n if http_code < 200 or http_code > 299:\n handle_api_resp_error(http_code, resp_json)\n\n return resp_json\n\ndef utf8_encode_dict(input):\n result = {}\n # items() exists on both Python 2 and 3; unicode needs encoding only on Python 2\n for key, value in input.items():\n if not compat.is_py3 and isinstance(value, unicode):\n value = value.encode('utf8')\n elif isinstance(value, dict):\n value = utf8_encode_dict(value)\n\n result[key] = value\n\n return result\n\ndef handle_api_resp_error(http_code, resp_json):\n message = ''\n\n if 'error_param' in resp_json:\n message = 'param %s ' % resp_json['error_param']\n\n message += resp_json['error_msg']\n\n raise APIError(message, http_code, 0, resp_json)\n","sub_path":"chargebee/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"383229205","text":"from PIL import Image\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EMPTY_VALUES\nfrom django.forms import FileField, ImageField\n\nimport six\n\nfrom snippets.forms import validators\nfrom snippets.forms.widgets import MultipleFileInput\n\n\nclass MultipleFileField(FileField):\n widget = MultipleFileInput\n empty_values = list(EMPTY_VALUES)\n\n def to_python(self, data):\n if data in self.empty_values:\n return None\n for data_item in data:\n self.data_item_to_python(data_item)\n return data\n\n def data_item_to_python(self, data):\n if data is None:\n return None\n\n try:\n file_name = data.name\n file_size = data.size\n except AttributeError:\n raise ValidationError(self.error_messages['invalid'], code='invalid')\n\n if self.max_length is not None and len(file_name) > self.max_length:\n params = {\n 'max': self.max_length,\n 'length': len(file_name),\n }\n raise ValidationError(\n self.error_messages['max_length'], code='max_length', params=params\n )\n\n if not file_name:\n raise ValidationError(self.error_messages['invalid'], code='invalid')\n\n if not self.allow_empty_file and not file_size:\n raise ValidationError(self.error_messages['empty'], code='empty')\n\n return data\n\n\nclass MultipleImageField(MultipleFileField, ImageField):\n default_validators = [validators.validate_image_file_extension_multiple]\n\n def data_item_to_python(self, data):\n \"\"\"\n Checks that uploaded data contains a valid image\n (GIF, JPG, PNG or whatever the PIL supports)\n See ImageField at https://github.com/django/django/blob/stable/1.5.x/django/forms/\n fields.py for details\n \"\"\"\n data = super(MultipleImageField, self).data_item_to_python(data)\n\n # PIL is required to verify file\n\n if hasattr(data, 'temporary_file_path'):\n data_file = data.temporary_file_path()\n else:\n if hasattr(data, 'read'):\n data_file = six.BytesIO(data.read())\n else:\n data_file = six.BytesIO(data['content'])\n\n try:\n # Image.verify() must be called immediately after the constructor\n Image.open(data_file).verify()\n except Exception:\n raise ValidationError(self.error_messages['invalid_image'])\n\n if hasattr(data, 'seek') and callable(data.seek):\n data.seek(0)\n\n return 
data\n","sub_path":"snippets/forms/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636521089","text":"\nimport logging\nimport os\nfrom mwk.config import Config\nfrom logging.handlers import RotatingFileHandler\nfrom pyrogram import Client\n\nif os.path.exists(\"Log.txt\"):\n with open(\"Log.txt\", \"r+\") as f_d:\n f_d.truncate(0)\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt=\"%d-%b-%y %H:%M:%S\",\n handlers=[\n RotatingFileHandler(\n \"Log.txt\",\n maxBytes=1000000,\n backupCount=10\n ),\n logging.StreamHandler()\n ]\n)\n\nlog = logging.getLogger(__name__)\n\nimport pyrogram\nlogging.getLogger(\"pyrogram\").setLevel(logging.WARNING)\n\n\nclass Bot(Client):\n\n def __init__(self):\n super().__init__(\n session_name=\"RENAMEBOT\",\n api_id=Config.APP_ID,\n api_hash=Config.API_HASH,\n bot_token=Config.TG_BOT_TOKEN,\n plugins={\"root\": \"mwk/rename\"},\n sleep_threshold=5,\n )\n\n async def start(self):\n await super().start()\n if not os.path.isdir(Config.DOWNLOAD_LOCATION):\n os.makedirs(Config.DOWNLOAD_LOCATION)\n log.info(\"<<[Bot Started]>>\")\n async def stop(self, *args):\n await super().stop()\n log.info(\"<<[Bot Stopped]>>\")\n\napp = Bot()\napp.run()\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"175997230","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport json\nimport time\n\nimport kafka\nfrom data_transmission.application.helper import LOG_WARNING\nfrom data_transmission.application.helper.run import wrapper\nfrom data_transmission.sdk.mq import MQClient\nfrom data_transmission.application.helper.mysql_helper import MysqlHelper\n\n\nclass KafkaHelper(object):\n def __init__(self, db_url):\n self._kafka_producer = None\n self.mysql_obj = MysqlHelper(db_url)\n\n @staticmethod\n def _send_by_mq(data, kwargs):\n mq_url = kwargs.get(\"mq_url\")\n queue_name = kwargs.get(\"queue_name\")\n try:\n MQClient.send_message(url=mq_url, exchange_name=\"\", routing_key=queue_name, message=data)\n return True\n except:\n LOG_WARNING.error(u\"向rabbitmq发送数据失败,url为:{}\".format(mq_url))\n return False\n\n def send_to_kafka(self, task_id, data, type_of, source, topic, hosts=\"127.0.0.1:9092\"):\n self.mysql_obj.insert_record(task_id, send_params=data, typeof=type_of, source=source, success_flag=0)\n while not self._kafka_producer:\n self._kafka_producer = kafka.KafkaProducer(bootstrap_servers=hosts)\n flag = wrapper(self._send_to_kafka, data, topic)\n if flag:\n self.mysql_obj.set_except_flag(task_id, 1)\n\n def _send_to_kafka(self, data, topic):\n try:\n if not isinstance(data, str):\n data = json.dumps(data)\n if not isinstance(data, bytes):\n data = bytes(data, encoding=\"utf8\")\n self._kafka_producer.send(topic=topic, value=data)\n time.sleep(1)\n return True\n except:\n LOG_WARNING.error(u\"向Kafka的topic为{}发送数据失败\".format(topic))\n return False\n","sub_path":"data_transmission/data_transmission/application/helper/kafka_helper.py","file_name":"kafka_helper.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"193357690","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, 
Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom lxml import etree\nfrom openerp.osv import osv,fields\nfrom openerp.tools.translate import _\nfrom openerp.addons.account_check_writing.amount_to_text_en import amount_to_text\n\n\nclass account_voucher(osv.Model):\n _inherit = 'account.voucher'\n\n def _get_currency(self, cr, uid, context=None):\n res = super(account_voucher, self)._get_currency(cr, uid, context)\n if not res:\n user_pool = self.pool.get(\"res.users\")\n res = user_pool.browse(cr, uid, uid).company_id.currency_id.id\n return res\n\n def _make_journal_search(self, cr, uid, ttype, context=None):\n if context is None: \n context = {}\n journal_pool = self.pool.get('account.journal')\n if context.get('write_check',False) :\n return journal_pool.search(cr, uid, [('allow_check_writing', '=', True)], limit=1)\n return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)\n\n def _get_amount_in_word(self, cr, uid, ids, field_name, arg, context=None):\n res = {}\n for check in self.browse(cr, uid, ids, context=context):\n res.update({check.id: amount_to_text(check.amount)})\n return res\n \n _columns = {\n 'amount_in_word': fields.function(_get_amount_in_word, type='char', readonly=True, store=True),\n 'allow_check' : fields.related('journal_id', 'allow_check_writing', type='boolean', string='Allow Check Writing'),\n 'check_number': fields.char('Check Number', size=32),\n 'journal_id':fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}),\n 'check_done': fields.boolean(\"Check Printed\")\n }\n\n _sql_constraints = [\n ('check_per_journal_uniq', 'unique(check_number, journal_id)', 'Check Number Must be Unique Per Journal!'),\n ]\n\n def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):\n \"\"\" Inherited - add amount_in_word and allow_check_writing in returned value dictionary \"\"\"\n if not context:\n context = {}\n default = super(account_voucher, self).onchange_amount(cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=context)\n if 'value' in default:\n amount = 'amount' in default['value'] and default['value']['amount'] or amount\n amount_in_word = amount_to_text(amount)\n default['value'].update({'amount_in_word':amount_in_word})\n if journal_id:\n allow_check_writing = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context).allow_check_writing\n default['value'].update({'allow_check':allow_check_writing})\n return default\n\n def onchange_journal(self, cr, uid, ids, journal_id, line_ids, tax_id, partner_id, date, amount, ttype, company_id, context=None):\n vals = 
super(account_voucher, self).onchange_journal(cr, uid, ids, journal_id, line_ids, tax_id,partner_id, date, amount, ttype, company_id, context=context)\n if vals and not vals.get('value',{})['currency_id']:\n user_pool = self.pool.get(\"res.users\")\n vals['value']['currency_id'] = user_pool.browse(cr, uid, uid).company_id.currency_id.id\n return vals\n\n def copy(self, cr, uid, ids, default=None, context=None):\n if not default:\n default = {}\n default.update({'check_number': False})\n return super(account_voucher, self).copy(cr, uid, ids, default=default, context=context)\n\n def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):\n \"\"\"\n Add domain 'allow_check_writing = True' on journal_id field and remove 'widget = selection' on the same\n field because the dynamic domain is not allowed on such widget\n \"\"\"\n if not context: context = {}\n res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)\n doc = etree.XML(res['arch'])\n nodes = doc.xpath(\"//field[@name='journal_id']\")\n if context.get('write_check', False) :\n for node in nodes:\n node.set('domain', \"[('type', '=', 'bank'), ('allow_check_writing','=',True)]\")\n node.set('widget', '')\n res['arch'] = etree.tostring(doc)\n return res\n def print_check(self, cr, uid, ids, context=None):\n if context == None:\n context = {}\n value = {}\n model_data = self.pool.get('ir.model.data')\n check_layout_report = {\n 'top' : 'account.print.check.top',\n 'middle' : 'account.print.check.middle',\n 'bottom' : 'account.print.check.bottom',\n }\n check = self.browse(cr, uid, ids[0], context=context)\n if check.check_number or check.journal_id.use_preprint_check :\n check_layout = check.company_id.check_layout\n value = {\n 'type': 'ir.actions.report.xml', \n 'report_name':check_layout_report[check_layout],\n 'datas': {\n 'model':'account.voucher',\n 'id': ids and ids[0] or False,\n 'ids': ids and ids or [],\n 'report_type': 'pdf'\n },\n 'nodestroy': True\n }\n else:\n form_view = model_data.get_object_reference(cr, uid, 'account_check_writing', 'view_account_check_write')\n value = {\n 'name': _('Print Check'),\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'account.check.write',\n 'views': [(form_view and form_view[1] or False, 'form'),(False, 'tree')],\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context,\n }\n return value\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"7.0/account_check_writing/account_voucher.py","file_name":"account_voucher.py","file_ext":"py","file_size_in_byte":7204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"568990274","text":"# Python Module\nimport os\nimport pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport subprocess\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\n# Directory path\nroot_dir = \"/home/marie/Documents/\"\n\n# Build-in Module\nos.chdir(\"%sTNBC_Drug_response/SRC/Python\"%root_dir)\nfrom Functions_misc import *\n\n################################################################################\n# STEP 5a - mRNA = ARACNe\n################################################################################\n\n## a] Load and Merge TGCA BRCA expression files\nos.chdir(\"%sINPUT/RNASeqV2/UNC__IlluminaHiSeq_RNASeqV2/Level_3\"%root_dir)\ninput_files = 
os.listdir(\"%sINPUT/RNASeqV2/UNC__IlluminaHiSeq_RNASeqV2/Level_3\"%root_dir)\nexpr_files = list_search(\".*rsem.genes.results\",input_files)\nprint(len(expr_files))\n# for f in range(0,1200,1):\n# # f =0\n# expr = pandas.read_table(expr_files[f])\n# if f == 0:\n# Genes = [x.split(\"|\")[0] for x in expr.loc[:,\"gene_id\"].tolist()]\n# columns_id = [\"P%d\"%(x) for x in range(1,1201,1)]\n# expr_raw = pandas.DataFrame(numpy.nan,index=Genes,columns=columns_id)\n#\n# if expr.columns.values[1] == \"raw_count\":\n# expr_raw.iloc[:,f] = expr.loc[:,\"raw_count\"].tolist()\n# if f%100 == 0:\n# print(f)\n#\nos.chdir(\"%sPROCESSED/Step_5_mRNA/ARACNe_input\"%root_dir)\n# expr_raw.to_csv(\"BRCA_raw_count.tsv\",sep=\"\\t\")\n#\n# expr_raw = pandas.read_table(\"BRCA_raw_count.tsv\",index_col=0)\n# ## b] Normalize BRCA count\n# raw_sum = expr_raw.sum(axis=0)\n#\n# # print(raw_sum[1:12])\n# print(len(raw_sum))\n#\n# print(\"Normalization processing ...\")\n# divided_count = expr_raw\n# for j in range(0,expr_raw.shape[1]):\n# divided_count.iloc[:,j]=expr_raw.iloc[:,j]/raw_sum[j]\n#\n# norm_count = divided_count\n# norm_count = divided_count * numpy.mean(raw_sum)\n#\n#\n# # Log transform\n# print(\"Scaling processing ...\")\n# add_count = norm_count\n# add_count = norm_count + 4\n# log_count = add_count\n# log_count = numpy.log2(add_count)\n#\n# print(log_count.iloc[0:3,0:3])\n#\n# log_count.to_csv(\"BRCA_log_count.tsv\",sep=\"\\t\")\nlog_count = pandas.read_table(\"BRCA_log_count.tsv\",index_col=0)\n\n# Get the gene list used for the regulon object\nprint(\"Genes to consider...\")\nos.chdir(\"%sviper/pathways\"%root_dir)\nfile_regulon = pandas.read_csv(\"brca-tf-regulon.sets\")\nprint(file_regulon.shape)\nall_gene_regulon = list()\nTF_list = list()\nfor i in range(0,file_regulon.shape[0]):\n TF_list.append(file_regulon.iloc[i,0].split(\"\\t\")[0])\n all_gene_regulon = all_gene_regulon + file_regulon.iloc[i,0].split(\"\\t\")\nset_gene_regulon = list(set(all_gene_regulon))\n\nos.chdir(\"%sPROCESSED/Step_1_mRNA\"%root_dir)\nstep_1_files = os.listdir(\"%sPROCESSED/Step_1_mRNA\"%root_dir)\ncount_files = list_search(\"logCount.tsv\",step_1_files)\n\ngene_exp = pandas.read_table(count_files[0])\ngene_list = [val for val in set_gene_regulon if val in gene_exp.iloc[:,0].tolist()]\nprint(len(gene_list))\n\n\ngene_exp = log_count.loc[gene_list,:]\ngene_exp = gene_exp.iloc[:,:1000]\nonly_gene_exp = gene_exp.dropna(axis=0)\nprint(gene_exp.shape)\nprint(only_gene_exp.shape)\nonly_gene_exp.index.names = ['Gene']\n\nTF_intersect = [val for val in TF_list if val in only_gene_exp.index]\nprint(len(TF_intersect))\n\nos.chdir(\"%sPROCESSED/Step_5_mRNA/ARACNe_input\"%root_dir)\nonly_gene_exp.to_csv(\"BRCA_expressed_gene.tsv\",sep=\"\\t\")\n\n# TF_df = pandas.DataFrame(TF_intersect,index=range(0,len(TF_intersect)),columns = [\"Gene\"])\n# print(len(TF_intersect))\nf = open(\"BRCA_TF.tsv\",\"w\")\nfor line in TF_intersect:\n f.write(\"%s\\n\"%line)\nf.close()\n# TF_df.to_csv(\"BRCA_TF.tsv\",sep=\"\\t\",index=False,header=False)\n\n# ## c] Perform ARACNe analysis\nos.chdir(\"%sPROCESSED/Step_5_mRNA\"%root_dir)\n# subprocess.call(['java -Xmx512m -jar /home/marie/Documents/PROCESSED/Step_5_mRNA/aracne2.jar aracne -i /home/marie/Documents/PROCESSED/Step_5_mRNA/ARACNe_input/BRCA_expressed_gene.tsv -k 0.15 -t 0.05 -r 1'],shell=True)\nsubprocess.call(['java -Xmx512m -jar /home/marie/Documents/PROCESSED/Step_5_mRNA/aracne2.jar aracne -i /home/marie/Documents/PROCESSED/Step_5_mRNA/ARACNe_input/BRCA_expressed_gene.tsv -o 
/home/marie/Documents/PROCESSED/Step_5_mRNA/ARACNe_output/BRCA_expr_fast_ap.adj -a ap -t 0.05 -l /home/marie/Documents/PROCESSED/Step_5_mRNA/ARACNe_input/BRCA_TF.tsv -r 1'],shell=True)\n","sub_path":"SRC/Python/Step_5a_mRNA.py","file_name":"Step_5a_mRNA.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389138168","text":"from .graph import GraphWithRepetitiveNodesWithRoot\nfrom .graph import lr_node\nfrom .graph_diff_algorithm import GraphDiffAlgorithm\nfrom .graph_map import GraphMap, GraphMapComparator, GraphMapComparatorByEdgeNum\n\n\nclass BaselineAlgorithm(GraphDiffAlgorithm):\n class BLPermutationsForLabel:\n def __init__(self, label: int, graph):\n self._label = label\n self._length_of_another_graph = len(graph.get(label))\n\n def list_map_permutations(self, list_to_perm, current):\n if self._length_of_another_graph <= current:\n return [[]]\n res = []\n list_copy = list_to_perm.copy()\n for i in range(0, len(list_to_perm)):\n list_copy[0], list_copy[i] = list_copy[i], list_copy[0]\n res += [[list_copy[0]] + perm for perm in self.list_map_permutations(list_copy[1:], current + 1)]\n if len(list_to_perm) + current < self._length_of_another_graph:\n res += [[lr_node(self._label, 0)] + perm for perm in self.list_map_permutations(list_copy, current + 1)]\n return res\n\n class RNRGraphForBLAlg:\n def __init__(self, graph: GraphWithRepetitiveNodesWithRoot):\n from collections import defaultdict\n self._label_to_node_map = defaultdict(list)\n self._nodes_in_graph = list(graph)\n for node in graph:\n self._label_to_node_map[node.Label].append(node)\n\n def get(self, label: int):\n return self._label_to_node_map[label]\n\n def __iter__(self):\n return iter([label for label, node_list in self._label_to_node_map.items() if bool(node_list)])\n\n def __len__(self):\n return len(self._label_to_node_map.keys())\n\n def keys(self):\n return self._nodes_in_graph\n\n def items(self):\n return self._label_to_node_map.items()\n\n def extend_graph(self, graph):\n for label in graph:\n if label not in self:\n self._label_to_node_map[label] = []\n return self\n\n def add_zero_nodes(self, graph):\n for label, list_of_nodes in self._label_to_node_map.items():\n list_of_nodes += [lr_node(label, 0)] * len([node for node in graph.get(label) if bool(node.Number)])\n return self\n\n def __init__(self, comparator: GraphMapComparator = GraphMapComparatorByEdgeNum()):\n self.comparator = comparator\n\n def construct_diff(self, graph1: GraphWithRepetitiveNodesWithRoot,\n graph2: GraphWithRepetitiveNodesWithRoot):\n graph1_internal = BaselineAlgorithm.RNRGraphForBLAlg(graph1)\n graph2_internal = BaselineAlgorithm.RNRGraphForBLAlg(graph2)\n graph1_internal.extend_graph(graph2_internal)\n graph2_internal.extend_graph(graph1_internal)\n\n # graph1_internal.add_zero_nodes(graph2_internal)\n # graph2_internal.add_zero_nodes(graph1_internal)\n\n def produce_all_possible_maps(graph_maps_for_each_label):\n from functools import reduce\n from itertools import product\n graph_maps_for_each_label = [graph_map for _, graph_map in graph_maps_for_each_label.items()]\n return reduce(product, graph_maps_for_each_label)\n\n def sum_tuples(pair):\n if type(pair) == tuple:\n pair, dictionary = pair\n return list(dictionary.items()) + list(sum_tuples(pair))\n elif type(pair) == dict:\n return pair.items()\n raise Exception(\"\")\n\n graph_maps = self.graph_maps_for_each_label(graph1_internal, graph2_internal)\n graph_maps = 
produce_all_possible_maps(graph_maps)\n graph_maps = [dict(sum_tuples(pair)) for pair in graph_maps]\n graph_maps = [GraphMap.construct_graph_map(graph_map, graph1, graph2) for graph_map in graph_maps]\n\n return max(graph_maps, key=lambda x: self.comparator.comparable_representation(x))\n\n def graph_maps_for_each_label(self, graph1, graph2):\n res = { label: self.zip_all(l1, BaselineAlgorithm.BLPermutationsForLabel(label, graph1).list_map_permutations(l2, 0))\n for (label, l1), (label2, l2) in zip(sorted(graph1.items()), sorted(graph2.items()))\n }\n res = { label: ({node_from_self: node_from_graph for node_from_self, node_from_graph in zip(lr1, lr2)\n if node_from_self.Number != 0 or node_from_graph.Number != 0}\n for lr1, lr2 in label_permutes)\n for label, label_permutes in res.items()\n }\n return res\n\n @staticmethod\n def zip_all(l1, l2_perms):\n return [(l1, l2) for l2 in l2_perms]\n","sub_path":"graph_diff/baseline_algorithm.py","file_name":"baseline_algorithm.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390766151","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.5-i386/egg/fileinfo/plugins/fileinfo_inv_plugin_osxbundles.py\n# Compiled at: 2008-06-17 03:14:06\n\"\"\"A fileinfo plug-in for Mac OS X bundles.\n\nThis makes sense only on Mac OS X.\n\"\"\"\nimport os, sys, shutil, tempfile\nfrom os.path import join, exists, basename, splitext, dirname\nfrom fileinfo.investigator import BaseInvestigator\n\ndef readPlistFile(path):\n \"\"\"Read content of property list file, either XML or binary.\"\"\"\n try:\n from Foundation import NSDictionary\n plist = NSDictionary.dictionaryWithContentsOfFile_(path)\n except:\n try:\n from plistlib import readPlist\n plist = readPlist(path)\n except:\n (fd, destPath) = tempfile.mkstemp()\n shutil.copy2(path, destPath)\n os.popen(\"plutil -convert xml1 '%s'\" % destPath)\n plist = readPlist(destPath)\n os.remove(destPath)\n\n return plist\n\n\nclass OSXBundleInvestigator(BaseInvestigator):\n \"\"\"A class for determining attributes of OS X bundles.\"\"\"\n __module__ = __name__\n attrMap = {'bundlename': 'getName', 'bundleversion': 'getVersion', 'bundleminsysversion': 'getMinSysVersion'}\n totals = ()\n\n def activate(self):\n \"\"\"Try activating self, setting 'active' variable.\"\"\"\n path = self.path\n base = splitext(basename(path))[0]\n if exists(join(path, 'Contents', 'Info.plist')):\n plPath = join(path, 'Contents', 'Info.plist')\n self.active = True\n self.plist = readPlistFile(plPath)\n elif exists(join(path, 'Info.plist')):\n plPath = join(path, 'Info.plist')\n self.plist = readPlistFile(plPath)\n self.active = True\n else:\n self.active = False\n return self.active\n\n def getName(self):\n \"\"\"Return OS X bundle name.\"\"\"\n if not self.active:\n return 'n/a'\n try:\n nameString = self.plist['CFBundleName']\n except:\n nameString = 'n/a'\n\n return nameString\n\n def getVersion(self):\n \"\"\"Return OS X bundle version.\"\"\"\n if not self.active:\n return 'n/a'\n try:\n versionString = self.plist['CFBundleShortVersionString']\n except:\n try:\n versionString = self.plist['CFBundleVersion']\n except:\n versionString = 'n/a'\n\n return versionString\n\n def getMinSysVersion(self):\n \"\"\"Return OS X bundle minimum system version.\"\"\"\n if not self.active:\n return 'n/a'\n try:\n 
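# LSMinimumSystemVersion is an optional Info.plist key; missing values fall back to 'n/a' below\n 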
versionString = self.plist['LSMinimumSystemVersion']\n except:\n versionString = 'n/a'\n\n return versionString","sub_path":"pycfiles/fileinfo-0.3.3-py2.4/fileinfo_inv_plugin_osxbundles.py","file_name":"fileinfo_inv_plugin_osxbundles.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41669788","text":"from pymongo import MongoClient,ASCENDING\nfrom pymongo.errors import NetworkTimeout,DuplicateKeyError\nfrom bson.objectid import ObjectId\nfrom epepin_api.exceptions.db_exceptions import DBException,EpepinException\nfrom epepin_api.codes.codes import CODE_DB_NOT_FOUND_ERROR\nfrom epepin_api.codes.messages import MSG_UPDATE_NOT_FOUND, MSG_GET_NOT_FOUND\nfrom flask import current_app\n\nclass Database(object):\n\n def __init__(self, endpoint, port):\n self.endpoint = endpoint\n self.port = port\n\n def connection(self):\n try:\n client = MongoClient(self.endpoint, self.port, serverSelectionTimeoutMS=2000)\n db = client.Requirement\n except Exception as e:\n DBException(e.message)\n current_app.logger.info('..Database.connection...' + str(db)) \n return db\n\n @classmethod\n def insert_requirement(cls, database, data):\n try:\n requirement = database.Requirements.insert_one(data)\n except NetworkTimeout as e:\n raise\n except Exception as e:\n raise DBException(e.message)\n return requirement\n\n @classmethod\n def insert_comment(cls, database, data):\n try:\n comment = database.comments.insert_one(data)\n except NetworkTimeout as e:\n raise\n except Exception as e:\n raise DBException(e.message)\n return comment\n \n @classmethod\n def insert_color(cls, database, data):\n try:\n color = database.colors.find_one({\"strid\": data.getID()})\n if (color is None):\n color = database.colors.insert_one(data.toJSON())\n except NetworkTimeout as e:\n raise\n except Exception as e:\n raise DBException(e.message)\n return color\n\n @classmethod\n def get_requirements(cls, database):\n return database.Requirements.find().sort(\"strid\",ASCENDING)\n \n @classmethod\n def get_colors(cls, database):\n return database.colors.find().sort(\"strid\",ASCENDING)\n \n @classmethod\n def get_comments(cls, database):\n return database.comments.find().sort(\"date\",ASCENDING)\n\n @classmethod\n def get_users(cls, database):\n return database.users.find().sort(\"username\",ASCENDING)\n\n @classmethod\n def get_reviewers(cls, database):\n return database.reviewers.find().sort(\"req\",ASCENDING)\n\n @classmethod\n def get_color(cls, database, item_id):\n try:\n color = database.colors.find_one({'strid': item_id})\n except Exception as e:\n raise DBException(e.message)\n return color\n\n @classmethod\n def get_requirement(cls, database, requirement_id):\n try:\n requirement = database.Requirements.find_one({'strid': requirement_id})\n except Exception as e:\n raise DBException(e.message)\n if requirement is None:\n raise EpepinException(CODE_DB_NOT_FOUND_ERROR, MSG_GET_NOT_FOUND)\n return requirement\n\n @classmethod\n def update_requirement(cls, database, requirement_id, data):\n try:\n requirement = database.Requirements.update_one({'_id':ObjectId(requirement_id)},\n {'$set': data})\n except Exception as e:\n raise DBException(e.message)\n if not requirement.raw_result[\"updatedExisting\"]:\n raise EpepinException(CODE_DB_NOT_FOUND_ERROR, MSG_UPDATE_NOT_FOUND)\n return requirement\n\n @classmethod\n def delete_requirement(cls, database, requirement_id):\n try:\n requirement = database.Requirements.delete_one({'_id': 
ObjectId(requirement_id)})\n except Exception as e:\n raise DBException(e.message)\n return requirement\n\n @classmethod\n def delete_requirement_all(cls, db_connection):\n try:\n requirement = db_connection.Requirements.delete_many({})\n except Exception as e:\n raise DBException(e.message)\n return requirement\n \n @classmethod\n def delete_user_all(cls, db_connection):\n try:\n requirement = db_connection.users.delete_many({})\n except Exception as e:\n raise DBException(e.message)\n return requirement\n\n @classmethod\n def setReviewers(cls, db_connection, requirements, users):\n try: \n actions = [None] * len(users)\n for s in requirements:\n db_connection.reviewers.delete_one({'req':s})\n jenkins_tc = db_connection.reviewers.insert_one({'req':s,'reviewers':users,'actions':actions})\n except DuplicateKeyError:\n jenkins_tc=None\n pass\n except NetworkTimeout as e: \n raise\n except Exception as e:\n raise DBException(e.message)\n return jenkins_tc\n\n @classmethod\n def updateReviewer(cls, db_connection, requirement_id, user,action):\n try: \n requirement = db_connection.reviewers.find_one({'req': requirement_id})\n pos_user=requirement['reviewers'].index(user)\n requirement['actions'][pos_user]=action\n db_connection.reviewers.update_one({'_id':requirement['_id']}, {\"$set\": requirement}, upsert=False)\n except DuplicateKeyError:\n requirement=None\n pass\n except NetworkTimeout as e: \n raise\n except Exception as e:\n raise DBException(e.message)\n return requirement\n\n @classmethod\n def insert_jenkins_tc(cls, db_connection, data):\n try:\n jenkins_tc = db_connection.jenkins_tc.insert_one(data)\n except DuplicateKeyError:\n jenkins_tc=None\n pass\n except NetworkTimeout as e: \n raise\n except Exception as e:\n raise DBException(e.message)\n return jenkins_tc\n\n @classmethod\n def get_user(cls, database, username):\n user=None\n try:\n user = database.users.find_one({'username': username})\n except Exception as e:\n raise DBException(e.message)\n return user\n\n @classmethod\n def insert_user(cls, db_connection, data):\n try:\n user = cls.get_user(db_connection,data['username'])\n if user is None:\n user = db_connection.users.insert_one(data)\n except DuplicateKeyError:\n user=None\n pass\n except NetworkTimeout as e: \n raise\n except Exception as e:\n raise DBException(e.message)\n return user\n\n @classmethod\n def delete_jenkins_tc_all(cls, db_connection):\n try:\n jenkins_tc = db_connection.jenkins_tc.delete_many({})\n except Exception as e:\n raise DBException(e.message)\n return jenkins_tc\n\n @classmethod\n def delete_jenkins_tc(cls, db_connection, jenkins_tc_id):\n try:\n jenkins_tc = db_connection.jenkins_tc.delete_one({'strid': jenkins_tc_id})\n except Exception as e:\n raise DBException(e.message)\n return jenkins_tc\n\n @classmethod\n def get_jenkins_tc(cls, db_connection,jenkins_tc_id):\n try:\n element = db_connection.jenkins_tc.find_one({'strid': jenkins_tc_id})\n except Exception as e:\n raise DBException(e.message)\n return element","sub_path":"epepin_api/database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"325967225","text":"class Solver:\n def __init__(self, matrix, debug):\n self.matrix = matrix\n self.debug = debug\n\n def solve(self):\n pass\n \n def debug_print(self, message):\n if self.debug: print(message)\n\n\nclass SimpleSolver(Solver):\n def __init__(self, matrix, debug=False):\n super().__init__(matrix, 
debug)\n\n def solve(self, epochs=10):\n def fix(cell, num):\n self.debug_print('update %s:%s \"%s\" => \"%s\"' % (cell.row+1, cell.col+1, cell.value, num))\n cell.value = num\n cell.is_fixed = True\n cell.candidates.data = []\n\n for cells in [self.matrix.get_row(cell.row).cells, self.matrix.get_col(cell.col).cells]:\n for cell in cells:\n if num in cell.candidates.data:\n self.debug_print('remove %s from %s:%s' % (num, cell.row+1, cell.col+1))\n cell.candidates.data.remove(num)\n\n for _ in range(epochs):\n for num in range(self.matrix.order):\n num += 1\n\n for target in [self.matrix.get_row, self.matrix.get_col]:\n for i in range(self.matrix.order): \n unit = target(i)\n if num in unit.values: continue\n\n cells = [cell for cell in unit.cells if num in cell.candidates.data]\n if len(cells) is 1:\n fix(cells[0], num)\n break\n","sub_path":"examples/lib/solvers.py","file_name":"solvers.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"94349525","text":"# Proprietary: Benten Technologies, Inc.\n# Author: Pranav H. Deo\n# Copyright Content\n\n# Code Description:\n# Appending the column containing UNBC PSPI values to the final CSV file...\n\nimport pandas as pd\nimport os\n\n'''\n# Part 1: Append the values from the individual files to the CSVs\ninpath = '/Users/pranavdeo/Desktop/PSPI/'\noutpath = '/Users/pranavdeo/Desktop/OUT/'\n\n# Part 1: Create csv files for every video file with UNBC PSPI score\ndir = os.listdir(inpath)\nfor d in dir:\n if d != '.DS_Store':\n dirL1 = os.listdir(inpath + d)\n\n for d2 in dirL1:\n if d2 != '.DS_Store':\n print(d2)\n storage_list = []\n dirL2 = sorted(os.listdir(inpath + d + '/' + d2))\n\n for txtfile in dirL2:\n #print(txtfile)\n fl = open(inpath + d + '/' + d2 + '/' + txtfile)\n storage_list.append(float(fl.readline()))\n fl.close()\n\n DF = pd.DataFrame(storage_list, columns=['UNBC_PSPI'])\n DF['UNBC_PSPI'] = storage_list\n DF.to_csv(outpath+d2+'.csv', index=False)\n'''\n\n# Part 2: From these CSVs\noutpath = '/Users/pranavdeo/Desktop/UNBC_Out/'\ninpath = '/Users/pranavdeo/Desktop/OUT/'\nfile_list = os.listdir(inpath)\n\ndir = os.listdir(outpath)\n\nfor d in dir:\n if d != '.DS_Store':\n print(d)\n lst = os.listdir(outpath+d)\n for file in lst:\n col = ['UNBC_PSPI']\n print(outpath+d+'/'+file, ' : ', inpath+file)\n df_main_csv = pd.read_csv(outpath+d+'/'+file)\n df_small_csv = pd.read_csv(inpath+file, usecols=col)\n df_main_csv['UNBC_PSPI'] = df_small_csv['UNBC_PSPI']\n df_main_csv.to_csv(outpath+d+'/'+file, index=False)\n","sub_path":"Back_End/Facial Pain/Archive/Appending_UNBC_PSPI_to_final_CSVs.py","file_name":"Appending_UNBC_PSPI_to_final_CSVs.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"280479787","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n\nimport os \n\nimport numpy as np\n\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread \n\nfrom ase import Atoms\n\nfrom ase.io import read, write \nfrom ase.visualize import view\n\n\"\"\"\npaper figure 1. 
\nstructure file, surface_model.con\n\"\"\"\n\ndef write_surface_png(atoms, rot, povname):\n # running index for the bonds \n bondatoms = []\n\n # set texttures \n textures = ['ase3' for a in atoms]\n\n # set rotation \n \n # Common kwargs for eps, png, pov\n kwargs = {\n 'rotation' : rot, # text string with rotation (default='' )\n 'radii' : .85, # float, or a list with one float per atom\n 'colors' : None,# List: one (r, g, b) tuple per atom\n 'show_unit_cell': 0, # 0, 1, or 2 to not show, show, and show all of cell\n 'celllinewidth' : 0.1, # Radius of the cylinders representing the cell\n 'bondatoms' : bondatoms, # list of tuples \n 'bondlinewidth' : 0.2, # linewidth of bond \n 'textures' : textures, # Length of atoms list of texture names\n }\n \n # Extra kwargs only available for povray (All units in angstrom)\n kwargs.update({\n 'run_povray' : True, # Run povray or just write .pov + .ini files\n 'display' : False,# Display while rendering\n 'pause' : True, # Pause when done rendering (only if display)\n 'transparent' : False,# Transparent background\n 'canvas_width' : None, # Width of canvas in pixels\n 'canvas_height': 50, # Height of canvas in pixels \n 'camera_dist' : 50., # Distance from camera to front atom\n 'image_plane' : None, # Distance from front atom to image plane\n 'camera_type' : 'perspective', # perspective, ultra_wide_angle\n 'point_lights' : [], # [[loc1, color1], [loc2, color2],...]\n 'area_light' : [(2., 3., 40.), # location\n 'White', # color\n .7, .7, 3, 3], # width, height, Nlamps_x, Nlamps_y\n 'background' : 'White', # color\n })\n \n # Write the .pov (and .ini) file. If run_povray=False, you must run command\n # `povray filename.ini` to convert .pov file to .png\n write(povname+'.pov', atoms, **kwargs)\n\nif __name__ == '__main__':\n # read atoms\n atom_names = ['H', 'C', 'O', 'Ir', 'Ge', 'Ti']\n\n for atom_name in atom_names:\n atom = Atoms(atom_name)\n write_surface_png(atom, '0x,0y,0z', atom_name)\n","sub_path":"LasAndClf-dev/processing_methods/plot_paper_figrues/plot_structura_figures/atoms/write_atoms_povs.py","file_name":"write_atoms_povs.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"479311944","text":"__author__ = ('Ka-Ping Yee ', 'Yury Selivanov ')\rimport abc\rimport dis\rimport collections.abc\rimport enum_lib as enum\rimport importlib.machinery\rimport itertools\rimport linecache\rimport os\rimport re\rimport sys\rimport tokenize\rimport token\rimport types\rimport warnings\rimport functools\rimport builtins\rfrom operator import attrgetter\rfrom collections import namedtuple, OrderedDict\rmod_dict = globals()\rfor (k, v) in dis.COMPILER_FLAG_NAMES.items():\r\n mod_dict['CO_' + v] = k\rTPFLAGS_IS_ABSTRACT = 1048576\r\r\ndef ismodule(object):\r\n return isinstance(object, types.ModuleType)\r\n\r\r\ndef isclass(object):\r\n return isinstance(object, type)\r\n\r\r\ndef ismethod(object):\r\n return isinstance(object, types.MethodType)\r\n\r\r\ndef ismethoddescriptor(object):\r\n if isclass(object) or ismethod(object) or isfunction(object):\r\n return False\r\n tp = type(object)\r\n return hasattr(tp, '__get__') and not hasattr(tp, '__set__')\r\n\r\r\ndef isdatadescriptor(object):\r\n if isclass(object) or ismethod(object) or isfunction(object):\r\n return False\r\n tp = type(object)\r\n return hasattr(tp, '__set__') and hasattr(tp, '__get__')\r\n\rif hasattr(types, 'MemberDescriptorType'):\r\n\r\n def ismemberdescriptor(object):\r\n return 
isinstance(object, types.MemberDescriptorType)\r\n\r\nelse:\r\n\r\n def ismemberdescriptor(object):\r\n return False\r\n\rif hasattr(types, 'GetSetDescriptorType'):\r\n\r\n def isgetsetdescriptor(object):\r\n return isinstance(object, types.GetSetDescriptorType)\r\n\r\nelse:\r\n\r\n def isgetsetdescriptor(object):\r\n return False\r\n\r\r\ndef isfunction(object):\r\n return isinstance(object, types.FunctionType)\r\n\r\r\ndef isgeneratorfunction(object):\r\n if not isfunction(object):\r\n pass\r\n return bool(object.__code__.co_flags & CO_GENERATOR)\r\n\r\r\ndef iscoroutinefunction(object):\r\n if not isfunction(object):\r\n pass\r\n return bool(object.__code__.co_flags & CO_COROUTINE)\r\n\r\r\ndef isasyncgenfunction(object):\r\n if not isfunction(object):\r\n pass\r\n return bool(object.__code__.co_flags & CO_ASYNC_GENERATOR)\r\n\r\r\ndef isasyncgen(object):\r\n return isinstance(object, types.AsyncGeneratorType)\r\n\r\r\ndef isgenerator(object):\r\n return isinstance(object, types.GeneratorType)\r\n\r\r\ndef iscoroutine(object):\r\n return isinstance(object, types.CoroutineType)\r\n\r\r\ndef isawaitable(object):\r\n return isinstance(object, types.CoroutineType) or isinstance(object, collections.abc.Awaitable)\r\n\r\r\ndef istraceback(object):\r\n return isinstance(object, types.TracebackType)\r\n\r\r\ndef isframe(object):\r\n return isinstance(object, types.FrameType)\r\n\r\r\ndef iscode(object):\r\n return isinstance(object, types.CodeType)\r\n\r\r\ndef isbuiltin(object):\r\n return isinstance(object, types.BuiltinFunctionType)\r\n\r\r\ndef isroutine(object):\r\n return isbuiltin(object) or (isfunction(object) or (ismethod(object) or ismethoddescriptor(object)))\r\n\r\r\ndef isabstract(object):\r\n if not isinstance(object, type):\r\n return False\r\n if object.__flags__ & TPFLAGS_IS_ABSTRACT:\r\n return True\r\n if not issubclass(type(object), abc.ABCMeta):\r\n return False\r\n if hasattr(object, '__abstractmethods__'):\r\n return False\r\n for (name, value) in object.__dict__.items():\r\n if getattr(value, '__isabstractmethod__', False):\r\n return True\r\n for base in object.__bases__:\r\n for name in getattr(base, '__abstractmethods__', ()):\r\n value = getattr(object, name, None)\r\n if getattr(value, '__isabstractmethod__', False):\r\n return True\r\n return False\r\n\r\r\ndef getmembers(object, predicate=None):\r\n if isclass(object):\r\n mro = (object,) + getmro(object)\r\n else:\r\n mro = ()\r\n results = []\r\n processed = set()\r\n names = dir(object)\r\n try:\r\n for base in object.__bases__:\r\n for (k, v) in base.__dict__.items():\r\n if isinstance(v, types.DynamicClassAttribute):\r\n names.append(k)\r\n except AttributeError:\r\n pass\r\n for key in names:\r\n try:\r\n value = getattr(object, key)\r\n if key in processed:\r\n raise AttributeError\r\n except AttributeError:\r\n for base in mro:\r\n if key in base.__dict__:\r\n value = base.__dict__[key]\r\n break\r\n continue\r\n if predicate and predicate(value):\r\n results.append((key, value))\r\n processed.add(key)\r\n results.sort(key=lambda pair: pair[0])\r\n return results\r\n\rAttribute = namedtuple('Attribute', 'name kind defining_class object')\r\r\ndef classify_class_attrs(cls):\r\n mro = getmro(cls)\r\n metamro = getmro(type(cls))\r\n metamro = tuple(cls for cls in metamro if cls not in (type, object))\r\n class_bases = (cls,) + mro\r\n all_bases = class_bases + metamro\r\n names = dir(cls)\r\n for base in mro:\r\n for (k, v) in base.__dict__.items():\r\n if isinstance(v, types.DynamicClassAttribute):\r\n 
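# DynamicClassAttribute members don't show up in dir(); collect their names from the MRO\r\n 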
names.append(k)\r\n result = []\r\n processed = set()\r\n for name in names:\r\n homecls = None\r\n get_obj = None\r\n dict_obj = None\r\n if name not in processed:\r\n try:\r\n if name == '__dict__':\r\n raise Exception(\"__dict__ is special, don't want the proxy\")\r\n get_obj = getattr(cls, name)\r\n except Exception as exc:\r\n pass\r\n homecls = getattr(get_obj, '__objclass__', homecls)\r\n if homecls not in class_bases:\r\n homecls = None\r\n last_cls = None\r\n for srch_cls in class_bases:\r\n srch_obj = getattr(srch_cls, name, None)\r\n if srch_obj is get_obj:\r\n last_cls = srch_cls\r\n for srch_cls in metamro:\r\n try:\r\n srch_obj = srch_cls.__getattr__(cls, name)\r\n except AttributeError:\r\n continue\r\n if srch_obj is get_obj:\r\n last_cls = srch_cls\r\n if last_cls is not None:\r\n homecls = last_cls\r\n for base in all_bases:\r\n if name in base.__dict__:\r\n dict_obj = base.__dict__[name]\r\n homecls = base\r\n break\r\n if homecls is None:\r\n pass\r\n else:\r\n obj = get_obj if get_obj is not None else dict_obj\r\n if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)):\r\n kind = 'static method'\r\n obj = dict_obj\r\n elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)):\r\n kind = 'class method'\r\n obj = dict_obj\r\n elif isinstance(dict_obj, property):\r\n kind = 'property'\r\n obj = dict_obj\r\n elif isroutine(obj):\r\n kind = 'method'\r\n else:\r\n kind = 'data'\r\n result.append(Attribute(name, kind, homecls, obj))\r\n processed.add(name)\r\n return result\r\n\r\r\ndef getmro(cls):\r\n return cls.__mro__\r\n\r\r\ndef unwrap(func, *, stop=None):\r\n if stop is None:\r\n\r\n def _is_wrapper(f):\r\n return hasattr(f, '__wrapped__')\r\n\r\n else:\r\n\r\n def _is_wrapper(f):\r\n return hasattr(f, '__wrapped__') and not stop(f)\r\n\r\n f = func\r\n memo = {id(f): f}\r\n recursion_limit = sys.getrecursionlimit()\r\n while _is_wrapper(func):\r\n func = func.__wrapped__\r\n id_func = id(func)\r\n if id_func in memo or len(memo) >= recursion_limit:\r\n raise ValueError('wrapper loop when unwrapping {!r}'.format(f))\r\n memo[id_func] = func\r\n return func\r\n\r\r\ndef indentsize(line):\r\n expline = line.expandtabs()\r\n return len(expline) - len(expline.lstrip())\r\n\r\r\ndef _findclass(func):\r\n cls = sys.modules.get(func.__module__)\r\n if cls is None:\r\n return\r\n else:\r\n for name in func.__qualname__.split('.')[:-1]:\r\n cls = getattr(cls, name)\r\n if not isclass(cls):\r\n return\r\n return cls\r\n\r\r\ndef _finddoc(obj):\r\n if isclass(obj):\r\n for base in obj.__mro__:\r\n if base is not object:\r\n try:\r\n doc = base.__doc__\r\n except AttributeError:\r\n continue\r\n if doc is not None:\r\n return doc\r\n return\r\n if ismethod(obj):\r\n name = obj.__func__.__name__\r\n self = obj.__self__\r\n if isclass(self) and getattr(getattr(self, name, None), '__func__') is obj.__func__:\r\n cls = self\r\n else:\r\n cls = self.__class__\r\n elif isfunction(obj):\r\n name = obj.__name__\r\n cls = _findclass(obj)\r\n if cls is None or getattr(cls, name) is not obj:\r\n return\r\n elif isbuiltin(obj):\r\n name = obj.__name__\r\n self = obj.__self__\r\n if isclass(self) and self.__qualname__ + '.' 
+ name == obj.__qualname__:\r\n cls = self\r\n else:\r\n cls = self.__class__\r\n elif isinstance(obj, property):\r\n func = obj.fget\r\n name = func.__name__\r\n cls = _findclass(func)\r\n if cls is None or getattr(cls, name) is not obj:\r\n return\r\n elif ismethoddescriptor(obj) or isdatadescriptor(obj):\r\n name = obj.__name__\r\n cls = obj.__objclass__\r\n if getattr(cls, name) is not obj:\r\n return\r\n else:\r\n return\r\n for base in cls.__mro__:\r\n try:\r\n doc = getattr(base, name).__doc__\r\n except AttributeError:\r\n continue\r\n if doc is not None:\r\n return doc\r\n\r\r\ndef getdoc(object):\r\n try:\r\n doc = object.__doc__\r\n except AttributeError:\r\n return\r\n if doc is None:\r\n try:\r\n doc = _finddoc(object)\r\n except (AttributeError, TypeError):\r\n return\r\n if not isinstance(doc, str):\r\n return\r\n return cleandoc(doc)\r\n\r\r\ndef cleandoc(doc):\r\n try:\r\n lines = doc.expandtabs().split('\\n')\r\n except UnicodeError:\r\n return\r\n margin = sys.maxsize\r\n for line in lines[1:]:\r\n content = len(line.lstrip())\r\n if content:\r\n indent = len(line) - content\r\n margin = min(margin, indent)\r\n if lines:\r\n lines[0] = lines[0].lstrip()\r\n if margin < sys.maxsize:\r\n for i in range(1, len(lines)):\r\n lines[i] = lines[i][margin:]\r\n while lines and not lines[-1]:\r\n lines.pop()\r\n while lines and not lines[0]:\r\n lines.pop(0)\r\n return '\\n'.join(lines)\r\n\r\r\ndef getfile(object):\r\n if ismodule(object):\r\n if getattr(object, '__file__', None):\r\n return object.__file__\r\n raise TypeError('{!r} is a built-in module'.format(object))\r\n if isclass(object):\r\n if hasattr(object, '__module__'):\r\n object = sys.modules.get(object.__module__)\r\n if getattr(object, '__file__', None):\r\n return object.__file__\r\n raise TypeError('{!r} is a built-in class'.format(object))\r\n if ismethod(object):\r\n object = object.__func__\r\n if isfunction(object):\r\n object = object.__code__\r\n if istraceback(object):\r\n object = object.tb_frame\r\n if isframe(object):\r\n object = object.f_code\r\n if iscode(object):\r\n return object.co_filename\r\n raise TypeError('module, class, method, function, traceback, frame, or code object was expected, got {}'.format(type(object).__name__))\r\n\r\r\ndef getmodulename(path):\r\n fname = os.path.basename(path)\r\n suffixes = [(-len(suffix), suffix) for suffix in importlib.machinery.all_suffixes()]\r\n suffixes.sort()\r\n for (neglen, suffix) in suffixes:\r\n if fname.endswith(suffix):\r\n return fname[:neglen]\r\n\r\r\ndef getsourcefile(object):\r\n filename = getfile(object)\r\n all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]\r\n all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]\r\n if any(filename.endswith(s) for s in all_bytecode_suffixes):\r\n filename = os.path.splitext(filename)[0] + importlib.machinery.SOURCE_SUFFIXES[0]\r\n elif any(filename.endswith(s) for s in importlib.machinery.EXTENSION_SUFFIXES):\r\n return\r\n if os.path.exists(filename):\r\n return filename\r\n if getattr(getmodule(object, filename), '__loader__', None) is not None:\r\n return filename\r\n elif filename in linecache.cache:\r\n return filename\r\n\r\r\ndef getabsfile(object, _filename=None):\r\n if _filename is None:\r\n _filename = getsourcefile(object) or getfile(object)\r\n return os.path.normcase(os.path.abspath(_filename))\r\n\rmodulesbyfile = {}\r_filesbymodname = {}\r\r\ndef getmodule(object, _filename=None):\r\n if ismodule(object):\r\n return object\r\n if hasattr(object, 
'__module__'):\r\n return sys.modules.get(object.__module__)\r\n if _filename is not None and _filename in modulesbyfile:\r\n return sys.modules.get(modulesbyfile[_filename])\r\n try:\r\n file = getabsfile(object, _filename)\r\n except TypeError:\r\n return\r\n if file in modulesbyfile:\r\n return sys.modules.get(modulesbyfile[file])\r\n for (modname, module) in list(sys.modules.items()):\r\n if ismodule(module) and hasattr(module, '__file__'):\r\n f = module.__file__\r\n if f == _filesbymodname.get(modname, None):\r\n pass\r\n else:\r\n _filesbymodname[modname] = f\r\n f = getabsfile(module)\r\n modulesbyfile[f] = modulesbyfile[os.path.realpath(f)] = module.__name__\r\n if file in modulesbyfile:\r\n return sys.modules.get(modulesbyfile[file])\r\n main = sys.modules['__main__']\r\n if not hasattr(object, '__name__'):\r\n return\r\n if hasattr(main, object.__name__):\r\n mainobject = getattr(main, object.__name__)\r\n if mainobject is object:\r\n return main\r\n builtin = sys.modules['builtins']\r\n if hasattr(builtin, object.__name__):\r\n builtinobject = getattr(builtin, object.__name__)\r\n if builtinobject is object:\r\n return builtin\r\n\r\r\ndef findsource(object):\r\n file = getsourcefile(object)\r\n if file:\r\n linecache.checkcache(file)\r\n else:\r\n file = getfile(object)\r\n if not (file.startswith('<') and file.endswith('>')):\r\n raise OSError('source code not available')\r\n module = getmodule(object, file)\r\n if module:\r\n lines = linecache.getlines(file, module.__dict__)\r\n else:\r\n lines = linecache.getlines(file)\r\n if not lines:\r\n raise OSError('could not get source code')\r\n if ismodule(object):\r\n return (lines, 0)\r\n if isclass(object):\r\n name = object.__name__\r\n pat = re.compile('^(\\\\s*)class\\\\s*' + name + '\\\\b')\r\n candidates = []\r\n for i in range(len(lines)):\r\n match = pat.match(lines[i])\r\n if match:\r\n if lines[i][0] == 'c':\r\n return (lines, i)\r\n candidates.append((match.group(1), i))\r\n if candidates:\r\n candidates.sort()\r\n return (lines, candidates[0][1])\r\n raise OSError('could not find class definition')\r\n if ismethod(object):\r\n object = object.__func__\r\n if isfunction(object):\r\n object = object.__code__\r\n if istraceback(object):\r\n object = object.tb_frame\r\n if isframe(object):\r\n object = object.f_code\r\n if iscode(object):\r\n if not hasattr(object, 'co_firstlineno'):\r\n raise OSError('could not find function definition')\r\n lnum = object.co_firstlineno - 1\r\n pat = re.compile('^(\\\\s*def\\\\s)|(\\\\s*async\\\\s+def\\\\s)|(.*(?<!\\\\w)lambda(:|\\\\s))')\r\n while lnum > 
<!\\\\w)lambda(:|\\\\s))|^(\\\\s*@)')\r\n        while lnum > 0:\r\n            if pat.match(lines[lnum]):\r\n                break\r\n            lnum = lnum - 1\r\n        return (lines, lnum)\r\n    raise OSError('could not find code object')\r\n\r\r\ndef getcomments(object):\r\n    try:\r\n        (lines, lnum) = findsource(object)\r\n    except (OSError, TypeError):\r\n        return\r\n    if ismodule(object):\r\n        start = 0\r\n        if lines[0][:2] == '#!':\r\n            start = 1\r\n        while lines and start < len(lines) and lines[start].strip() in ('', '#'):\r\n            start = start + 1\r\n        if start < len(lines) and lines[start][:1] == '#':\r\n            comments = []\r\n            end = start\r\n            while end < len(lines) and lines[end][:1] == '#':\r\n                comments.append(lines[end].expandtabs())\r\n                end = end + 1\r\n            return ''.join(comments)\r\n    elif lnum > 0:\r\n        indent = indentsize(lines[lnum])\r\n        end = lnum - 1\r\n        if end >= 0 and lines[end].lstrip()[:1] == '#' and indentsize(lines[end]) == indent:\r\n            comments = [lines[end].expandtabs().lstrip()]\r\n            if end > 0:\r\n                end = end - 1\r\n                comment = lines[end].expandtabs().lstrip()\r\n                while comment[:1] == '#' and indentsize(lines[end]) == indent:\r\n                    comments[:0] = [comment]\r\n                    end = end - 1\r\n                    if end < 0:\r\n                        break\r\n                    comment = lines[end].expandtabs().lstrip()\r\n            while comments and comments[0].strip() == '#':\r\n                comments[:1] = []\r\n            while comments and comments[-1].strip() == '#':\r\n                comments[-1:] = []\r\n            return ''.join(comments)\r\n\r\r\nclass EndOfBlock(Exception):\r\n    pass\r\n\r\r\nclass BlockFinder:\r\n\r\n    def __init__(self):\r\n        self.indent = 0\r\n        self.islambda = False\r\n        self.started = False\r\n        self.passline = False\r\n        self.indecorator = False\r\n        self.decoratorhasargs = False\r\n        self.last = 1\r\n\r\n    def tokeneater(self, type, token, srowcol, erowcol, line):\r\n        if self.started or not self.indecorator:\r\n            if token == '@':\r\n                self.indecorator = True\r\n            elif token in ('def', 'class', 'lambda'):\r\n                if token == 'lambda':\r\n                    self.islambda = True\r\n                self.started = True\r\n                self.passline = True\r\n            elif token == '(':\r\n                if self.indecorator:\r\n                    self.decoratorhasargs = True\r\n            elif token == ')':\r\n                if self.indecorator:\r\n                    self.indecorator = False\r\n                    self.decoratorhasargs = False\r\n            elif type == tokenize.NEWLINE:\r\n                self.passline = False\r\n                self.last = srowcol[0]\r\n                if self.islambda:\r\n                    raise EndOfBlock\r\n                if not self.decoratorhasargs:\r\n                    self.indecorator = False\r\n        elif self.passline:\r\n            pass\r\n        elif type == tokenize.INDENT:\r\n            self.indent = self.indent + 1\r\n            self.passline = True\r\n        elif type == tokenize.DEDENT:\r\n            self.indent = self.indent - 1\r\n            if self.indent <= 0:\r\n                raise EndOfBlock\r\n        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):\r\n            raise EndOfBlock\r\n\r\r\ndef getblock(lines):\r\n    blockfinder = BlockFinder()\r\n    try:\r\n        tokens = tokenize.generate_tokens(iter(lines).__next__)\r\n        for _token in tokens:\r\n            blockfinder.tokeneater(*_token)\r\n    except (EndOfBlock, IndentationError):\r\n        pass\r\n    return lines[:blockfinder.last]\r\n\r\r\ndef getsourcelines(object):\r\n    object = unwrap(object)\r\n    (lines, lnum) = findsource(object)\r\n    if ismodule(object):\r\n        return (lines, 0)\r\n    else:\r\n        return (getblock(lines[lnum:]), lnum + 1)\r\n\r\r\ndef getsource(object):\r\n    (lines, lnum) = getsourcelines(object)\r\n    return ''.join(lines)\r\n\r\r\ndef walktree(classes, children, parent):\r\n    results = []\r\n    classes.sort(key=attrgetter('__module__', '__name__'))\r\n    for c in classes:\r\n        results.append((c, c.__bases__))\r\n        if c in children:\r\n            results.append(walktree(children[c], children, c))\r\n    return results\r\n\r\r\ndef getclasstree(classes, 
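BlockFinder and getblock above are the tokenizer-driven machinery behind getsourcelines: tokenize until the block's closing DEDENT, then slice the lines. A small sketch with the stock inspect module; run it from a file so the source is available on disk:

import inspect

def outer(n):
    total = 0
    for i in range(n):
        total += i
    return total

lines, start = inspect.getsourcelines(outer)
print(start)           # 1-based line where "def outer" begins
print(''.join(lines))  # exactly the lines belonging to the function block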
unique=False):\r\n children = {}\r\n roots = []\r\n for c in classes:\r\n if c.__bases__:\r\n for parent in c.__bases__:\r\n if parent not in children:\r\n children[parent] = []\r\n if c not in children[parent]:\r\n children[parent].append(c)\r\n if unique and parent in classes:\r\n break\r\n elif c not in roots:\r\n roots.append(c)\r\n for parent in children:\r\n if parent not in classes:\r\n roots.append(parent)\r\n return walktree(roots, children, None)\r\n\rArguments = namedtuple('Arguments', 'args, varargs, varkw')\r\r\ndef getargs(co):\r\n (args, varargs, kwonlyargs, varkw) = _getfullargs(co)\r\n return Arguments(args + kwonlyargs, varargs, varkw)\r\n\r\r\ndef _getfullargs(co):\r\n if not iscode(co):\r\n raise TypeError('{!r} is not a code object'.format(co))\r\n nargs = co.co_argcount\r\n names = co.co_varnames\r\n nkwargs = co.co_kwonlyargcount\r\n args = list(names[:nargs])\r\n kwonlyargs = list(names[nargs:nargs + nkwargs])\r\n step = 0\r\n nargs += nkwargs\r\n varargs = None\r\n if co.co_flags & CO_VARARGS:\r\n varargs = co.co_varnames[nargs]\r\n nargs = nargs + 1\r\n varkw = None\r\n if co.co_flags & CO_VARKEYWORDS:\r\n varkw = co.co_varnames[nargs]\r\n return (args, varargs, kwonlyargs, varkw)\r\n\rArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')\r\r\ndef getargspec(func):\r\n warnings.warn('inspect.getargspec() is deprecated, use inspect.signature() or inspect.getfullargspec()', DeprecationWarning, stacklevel=2)\r\n (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann) = getfullargspec(func)\r\n if kwonlyargs or ann:\r\n raise ValueError('Function has keyword-only parameters or annotations, use getfullargspec() API which can support them')\r\n return ArgSpec(args, varargs, varkw, defaults)\r\n\rFullArgSpec = namedtuple('FullArgSpec', 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')\r\r\ndef getfullargspec(func):\r\n try:\r\n sig = _signature_from_callable(func, follow_wrapper_chains=False, skip_bound_arg=False, sigcls=Signature)\r\n except Exception as ex:\r\n raise TypeError('unsupported callable') from ex\r\n args = []\r\n varargs = None\r\n varkw = None\r\n kwonlyargs = []\r\n annotations = {}\r\n defaults = ()\r\n kwdefaults = {}\r\n if sig.return_annotation is not sig.empty:\r\n annotations['return'] = sig.return_annotation\r\n for param in sig.parameters.values():\r\n kind = param.kind\r\n name = param.name\r\n if kind is _POSITIONAL_ONLY:\r\n args.append(name)\r\n elif kind is _POSITIONAL_OR_KEYWORD:\r\n args.append(name)\r\n if param.default is not param.empty:\r\n defaults += (param.default,)\r\n elif kind is _VAR_POSITIONAL:\r\n varargs = name\r\n elif kind is _KEYWORD_ONLY:\r\n kwonlyargs.append(name)\r\n kwdefaults[name] = param.default\r\n elif kind is _VAR_KEYWORD:\r\n varkw = name\r\n if param.annotation is not param.empty:\r\n annotations[name] = param.annotation\r\n if not kwdefaults:\r\n kwdefaults = None\r\n if not defaults:\r\n defaults = None\r\n return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwdefaults, annotations)\r\n\rArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')\r\r\ndef getargvalues(frame):\r\n (args, varargs, varkw) = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)\r\n\r\r\ndef formatannotation(annotation, base_module=None):\r\n if getattr(annotation, '__module__', None) == 'typing':\r\n return repr(annotation).replace('typing.', '')\r\n if isinstance(annotation, type):\r\n if annotation.__module__ in ('builtins', 
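getfullargspec above rebuilds the legacy tuple-style API on top of Signature. With the stock CPython module (this decompiled copy may differ in small details, e.g. it appears to record even empty keyword-only defaults), a function reports like so:

import inspect

def f(a, b=2, *rest, c, d=4, **kw):
    pass

spec = inspect.getfullargspec(f)
print(spec.args)            # ['a', 'b']
print(spec.defaults)        # (2,)
print(spec.varargs)         # 'rest'
print(spec.kwonlyargs)      # ['c', 'd']
print(spec.kwonlydefaults)  # {'d': 4}
print(spec.varkw)           # 'kw'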
base_module):\r\n return annotation.__qualname__\r\n return annotation.__module__ + '.' + annotation.__qualname__\r\n return repr(annotation)\r\n\r\r\ndef formatannotationrelativeto(object):\r\n module = getattr(object, '__module__', None)\r\n\r\n def _formatannotation(annotation):\r\n return formatannotation(annotation, module)\r\n\r\n return _formatannotation\r\n\r\r\ndef formatargspec(args, varargs=None, varkw=None, defaults=None, kwonlyargs=(), kwonlydefaults={}, annotations={}, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), formatreturns=lambda text: ' -> ' + text, formatannotation=formatannotation):\r\n from warnings import warn\r\n warn('`formatargspec` is deprecated since Python 3.5. Use `signature` and the `Signature` object directly', DeprecationWarning, stacklevel=2)\r\n\r\n def formatargandannotation(arg):\r\n result = formatarg(arg)\r\n if arg in annotations:\r\n result += ': ' + formatannotation(annotations[arg])\r\n return result\r\n\r\n specs = []\r\n if defaults:\r\n firstdefault = len(args) - len(defaults)\r\n for (i, arg) in enumerate(args):\r\n spec = formatargandannotation(arg)\r\n if i >= firstdefault:\r\n spec = spec + formatvalue(defaults[i - firstdefault])\r\n specs.append(spec)\r\n if varargs is not None:\r\n specs.append(formatvarargs(formatargandannotation(varargs)))\r\n elif kwonlyargs:\r\n specs.append('*')\r\n if kwonlyargs:\r\n for kwonlyarg in kwonlyargs:\r\n spec = formatargandannotation(kwonlyarg)\r\n if kwonlyarg in kwonlydefaults:\r\n spec += formatvalue(kwonlydefaults[kwonlyarg])\r\n specs.append(spec)\r\n if varkw is not None:\r\n specs.append(formatvarkw(formatargandannotation(varkw)))\r\n result = '(' + ', '.join(specs) + ')'\r\n if 'return' in annotations:\r\n result += formatreturns(formatannotation(annotations['return']))\r\n return result\r\n\r\r\ndef formatargvalues(args, varargs, varkw, locals, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value)):\r\n\r\n def convert(name, locals=locals, formatarg=formatarg, formatvalue=formatvalue):\r\n return formatarg(name) + formatvalue(locals[name])\r\n\r\n specs = []\r\n for i in range(len(args)):\r\n specs.append(convert(args[i]))\r\n if varargs:\r\n specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))\r\n if varkw:\r\n specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))\r\n return '(' + ', '.join(specs) + ')'\r\n\r\r\ndef _missing_arguments(f_name, argnames, pos, values):\r\n names = [repr(name) for name in argnames if name not in values]\r\n missing = len(names)\r\n if missing == 1:\r\n s = names[0]\r\n elif missing == 2:\r\n s = '{} and {}'.format(*names)\r\n else:\r\n tail = ', {} and {}'.format(*names[-2:])\r\n del names[-2:]\r\n s = ', '.join(names) + tail\r\n raise TypeError('%s() missing %i required %s argument%s: %s' % (f_name, missing, 'positional' if pos else 'keyword-only', '' if missing == 1 else 's', s))\r\n\r\r\ndef _too_many(f_name, args, kwonly, varargs, defcount, given, values):\r\n atleast = len(args) - defcount\r\n kwonly_given = len([arg for arg in kwonly if arg in values])\r\n if varargs:\r\n plural = atleast != 1\r\n sig = 'at least %d' % (atleast,)\r\n elif defcount:\r\n plural = True\r\n sig = 'from %d to %d' % (atleast, len(args))\r\n else:\r\n plural = len(args) != 1\r\n sig = str(len(args))\r\n kwonly_sig = ''\r\n if kwonly_given:\r\n msg = ' positional argument%s (and %d 
keyword-only argument%s)'\r\n kwonly_sig = msg % ('s' if given != 1 else '', kwonly_given, 's' if kwonly_given != 1 else '')\r\n raise TypeError('%s() takes %s positional argument%s but %d%s %s given' % (f_name, sig, 's' if plural else '', given, kwonly_sig, 'was' if given == 1 and not kwonly_given else 'were'))\r\n\r\r\ndef getcallargs(*func_and_positional, **named):\r\n func = func_and_positional[0]\r\n positional = func_and_positional[1:]\r\n spec = getfullargspec(func)\r\n (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann) = spec\r\n f_name = func.__name__\r\n arg2value = {}\r\n if func.__self__ is not None:\r\n positional = (func.__self__,) + positional\r\n num_pos = len(positional)\r\n num_args = len(args)\r\n num_defaults = len(defaults) if ismethod(func) and defaults else 0\r\n n = min(num_pos, num_args)\r\n for i in range(n):\r\n arg2value[args[i]] = positional[i]\r\n if varargs:\r\n arg2value[varargs] = tuple(positional[n:])\r\n possible_kwargs = set(args + kwonlyargs)\r\n if varkw:\r\n arg2value[varkw] = {}\r\n for (kw, value) in named.items():\r\n if kw not in possible_kwargs:\r\n if not varkw:\r\n raise TypeError('%s() got an unexpected keyword argument %r' % (f_name, kw))\r\n arg2value[varkw][kw] = value\r\n else:\r\n if kw in arg2value:\r\n raise TypeError('%s() got multiple values for argument %r' % (f_name, kw))\r\n arg2value[kw] = value\r\n if num_pos > num_args and not varargs:\r\n _too_many(f_name, args, kwonlyargs, varargs, num_defaults, num_pos, arg2value)\r\n if num_pos < num_args:\r\n req = args[:num_args - num_defaults]\r\n for arg in req:\r\n if arg not in arg2value:\r\n _missing_arguments(f_name, req, True, arg2value)\r\n for (i, arg) in enumerate(args[num_args - num_defaults:]):\r\n if arg not in arg2value:\r\n arg2value[arg] = defaults[i]\r\n missing = 0\r\n for kwarg in kwonlyargs:\r\n if kwarg not in arg2value:\r\n if kwonlydefaults and kwarg in kwonlydefaults:\r\n arg2value[kwarg] = kwonlydefaults[kwarg]\r\n else:\r\n missing += 1\r\n if missing:\r\n _missing_arguments(f_name, kwonlyargs, False, arg2value)\r\n return arg2value\r\n\rClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')\r\r\ndef getclosurevars(func):\r\n if ismethod(func):\r\n func = func.__func__\r\n if not isfunction(func):\r\n raise TypeError('{!r} is not a Python function'.format(func))\r\n code = func.__code__\r\n if func.__closure__ is None:\r\n nonlocal_vars = {}\r\n else:\r\n nonlocal_vars = {var: cell.cell_contents for (var, cell) in zip(code.co_freevars, func.__closure__)}\r\n global_ns = func.__globals__\r\n builtin_ns = global_ns.get('__builtins__', builtins.__dict__)\r\n if ismodule(builtin_ns):\r\n builtin_ns = builtin_ns.__dict__\r\n global_vars = {}\r\n builtin_vars = {}\r\n unbound_names = set()\r\n for name in code.co_names:\r\n if name in ('None', 'True', 'False'):\r\n pass\r\n else:\r\n try:\r\n global_vars[name] = global_ns[name]\r\n except KeyError:\r\n try:\r\n builtin_vars[name] = builtin_ns[name]\r\n except KeyError:\r\n unbound_names.add(name)\r\n return ClosureVars(nonlocal_vars, global_vars, builtin_vars, unbound_names)\r\n\rTraceback = namedtuple('Traceback', 'filename lineno function code_context index')\r\r\ndef getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if 
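getcallargs above simulates the interpreter's argument binding without calling the function; Signature.bind, defined later in this file, is the modern replacement. A sketch with hypothetical names:

import inspect

def connect(host, port=5432, *, timeout=10):
    pass

# Resolves a would-be call to a {parameter: value} mapping,
# filling defaults and raising TypeError on invalid calls.
print(inspect.getcallargs(connect, 'db.local', timeout=3))
# -> {'host': 'db.local', 'port': 5432, 'timeout': 3} (key order may vary)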
context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n (lines, lnum) = findsource(frame)\r\n except OSError:\r\n lines = index = None\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start + context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)\r\n\r\r\ndef getlineno(frame):\r\n return frame.f_lineno\r\n\rFrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields)\r\r\ndef getouterframes(frame, context=1):\r\n framelist = []\r\n while frame:\r\n frameinfo = (frame,) + getframeinfo(frame, context)\r\n framelist.append(FrameInfo(*frameinfo))\r\n frame = frame.f_back\r\n return framelist\r\n\r\r\ndef getinnerframes(tb, context=1):\r\n framelist = []\r\n while tb:\r\n frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)\r\n framelist.append(FrameInfo(*frameinfo))\r\n tb = tb.tb_next\r\n return framelist\r\n\r\r\ndef currentframe():\r\n if hasattr(sys, '_getframe'):\r\n return sys._getframe(1)\r\n\r\r\ndef stack(context=1):\r\n return getouterframes(sys._getframe(1), context)\r\n\r\r\ndef trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)\r\n\r_sentinel = object()\r\r\ndef _static_getmro(klass):\r\n return type.__dict__['__mro__'].__get__(klass)\r\n\r\r\ndef _check_instance(obj, attr):\r\n instance_dict = {}\r\n try:\r\n instance_dict = object.__getattribute__(obj, '__dict__')\r\n except AttributeError:\r\n pass\r\n return dict.get(instance_dict, attr, _sentinel)\r\n\r\r\ndef _check_class(klass, attr):\r\n for entry in _static_getmro(klass):\r\n if _shadowed_dict(type(entry)) is _sentinel:\r\n try:\r\n return entry.__dict__[attr]\r\n except KeyError:\r\n pass\r\n return _sentinel\r\n\r\r\ndef _is_type(obj):\r\n try:\r\n _static_getmro(obj)\r\n except TypeError:\r\n return False\r\n return True\r\n\r\r\ndef _shadowed_dict(klass):\r\n dict_attr = type.__dict__['__dict__']\r\n for entry in _static_getmro(klass):\r\n try:\r\n class_dict = dict_attr.__get__(entry)['__dict__']\r\n except KeyError:\r\n pass\r\n if class_dict.__name__ == '__dict__':\r\n if not class_dict.__objclass__ is entry:\r\n return class_dict\r\n return class_dict\r\n return _sentinel\r\n\r\r\ndef getattr_static(obj, attr, default=_sentinel):\r\n instance_result = _sentinel\r\n if not _is_type(obj):\r\n klass = type(obj)\r\n dict_attr = _shadowed_dict(klass)\r\n if dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType:\r\n instance_result = _check_instance(obj, attr)\r\n else:\r\n klass = obj\r\n klass_result = _check_class(klass, attr)\r\n if instance_result is not _sentinel and (klass_result is not _sentinel and _check_class(type(klass_result), '__get__') is not _sentinel) and _check_class(type(klass_result), '__set__') is not _sentinel:\r\n return klass_result\r\n if instance_result is not _sentinel:\r\n return instance_result\r\n if klass_result is not _sentinel:\r\n return klass_result\r\n if obj is klass:\r\n for entry in _static_getmro(type(klass)):\r\n if _shadowed_dict(type(entry)) is _sentinel:\r\n try:\r\n return entry.__dict__[attr]\r\n except KeyError:\r\n pass\r\n if default is not _sentinel:\r\n return default\r\n raise AttributeError(attr)\r\n\rGEN_CREATED = 'GEN_CREATED'\rGEN_RUNNING = 'GEN_RUNNING'\rGEN_SUSPENDED = 'GEN_SUSPENDED'\rGEN_CLOSED = 'GEN_CLOSED'\r\r\ndef getgeneratorstate(generator):\r\n if generator.gi_running:\r\n return GEN_RUNNING\r\n if generator.gi_frame is None:\r\n return GEN_CLOSED\r\n elif 
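getattr_static above walks instance and class __dict__s along the MRO by hand precisely so that no descriptor code runs. A quick contrast with plain getattr:

import inspect

class Lazy:
    @property
    def value(self):
        raise RuntimeError('expensive side effect')

obj = Lazy()
# getattr(obj, 'value') would execute the property and raise;
# the static lookup returns the descriptor itself, untriggered.
print(inspect.getattr_static(obj, 'value'))  # <property object at 0x...>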
generator.gi_frame.f_lasti == -1:\r\n return GEN_CREATED\r\n return GEN_SUSPENDED\r\n\r\r\ndef getgeneratorlocals(generator):\r\n if not isgenerator(generator):\r\n raise TypeError('{!r} is not a Python generator'.format(generator))\r\n frame = getattr(generator, 'gi_frame', None)\r\n if frame is not None:\r\n return generator.gi_frame.f_locals\r\n else:\r\n return {}\r\n\rCORO_CREATED = 'CORO_CREATED'\rCORO_RUNNING = 'CORO_RUNNING'\rCORO_SUSPENDED = 'CORO_SUSPENDED'\rCORO_CLOSED = 'CORO_CLOSED'\r\r\ndef getcoroutinestate(coroutine):\r\n if coroutine.cr_running:\r\n return CORO_RUNNING\r\n if coroutine.cr_frame is None:\r\n return CORO_CLOSED\r\n elif coroutine.cr_frame.f_lasti == -1:\r\n return CORO_CREATED\r\n return CORO_SUSPENDED\r\n\r\r\ndef getcoroutinelocals(coroutine):\r\n frame = getattr(coroutine, 'cr_frame', None)\r\n if frame is not None:\r\n return frame.f_locals\r\n else:\r\n return {}\r\n\r_WrapperDescriptor = type(type.__call__)\r_MethodWrapper = type(all.__call__)\r_ClassMethodWrapper = type(int.__dict__['from_bytes'])\r_NonUserDefinedCallables = (_WrapperDescriptor, _MethodWrapper, _ClassMethodWrapper, types.BuiltinFunctionType)\r\r\ndef _signature_get_user_defined_method(cls, method_name):\r\n try:\r\n meth = getattr(cls, method_name)\r\n except AttributeError:\r\n return\r\n if not isinstance(meth, _NonUserDefinedCallables):\r\n return meth\r\n\r\r\ndef _signature_get_partial(wrapped_sig, partial, extra_args=()):\r\n old_params = wrapped_sig.parameters\r\n new_params = OrderedDict(old_params.items())\r\n partial_args = partial.args or ()\r\n partial_keywords = partial.keywords or {}\r\n if extra_args:\r\n partial_args = extra_args + partial_args\r\n try:\r\n ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)\r\n except TypeError as ex:\r\n msg = 'partial object {!r} has incorrect arguments'.format(partial)\r\n raise ValueError(msg) from ex\r\n transform_to_kwonly = False\r\n for (param_name, param) in old_params.items():\r\n try:\r\n arg_value = ba.arguments[param_name]\r\n except KeyError:\r\n pass\r\n if param.kind is _POSITIONAL_ONLY:\r\n new_params.pop(param_name)\r\n elif param.kind is _POSITIONAL_OR_KEYWORD:\r\n if param_name in partial_keywords:\r\n transform_to_kwonly = True\r\n new_params[param_name] = param.replace(default=arg_value)\r\n else:\r\n new_params.pop(param.name)\r\n else:\r\n if param.kind is _KEYWORD_ONLY:\r\n new_params[param_name] = param.replace(default=arg_value)\r\n if transform_to_kwonly:\r\n if param.kind is _POSITIONAL_OR_KEYWORD:\r\n new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)\r\n new_params[param_name] = new_param\r\n new_params.move_to_end(param_name)\r\n elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):\r\n new_params.move_to_end(param_name)\r\n elif param.kind is _VAR_POSITIONAL:\r\n new_params.pop(param.name)\r\n return wrapped_sig.replace(parameters=new_params.values())\r\n\r\r\ndef _signature_bound_method(sig):\r\n params = tuple(sig.parameters.values())\r\n if params and params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\r\n raise ValueError('invalid method signature')\r\n kind = params[0].kind\r\n if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):\r\n params = params[1:]\r\n elif kind is not _VAR_POSITIONAL:\r\n raise ValueError('invalid argument type')\r\n return sig.replace(parameters=params)\r\n\r\r\ndef _signature_is_builtin(obj):\r\n return isbuiltin(obj) or (ismethoddescriptor(obj) or (isinstance(obj, _NonUserDefinedCallables) or obj in (type, object)))\r\n\r\r\ndef 
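The GEN_* constants above describe a generator's lifecycle, which getgeneratorstate infers from gi_running, gi_frame, and f_lasti:

import inspect

def ticker():
    yield 1
    yield 2

g = ticker()
print(inspect.getgeneratorstate(g))  # GEN_CREATED  (never started)
next(g)
print(inspect.getgeneratorstate(g))  # GEN_SUSPENDED (paused at a yield)
g.close()
print(inspect.getgeneratorstate(g))  # GEN_CLOSED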
_signature_is_functionlike(obj):\r\n if callable(obj) and isclass(obj):\r\n return False\r\n name = getattr(obj, '__name__', None)\r\n code = getattr(obj, '__code__', None)\r\n defaults = getattr(obj, '__defaults__', _void)\r\n kwdefaults = getattr(obj, '__kwdefaults__', _void)\r\n annotations = getattr(obj, '__annotations__', None)\r\n return isinstance(code, types.CodeType) and (isinstance(name, str) and isinstance(annotations, dict))\r\n\r\r\ndef _signature_get_bound_param(spec):\r\n pos = spec.find(',')\r\n if pos == -1:\r\n pos = spec.find(')')\r\n cpos = spec.find(':')\r\n cpos = spec.find('=')\r\n return spec[2:pos]\r\n\r\r\ndef _signature_strip_non_python_syntax(signature):\r\n if not signature:\r\n return (signature, None, None)\r\n self_parameter = None\r\n last_positional_only = None\r\n lines = [l.encode('ascii') for l in signature.split('\\n')]\r\n generator = iter(lines).__next__\r\n token_stream = tokenize.tokenize(generator)\r\n delayed_comma = False\r\n skip_next_comma = False\r\n text = []\r\n add = text.append\r\n current_parameter = 0\r\n OP = token.OP\r\n ERRORTOKEN = token.ERRORTOKEN\r\n t = next(token_stream)\r\n for t in token_stream:\r\n type = t.type\r\n string = t.string\r\n if type == OP:\r\n if string == ',':\r\n if skip_next_comma:\r\n skip_next_comma = False\r\n else:\r\n delayed_comma = True\r\n current_parameter += 1\r\n if string == '/':\r\n skip_next_comma = True\r\n last_positional_only = current_parameter - 1\r\n elif type == ERRORTOKEN and string == '$':\r\n self_parameter = current_parameter\r\n else:\r\n if delayed_comma:\r\n delayed_comma = False\r\n if not (type == OP and string == ')'):\r\n add(', ')\r\n add(string)\r\n if string == ',':\r\n add(' ')\r\n elif string == '/':\r\n skip_next_comma = True\r\n last_positional_only = current_parameter - 1\r\n elif type == ERRORTOKEN and string == '$':\r\n self_parameter = current_parameter\r\n else:\r\n if delayed_comma:\r\n delayed_comma = False\r\n if not (type == OP and string == ')'):\r\n add(', ')\r\n add(string)\r\n if string == ',':\r\n add(' ')\r\n if type == ERRORTOKEN and string == '$':\r\n self_parameter = current_parameter\r\n else:\r\n if delayed_comma:\r\n delayed_comma = False\r\n if not (type == OP and string == ')'):\r\n add(', ')\r\n add(string)\r\n if string == ',':\r\n add(' ')\r\n clean_signature = ''.join(text)\r\n return (clean_signature, self_parameter, last_positional_only)\r\n\r\r\ndef _signature_fromstr(cls, obj, s, skip_bound_arg=True):\r\n import ast\r\n Parameter = cls._parameter_cls\r\n (clean_signature, self_parameter, last_positional_only) = _signature_strip_non_python_syntax(s)\r\n program = 'def foo' + clean_signature + ': pass'\r\n try:\r\n module = ast.parse(program)\r\n except SyntaxError:\r\n module = None\r\n if not isinstance(module, ast.Module):\r\n raise ValueError('{!r} builtin has invalid signature'.format(obj))\r\n f = module.body[0]\r\n parameters = []\r\n empty = Parameter.empty\r\n invalid = object()\r\n module = None\r\n module_dict = {}\r\n module_name = getattr(obj, '__module__', None)\r\n if module_name:\r\n module = sys.modules.get(module_name, None)\r\n if module:\r\n module_dict = module.__dict__\r\n sys_module_dict = sys.modules\r\n\r\n def parse_name(node):\r\n if node.annotation != None:\r\n raise ValueError('Annotations are not currently supported')\r\n return node.arg\r\n\r\n def wrap_value(s):\r\n try:\r\n value = eval(s, module_dict)\r\n except NameError:\r\n try:\r\n value = eval(s, sys_module_dict)\r\n except NameError:\r\n raise 
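_signature_strip_non_python_syntax above exists because C-level callables describe themselves with a __text_signature__ string that is not quite Python: $self or $module marks the bound parameter and '/' closes the positional-only group. On CPython both the raw string and the parsed result are visible:

import inspect

# The raw text signature of a builtin (a CPython implementation detail):
print(getattr(sorted, '__text_signature__', None))
# ($module, iterable, /, *, key=None, reverse=False)

# After the '$module' bound parameter and '/' marker are processed:
print(inspect.signature(sorted))
# (iterable, /, *, key=None, reverse=False)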
RuntimeError()\r\n if isinstance(value, str):\r\n return ast.Str(value)\r\n if isinstance(value, (int, float)):\r\n return ast.Num(value)\r\n if isinstance(value, bytes):\r\n return ast.Bytes(value)\r\n if value in (True, False, None):\r\n return ast.NameConstant(value)\r\n raise RuntimeError()\r\n\r\n class RewriteSymbolics(ast.NodeTransformer):\r\n\r\n def visit_Attribute(self, node):\r\n a = []\r\n n = node\r\n while isinstance(n, ast.Attribute):\r\n a.append(n.attr)\r\n n = n.value\r\n if not isinstance(n, ast.Name):\r\n raise RuntimeError()\r\n a.append(n.id)\r\n value = '.'.join(reversed(a))\r\n return wrap_value(value)\r\n\r\n def visit_Name(self, node):\r\n if not isinstance(node.ctx, ast.Load):\r\n raise ValueError()\r\n return wrap_value(node.id)\r\n\r\n def p(name_node, default_node, default=empty):\r\n name = parse_name(name_node)\r\n if name is invalid:\r\n return\r\n if default_node is not _empty:\r\n try:\r\n default_node = RewriteSymbolics().visit(default_node)\r\n o = ast.literal_eval(default_node)\r\n except ValueError:\r\n o = invalid\r\n if o is invalid:\r\n return\r\n default = o if o is not invalid else default\r\n parameters.append(Parameter(name, kind, default=default, annotation=empty))\r\n\r\n args = reversed(f.args.args)\r\n defaults = reversed(f.args.defaults)\r\n iter = itertools.zip_longest(args, defaults, fillvalue=None)\r\n if last_positional_only is not None:\r\n kind = Parameter.POSITIONAL_ONLY\r\n else:\r\n kind = Parameter.POSITIONAL_OR_KEYWORD\r\n for (i, (name, default)) in enumerate(reversed(list(iter))):\r\n p(name, default)\r\n if i == last_positional_only:\r\n kind = Parameter.POSITIONAL_OR_KEYWORD\r\n if f.args.vararg:\r\n kind = Parameter.VAR_POSITIONAL\r\n p(f.args.vararg, empty)\r\n kind = Parameter.KEYWORD_ONLY\r\n for (name, default) in zip(f.args.kwonlyargs, f.args.kw_defaults):\r\n p(name, default)\r\n if f.args.kwarg:\r\n kind = Parameter.VAR_KEYWORD\r\n p(f.args.kwarg, empty)\r\n if self_parameter is not None:\r\n _self = getattr(obj, '__self__', None)\r\n self_isbound = _self is not None\r\n self_ismodule = ismodule(_self)\r\n if self_isbound and (self_ismodule or skip_bound_arg):\r\n parameters.pop(0)\r\n else:\r\n p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)\r\n parameters[0] = p\r\n return cls(parameters, return_annotation=cls.empty)\r\n\r\r\ndef _signature_from_builtin(cls, func, skip_bound_arg=True):\r\n if not _signature_is_builtin(func):\r\n raise TypeError('{!r} is not a Python builtin function'.format(func))\r\n s = getattr(func, '__text_signature__', None)\r\n if not s:\r\n raise ValueError('no signature found for builtin {!r}'.format(func))\r\n return _signature_fromstr(cls, func, s, skip_bound_arg)\r\n\r\r\ndef _signature_from_function(cls, func):\r\n is_duck_function = False\r\n if not isfunction(func):\r\n if _signature_is_functionlike(func):\r\n is_duck_function = True\r\n else:\r\n raise TypeError('{!r} is not a Python function'.format(func))\r\n Parameter = cls._parameter_cls\r\n func_code = func.__code__\r\n pos_count = func_code.co_argcount\r\n arg_names = func_code.co_varnames\r\n positional = tuple(arg_names[:pos_count])\r\n keyword_only_count = func_code.co_kwonlyargcount\r\n keyword_only = arg_names[pos_count:pos_count + keyword_only_count]\r\n annotations = func.__annotations__\r\n defaults = func.__defaults__\r\n kwdefaults = func.__kwdefaults__\r\n if defaults:\r\n pos_default_count = len(defaults)\r\n else:\r\n pos_default_count = 0\r\n parameters = []\r\n non_default_count = pos_count - 
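_signature_from_function above reads parameter names straight from co_varnames and pairs them with __defaults__ and __kwdefaults__; the public result is the ordered parameters mapping:

import inspect

def resize(img, scale=1.0, *, keep_aspect=True):
    pass

for name, param in inspect.signature(resize).parameters.items():
    print(name, param.kind, param.default)
# img POSITIONAL_OR_KEYWORD <class 'inspect._empty'>
# scale POSITIONAL_OR_KEYWORD 1.0
# keep_aspect KEYWORD_ONLY True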
pos_default_count\r\n for name in positional[:non_default_count]:\r\n annotation = annotations.get(name, _empty)\r\n parameters.append(Parameter(name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD))\r\n for (offset, name) in enumerate(positional[non_default_count:]):\r\n annotation = annotations.get(name, _empty)\r\n parameters.append(Parameter(name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD, default=defaults[offset]))\r\n if func_code.co_flags & CO_VARARGS:\r\n name = arg_names[pos_count + keyword_only_count]\r\n annotation = annotations.get(name, _empty)\r\n parameters.append(Parameter(name, annotation=annotation, kind=_VAR_POSITIONAL))\r\n for name in keyword_only:\r\n default = _empty\r\n if kwdefaults is not None:\r\n default = kwdefaults.get(name, _empty)\r\n annotation = annotations.get(name, _empty)\r\n parameters.append(Parameter(name, annotation=annotation, kind=_KEYWORD_ONLY, default=default))\r\n if func_code.co_flags & CO_VARKEYWORDS:\r\n index = pos_count + keyword_only_count\r\n if func_code.co_flags & CO_VARARGS:\r\n index += 1\r\n name = arg_names[index]\r\n annotation = annotations.get(name, _empty)\r\n parameters.append(Parameter(name, annotation=annotation, kind=_VAR_KEYWORD))\r\n return cls(parameters, return_annotation=annotations.get('return', _empty), __validate_parameters__=is_duck_function)\r\n\r\r\ndef _signature_from_callable(obj, *, follow_wrapper_chains=True, skip_bound_arg=True, sigcls):\r\n if not callable(obj):\r\n raise TypeError('{!r} is not a callable object'.format(obj))\r\n if isinstance(obj, types.MethodType):\r\n sig = _signature_from_callable(obj.__func__, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n if skip_bound_arg:\r\n return _signature_bound_method(sig)\r\n return sig\r\n if follow_wrapper_chains:\r\n obj = unwrap(obj, stop=lambda f: hasattr(f, '__signature__'))\r\n if isinstance(obj, types.MethodType):\r\n return _signature_from_callable(obj, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n try:\r\n sig = obj.__signature__\r\n except AttributeError:\r\n pass\r\n if sig is not None:\r\n if not isinstance(sig, Signature):\r\n raise TypeError('unexpected object {!r} in __signature__ attribute'.format(sig))\r\n return sig\r\n try:\r\n partialmethod = obj._partialmethod\r\n except AttributeError:\r\n pass\r\n if isinstance(partialmethod, functools.partialmethod):\r\n wrapped_sig = _signature_from_callable(partialmethod.func, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))\r\n first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]\r\n if first_wrapped_param.kind is Parameter.VAR_POSITIONAL:\r\n return sig\r\n sig_params = tuple(sig.parameters.values())\r\n new_params = (first_wrapped_param,) + sig_params\r\n return sig.replace(parameters=new_params)\r\n if isfunction(obj) or _signature_is_functionlike(obj):\r\n return _signature_from_function(sigcls, obj)\r\n if _signature_is_builtin(obj):\r\n return _signature_from_builtin(sigcls, obj, skip_bound_arg=skip_bound_arg)\r\n if isinstance(obj, functools.partial):\r\n wrapped_sig = _signature_from_callable(obj.func, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n return _signature_get_partial(wrapped_sig, obj)\r\n sig = None\r\n if isinstance(obj, type):\r\n call = _signature_get_user_defined_method(type(obj), '__call__')\r\n if 
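_signature_get_partial, reached from _signature_from_callable above, folds a functools.partial's frozen arguments into the reported signature; binding a positional-or-keyword parameter by keyword pushes it, and everything after it, to keyword-only:

import functools
import inspect

def send(host, port, payload, retries=3):
    pass

# 'host' is bound positionally and disappears; 'port' is bound by
# keyword, so it and the remaining parameters become keyword-only.
ping = functools.partial(send, 'localhost', port=8080)
print(inspect.signature(ping))
# (*, port=8080, payload, retries=3)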
call is not None:\r\n sig = _signature_from_callable(call, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n else:\r\n new = _signature_get_user_defined_method(obj, '__new__')\r\n if new is not None:\r\n sig = _signature_from_callable(new, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n else:\r\n init = _signature_get_user_defined_method(obj, '__init__')\r\n if init is not None:\r\n sig = _signature_from_callable(init, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n if sig is None:\r\n for base in obj.__mro__[:-1]:\r\n try:\r\n text_sig = base.__text_signature__\r\n except AttributeError:\r\n pass\r\n if text_sig:\r\n return _signature_fromstr(sigcls, obj, text_sig)\r\n if type not in obj.__mro__:\r\n if obj.__init__ is object.__init__ and obj.__new__ is object.__new__:\r\n return signature(object)\r\n else:\r\n raise ValueError('no signature found for builtin type {!r}'.format(obj))\r\n if sig is not None:\r\n if skip_bound_arg:\r\n return _signature_bound_method(sig)\r\n return sig\r\n elif not isinstance(obj, _NonUserDefinedCallables):\r\n call = _signature_get_user_defined_method(type(obj), '__call__')\r\n if call is not None:\r\n try:\r\n sig = _signature_from_callable(call, follow_wrapper_chains=follow_wrapper_chains, skip_bound_arg=skip_bound_arg, sigcls=sigcls)\r\n except ValueError as ex:\r\n msg = 'no signature found for {!r}'.format(obj)\r\n raise ValueError(msg) from ex\r\n if sig is not None:\r\n if skip_bound_arg:\r\n return _signature_bound_method(sig)\r\n return sig\r\n if isinstance(obj, types.BuiltinFunctionType):\r\n msg = 'no signature found for builtin function {!r}'.format(obj)\r\n raise ValueError(msg)\r\n raise ValueError('callable {!r} is not supported by signature'.format(obj))\r\n\r\r\nclass _void:\r\n pass\r\n\r\r\nclass _empty:\r\n pass\r\n\r\r\nclass _ParameterKind(enum.IntEnum):\r\n POSITIONAL_ONLY = 0\r\n POSITIONAL_OR_KEYWORD = 1\r\n VAR_POSITIONAL = 2\r\n KEYWORD_ONLY = 3\r\n VAR_KEYWORD = 4\r\n\r\n def __str__(self):\r\n return self._name_\r\n\r_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY\r_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD\r_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL\r_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY\r_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD\r_PARAM_NAME_MAPPING = {_VAR_KEYWORD: 'variadic keyword', _KEYWORD_ONLY: 'keyword-only', _VAR_POSITIONAL: 'variadic positional', _POSITIONAL_OR_KEYWORD: 'positional or keyword', _POSITIONAL_ONLY: 'positional-only'}\r_get_paramkind_descr = _PARAM_NAME_MAPPING.__getitem__\r\r\nclass Parameter:\r\n __slots__ = ('_name', '_kind', '_default', '_annotation')\r\n POSITIONAL_ONLY = _POSITIONAL_ONLY\r\n POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD\r\n VAR_POSITIONAL = _VAR_POSITIONAL\r\n KEYWORD_ONLY = _KEYWORD_ONLY\r\n VAR_KEYWORD = _VAR_KEYWORD\r\n empty = _empty\r\n\r\n def __init__(self, name, kind, *, default=_empty, annotation=_empty):\r\n try:\r\n self._kind = _ParameterKind(kind)\r\n except ValueError:\r\n raise ValueError(f'value {kind} is not a valid Parameter.kind')\r\n if default is not _empty and self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD):\r\n msg = '{} parameters cannot have default values'\r\n msg = msg.format(_get_paramkind_descr(self._kind))\r\n raise ValueError(msg)\r\n self._default = default\r\n self._annotation = annotation\r\n if name is _empty:\r\n raise ValueError('name is a required attribute for Parameter')\r\n 
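The Parameter class above is enough to assemble a Signature by hand, which is how wrappers can advertise an interface computed at runtime; a sketch:

from inspect import Parameter, Signature

sig = Signature([
    Parameter('path', Parameter.POSITIONAL_OR_KEYWORD),
    Parameter('mode', Parameter.KEYWORD_ONLY, default='r'),
])
print(sig)  # (path, *, mode='r')

bound = sig.bind('data.txt')
bound.apply_defaults()
print(dict(bound.arguments))  # {'path': 'data.txt', 'mode': 'r'}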
if not isinstance(name, str):\r\n msg = 'name must be a str, not a {}'.format(type(name).__name__)\r\n raise TypeError(msg)\r\n if name[1:].isdigit():\r\n if self._kind != _POSITIONAL_OR_KEYWORD:\r\n msg = 'implicit arguments must be passed as positional or keyword arguments, not {}'\r\n msg = msg.format(_get_paramkind_descr(self._kind))\r\n raise ValueError(msg)\r\n self._kind = _POSITIONAL_ONLY\r\n name = 'implicit{}'.format(name[1:])\r\n if not (name[0] == '.' and name.isidentifier()):\r\n raise ValueError('{!r} is not a valid parameter name'.format(name))\r\n self._name = name\r\n\r\n def __reduce__(self):\r\n return (type(self), (self._name, self._kind), {'_default': self._default, '_annotation': self._annotation})\r\n\r\n def __setstate__(self, state):\r\n self._default = state['_default']\r\n self._annotation = state['_annotation']\r\n\r\n @property\r\n def name(self):\r\n return self._name\r\n\r\n @property\r\n def default(self):\r\n return self._default\r\n\r\n @property\r\n def annotation(self):\r\n return self._annotation\r\n\r\n @property\r\n def kind(self):\r\n return self._kind\r\n\r\n def replace(self, *, name=_void, kind=_void, annotation=_void, default=_void):\r\n if name is _void:\r\n name = self._name\r\n if kind is _void:\r\n kind = self._kind\r\n if annotation is _void:\r\n annotation = self._annotation\r\n if default is _void:\r\n default = self._default\r\n return type(self)(name, kind, default=default, annotation=annotation)\r\n\r\n def __str__(self):\r\n kind = self.kind\r\n formatted = self._name\r\n if self._annotation is not _empty:\r\n formatted = '{}: {}'.format(formatted, formatannotation(self._annotation))\r\n if self._default is not _empty:\r\n if self._annotation is not _empty:\r\n formatted = '{} = {}'.format(formatted, repr(self._default))\r\n else:\r\n formatted = '{}={}'.format(formatted, repr(self._default))\r\n if kind == _VAR_POSITIONAL:\r\n formatted = '*' + formatted\r\n elif kind == _VAR_KEYWORD:\r\n formatted = '**' + formatted\r\n return formatted\r\n\r\n def __repr__(self):\r\n return '<{} \"{}\">'.format(self.__class__.__name__, self)\r\n\r\n def __hash__(self):\r\n return hash((self.name, self.kind, self.annotation, self.default))\r\n\r\n def __eq__(self, other):\r\n if self is other:\r\n return True\r\n if not isinstance(other, Parameter):\r\n return NotImplemented\r\n return self._name == other._name and (self._kind == other._kind and (self._default == other._default and self._annotation == other._annotation))\r\n\r\r\nclass BoundArguments:\r\n __slots__ = ('arguments', '_signature', '__weakref__')\r\n\r\n def __init__(self, signature, arguments):\r\n self.arguments = arguments\r\n self._signature = signature\r\n\r\n @property\r\n def signature(self):\r\n return self._signature\r\n\r\n @property\r\n def args(self):\r\n args = []\r\n for (param_name, param) in self._signature.parameters.items():\r\n if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\r\n break\r\n try:\r\n arg = self.arguments[param_name]\r\n except KeyError:\r\n break\r\n if param.kind == _VAR_POSITIONAL:\r\n args.extend(arg)\r\n else:\r\n args.append(arg)\r\n return tuple(args)\r\n\r\n @property\r\n def kwargs(self):\r\n kwargs = {}\r\n kwargs_started = False\r\n for (param_name, param) in self._signature.parameters.items():\r\n if not kwargs_started:\r\n if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\r\n kwargs_started = True\r\n elif param_name not in self.arguments:\r\n kwargs_started = True\r\n elif not kwargs_started:\r\n pass\r\n else:\r\n try:\r\n arg = 
self.arguments[param_name]\r\n except KeyError:\r\n pass\r\n if param.kind == _VAR_KEYWORD:\r\n kwargs.update(arg)\r\n else:\r\n kwargs[param_name] = arg\r\n elif not kwargs_started:\r\n pass\r\n else:\r\n try:\r\n arg = self.arguments[param_name]\r\n except KeyError:\r\n pass\r\n if param.kind == _VAR_KEYWORD:\r\n kwargs.update(arg)\r\n else:\r\n kwargs[param_name] = arg\r\n return kwargs\r\n\r\n def apply_defaults(self):\r\n arguments = self.arguments\r\n new_arguments = []\r\n for (name, param) in self._signature.parameters.items():\r\n try:\r\n new_arguments.append((name, arguments[name]))\r\n except KeyError:\r\n if param.default is not _empty:\r\n val = param.default\r\n elif param.kind is _VAR_POSITIONAL:\r\n val = ()\r\n elif param.kind is _VAR_KEYWORD:\r\n val = {}\r\n else:\r\n continue\r\n new_arguments.append((name, val))\r\n self.arguments = OrderedDict(new_arguments)\r\n\r\n def __eq__(self, other):\r\n if self is other:\r\n return True\r\n if not isinstance(other, BoundArguments):\r\n return NotImplemented\r\n return self.signature == other.signature and self.arguments == other.arguments\r\n\r\n def __setstate__(self, state):\r\n self._signature = state['_signature']\r\n self.arguments = state['arguments']\r\n\r\n def __getstate__(self):\r\n return {'_signature': self._signature, 'arguments': self.arguments}\r\n\r\n def __repr__(self):\r\n args = []\r\n for (arg, value) in self.arguments.items():\r\n args.append('{}={!r}'.format(arg, value))\r\n return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args))\r\n\r\r\nclass Signature:\r\n __slots__ = ('_return_annotation', '_parameters')\r\n _parameter_cls = Parameter\r\n _bound_arguments_cls = BoundArguments\r\n empty = _empty\r\n\r\n def __init__(self, parameters=None, *, return_annotation=_empty, __validate_parameters__=True):\r\n if parameters is None:\r\n params = OrderedDict()\r\n elif __validate_parameters__:\r\n params = OrderedDict()\r\n top_kind = _POSITIONAL_ONLY\r\n kind_defaults = False\r\n for (idx, param) in enumerate(parameters):\r\n kind = param.kind\r\n name = param.name\r\n if kind < top_kind:\r\n msg = 'wrong parameter order: {} parameter before {} parameter'\r\n msg = msg.format(_get_paramkind_descr(top_kind), _get_paramkind_descr(kind))\r\n raise ValueError(msg)\r\n elif kind > top_kind:\r\n kind_defaults = False\r\n top_kind = kind\r\n if param.default is _empty:\r\n if kind_defaults:\r\n msg = 'non-default argument follows default argument'\r\n raise ValueError(msg)\r\n else:\r\n kind_defaults = True\r\n if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD) and name in params:\r\n msg = 'duplicate parameter name: {!r}'.format(name)\r\n raise ValueError(msg)\r\n params[name] = param\r\n else:\r\n params = OrderedDict((param.name, param) for param in parameters)\r\n self._parameters = types.MappingProxyType(params)\r\n self._return_annotation = return_annotation\r\n\r\n @classmethod\r\n def from_function(cls, func):\r\n warnings.warn('inspect.Signature.from_function() is deprecated, use Signature.from_callable()', DeprecationWarning, stacklevel=2)\r\n return _signature_from_function(cls, func)\r\n\r\n @classmethod\r\n def from_builtin(cls, func):\r\n warnings.warn('inspect.Signature.from_builtin() is deprecated, use Signature.from_callable()', DeprecationWarning, stacklevel=2)\r\n return _signature_from_builtin(cls, func)\r\n\r\n @classmethod\r\n def from_callable(cls, obj, *, follow_wrapped=True):\r\n return _signature_from_callable(obj, sigcls=cls, follow_wrapper_chains=follow_wrapped)\r\n\r\n 
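BoundArguments.args and .kwargs above split the bound mapping back into a call-ready pair, the usual way to forward validated arguments:

import inspect

def f(a, *rest, flag=False, **extra):
    pass

ba = inspect.signature(f).bind(1, 2, 3, flag=True, verbose=1)
print(ba.args)    # (1, 2, 3)
print(ba.kwargs)  # {'flag': True, 'verbose': 1}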
@property\r\n def parameters(self):\r\n return self._parameters\r\n\r\n @property\r\n def return_annotation(self):\r\n return self._return_annotation\r\n\r\n def replace(self, *, parameters=_void, return_annotation=_void):\r\n if parameters is _void:\r\n parameters = self.parameters.values()\r\n if return_annotation is _void:\r\n return_annotation = self._return_annotation\r\n return type(self)(parameters, return_annotation=return_annotation)\r\n\r\n def _hash_basis(self):\r\n params = tuple(param for param in self.parameters.values() if param.kind != _KEYWORD_ONLY)\r\n kwo_params = {param.name: param for param in self.parameters.values() if param.kind == _KEYWORD_ONLY}\r\n return (params, kwo_params, self.return_annotation)\r\n\r\n def __hash__(self):\r\n (params, kwo_params, return_annotation) = self._hash_basis()\r\n kwo_params = frozenset(kwo_params.values())\r\n return hash((params, kwo_params, return_annotation))\r\n\r\n def __eq__(self, other):\r\n if self is other:\r\n return True\r\n if not isinstance(other, Signature):\r\n return NotImplemented\r\n return self._hash_basis() == other._hash_basis()\r\n\r\n def _bind(self, args, kwargs, *, partial=False):\r\n arguments = OrderedDict()\r\n parameters = iter(self.parameters.values())\r\n parameters_ex = ()\r\n arg_vals = iter(args)\r\n while True:\r\n try:\r\n arg_val = next(arg_vals)\r\n except StopIteration:\r\n try:\r\n param = next(parameters)\r\n except StopIteration:\r\n break\r\n if param.kind == _VAR_POSITIONAL:\r\n break\r\n elif param.name in kwargs:\r\n if param.kind == _POSITIONAL_ONLY:\r\n msg = '{arg!r} parameter is positional only, but was passed as a keyword'\r\n msg = msg.format(arg=param.name)\r\n raise TypeError(msg) from None\r\n parameters_ex = (param,)\r\n break\r\n elif param.kind == _VAR_KEYWORD or param.default is not _empty:\r\n parameters_ex = (param,)\r\n break\r\n elif partial:\r\n parameters_ex = (param,)\r\n break\r\n else:\r\n msg = 'missing a required argument: {arg!r}'\r\n msg = msg.format(arg=param.name)\r\n raise TypeError(msg) from None\r\n try:\r\n param = next(parameters)\r\n except StopIteration:\r\n raise TypeError('too many positional arguments') from None\r\n if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\r\n raise TypeError('too many positional arguments') from None\r\n if param.kind == _VAR_POSITIONAL:\r\n values = [arg_val]\r\n values.extend(arg_vals)\r\n arguments[param.name] = tuple(values)\r\n break\r\n if param.name in kwargs:\r\n raise TypeError('multiple values for argument {arg!r}'.format(arg=param.name)) from None\r\n arguments[param.name] = arg_val\r\n kwargs_param = None\r\n for param in itertools.chain(parameters_ex, parameters):\r\n if param.kind == _VAR_KEYWORD:\r\n kwargs_param = param\r\n elif param.kind == _VAR_POSITIONAL:\r\n pass\r\n else:\r\n param_name = param.name\r\n try:\r\n arg_val = kwargs.pop(param_name)\r\n except KeyError:\r\n if partial or param.kind != _VAR_POSITIONAL and param.default is _empty:\r\n raise TypeError('missing a required argument: {arg!r}'.format(arg=param_name)) from None\r\n if param.kind == _POSITIONAL_ONLY:\r\n raise TypeError('{arg!r} parameter is positional only, but was passed as a keyword'.format(arg=param.name))\r\n arguments[param_name] = arg_val\r\n if kwargs:\r\n if kwargs_param is not None:\r\n arguments[kwargs_param.name] = kwargs\r\n else:\r\n raise TypeError('got an unexpected keyword argument {arg!r}'.format(arg=next(iter(kwargs))))\r\n return self._bound_arguments_cls(self, arguments)\r\n\r\n def bind(*args, **kwargs):\r\n 
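_bind above is also the error-reporting path: Signature.bind raises the same TypeErrors a real call would, so it can validate arguments without executing anything:

import inspect

def handler(event, *, retry=False):
    pass

sig = inspect.signature(handler)
try:
    sig.bind(retry=True)  # 'event' was never supplied
except TypeError as exc:
    print(exc)  # missing a required argument: 'event'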
return args[0]._bind(args[1:], kwargs)\r\n\r\n def bind_partial(*args, **kwargs):\r\n return args[0]._bind(args[1:], kwargs, partial=True)\r\n\r\n def __reduce__(self):\r\n return (type(self), (tuple(self._parameters.values()),), {'_return_annotation': self._return_annotation})\r\n\r\n def __setstate__(self, state):\r\n self._return_annotation = state['_return_annotation']\r\n\r\n def __repr__(self):\r\n return '<{} {}>'.format(self.__class__.__name__, self)\r\n\r\n def __str__(self):\r\n result = []\r\n render_pos_only_separator = False\r\n render_kw_only_separator = True\r\n for param in self.parameters.values():\r\n formatted = str(param)\r\n kind = param.kind\r\n if kind == _POSITIONAL_ONLY:\r\n render_pos_only_separator = True\r\n elif render_pos_only_separator:\r\n result.append('/')\r\n render_pos_only_separator = False\r\n if kind == _VAR_POSITIONAL:\r\n render_kw_only_separator = False\r\n elif render_kw_only_separator:\r\n result.append('*')\r\n render_kw_only_separator = False\r\n result.append(formatted)\r\n if render_pos_only_separator:\r\n result.append('/')\r\n rendered = '({})'.format(', '.join(result))\r\n if self.return_annotation is not _empty:\r\n anno = formatannotation(self.return_annotation)\r\n rendered += ' -> {}'.format(anno)\r\n return rendered\r\n\r\r\ndef signature(obj, *, follow_wrapped=True):\r\n return Signature.from_callable(obj, follow_wrapped=follow_wrapped)\r\n\r\r\ndef _main():\r\n import argparse\r\n import importlib\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('object', help=\"The object to be analysed. It supports the 'module:qualname' syntax\")\r\n parser.add_argument('-d', '--details', action='store_true', help='Display info about the module rather than its source code')\r\n args = parser.parse_args()\r\n target = args.object\r\n (mod_name, has_attrs, attrs) = target.partition(':')\r\n try:\r\n obj = module = importlib.import_module(mod_name)\r\n except Exception as exc:\r\n msg = 'Failed to import {} ({}: {})'.format(mod_name, type(exc).__name__, exc)\r\n print(msg, file=sys.stderr)\r\n exit(2)\r\n if has_attrs:\r\n parts = attrs.split('.')\r\n obj = module\r\n for part in parts:\r\n obj = getattr(obj, part)\r\n if module.__name__ in sys.builtin_module_names:\r\n print(\"Can't get info for builtin modules.\", file=sys.stderr)\r\n exit(1)\r\n if args.details:\r\n print('Target: {}'.format(target))\r\n print('Origin: {}'.format(getsourcefile(module)))\r\n print('Cached: {}'.format(module.__cached__))\r\n if obj is module:\r\n print('Loader: {}'.format(repr(module.__loader__)))\r\n if hasattr(module, '__path__'):\r\n print('Submodule search path: {}'.format(module.__path__))\r\n else:\r\n try:\r\n (__, lineno) = findsource(obj)\r\n except Exception:\r\n pass\r\n print('Line: {}'.format(lineno))\r\n print('\\n')\r\n else:\r\n print(getsource(obj))\r\n\rif __name__ == '__main__':\r\n _main()\r","sub_path":"EA/base/lib/inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":72690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"34080991","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom spt3g import core, calibration, mapmaker\n\n# makes a plot of the offline offset given the date\ndef CoaddedMaps0537441(request):\n try:\n data = [fr for fr in core.G3File('{}/{}/{}.g3' \\\n .format(request['caldatapath'],\n request['source'],\n request['observation']))]\n except RuntimeError:\n return \"Could not find data file.\"\n\n fig = 
plt.figure(figsize=(12,4))\n for jband in range(len(data)):\n plt.subplot(1,3,jband+1)\n mp = data[jband]['T']\n plt.imshow(mp, extent=np.array([ mp.alpha_center - (mp.shape[1]*mp.y_res)/2,\n mp.alpha_center + (mp.shape[1]*mp.y_res)/2, \n mp.delta_center - (mp.shape[0]*mp.x_res)/2,\n mp.delta_center + (mp.shape[0]*mp.x_res)/2]) / core.G3Units.deg)\n \n plt.xlabel('RA [deg]')\n plt.ylabel('dec [deg]')\n plt.axis([mp.alpha_center / core.G3Units.deg - 0.25,\n mp.alpha_center / core.G3Units.deg + 0.25,\n mp.delta_center / core.G3Units.deg - 0.25,\n mp.delta_center / core.G3Units.deg + 0.25])\n plt.title(data[jband]['Id'])\n plt.tight_layout()\n\n return fig\n","sub_path":"plot/CoaddedMaps0537441.py","file_name":"CoaddedMaps0537441.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"371060007","text":"import os\nimport yaml\n\nfrom app.errors import NotFound\n\n\nif os.environ.get('VCAP_SERVICES'):\n # on cloudfoundry, config is a json blob in VCAP_SERVICES - unpack it, and populate\n # standard environment variables from it\n from app.paas_config import extract_paas_config\n\n extract_paas_config()\n\n\ndef get_default_path():\n with open('teams.yml') as f:\n teams_yml = yaml.load(f)\n return teams_yml.get('default-path', 'teams/gds/delivery-and-support/technology-operations')\n\n\ndef get_team_profile(team_id=None):\n with open('teams.yml') as f:\n teams_yml = yaml.load(f)\n\n if not team_id:\n return teams_yml['teams']\n else:\n team = [t for t in teams_yml['teams'] if str(t.get('id')) == team_id]\n\n if not team:\n raise NotFound(f\"Team id {team_id} not found in teams.yml\")\n\n return team[0]\n\n\ndef get_org_structure():\n with open('org-structure.yml') as f:\n org_structure = yaml.load(f)\n return [org_structure]\n\n\nclass Config:\n # encyption secret/salt\n SECRET_KEY = os.getenv('SECRET_KEY')\n DANGEROUS_SALT = os.getenv('DANGEROUS_SALT')\n\n\n# DB conection string\nSQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI')\n\nTM_JIRA_USER = os.getenv('TM_JIRA_USER')\nTM_JIRA_PAT = os.getenv('TM_JIRA_PAT')\nTM_JIRA_HOST = os.getenv('TM_JIRA_HOST')\nTM_PIVOTAL_PAT = os.getenv('TM_PIVOTAL_PAT')\nTM_TRELLO_PAT = os.getenv('TM_TRELLO_PAT')\nTM_TRELLO_TOKEN = os.getenv('TM_TRELLO_TOKEN')\nTM_TRELLO_ORG_ID = os.getenv('TM_TRELLO_ORG_ID')\nTM_TRELLO_SECRET = os.getenv('TM_TRELLO_SECRET')\nTM_GITHUB_PAT = os.getenv('TM_GITHUB_PAT')\n\nTEAM_PROFILES = get_team_profile()\nORG_STRUCTURE = get_org_structure()\nDEFAULT_PATH = get_default_path()\n\nCLIENT_ID = os.getenv('CLIENT_ID')\nCLIENT_SECRET = os.getenv('CLIENT_SECRET')\nREDIRECT_URI = os.getenv('REDIRECT_URI', 'http://localhost:5000/oauth2callback')\nAUTH_URI = 'https://accounts.google.com/o/oauth2/auth'\nTOKEN_URI = 'https://accounts.google.com/o/oauth2/token'\nREVOKE_URI = 'https://accounts.google.com/o/oauth2/revoke'\nUSER_INFO = 'https://www.googleapis.com/userinfo/v2/me'\nSCOPE = ['profile', 'email']\nOAUTHLIB_INSECURE_TRANSPORT = os.getenv('OAUTHLIB_INSECURE_TRANSPORT')\n","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"649326943","text":"from dataclasses import asdict\nfrom datetime import datetime\n\nimport psycopg2\nimport sqlalchemy\nfrom app.configs.database import db\nfrom app.exceptions.job_exceptions import FieldCreateJobError\nfrom app.exceptions.users_exceptions import 
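An aside on the sky-map record above: its imshow extent is derived from the map's centre, shape, and per-pixel resolution. Note the original pairs shape[1] with y_res and shape[0] with x_res, which is worth double-checking against the library's axis convention. A generic, spt3g-free sketch of the same arithmetic (all values hypothetical, in degrees):

import numpy as np
import matplotlib.pyplot as plt

ny, nx = 120, 180          # map shape (rows, cols)
dx = dy = 0.05             # pixel size along RA and dec
ra0, dec0 = 84.0, -44.0    # hypothetical field centre

# extent = [left, right, bottom, top] in data coordinates
extent = [ra0 - nx * dx / 2, ra0 + nx * dx / 2,
          dec0 - ny * dy / 2, dec0 + ny * dy / 2]
plt.imshow(np.random.randn(ny, nx), extent=extent, origin='lower')
plt.xlabel('RA [deg]')
plt.ylabel('dec [deg]')
plt.show()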
UserNotFoundError\nfrom app.models.contractor_model import ContractorModel\nfrom app.models.developer_model import DeveloperModel\nfrom app.models.job_model import JobModel\nfrom flask import current_app, jsonify, request\nfrom flask_jwt_extended import get_jwt_identity, jwt_required\nfrom sqlalchemy import and_, exc\n\n\n@jwt_required()\ndef create_job():\n \n try :\n\n current_contractor = get_jwt_identity()\n \n found_contractor = ContractorModel.query.filter_by(email=current_contractor['email']).first()\n\n if not found_contractor:\n raise UserNotFoundError\n\n data = request.json\n\n data['contractor_id'] = found_contractor.id\n if 'progress' in data:\n data['progress'] = None\n \n new_job = JobModel(**data)\n \n db.session.add(new_job)\n \n db.session.commit()\n \n new_job.format_expiration_date()\n \n serialized_data = asdict(new_job)\n \n del serialized_data['contractor']\n \n del serialized_data['developer']\n \n return jsonify(serialized_data), 200\n \n except UserNotFoundError as e:\n return {'message': str(e)}, 404\n \n except sqlalchemy.exc.IntegrityError as e:\n \n if type(e.orig) == psycopg2.errors.NotNullViolation:\n return {'Message': 'Job must be created with name, description, price, difficulty_level and expiration_date'}, 406\n \n except (TypeError, KeyError):\n e = FieldCreateJobError()\n return jsonify(e.message), 406\n\ndef get_job_by_id(job_id: int):\n\n job = JobModel.query.filter_by(id=job_id).first()\n if job is None:\n return {\"message\": \"This job does not exist\"}, 404\n if job.developer_id:\n return {\"message\": \"This specific job already has a developer assigned to it.\"}, 403\n \n else:\n return jsonify(job)\n\n@jwt_required()\ndef get_job_by_id_authenticated(job_id: int):\n try:\n user = get_jwt_identity()\n found_contractor = ContractorModel.query.filter_by(email=user['email']).first()\n found_developer = DeveloperModel.query.filter_by(email=user['email']).first()\n\n job = JobModel.query.filter_by(id=job_id).first()\n job.format_expiration_date()\n if found_contractor:\n if job.contractor_id == found_contractor.id:\n return jsonify(job)\n if found_developer:\n if job.developer_id == found_developer.id:\n return jsonify(job)\n return jsonify({\"message\": \"Only the contractor that created this job or the developer assigned to it can see it's information.\"}), 403\n\n except AttributeError:\n return {\"message\": \"This job does not exist\"}, 404\n\n@jwt_required()\ndef update_job_by_id(job_id: int):\n try:\n data = request.json\n \n contractor = get_jwt_identity()\n found_contractor = ContractorModel.query.filter_by(email=contractor[\"email\"]).first()\n job = JobModel.query.filter_by(id=job_id).first()\n\n\n if not job.contractor_id == found_contractor.id:\n return jsonify({\"message\": \"Only the contractor of this specific job can update it\"}), 403\n\n if job is None:\n return {\"message\": \"Job not found!\"}, 404\n \n if 'developer' in data: \n if data['developer'] == None:\n return JobModel.update_job_if_developer_or_progress_is_null(job)\n else:\n return JobModel.update_job_if_developer_in_data(data, job_id, job)\n\n if 'progress' in data:\n if data['progress'] == None:\n return JobModel.update_job_if_developer_or_progress_is_null(job)\n\n elif job.developer_id:\n return JobModel.update_job_if_developer_not_in_data(job, job_id, data)\n\n \n JobModel.query.filter_by(id=job.id).update(data) \n db.session.commit()\n return jsonify({\"name\": job.name, \"description\": job.description, \"price\": job.price, \"difficulty_level\": 
job.difficulty_level, \"expiration_date\": job.expiration_date, \"progress\": job.progress})\n \n except exc.InvalidRequestError as e: \n return {\"message\": \"The available keys for job update are: name, description, price, difficulty_level, expiration_date, progress and developer\"}, 409\n\n except sqlalchemy.exc.InvalidRequestError:\n return {'message': 'Job must be created with name, description, price, difficulty_level and expiration_date'}, 406\n \n except sqlalchemy.exc.ProgrammingError:\n return {'message': \"You need to send one of these keys to update a job: name, description, price, difficulty_level, expiration_date, progress and developer\"}, 409\n \n@jwt_required()\ndef delete_job_by_id(job_id: int):\n \n session = current_app.db.session\n current_user = get_jwt_identity()\n \n found_contractor = ContractorModel.query.filter_by(email=current_user['email']).first()\n \n try:\n job = JobModel.query.get(job_id)\n \n if job.contractor_id == found_contractor.id:\n session.delete(job)\n session.commit()\n return '', 204\n \n else:\n return {'message': \"You don't have permission to delete this job\"}, 403\n \n except AttributeError:\n return {'message': 'job not found'}, 404\n\ndef get_all_jobs():\n session = current_app.db.session\n \n jobs = session.query(JobModel)\\\n .filter(JobModel.progress==None)\\\n .all()\n \n serialized_data = []\n \n for job in jobs:\n \n job = asdict(job)\n \n job['expiration_date'] = datetime.strftime(job['expiration_date'], \"%d/%m/%Y %H:%M\")\n \n if job.get('developer'):\n job['developer']['birthdate'] = datetime.strftime(job['developer']['birthdate'], \"%d/%m/%Y\")\n \n serialized_data.append(job)\n \n \n return jsonify(serialized_data), 200\n\n\n \ndef get_job_by_tech() :\n \n data = request.args\n \n if data :\n \n techs = data.getlist('tech') \n jobs = []\n for tech in techs :\n \n query = JobModel.query.filter(and_(JobModel.description.ilike(f'%{tech}%'),JobModel.developer == None)).all()\n \n if len(query) > 0 :\n \n new_arr = [{\"job_id\": item.id, \"name\":item.name,\"description\":item.description,\"price\":item.price,\"difficulty_level\":item.difficulty_level, \"expiration_date\":datetime.strftime(item.expiration_date, \"%d/%m/%Y %H:%M\"),\"progress\":item.progress,\n \"developer\":item.developer,\"contractor\":item.contractor} for item in query ]\n \n jobs.append(new_arr)\n \n return jsonify(jobs),200\n \n return jsonify([]),200\n \n\n\ndef get_price_difficulty_level():\n \n try :\n \n data = request.args\n \n if 'price' in data and 'difficulty' in data:\n\n query = JobModel.query.filter(and_(JobModel.price >= int(data['price']) , JobModel.difficulty_level.ilike(data['difficulty'] ))).all()\n return jsonify(query),200 \n\n if 'price' in data and not 'difficulty' in data :\n print('oi')\n query = JobModel.query.filter(JobModel.price >= int(data['price'])).all()\n return jsonify(query),200 \n \n else :\n query = JobModel.query.filter(JobModel.difficulty_level.ilike(data['difficulty'] )).all()\n return jsonify(query),200 \n \n except :\n return jsonify([])","sub_path":"app/controllers/job_controller.py","file_name":"job_controller.py","file_ext":"py","file_size_in_byte":7869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"411442436","text":"from models import Author, Paper\nimport arxiv\nimport tabula\nimport urllib\nimport urllib.request\nimport os, glob, sys, re\nfrom tqdm import tqdm\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter 
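In the update_job_by_id record above, job.contractor_id is dereferenced before the "if job is None" guard, so a missing id raises AttributeError instead of returning 404. A guard-first sketch of the same lookup order, reusing the record's own JobModel and db names (a hypothetical helper, not a drop-in view):

def update_job(job_id, data, contractor_id):
    # Order matters: existence first, then ownership, then mutation.
    job = JobModel.query.filter_by(id=job_id).first()
    if job is None:
        return {"message": "Job not found!"}, 404
    if job.contractor_id != contractor_id:
        return {"message": "Only the contractor of this specific job can update it"}, 403
    JobModel.query.filter_by(id=job.id).update(data)
    db.session.commit()
    return {"id": job.id, **data}, 200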
import TextConverter, PDFPageAggregator\nfrom pdfminer.layout import LAParams, LTFigure, LTImage\nfrom pdfminer.pdfpage import PDFPage\nfrom io import StringIO\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom tabula import wrapper\nfrom pdf_processor import extract_text, extract_text2\nimport logging\nimport time\nlogging.basicConfig(level=logging.WARNING)\nlogger = logging.getLogger(__name__)\n\n# the path where you want to store the downloaded pdf file\nstore_path = './pdf_files/'\n\ndef find_images_in_thing(outer_layout):\n image_count = 0\n for thing in outer_layout:\n if isinstance(thing, LTImage):\n image_count += 1\n return image_count\n\ndef get_figure_count(path):\n fp = open(path, 'rb')\n parser = PDFParser(fp)\n document = PDFDocument(parser)\n rsrcmgr = PDFResourceManager()\n laparams = LAParams()\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n\n figures = 0\n pages = 0\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n pages += 1\n pdf_item = device.get_result()\n for thing in pdf_item:\n if isinstance(thing, LTImage):\n figures += 1\n if isinstance(thing, LTFigure):\n figures += find_images_in_thing(thing)\n return figures, pages\n\n\ndef download_extract(paper, extract_figure=False, extract_table=False):\n if paper.pages >= 0 and paper.table >= 0:\n return False\n paper_info = {\n 'pdf_url': paper.url,\n 'title': paper.title,\n }\n api_paper = arxiv.query(id_list=[paper.arvixID])[0]\n if 'pdf_url' not in api_paper:\n return False\n pdf_url = api_paper['pdf_url']\n # pdf_url = 'https://arxiv.org/pdf/' + paper.url.split('/')[-1] +'.pdf'\n file_path = os.path.join(store_path, paper.paperId+'.pdf')\n # if not os.path.isfile(file_path):\n urllib.request.urlretrieve(pdf_url, file_path)\n\n if extract_table:\n df = wrapper.read_pdf(file_path, multiple_tables=True, pages='all') \n table_count = len(df)\n del df\n\n if extract_figure:\n figure_count, page_count = get_figure_count(file_path)\n modified = False\n if paper.pages == -1:\n modified = True\n paper.pages = page_count\n else:\n page_count = paper.pages\n if paper.table == -1:\n modified = True\n paper.table = table_count\n if os.path.exists(file_path):\n os.remove(file_path)\n if modified:\n Paper.update(table=table_count, pages=page_count).where(Paper.arvixID == paper.arvixID).execute()\n # paper.save()\n return modified\n # api_paper = arxiv.query(id_list=[paper.arvixID])[0]\n # if 'pdf_url' not in api_paper:\n # return False\n # pdf_url = api_paper['pdf_url']\n texts = extract_text(file_path, pdf_url)\n if texts is None:\n print(\"PDF either do not exists or failed : \", paper.url)\n return False\n affiliation = []\n for text in texts.split():\n if re.match(\"[^@]+@[^@]+\\.[^@]+\", text):\n domain_name = text.split('@')[-1]\n affiliation.append(domain_name)\n if len(affiliation) > 0:\n Paper.update(affiliation=affiliation).where(Paper.arvixID == paper.arvixID).execute()\n\n return False\n\n\n\ndef main():\n papers = Paper.select().where((Paper.year == 2015))\n for paper in tqdm(papers):\n try:\n download_extract(paper)\n except KeyboardInterrupt:\n sys.exit()\n # except:\n # logging.warning(\"\\Failed in : %s\" % (str(paper.paperId)))\n # papers = Paper.select().where(Paper.year == 2017)\n # for paper in tqdm(papers):\n # try:\n # download_extract(paper)\n # except KeyboardInterrupt:\n # sys.exit()\n # except:\n # logging.warning(\"\\Failed in : %s\" % (str(paper.paperId)))\n\nif 
__name__ == \"__main__\":\n # papers = Paper.select().where(Paper.arvixID == '1705.09871')\n # for paper in tqdm(papers):\n # try:\n # changed = download_extract(paper)\n # print(changed)\n # except KeyboardInterrupt:\n # sys.exit()\n main()\n","sub_path":"crawler/crawl_meta.py","file_name":"crawl_meta.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391894953","text":"CAR_INIT_POS = (20, 20)\nCAR_DIRECTION = (1, 0)\n\nCAR_SIZE = (12, 9)\nCAR_IMAGE_PATH = 'images/car.png'\nCAR_SPEED = 5\nCAR_ANGLE_SPEED = 1\nTHRESHOLD = 30\nMIN_ANGLE = 15\nMAX_SCAN_DISTANCE = 40\n","sub_path":"constants/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574580023","text":"# crear 20 numeros aleaatorios entre el 0 y el 100 \n#imprimir un alista de los numeros generados \n#ordenados ascendentemente, primero los pares y luegos los impares\n\n#ejemplo: si los numeros generados son [4,3,5,6,2]\n#el resultado sera: [2,4,6,3,5]\n\nimport random\nlistaA=[]\nnumeros = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,95,74,65,35,81,54]\nlistapar=[]\nlistaimpar=[]\nwhile len(numeros)>0:\n\tnum = numeros.pop()\n\tif (num % 2 == 0):\n\t\tlistapar.append(num)\n\telse:\n\t\tlistaimpar.append(num)\n\nfor i in range(20):\n\n\tlistaA.append(random.randint(0, 100))\n\nprint (listaA)\nlistaA.sort()\nprint (\"orden ascendente\")\nprint (listaA)\nprint (\"numeros pares\", )\nprint (listapar)\nlistapar.sort()\nprint (\"orden ascendente de pares\", listapar)\nprint (\"numeros impares\", )\nprint (listaimpar)\nlistaimpar.sort()\nprint (\"orden ascendente de impares\", listaimpar)\nlistaimpar\n #random.randrange(0,99) #genera numeros aleatorios, entre esos valores\n","sub_path":"numeroslist.py","file_name":"numeroslist.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12463601","text":"\"\"\"empty message\n\nRevision ID: 3f1845b2e2b1\nRevises: 9a69200a6889\nCreate Date: 2021-07-29 15:44:38.333111\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '3f1845b2e2b1'\ndown_revision = '9a69200a6889'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('show',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('venue_fk1', sa.Integer(), nullable=False),\n sa.Column('artist_fk2', sa.Integer(), nullable=False),\n sa.Column('start_time', sa.DateTime(timezone=True), nullable=False),\n sa.ForeignKeyConstraint(['artist_fk2'], ['artist.id'], ),\n sa.ForeignKeyConstraint(['venue_fk1'], ['venue.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_table('Show')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('Show',\n sa.Column('id', sa.INTEGER(), server_default=sa.text('nextval(\\'\"Show_id_seq\"\\'::regclass)'), autoincrement=True, nullable=False),\n sa.Column('venue_fk1', sa.INTEGER(), autoincrement=False, nullable=True),\n sa.Column('artist_fk2', sa.INTEGER(), autoincrement=False, nullable=True),\n sa.Column('start_time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=False),\n sa.ForeignKeyConstraint(['artist_fk2'], ['artist.id'], name='Show_artist_fk2_fkey'),\n sa.ForeignKeyConstraint(['venue_fk1'], ['venue.id'], name='Show_venue_fk1_fkey'),\n sa.PrimaryKeyConstraint('id', name='Show_pkey')\n )\n op.drop_table('show')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/3f1845b2e2b1_.py","file_name":"3f1845b2e2b1_.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613969066","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\n Criado em 22 de Março de 2017 \n Descricao: \n \n @author: Denis Varise Bernardes & Eder Martioli\n \n Laboratorio Nacional de Astrofisica, Brazil.\n\n \n\texample: ./biasCompleto.py --list=list\n \n \"\"\"\n\n__version__ = \"1.0\"\n\n__copyright__ = \"\"\"\n Copyright (c) ... All rights reserved.\n \"\"\"\n\nimport os, sys\nimport datetime\n\ndate = 'Data: ' + str(datetime.datetime.now()).split('.')[0].split(' ')[0]\n\nclass arquivoCaract:\n\tdef __init__(self):\n\t\tself.dic = {'StrEspectroEQ':[], 'TemperaturaDC':'', 'nomeCamera':'', 'DCCalculado':'', 'ganhoCalculado':'', 'RNCalculado':''}\n\n\t\t\n\tdef testArqExists(self):\n\t\tself.ListaArquivos = ['BiasLog', 'DCLog', 'GanhoLog', 'EQLog']\n\t\tfor arq in self.ListaArquivos:\n\t\t\tif os.path.isfile(os.getcwd()+'/'+arq) is not True:\n\t\t\t\texit()\n\n\n\tdef getValues(self, Object):\n\t\tintervEQ=False\t\n\t\tStrEspectroEQ = []\n\t\tObject.testArqExists()\t\n\n\t\tRNCalculado, DCCalculado, TemperaturaDC, ganhoCalculado, taxaLeitura, preAmp, Vshift, StrEspectroEQ, Data = '','','','','','','',[],''\n\t\tfor arq in self.ListaArquivos:\n\t\t\twith open(arq) as arq:\n\t\t\t\tlinhas = arq.read().splitlines()\n\t\t\t\tArqDC = False\n\t\t\t\tfor linha in linhas:\t\t\t\t\t\t\t\t\n\t\t\t\t\tif 'Ruido de Leitura:' in linha: \n\t\t\t\t\t\tRNCalculado = linha.split(':')[1]\n\t\t\t\t\tif 'Corrente de escuro:' in linha: \n\t\t\t\t\t\tDCCalculado = linha.split(':')[1]\n\t\t\t\t\t\tArqDC = True\t\t\t\t\n\t\t\t\t\tif ArqDC == True:\n\t\t\t\t\t\ttry:\t\t\t\t\t\t\n\t\t\t\t\t\t\tif float(linha.split('\\t\\t')[1]) < TemperaturaDC:\n\t\t\t\t\t\t\t\tTemperaturaDC = linha.split('\\t\\t')[1]\n\t\t\t\t\t\texcept: 1\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tif 'Ganho:' in linha: \t\t\t\t\t\t\n\t\t\t\t\t\tganhoCalculado = linha.split(':')[1]\n\t\t\t\t\tif ' Lambda (nm) \t EQ (%)' in linha: intervEQ=True\n\t\t\t\t\tif linha == '': intervEQ = False\n\t\t\t\t\tif intervEQ == True:\n\t\t\t\t\t\tStrEspectroEQ.append(linha)\n\t\t\t\t\tif 'Camera:' in linha:\n\t\t\t\t\t\tnomeCamera = linha.split(':')[1]\n\t\t\t\t\tif 'Taxa de leitura:' in linha:\n\t\t\t\t\t\ttaxaLeitura = linha.split(':')[1]\n\t\t\t\t\tif 'VShift Speed:' in linha:\n\t\t\t\t\t\tVshift = linha.split(':')[1]\n\t\t\t\t\tif 'Pre-amplificacao:' in linha:\n\t\t\t\t\t\tpreAmp = linha.split(':')[1]\n\t\t\t\t\tif 'Data do experimento:' in linha:\n\t\t\t\t\t\tData = linha.split(':')[1]\n\t\t\t\t\tif 'Tabela para pagina wiki' in linha:\n\t\t\t\t\t\tbreak\n \n\n\t\t\t\t\n\t\t\t\tarq.close()\n\t\tlistValues = [RNCalculado, 
DCCalculado,TemperaturaDC, ganhoCalculado, StrEspectroEQ, nomeCamera, taxaLeitura, preAmp, Vshift, Data]\t\t\t\t\t\n\t\treturn listValues\n\t\t\t\n\t\t\n\n\n\tdef criaArq(self, Object):\t\n\t\t\t\n\t\tObject.getValuesArqCaract()\n\t\tlistValues = Object.getValues(Object)\t\n\t\tObject.atualizaVariavel(listValues)\t\t\n\t\tlistaEQ = self.dic['StrEspectroEQ'][2:]\n\t\tStrEQ = ''\n\t\tfor linha in listaEQ:\n\t\t\tStrEQ += '|' + linha.split('\\t\\t')[0][1:] + '||' + linha.split('\\t\\t')[1] + '\\n' + '|-' + '\\n'\n\n\t\tStrNomeCamera = 'Camera:%s' %(self.dic['nomeCamera']) \n\t\tStrTaxaLeitura = 'Taxa de leitura:%s' %(self.dic['taxaLeitura'])\n\t\tStrPreAmp = 'Pre-amplificacao:%s' %(self.dic['preAmp'])\n\t\tStrVShift = 'VShift Speed:%s' %(self.dic['Vshift'])\n\t\tStrTemp = 'Temperatura minima:%s ºC' %(self.dic['TemperaturaDC'])\n\t\tStrBias = 'Ruido de Leitura:%s' %(self.dic['RNCalculado'])\n\t\tStrDC = 'Corrente de escuro:%s' %(self.dic['DCCalculado'])\n\t\tStrGanho = 'Ganho:%s' %(self.dic['ganhoCalculado'])\n\t\tStrTabelaWiki = ['\\n\\n\\n', 'Tabela para pagina wiki', '------------------------','\\n',\n'{| class=\"wikitable floatleft\" style=\"text-align: center;\"', \n'! style=\"background: #808080;\"| Câmera: || style=\"background: #808080;\" | %s'%(self.dic['nomeCamera']),\n'|-', \n'| Taxa de Leitura: || %s'%(self.dic['taxaLeitura']),\n'|-', \t\n'| Pré-amplificação: || %s'%(self.dic['preAmp']),\n'|-',\n'| VShift Speed: || %s'%(self.dic['Vshift']),\n'|-',\n'| Data: || %s'%(self.dic['Data']), \n'|-',\n'| Temperatura minima: || %s ºC'%(self.dic['TemperaturaDC']),\n'|-',\n'| Ruido de Leitura: || %s'%(self.dic['RNCalculado']),\n'|-',\n'| Corrente de escuro: || %s'%(self.dic['DCCalculado']),\n'|-',\n'| Ganho: || %s'%(self.dic['ganhoCalculado']),\n'|-',\n'! 
style=\"background: #808080;\"| Lambda (nm) || style=\"background: #808080;\"| EQ (%)',\n'|-',\nStrEQ+'|}']\n\n\n\n\n\n\t\tStrArqTexto = [StrNomeCamera, StrTaxaLeitura, StrPreAmp, StrVShift, date, StrTemp, StrBias, StrDC, StrGanho] + [''] + self.dic['StrEspectroEQ']\n\t\tarqCaract = open('arquivoCaracterizacao', 'w')\n\t\tfor Str in StrArqTexto:\n\t\t\tarqCaract.write(Str+'\\n')\n\t\tfor Str in StrTabelaWiki:\n\t\t\tarqCaract.write(Str+'\\n')\n\t\tarqCaract.close()\n\t\t\n\t\t\n\tdef getValuesArqCaract(self):\n\t\tintervEQ=False\n\t\ttry:\n\t\t\twith open('arquivoCaracterizacao') as arq:\n\t\t\t\tlinhas = arq.read().splitlines()\n\t\t\t\tarq.close()\t\t\n\t\t\tfor linha in linhas:\t\t\t\t\t\n\t\t\t\tif 'Ruido de Leitura' in linha: \t\t\t\t\t\n\t\t\t\t\tself.dic['RNCalculado'] = linha.split(':')[1]\n\t\t\t\tif 'Corrente de escuro' in linha: \n\t\t\t\t\tself.dic['DCCalculado'] = linha.split(':')[1]\n\t\t\t\tif 'Ganho' in linha: \n\t\t\t\t\tself.dic['ganhoCalculado'] = linha.split(':')[1]\n\t\t\t\tif ' Lambda (nm) \t EQ (%)' in linha: intervEQ=True\t\t\t\t\n\t\t\t\tif intervEQ == True:\n\t\t\t\t\tself.dic['StrEspectroEQ'].append(linha)\n\t\t\t\tif 'Temperatura minima' in linha:\n\t\t\t\t\tself.dic['TemperaturaDC'] = linha.split(':')[1].split(' ')[0]\n\t\t\t\tif 'Camera' in linha:\n\t\t\t\t\tself.dic['nomeCamera'] = linha.split(':')[1]\n\t\t\t\tif 'Tabela para pagina wiki' in linha:\n\t\t\t\t\tbreak\n\t\texcept: 1\n\t\t\n\n\tdef atualizaVariavel(self, lista):\t\t\n\t\ti=0\n\t\tlistaVariaveis = ['RNCalculado', 'DCCalculado', 'TemperaturaDC', 'ganhoCalculado', 'StrEspectroEQ', 'nomeCamera', 'taxaLeitura', 'preAmp', 'Vshift', 'Data']\n\t\tfor dado in lista:\n\t\t\tif dado != '':\n\t\t\t\tself.dic[listaVariaveis[i]] = dado\t\t\n\t\t\ti+=1\n\t\t\n\t\t\n\t\t\t\n","sub_path":"Ruido_de_Leitura/Codigo/criaArq_resultadoCaract.py","file_name":"criaArq_resultadoCaract.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"430713811","text":"from aiida.common.example_helpers import test_and_get_code # noqa\nfrom aiida.orm.data.structure import StructureData # noqa\nfrom aiida.orm.data.parameter import ParameterData # noqa\nfrom aiida.orm.data.base import Str\nfrom aiida.work.run import submit\n\nimport ase.build\nfrom aiida_cp2k.workflows import Cp2kMdWorkChain\n\natoms = ase.build.molecule('H2O')\natoms.center(vacuum=2.0)\nstructure = StructureData(ase=atoms)\nstructure.label='H2O'\nstructure.store()\noptions_dict = {\n \"resources\": {\n \"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 2,\n },\n \"max_wallclock_seconds\": 3 * 60 * 60,\n }\noptions = ParameterData(dict=options_dict)\n\nparams_dict = {\n 'FORCE_EVAL':{\n 'DFT':{\n 'UKS': True,\n },\n },\n }\n\nparameters = ParameterData(dict=params_dict)\ncode = test_and_get_code('cp2k-5.1@localhost', expected_code_type='cp2k')\nsubmit(Cp2kMdWorkChain,\n code=code,\n structure=structure,\n parameters=parameters,\n options=options,\n _label='MyFirstWokchain',\n )\n","sub_path":"test/workflows/run_mdnvt_water.py","file_name":"run_mdnvt_water.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352959017","text":"from functools import partial\nimport pydash as _\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom slm_lab.lib import logger\n\n\ndef get_activation_fn(activation):\n '''Helper to generate activation function layers for net'''\n 
layer = None\n if activation == 'sigmoid':\n layer = nn.Sigmoid()\n elif activation == 'lrelu':\n layer = nn.LeakyReLU(negative_slope=0.05)\n elif activation == 'tanh':\n layer = nn.Tanh()\n elif activation == 'relu':\n layer = nn.ReLU()\n else:\n logger.debug(\"No activation fn or unrecognised activation fn\")\n layer = nn.ReLU()\n return layer\n\n\ndef get_loss_fn(cls, loss_param):\n '''Helper to parse loss param and construct loss_fn for net'''\n loss_param = loss_param or {}\n loss_fn = getattr(F, _.get(loss_param, 'name', 'mse_loss'))\n loss_param = _.omit(loss_param, 'name')\n if not _.is_empty(loss_param):\n loss_fn = partial(loss_fn, **loss_param)\n return loss_fn\n\n\ndef get_optim(cls, optim_param):\n '''Helper to parse optim param and construct optim for net'''\n optim_param = optim_param or {}\n OptimClass = getattr(torch.optim, _.get(optim_param, 'name', 'Adam'))\n optim_param = _.omit(optim_param, 'name')\n optim = OptimClass(cls.parameters(), **optim_param)\n return optim\n\n\ndef get_optim_multinet(params, optim_param):\n '''Helper to parse optim param and construct optim for net'''\n optim_param = optim_param or {}\n OptimClass = getattr(torch.optim, _.get(optim_param, 'name', 'Adam'))\n optim_param.pop('name', None)\n optim = OptimClass(params, **optim_param)\n return optim\n\n\ndef flatten_params(net):\n '''Flattens all of the parameters in a net\n Source: https://discuss.pytorch.org/t/running-average-of-parameters/902/2'''\n return torch.cat([param.data.view(-1) for param in net.parameters()], 0)\n\n\ndef load_params(net, flattened):\n '''Loads flattened parameters into a net\n Source: https://discuss.pytorch.org/t/running-average-of-parameters/902/2'''\n offset = 0\n for param in net.parameters():\n # reshape the flat slice to the parameter's shape before copying,\n # otherwise copy_ fails for multi-dimensional parameters\n param.data.copy_(\n flattened[offset:offset + param.nelement()].view(param.size()))\n offset += param.nelement()\n return net\n\n\ndef init_layers(layers, layer_type):\n '''\n Initializes all of the layers of type 'Linear' or 'Conv' using xavier uniform initialization for the weights and 0.01 for the biases\n Initializes all layers of type 'BatchNorm' using uniform initialization for the weights and the same as above for the biases\n '''\n biasinit = 0.01\n for layer in layers:\n classname = layer.__class__.__name__\n if classname.find(layer_type) != -1:\n if layer_type == 'BatchNorm':\n torch.nn.init.uniform(layer.weight.data)\n else:\n torch.nn.init.xavier_uniform(layer.weight.data)\n layer.bias.data.fill_(biasinit)\n","sub_path":"slm_lab/agent/net/net_util.py","file_name":"net_util.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533934341","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 28 23:29:16 2016\n\n@author: zz\n\"\"\"\n\nfrom CIWBELM import CIWBELM\nfrom util import *\n\ntrain_data, train_label, test_data, test_label = loadData(0.1)\nfeature_dim = train_data.shape[1]\nlabel_dim = train_label.shape[1]\n \ntrain_data = normalizeData(train_data)#/5000#int(feature_dim*1)\ntest_data = normalizeData(test_data)#/5000#int(feature_dim*1)\n\ntrain_label[train_label==1] = 250\ntest_label[test_label==1] = 250\n\nciwbelm = CIWBELM(feature_dim, int(feature_dim*10), label_dim, 'lite', 'ciw', \\\n train_data, train_label, H=0.25, \\\n binaryTrain=True, binaryTest=True)\n\nciwbelm.trainModel(train_data, train_label)\n#ciwbelm.save(r\"D:\\workspace\\Data\\ELM\\weights\\ciwbelm\")\nciwbelm.testModel(test_data, 
test_label)","sub_path":"testCIWBELM.py","file_name":"testCIWBELM.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135492865","text":"# First have to monkey patch ntlk to find it data in a local directory\nimport os\nfrom nltk import data\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\nnltk_data = os.path.join(current_path, 'nltk_data', '')\ndata.path = [nltk_data]\n\nfrom flask import Flask\nfrom flask.ext.restful import Resource, Api, reqparse\n\n\nimport newspaper\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napi = Api(app)\n\nconfig = newspaper.Config()\nconfig.is_memoize_articles = False\nconfig.keep_article_html = True\n\narticle_parser = reqparse.RequestParser()\narticle_parser.add_argument('url', type=unicode, help='The url of the site to scrape')\n\n\nclass ArticleSimple(Resource):\n def get(self):\n args = article_parser.parse_args()\n url = args['url']\n article = newspaper.build_article(url, config)\n article.download()\n article.parse()\n article.nlp()\n\n return {\n 'url': article.url,\n 'title': article.title,\n 'top_image': article.top_img,\n 'images': [x for x in article.imgs],\n 'text': article.text,\n 'html': article.article_html,\n 'keywords': article.keywords,\n 'authors': article.authors,\n 'summary': article.summary,\n 'meta_description': article.meta_description,\n 'meta_lang': article.meta_lang,\n 'meta_favicon': article.meta_favicon,\n 'meta_keywords': article.meta_keywords,\n 'canonical_link': article.canonical_link,\n 'tags': [unicode(x) for x in article.tags],\n 'movies': article.movies,\n 'additional_data': article.additional_data,\n }\n\n\nsite_parser = reqparse.RequestParser()\nsite_parser.add_argument('url', type=unicode, help='The url of the site to scrape')\n\n\nclass SourceSimple(Resource):\n\n def get(self):\n args = site_parser.parse_args()\n source = newspaper.build(args['url'], config)\n return {\n 'domain': source.domain,\n 'logo_url': source.logo_url,\n 'favicon': source.favicon,\n 'brand': source.brand,\n 'description': source.description,\n 'categories': [unicode(x.url) for x in source.categories],\n 'feeds': [unicode(x.url) for x in source.feeds],\n 'articles': [unicode(x.url) for x in source.articles],\n }\n\n\napi.add_resource(ArticleSimple, '/article')\napi.add_resource(SourceSimple, '/source')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459384214","text":"import random\nimport time\n\n\ndef display_intro():\n print('''You are in a land full of dragons. In front of you, you see two caves. In one cave, the dragon is \n friendly and will share their treasure with you. The other dragon is greedy and hungry, and will eat you on \n sight.''')\n print()\n\n\ndef choose_cave():\n cave = ''\n while cave != '1' and cave != '2':\n print('Which cave will you go into? (1 or 2)')\n cave = input()\n\n return cave\n\n\ndef check_cave(chosen_cave):\n print(\"You approach the cave...\")\n time.sleep(2)\n print(\"It's dark and spooky...\")\n time.sleep(2)\n print(\"A large dragon jumps out in front of you! 
She opens her jaws and ...\")\n time.sleep(2)\n\n friendly_cave = random.randint(1, 2)\n\n if chosen_cave == str(friendly_cave):\n print(\"Gives you her treasure!\")\n else:\n print(\"Gobbles you up in one bite!\")\n\n\nplay_again = 'yes'\nwhile play_again == 'yes' or play_again == 'y':\n display_intro()\n cave_number = choose_cave()\n check_cave(cave_number)\n\n print(\"Do you want to play again? (yes or no)\")\n play_again = input().lower()\n","sub_path":"dragon.py","file_name":"dragon.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"99116306","text":"class SaturdayNightStay:\n def countOptions(self, firstDay, firstMonth, lastDay, lastMonth):\n def get_day_cnt(month, day):\n # 是第几天\n months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n cnt = 0\n for t in range(month - 1):\n cnt += months[t]\n cnt += day\n return cnt\n\n def get_weekday(cnt):\n # 1, 1 Tuesday\n cnt %= 7\n return (2 + cnt - 1) % 7\n\n cnt1 = get_day_cnt(firstMonth, firstDay)\n cnt2 = get_day_cnt(lastMonth, lastDay)\n\n head = 0\n day = cnt1\n while get_weekday(day) != 0:\n day += 1\n head += 1\n if day > cnt2:\n break\n\n tail = 0\n day = cnt2\n while get_weekday(day) != 6:\n day -= 1\n tail += 1\n if day < cnt1:\n break\n\n mid = cnt2 - cnt1 + 1 - head - tail\n # there may be no mid\n # print(head, mid, tail)\n res = head * (mid + tail)\n for i in range(1, mid + 1):\n res += (mid - i) // 7 * 7 + tail\n return res\n\n\nfirstDay = 31\nfirstMonth = 1\nlastDay = 1\nlastMonth = 2\nprint(SaturdayNightStay().countOptions(firstDay, firstMonth, lastDay, lastMonth))\n","sub_path":"TCO/TCO 2019 China Regionals/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486028978","text":"__author__ = 'Nattachai Chaiwiriya'\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn import svm\n\niris = datasets.load_iris()\nprint (iris.data.shape, iris.target.shape)\n\n# 40% of the data for testing classifier:\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)\n\nprint (X_train.shape, y_train.shape)\nprint (X_test.shape, y_test.shape)\n\n# Train a linear support vector machine\nclf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)\nprint (clf.score(X_test, y_test))\n\n\n\nfrom sklearn.model_selection import cross_val_score\nclf = svm.SVC(kernel='linear', C=1)\nscores = cross_val_score(clf, iris.data, iris.target, cv=5)\n\nprint (scores)\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n","sub_path":"Project/Train_Test_Split.py","file_name":"Train_Test_Split.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224463460","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom socket import *\nimport argparse\nimport sys\nimport re\nimport time\nfrom threading import Semaphore\n\n# Semaphore\nscreenLock = Semaphore(value=1)\n\nRESULTS = {}\n\nPORTS = \"\"\"\n7 Echo\n19 Chargen\n20 FTP\n21 FTP\n22 SSH/SCP\n23 Telnet\n25 SMTP\n42 WINS-Replication\n43 WHOIS\n49 TACACS\n53 DNS\n67-68 DHCP/BOOTP\n69 TFTP\n70 Gopher\n79 Finger\n80 HTTP\n88 Kerberos\n102 MS-Exchange\n110 POP3\n113 Ident\n119 NNTP-(Usenet)\n123 NTP\n135 Microsoft-RPC\n137 NetBIOS\n138 NetBIOS\n139 
NetBIOS\n143 IMAP4\n161 SNMP\n162 SNMP\n177 XDMCP\n179 BGP\n201 AppleTalk\n264 BGMP\n318 TSP\n381 HP-Openview\n382 HP-Openview\n383 HP-Openview\n389 LDAP\n411 Direct-Connect\n412 Direct-Connect\n443 HTTP-over-SSL\n445 Microsoft-DS\n464 Kerberos\n465 SMTP-over-SSL\n497 Retrospect\n500 ISAKMP\n512 rexec\n513 rlogin\n514 syslog\n515 LPD/LPR\n520 RIP\n521 RIPng-(IPv6)\n540 UUCP\n554 RTSP\n546 DHCPv6\n547 DHCPv6\n560 rmonitor\n563 NNTP-over-SSL\n587 SMTP\n591 FileMaker\n593 Microsoft-DCOM\n631 Internet-Printing\n636 LDAP-over-SSL\n639 MSDP-(PIM)\n646 LDP-(MPLS)\n691 MS-Exchange\n860 iSCSI\n873 rsync\n902 VMware-Server\n989 FTP-over-SSL\n990 FTP-over-SSL\n993 IMAP4-over-SSL\n995 POP3-over-SSL\n1025 Microsoft-RPC\n1026 Windows-Messenger\n1027 Windows-Messenger\n1028 Windows-Messenger\n1029 Windows-Messenger\n1080 SOCKS-Proxy\n1080 MyDoom\n1194 OpenVPN\n1214 Kazaa\n1241 Nessus\n1311 Dell-OpenManage\n1337 WASTE\n1433 Microsoft-SQL\n1434 Microsoft-SQL\n1512 WINS\n1589 Cisco-VQP\n1701 L2TP\n1723 MS-PPTP\n1725 Steam\n1741 CiscoWorks-2000\n1755 MS-Media-Server\n1812 RADIUS\n1813 RADIUS\n1863 MSN\n1985 Cisco-HSRP\n2000 Cisco-SCCP\n2002 Cisco-ACS\n2049 NFS\n2082 cPanel\n2083 cPanel\n2100 Oracle-XDB\n2222 DirectAdmin\n2302 Halo\n2483-2484 Oracle-DB\n2745 Bagle.H\n2967 Symantec-AV\n3050 Interbase-DB\n3074 XBOX-Live\n3124 HTTP-Proxy\n3127 MyDoom\n3128 HTTP-Proxy\n3222 GLBP\n3260 iSCSI-Target\n3306 MySQL\n3389 Terminal-Server\n3689 iTunes\n3690 Subversion\n3724 World-of-Warcraft\n3784 Ventrilo\n3785 Ventrilo\n4333 mSQL\n4444 Blaster/custom\n4664 Google-Desktop\n4672 eMule\n4899 Radmin\n5000 UPnP\n5001 Slingbox\n5001 iperf\n5004 RTP\n5005 RTP\n5050 Yahoo!-Messenger\n5060 SIP\n5190 AIM/ICQ\n5222 XMPP/Jabber\n5223 XMPP/Jabber\n5432 PostgreSQL\n5500 VNC-Server\n5554 Sasser\n5631 pcAnywhere\n5632 pcAnywhere\n5800 VNC-over-HTTP\n5900+ VNC-Server\n6000 X11\n6001 X11\n6112 Battle.net\n6129 DameWare\n6257 WinMX\n6346 Gnutella\n6347 Gnutella\n6500 GameSpy-Arcade\n6566 SANE\n6588 AnalogX\n6665 IRC\n6666 IRC\n6667 IRC\n6668 IRC\n6669 IRC\n6679 IRC-over-SSL\n6697 IRC-over-SSL\n6699 Napster\n6881-6999 BitTorrent\n6891-6901 Windows-Live\n6970 Quicktime\n7212 GhostSurf\n7648 CU-SeeMe\n7649 CU-SeeMe\n8000 Internet-Radio\n8080 HTTP-Proxy\n8086 Kaspersky-AV\n8087 Kaspersky-AV\n8118 Privoxy\n8200 VMware-Server\n8500 Adobe-ColdFusion\n8767 TeamSpeak\n8866 Bagle.B\n9100 HP-JetDirect\n9101 Bacula\n9102 Bacula\n9103 Bacula\n9119 MXit\n9800 WebDAV\n9898 Dabber\n9988 Rbot/Spybot\n9999 Urchin\n10000 Webmin\n10000 BackupExec\n10113 NetIQ\n10114 NetIQ\n10115 NetIQ\n10116 NetIQ\n11371 OpenPGP\n12035 Second-Life\n12036 Second-Life\n12345 NetBus\n13720 NetBackup\n13721 NetBackup\n14567 Battlefield\n15118 Dipnet/Oddbob\n19226 AdminSecure\n19638 Ensim\n20000 Usermin\n24800 Synergy\n25999 Xfire\n27015 Half-Life\n27374 Sub7\n28960 Call-of-Duty\n31337 Back-Orifice\n33434+ traceroute\n\"\"\"\nPORTS = dict(port.split(\" \") for port in PORTS.split('\\n') if len(port) > 1)\n\nfor _ in PORTS.keys():\n if \"+\" in _:\n _.replace('+', '')\n\n# PORTS = {k:v for k,v in PORTS.split('\\n') if len(k) > 1}\n\nip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\"\nhostname_regex = \"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[\"\"A-Za-z0-9])$\"\n\n\ndef show_results():\n pass\n\n\ndef name_resolve_util(address):\n if not re.match(ip_regex, address):\n if not re.match(hostname_regex, address):\n print(\"[!] 
IP address or hostname doesn't match a regex pattern [!]\")\n print(\"[!] Please enter a valid IP address or hostname [!]\")\n print('[!] Exiting [!]')\n sys.exit(1)\n\n if address[0].isdigit():\n try:\n return address, gethostbyaddr(address)\n except error:\n return address, \"HostNotIdentified\"\n else:\n try:\n return gethostbyname(address), address\n except error:\n raise Exception(\"Could not get host by name/www. Exiting\")\n\n\ndef connect(address, port, protocol):\n port_description = PORTS.get(port, '')\n\n if protocol == 'udp':\n conn_skt = socket(AF_INET, SOCK_DGRAM)\n else:\n conn_skt = socket(AF_INET, SOCK_STREAM)\n\n try:\n conn_skt.connect((address[0], int(port)))\n conn_skt.send(b'TEST!TEST!TEST!')\n banner = conn_skt.recv(1024)\n\n print(\"\\r[+] {} Port {} {} is open {} ({}) [+]\".format(protocol.upper(), port, port_description, address[0],\n address[1]))\n print(\"\\r\\t\" + \"[+] Banner - {} [+]\".format(banner.rstrip()))\n\n RESULTS[port] = dict()\n RESULTS[port]['banner'] = banner\n RESULTS[port]['protocol'] = protocol\n\n except error:\n print(\"\\r[-] {} Port {} {} is closed {} ({}) [-]\".format(protocol.upper(), port, port_description, address[0],\n address[1]))\n finally:\n conn_skt.close()\n\n\ndef scanTarget(ip_or_host, port, protocol='tcp'):\n for _ in port:\n sys.stdout.write(\"\\r[+] Scanning {0} on port {1} [+]\".format(ip_or_host, _))\n time.sleep(1.2)\n sys.stdout.flush()\n connect(name_resolve_util(ip_or_host), _, protocol[:3])\n\n if protocol == 'tcp & udp':\n print('\\n' + '#' * 100)\n print('Scanning UDP ports now')\n print('#' * 100, '\\n')\n scanTarget(ip_or_host, port, protocol='udp')\n\n\ndef main():\n parser = argparse.ArgumentParser(\"Port scanner app\")\n parser.add_argument(\"-a\", \"--address\", type=str, help=\"Target ip address\")\n parser.add_argument('-p', \"--port\", type=str, help=\"Target port address\")\n parser.add_argument('-u', '--udp', action='store_true', default=False, help=\"Use if you want to scan UDP ports\")\n parser.add_argument('-i', '--include', action='store_true', default=False, help=\"Use to scan both TCP & UDP ports\")\n parser.add_argument('--all', action='store_true', default=False, help='Use if you want to scan all comm ports')\n\n args = parser.parse_args()\n\n try:\n ip_or_host = args.address\n\n if args.all:\n ports = PORTS.keys()\n else:\n ports = args.port.split(',')\n\n if args.udp and not args.include:\n protocol = 'udp'\n elif args.udp and args.include:\n protocol = 'tcp & udp'\n else:\n protocol = 'tcp'\n\n except:\n protocol = 'tcp & udp'\n ip_or_host = 'www.hackthissite.org'\n ports = '23,22,21,443,80,4444,6112'\n ports = [port for port in ports.split(',') if len(port) > 0]\n print(\n \"[!] Using debug mode. 
Checking {} on ports 22,77,80,777,443,22 [!]\".format(name_resolve_util(ip_or_host)))\n\n scanTarget(ip_or_host, ports, protocol)\n print(\"\\n[+] Following ports on the target {} are open : {}\".format(name_resolve_util(ip_or_host),\n ','.join(RESULTS.keys())))\n for k in RESULTS.keys():\n print(\"[+] Printing out banners : {} {} {}\".format(RESULTS[k]['protocol'].upper(), k, RESULTS[k]['banner']))\n\n\nif __name__ == \"__main__\":\n setdefaulttimeout(1)\n main()\n","sub_path":"portscanner.py","file_name":"portscanner.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199439399","text":"# coding:utf-8\nfrom io import BytesIO\nimport xmltodict\nimport zipfile\nimport requests\n\n\ndef download_zip():\n liste_station = None\n zip_url = 'https://donnees.roulez-eco.fr/opendata/instantane'\n file = requests.get(zip_url)\n with zipfile.ZipFile(BytesIO(file.content)) as zip_file:\n for myzip in zip_file.namelist():\n with zip_file.open(myzip) as myfile:\n liste_station = myfile.read()\n\n return xmltodict.parse(liste_station, process_namespaces=False)\n\n\ndef trouve_station(liste_station, code_postal):\n liste = {}\n\n for pdv_liste in liste_station.values():\n for pdvs in pdv_liste.values():\n for indice, ville in enumerate(pdvs):\n if ville['ville']:\n if ville['ville'].replace(\"-\", \" \").lower()==code_postal.lower():\n liste[indice] = ville\n elif ville['@cp']==code_postal:\n liste[indice] = ville\n elif ville['ville'].replace(\"-\", \" \").lower().startswith(code_postal):\n liste[indice] = ville\n elif ville['@cp'].startswith(code_postal):\n liste[indice] = ville\n else:\n if ville['@cp']==code_postal:\n liste[indice] = ville\n return liste\n\n\ndef run(liste_station, code_postal):\n result = trouve_station(liste_station, code_postal)\n return result\n","sub_path":"essence.py","file_name":"essence.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"484770032","text":"####################\n#ウィルス流行シミュレーション(のまねっこ)v0.95 by K.Sakurai 2020.4.29\n#Using Python Mode for Processing 3\n#\n#紹介していただいた道越 秀吾さんのシミュレーションを\n#Processing(Python mode)で真似しようとしたものです.元ネタURLは以下.\n#https://rad-it21.com/サイエンス/michikoshi-shugo_20200331/\n#\n#※繰り返し処理は,状態が変化しなくなったら自動的に停止します.\n#※画面内をクリックすると,状態をリセットして再実行します.\n#\n#☆これからやっていく\n#セルの移動に対応する\n#ステップ毎のS,I,Rを画面表示し,.csvに書き出せるようにする\n####################\n\nimport copy\nimport random\n\n#パラメータ入力\nn_siz = 100 #モデルのサイズ(32以上できれいに正方形.大きすぎると重くなるよ)\ninf_rate = 0.1 #感染確率Ir\nrec_rate = 0.1 #回復確率Rr\nn_initial = 0.01 #初期感染者割合Init\nn_contact_average = 4.8 #接触数平均\nn_contact_sigma = 1.0 # 接触数標準偏差\nn_void = 0 #空隙率Voiid\nwait_time = 0 #待ち時間\n\n#箱庭と感染者数チェッカーの用意\ncells = [[0 for i in range(n_siz)] for j in range(n_siz)] \n\ndef setup():\n size(n_siz * 8, n_siz * 8 + 120) #ウィンドウサイズは800x800(1セル8x8)\n background(255)\n myFont = createFont(\"メイリオ\", 48)\n textFont(myFont)\n\n initialize()\n \n #フレーム撮影する場合は下の1行のコメントアウトを外す その1\n #saveFrame(\"frames/######.png\")\n \ndef draw():\n \n global cells\n \n #次状態を記録するリストを用意し初期化(全体をSとして初期化する)\n cells_next = [[0 for i in range(n_siz)] for j in range(n_siz)] \n \n #処理1:感染\n for i in range(n_siz):\n for j in range(n_siz):\n \n #そのセルがIなら,接触マスを指定して感染処理\n if cells[i][j] == 1:\n \n #周囲8マスのうちランダムで(接触数)だけ接触する.\n cont = touch()\n \n #接触マスに感染確率の割合でIを上書き\n if cont[0] == 1:\n if random.random() <= inf_rate:\n cells_next[i-1][j-1] = 1\n if cont[1] == 1:\n if random.random() <= 
inf_rate:\n cells_next[i-1][j] = 1\n if cont[2] == 1:\n if random.random() <= inf_rate:\n cells_next[i-1][(j+1)%n_siz] = 1\n if cont[3] == 1:\n if random.random() <= inf_rate:\n cells_next[i][j-1] = 1\n if cont[4] == 1:\n if random.random() <= inf_rate:\n cells_next[i][(j+1)%n_siz] = 1\n if cont[5] == 1:\n if random.random() <= inf_rate:\n cells_next[(i+1)%n_siz][j-1] = 1\n if cont[6] == 1:\n if random.random() <= inf_rate:\n cells_next[(i+1)%n_siz][j] = 1\n if cont[7] == 1:\n if random.random() <= inf_rate:\n cells_next[(i+1)%n_siz][(j+1)%n_siz] = 1\n \n #処理2:回復\n for i in range(n_siz):\n for j in range(n_siz):\n \n #現在の感染者は回復確率で回復\n if cells[i][j] == 1:\n if random.random() <= rec_rate:\n cells_next[i][j] = 2\n else:\n cells_next[i][j] = 1\n \n #処理3:回復者上書き\n for i in range(n_siz):\n for j in range(n_siz):\n #そのセルが回復者なら,感染処理後も回復者(感染してても上書き)\n if cells[i][j] == 2:\n cells_next[i][j] = 2\n \n #処理4:空隙上書き \n for i in range(n_siz):\n for j in range(n_siz):\n #そのセルが空隙なら,感染処理後も空隙(感染してても上書き)\n if cells[i][j] == 3:\n cells_next[i][j] = 3\n\n \n #全処理が終わったらcells_nextをcellsに移す\n cells = copy.deepcopy(cells_next)\n \n #SIRそれぞれの総数をカウント\n S_now = sum(v.count(0) for v in cells)\n I_now = sum(v.count(1) for v in cells)\n R_now = sum(v.count(2) for v in cells)\n \n #感染者が0ならばループ解除\n if I_now == 0:\n noLoop()\n println(\"stopped\")\n \n #表示を更新\n paint()\n \n fill(192)\n rect(0, n_siz*8, n_siz*8, 60)\n fill(0)\n text(\"S:\" + str(S_now) + \" I:\" + str(I_now) + \" R:\" + str(R_now), 8, n_siz*8 + 48)\n \n #フレーム撮影する場合は下の1行のコメントアウトを外す その2\n #saveFrame(\"frames/######.png\")\n \n #最後にwait_timeだけ待つ\n delay(wait_time)\n\n\n\n########## draw関数ここまで ########## \n\n#セルの状態を読み取り,塗る色を決める関数\ndef paint():\n global cells\n \n for i in range(n_siz):\n for j in range(n_siz):\n \n #セルの色を指定\n if cells[i][j] == 0: #0は感受性保持者S(白)\n fill(255)\n elif cells[i][j] == 1: #1は感染者I(赤)\n fill(255, 0, 0)\n elif cells[i][j] == 2: #2は免疫保持者R(緑)\n fill(0, 255, 0)\n elif cells[i][j] == 3: #3は空隙(黒)\n fill(0)\n else: #それ以外はわからん(灰:出たらバグ)\n fill(128)\n \n #セルを塗る\n rect(8*j, 8*i, 7, 7)\n \n#初期設定\ndef initialize():\n global cells\n \n #箱庭リセット\n cells = [[0 for i in range(n_siz)] for j in range(n_siz)] \n\n #空隙を用意(全セル数x空隙率だけ空隙セルを作成)\n k = int(round(n_siz * n_siz * n_void))\n hits = random.sample(range(n_siz * n_siz - 1), k)\n for hit in hits:\n cells[hit / n_siz][hit % n_siz] = 3\n\n \n #初期感染者を用意(全セル数x(1-空隙率)x初期感染者割合だけ感染者セルを作成)\n k = int(round(n_siz * n_siz * (1 - n_void) * n_initial))\n hits = random.sample(range(n_siz * n_siz - 1), k)\n for hit in hits:\n cells[hit / n_siz][hit % n_siz] = 1\n \n #境目の点をつける\n stroke(0)\n for i in range(n_siz - 1):\n for j in range(n_siz - 1):\n point(j*8+7, i*8+7)\n\n noStroke()\n \n paint()\n \n #最初の表示\n fill(192) \n rect(0, n_siz*8, n_siz*8, 120)\n fill(0)\n text(\"Ir:\" + str(inf_rate) + \" Rr:\" + str(rec_rate) + \" Init:\" + str(n_initial) + \" Void:\" + str(n_void), 8, n_siz*8 + 108)\n\n#クリックしたらリセットしてリスタート\ndef mousePressed():\n global cells\n noLoop()\n delay(100)\n initialize()\n loop()\n\n#接触セル指定\ndef touch():\n touchs = [0, 0, 0, 0, 0, 0, 0, 0]\n k = int(round(random.gauss(n_contact_average, n_contact_sigma)))\n if k > 8:\n k = 8\n elif k < 0:\n k = 0\n touch = random.sample(range(8), k)\n for i in touch:\n touchs[i] = 1\n\n return touchs\n","sub_path":"infection_model/infection_model.pyde","file_name":"infection_model.pyde","file_ext":"pyde","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585368084","text":"import 
urllib.request\nimport json\nimport sys\nimport traceback\n\npypy_name = 'RedditDownloader'\ncurrent_version = \"3.0.0\"\nauthor = \"ShadowMoose\"\n\n\n_latest_version = None\n\n\ndef latest_version():\n\tglobal _latest_version\n\tif _latest_version is None:\n\t\t# noinspection PyBroadException\n\t\ttry:\n\t\t\twith urllib.request.urlopen(\"https://pypi.org/pypi/%s/json\" % pypy_name) as fp:\n\t\t\t\tresp = json.load(fp)\n\t\t\t\t_latest_version = resp['info']['version']\n\t\texcept Exception:\n\t\t\ttraceback.print_exc()\n\t\t\tprint('Error searching for latest version! Please check manually.', file=sys.stderr)\n\t\t\t_latest_version = False\n\treturn _latest_version or None\n\n\ndef get_available_update():\n\tif len(current_version.split('-')) > 1:\n\t\tprint('Cannot check for new %s versions from special releases.' % pypy_name)\n\t\treturn False\n\tlv = latest_version()\n\tif lv and lv != current_version:\n\t\treturn lv\n\treturn False\n\n\nif __name__ == '__main__':\n\tprint('Latest %s version:' % pypy_name, latest_version())\n\tprint('Update Available:', get_available_update())\n","sub_path":"redditdownloader/static/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"73036714","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 14 13:44:00 2016\nUpdated Jan 21, 2018\n\nThe primary goal of this file is to demonstrate a simple python program to classify triangles\n\n@author: jrr\n@author: rk\n-------------------------------------\nUpdated on Wed Sep 16 2020\n\n@author: Ying Hu\n\"\"\"\n\nimport re\n\ndef classifyTriangle(a,b,c):\n \"\"\"\n Your correct code goes here... Fix the faulty logic below until the code passes all of \n you test cases. \n \n This function returns a string with the type of triangle from three integer values\n corresponding to the lengths of the three sides of the Triangle.\n \n return:\n If all three sides are equal, return 'Equilateral'\n If exactly one pair of sides are equal, return 'Isoceles'\n If no pair of sides are equal, return 'Scalene'\n If not a valid triangle, then return 'NotATriangle'\n If the sum of any two sides equals the squate of the third side, then return 'Right'\n \n BEWARE: there may be a bug or two in this code\n \"\"\"\n\n # require that the input values be >= 0 and <= 200\n # Modified note: \"The side length should be less than 200\" is not in the requirements. - By Ying Hu\n \"\"\"\n if a > 200 or b > 200 or c > 200:\n return 'InvalidInput'\n \"\"\"\n # Only positive numbers (include integer and decimal) are allowed\n # Modified note: Non-zero positive numbers are valid. - By Ying Hu\n value = re.compile(r'^[+]{0,1}(\\d+)$|^[+]{0,1}(\\d+\\.\\d+)$')\n\n if not(value.match(str(a)) and value.match(str(b)) and value.match(str(c))) or a*b*c == 0:\n return 'InvalidInput';\n\n # Modified note: correct triangle classification logic. 
- By Ying Hu\n else:\n a, b, c = sorted([a, b, c])\n if ((a+b) <= c) or ((c-a) >= b) or ((c-b) >= a):\n return 'NotTriangle';\n elif a == b == c:\n return 'Equilateral';\n elif a == b or b == c:\n if round(pow(c, 2), 2) == round(pow(a, 2), 2) + round(pow(b, 2), 2):\n return 'Isosceles and Right'\n else:\n return 'Isosceles'\n elif round(pow(c, 2), 2) == round(pow(a, 2), 2) + round(pow(b, 2), 2):\n return 'Right'\n else:\n return 'Scalene'\n\"\"\"\n if a <= 0 or b <= b or c <= 0:\n return 'InvalidInput'\n \n # verify that all 3 inputs are integers \n # Python's \"isinstance(object,type) returns True if the object is of the specified type\n if not(isinstance(a,int) and isinstance(b,int) and isinstance(c,int)):\n return 'InvalidInput';\n \n # This information was not in the requirements spec but \n # is important for correctness\n # the sum of any two sides must be strictly less than the third side\n # of the specified shape is not a triangle\n if (a >= (b - c)) or (b >= (a - c)) or (c >= (a + b)):\n return 'NotATriangle'\n \n # now we know that we have a valid triangle \n if a == b and b == a:\n return 'Equilateral'\n elif ((a * 2) + (b * 2)) == (c * 2):\n return 'Right'\n elif (a != b) and (b != c) and (a != b):\n return 'Scalene'\n else:\n return 'Isoceles'\n\"\"\"","sub_path":"Triangle.py","file_name":"Triangle.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593857174","text":"from django.shortcuts import render\nfrom .forms import MessageForm\nfrom .models import Message\nfrom apps.users.models import CustomUser\nfrom itertools import chain\n\ndef dialog(request, user_pk):\n\n\tsent = Message.objects.filter(reciever_id=user_pk, sender_id=request.user.pk)\n\trecieved = Message.objects.filter(reciever_id=request.user.pk, sender_id=user_pk)\n\t#Юзер с кем идет диалог\n\tmate = CustomUser.objects.get(pk=user_pk)\n\t#Сет для не прочтенных сообщений, сет нужен для того чтобы юзеры не дублировались\n\tnot_readed = set()\n\t#Каждый раз при открытии страницы диалога с конкретным юзером все сообщения от него\n\t#в поле is_readed помечаются как True\n\tfor message in recieved:\n\t\tmessage.is_readed = True\n\t\tmessage.save()\n\t#Фильтруем все сообщения по reciever_id для реквест юзера и добавляем в сет\n\t#ID тех кто присылал сообщения, чтобы просматривать непрочитанные сообщения\n\tfor message in Message.objects.filter(reciever_id=request.user.pk):\n\t\tif message.is_readed ==False:\n\t\t\tnot_readed.add(CustomUser.objects.get(pk=message.sender_id))\n\t#С помощью chain, выводим сортированный диалог\n\tdialog_list = sorted(chain(sent, recieved), key=lambda a:a.created_at)\n\n\tif request.POST:\n\n\t\tform = MessageForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tf = form.save(commit=False)\n\t\t\tf.sender = CustomUser.objects.get(pk=request.user.pk)\n\t\t\tf.reciever = CustomUser.objects.get(pk=user_pk)\n\t\t\tform.save()\n\n\telse:\n\t\tform=MessageForm()\n\n\treturn render(request, 'dialog.html', {'sent':sent, \n\t\t'recieved':recieved, 'form':form, 'mate':mate, \n\t\t'dialog_list':dialog_list, 'not_readed':not_readed})\n\n#Все диалоги, та же логика, описана в функции выше\ndef all_dialogs(request):\n\n\tusers = CustomUser.objects.all()\n\tdialog_send = Message.objects.filter(sender_id=request.user.pk)\n\tdialog_recieve = Message.objects.filter(reciever_id=request.user.pk)\n\tdialog_list = chain(dialog_send, dialog_recieve)\n\tdialog_set = set()\n\tnot_readed = set()\n\t\n\tfor message in 
Message.objects.filter(reciever_id=request.user.pk):\n\t\tif message.is_readed ==False:\n\t\t\tnot_readed.add(CustomUser.objects.get(pk=message.sender_id))\n\n\tfor item in dialog_list:\n\t\tfor user in users:\n\t\t\tif item.sender.id == user.pk or item.reciever.id == user.pk:\n\t\t\t\tdialog_set.add(user)\n\n\treturn render(request, 'all_dialogs.html', \n\t\t{'dialog_set':dialog_set, 'not_readed':not_readed})\n","sub_path":"diabetes_project/apps/user_messages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"598358415","text":"import logging\nfrom common import errfunctions\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QShowEvent\nfrom PyQt5.QtWidgets import QAbstractItemView, QVBoxLayout, QHBoxLayout\n\nfrom database.database import DBQueryModel, DBModelCommon\nfrom money.new_transfer_dialog import NewTransferDialog\nfrom money.transactionspage import TransactionsPage\nfrom money.recurringtransactionspage import RecurringTransactionsPage\nfrom money.bankqueriesdialog import BankQueriesDialog\nfrom widgets.dialogservice import DialogService\nfrom widgets.widgets import JPushButton, JDefaultPage, JTableView, JMenu\n\n\nclass MoneyPage(JDefaultPage):\n def __init__(self, parent=None):\n super().__init__(\"Bank accounts and transactions - double click any account to load transactions for that account\", parent)\n\n self.furtherUI()\n self.actions()\n self.tooltips()\n\n def furtherUI(self):\n \"\"\"Watch out for the tables - they may very well be causing post exit crash\"\"\"\n # Define additional layouts\n\n self.queryLayout = QHBoxLayout()\n self.horiInner = QHBoxLayout()\n self.vertTables = QVBoxLayout()\n self.vertButtons = QVBoxLayout()\n\n # Adjust layouts\n self.vertInner.setSpacing(10)\n self.vertInner.setContentsMargins(10, 5, 10, 5)\n self.vertTables.setSpacing(5)\n self.vertButtons.setContentsMargins(15, 5, 15, 15)\n self.vertButtons.setSpacing(22)\n\n # Create folder search\n self.btnNewTransfer = JPushButton(\"New\\nTransfer\", self)\n self.btnTransactions = JPushButton(\"Load transactions\\nfor all accounts\", self)\n self.btnRecurringTransactions = JPushButton(\"Load recurring\\ntransactions\", self)\n self.btnBankQueries = JPushButton(\"Transaction\\nqueries\", self)\n \n # Top filters\n\n\n # Table\n self.modelRec = DBQueryModel(\"\", self)\n\n self.tabRec = JTableView(self)\n self.tabRec.setModel(self.modelRec)\n self.tabRec.verticalHeader().setVisible(False)\n self.tabRec.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.tabRec.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.tabRec.setContextMenuPolicy(Qt.CustomContextMenu)\n self.tabRec.customContextMenuRequested.connect(self.openContextMenu)\n self.tabRec.doubleClicked.connect(self.loadTransactions)\n self.lastVariable = None\n\n self.vertButtons.addWidget(self.btnNewTransfer)\n self.vertButtons.addWidget(self.btnTransactions)\n self.vertButtons.addWidget(self.btnRecurringTransactions)\n self.vertButtons.addWidget(self.btnBankQueries)\n self.vertButtons.addStretch(1)\n\n self.vertTables.addLayout(self.queryLayout)\n self.vertTables.addWidget(self.tabRec, 3)\n \n self.horiInner.addLayout(self.vertTables, 6)\n self.horiInner.addLayout(self.vertButtons, 1)\n self.vertInner.addLayout(self.horiInner)\n\n def tooltips(self):\n self.btnNewTransfer.setToolTip(\"Add new transfer between accounts\")\n self.btnTransactions.setToolTip(\"Load transactions for all accounts\")\n 
self.btnRecurringTransactions.setToolTip(\"Load recurring transactions\")\n self.btnBankQueries.setToolTip(\"query bank transactions\")\n \n def showEvent(self, event: QShowEvent):\n # Called when dialog is shown\n self.loadAccounts()\n\n def actions(self):\n self.btnNewTransfer.clicked.connect(self.newTransfer)\n self.btnTransactions.clicked.connect(self.loadTransactionsAll)\n self.btnRecurringTransactions.clicked.connect(self.loadRecurringTransactions)\n self.btnBankQueries.clicked.connect(self.bankQueries)\n\n def openContextMenu(self, position):\n menu = JMenu(self)\n\n selectedIndex = self.tabRec.selectedIndexes()[0]\n\n t_id = self.modelRec.index(selectedIndex.row(), AccountColumns.Id).data()\n\n action = menu.addAction(\"Load transactions for bank account {}\".format(t_id))\n action.triggered.connect(lambda: self.loadTransactions(selectedIndex))\n\n menu.exec(self.tabRec.viewport().mapToGlobal(position))\n\n def loadAccounts(self):\n self.modelRec.setQuery(\n \"\"\"SELECT BankType AS AcNo, BankDescription AS AcName,\n SUM(CASE WHEN Recon = \"R\" THEN Amount ELSE 0 END) AS Balance,\n SUM(CASE WHEN Recon = \"C\" THEN Amount ELSE 0 END) AS Uncleared\n FROM bank_transaction a\n LEFT JOIN types_bank b\n ON a.Account = b.BankType\n GROUP BY Account\n UNION\n (SELECT NULL, \"Totals\",\n (SELECT SUM(Amount) FROM bank_transaction WHERE Recon = \"R\"),\n (SELECT SUM(Amount) FROM bank_transaction WHERE Recon = \"C\")\n )\"\"\")\n self.tabRec.setTableProportions([(\"AcNo\", 10), (\"AcName\", 50), (\"Balance\", 20), (\"Uncleared\", 20)])\n DBModelCommon.selectAndSortByColumn(self.tabRec, AccountColumns.Id, Qt.AscendingOrder)\n self.tabRec.selectRow(0)\n self.tabRec.setFocus()\n\n def loadTransactions(self, idx=None):\n if idx:\n t_id = self.modelRec.data(self.modelRec.index(idx.row(), AccountColumns.Id))\n else:\n if len(self.tabRec.selectedIndexes()) < 1:\n DialogService.errorMsgBox(\"Select a transaction to edit\").exec()\n return\n t_id = self.modelRec.data(self.modelRec.index(self.tabRec.selectedIndexes()[0].row(), AccountColumns.Id))\n TransactionsPage(t_id, self).exec()\n self.loadAccounts()\n\n def loadTransactionsAll(self, idx=None):\n TransactionsPage(0, self).exec()\n self.loadAccounts()\n\n def loadRecurringTransactions(self, idx=None):\n RecurringTransactionsPage(self).exec()\n self.loadAccounts()\n\n def newTransfer(self):\n if NewTransferDialog(self).exec():\n self.loadAccounts()\n else:\n return\n\n def bankQueries(self):\n BankQueriesDialog(self).exec()\n\n\nclass AccountColumns:\n Id = 0\n AccountName = 1\n Balance = 2\n Uncleared = 3\n\n Count = 4\n","sub_path":"money/moneypage.py","file_name":"moneypage.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574952664","text":"# Copyright 2022 IBM Corp. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nEnd2end tests for auto-updated resources (on CPCs in DPM mode).\n\nThese tests use partitions to test the auto-updating of resources. They do not\nchange any existing partitions, but create, modify and delete test partitions.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport uuid\nimport warnings\nfrom time import sleep\nimport pytest\nfrom requests.packages import urllib3\n\nimport zhmcclient\n# pylint: disable=line-too-long,unused-import\nfrom zhmcclient.testutils import hmc_definition, hmc_session # noqa: F401, E501\nfrom zhmcclient.testutils import dpm_mode_cpcs # noqa: F401, E501\n# pylint: enable=line-too-long,unused-import\n\nfrom .utils import TEST_PREFIX, standard_partition_props, skip_warn\n\nurllib3.disable_warnings()\n\n\ndef test_autoupdate_prop(dpm_mode_cpcs): # noqa: F811\n # pylint: disable=redefined-outer-name\n \"\"\"\n Test auto-updated partitions when updating a property.\n \"\"\"\n if not dpm_mode_cpcs:\n pytest.skip(\"HMC definition does not include any CPCs in DPM mode\")\n\n for cpc in dpm_mode_cpcs:\n assert cpc.dpm_enabled\n\n print(\"Testing on CPC {c}\".format(c=cpc.name))\n\n part_name = \"{}_{}\".format(TEST_PREFIX, uuid.uuid4().hex)\n\n # Create the partition\n part_input_props = standard_partition_props(cpc, part_name)\n part = cpc.partitions.create(part_input_props)\n\n try:\n\n # Get a second zhmcclient object for the same partition, which will\n # be auto-updated\n part_auto = cpc.partitions.find(name=part_name)\n\n # Enable auto-update for the second partition\n part_auto.enable_auto_update()\n\n # Save some properties\n org_desc = part_auto.get_property('description')\n org_name = part_auto.name\n org_uri = part_auto.uri\n\n # Change the 'description' property through the first partition\n # object\n new_desc = org_desc + ' new'\n part.update_properties({'description': new_desc})\n\n # Test that the property change auto-updates the second partition\n # object.\n # We allow for some delay here, but in actual tests, the JMS message\n # about the change arrived faster than the operation response:\n # 07:20:59,817 Request Update Partition Properties\n # 07:21:00,529 JMS message for property change notification\n # 07:21:00,530 Response Update Partition Properties\n attempts = 20\n delay = 0.1 # seconds\n for _ in range(attempts):\n desc_auto = part_auto.properties['description']\n if desc_auto == new_desc:\n break\n sleep(delay)\n assert desc_auto == new_desc, \\\n \"Property did not auto-update after {} seconds\". 
\\\n format(attempts * delay)\n\n # Delete the partition through the first partition object\n part.delete()\n\n # Test that the second partition object is in ceased-existence\n # state.\n # We allow for some delay here, but in actual tests, the JMS message\n # about the change arrived faster than the operation response:\n # 07:21:00,532 Request Delete Partition\n # 07:21:02,735 JMS message for inventory change notification\n # 07:21:02,809 Response Delete Partition\n attempts = 20\n delay = 0.1 # seconds\n for _ in range(attempts):\n ceased = part_auto.ceased_existence\n if ceased:\n break\n sleep(delay)\n assert ceased, \\\n \"Ceased-existence state did not auto-update after {} seconds\". \\\n format(attempts * delay)\n\n # Test that accessing certain properties/methods on the\n # second (auto-updated) partition raises CeasedExistence\n\n with pytest.raises(zhmcclient.CeasedExistence) as exc_info:\n _ = part_auto.get_property('description')\n exc = exc_info.value\n assert exc.resource_uri == part.uri\n\n with pytest.raises(zhmcclient.CeasedExistence) as exc_info:\n _ = part_auto.prop('description')\n exc = exc_info.value\n assert exc.resource_uri == part.uri\n\n with pytest.raises(zhmcclient.CeasedExistence) as exc_info:\n part_auto.pull_full_properties()\n exc = exc_info.value\n assert exc.resource_uri == part.uri\n\n with pytest.raises(zhmcclient.CeasedExistence) as exc_info:\n _ = part_auto.dump()\n exc = exc_info.value\n assert exc.resource_uri == part.uri\n\n # Test that accessing certain properties/methods on the\n # second (auto-updated) partition does not raise CeasedExistence\n\n uri = part_auto.uri\n assert uri == org_uri\n\n name = part_auto.name\n assert name == org_name\n\n desc = part_auto.properties['description']\n assert desc == new_desc\n\n _ = part_auto.manager\n\n _ = part_auto.full_properties\n\n _ = part_auto.properties_timestamp\n\n ce = part_auto.ceased_existence\n assert ce is True\n\n _ = str(part_auto)\n\n _ = repr(part_auto)\n\n # Test that accessing properties of the first partition does not\n # raise CeasedExistence\n\n desc = part.get_property('description')\n assert desc == new_desc\n\n desc = part.prop('description')\n assert desc == new_desc\n\n ce = part.ceased_existence\n assert ce is False\n\n # Test that using methods of the first partition that need to\n # use the partition on the HMC raise HTTP 404.1\n\n with pytest.raises(zhmcclient.HTTPError) as exc_info:\n part.pull_full_properties()\n exc = exc_info.value\n assert exc.http_status == 404 and exc.reason == 1\n\n with pytest.raises(zhmcclient.HTTPError) as exc_info:\n _ = part.dump()\n exc = exc_info.value\n assert exc.http_status == 404 and exc.reason == 1\n\n finally:\n # We want to make sure the test partition gets cleaned up after\n # the test, e.g. 
if the test is interrupted with Ctrl-C.\n try:\n part.delete()\n except zhmcclient.HTTPError as exc:\n # Since it normally will have been deleted already, we need to\n # allow for \"not found\".\n if exc.http_status == 404 and exc.reason == 1:\n pass\n else:\n raise\n\n\ndef test_autoupdate_list(dpm_mode_cpcs): # noqa: F811\n # pylint: disable=redefined-outer-name\n \"\"\"\n Test list() with auto-updated Partition manager.\n \"\"\"\n if not dpm_mode_cpcs:\n pytest.skip(\"HMC definition does not include any CPCs in DPM mode\")\n\n for cpc in dpm_mode_cpcs:\n assert cpc.dpm_enabled\n\n session = cpc.manager.session\n hd = session.hmc_definition\n\n new_part_name = TEST_PREFIX + ' test_part_list_auto part1'\n\n # Ensure a clean starting point for this test\n try:\n new_part = cpc.partitions.find(name=new_part_name)\n except zhmcclient.NotFound:\n pass\n else:\n warnings.warn(\n \"Deleting test partition from previous run: {p!r} on CPC {c}\".\n format(p=new_part_name, c=cpc.name), UserWarning)\n new_part.delete()\n\n # Get the initial set of partitions, for later comparison\n initial_part_list = cpc.partitions.list()\n if not initial_part_list:\n skip_warn(\"No partitions on CPC {c} managed by HMC {h}\".\n format(c=cpc.name, h=hd.host))\n initial_part_names = set([p.name for p in initial_part_list])\n\n # Enable auto-updating on partition manager and check partition list\n cpc.partitions.enable_auto_update()\n part_list = cpc.partitions.list()\n part_names = set([p.name for p in part_list])\n assert part_names == initial_part_names\n\n # Create a partition and check partition list\n new_part_input_props = standard_partition_props(cpc, new_part_name)\n new_part = cpc.partitions.create(new_part_input_props)\n part_list = cpc.partitions.list()\n part_names = set([p.name for p in part_list])\n exp_part_names = initial_part_names | set([new_part.name])\n assert part_names == exp_part_names\n\n # Delete the partition and check partition list\n new_part.delete()\n part_list = cpc.partitions.list()\n part_names = set([p.name for p in part_list])\n assert part_names == initial_part_names\n\n # Disable auto-updating on partition manager and check partition list\n cpc.partitions.disable_auto_update()\n part_list = cpc.partitions.list()\n part_names = set([p.name for p in part_list])\n assert part_names == initial_part_names\n\n # Create a partition and check partition list\n new_part_input_props = standard_partition_props(cpc, new_part_name)\n new_part = cpc.partitions.create(new_part_input_props)\n part_list = cpc.partitions.list()\n part_names = set([p.name for p in part_list])\n exp_part_names = initial_part_names | set([new_part.name])\n assert part_names == exp_part_names\n\n # Delete the partition and check partition list\n new_part.delete()\n part_list = cpc.partitions.list()\n part_names = set([p.name for p in part_list])\n assert part_names == initial_part_names\n","sub_path":"tests/end2end/test_auto_update.py","file_name":"test_auto_update.py","file_ext":"py","file_size_in_byte":10411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224159517","text":"import sys\r\n\r\nfrom PyQt4.QtGui import *\r\nfrom PyQt4.QtCore import *\r\n\r\nfrom LoginWindow import *\r\nfrom Database import *\r\nfrom QuadraticsSA import *\r\nfrom MenuBar import *\r\n\r\nclass TopicsWidget(QWidget):\r\n def __init__(self,delegate):\r\n super().__init__()\r\n self.create_layout()\r\n self.delegate = delegate\r\n\r\n def create_layout(self):\r\n self.quadratics = 
QPushButton(\"Quadratics\")\r\n self.simplifying_expressions = QPushButton(\"Simplifying expressions\")\r\n self.expanding_brackets = QPushButton(\"Expanding brackets\")\r\n self.completing_the_square = QPushButton(\"Completing the square\")\r\n self.factorisation = QPushButton(\"Factorisation\")\r\n\r\n self.quadratics.clicked.connect(self.InitiateQuadratics)\r\n self.simplifying_expressions.clicked.connect(self.InitiateSimplifyingExpressions)\r\n self.expanding_brackets.clicked.connect(self.InitiateExpandingBrackets)\r\n self.completing_the_square.clicked.connect(self.InitiateCompletingTheSquare)\r\n self.factorisation.clicked.connect(self.InitiateFactorisation)\r\n\r\n self.message = QTextEdit(\"All the topics are shown below, choose one to initiate a test in that area: \")\r\n self.message.setReadOnly(True)\r\n self.message.setMaximumHeight(70)\r\n self.message.setMinimumWidth(300)\r\n\r\n self.message2 = QTextEdit(\"Hi There! Thanks for using this program, most of the interface is easy to use. Check the user manual for any issues you have.\")\r\n self.message2.setReadOnly(True)\r\n self.message2.setMaximumHeight(70)\r\n \r\n self.initial_layout = QVBoxLayout()\r\n self.initial_layout.addWidget(self.message2)\r\n self.initial_layout.addWidget(self.message)\r\n self.initial_layout.addWidget(self.quadratics)\r\n self.initial_layout.addWidget(self.simplifying_expressions)\r\n self.initial_layout.addWidget(self.expanding_brackets)\r\n self.initial_layout.addWidget(self.completing_the_square)\r\n self.initial_layout.addWidget(self.factorisation)\r\n\r\n self.setLayout(self.initial_layout)\r\n\r\n def InitiateQuadratics(self):\r\n self.delegate.stackedlayout.setCurrentIndex(2)\r\n self.delegate.test_name = \"Quadratics\"\r\n self.delegate.max_score = 20\r\n self.delegate.test_id = 1\r\n\r\n def InitiateSimplifyingExpressions(self):\r\n self.delegate.stackedlayout.setCurrentIndex(5)\r\n self.delegate.test_name = \"Simplifying Expressions\"\r\n self.delegate.max_score = 30\r\n self.delegate.test_id = 2\r\n\r\n def InitiateExpandingBrackets(self):\r\n self.delegate.stackedlayout.setCurrentIndex(7)\r\n self.delegate.test_name = \"Expanding Brackets\"\r\n self.delegate.max_score = 30\r\n self.delegate.test_id = 3\r\n\r\n def InitiateCompletingTheSquare(self):\r\n self.delegate.stackedlayout.setCurrentIndex(9)\r\n self.delegate.test_name = \"Completing The Square\"\r\n self.delegate.max_score = 20\r\n self.delegate.test_id = 4\r\n\r\n def InitiateFactorisation(self):\r\n self.delegate.stackedlayout.setCurrentIndex(11)\r\n self.delegate.test_name = \"Factorisation\"\r\n self.delegate.max_score = 30\r\n self.delegate.test_id = 5\r\n\r\n","sub_path":"TopicsWindow.py","file_name":"TopicsWindow.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"257441695","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom . 
models import User\n\ndef index(request):\n print(User.objects.all())\n context = {\n 'all_users': User.objects.all()\n }\n return render(request,\"index.html\", context)\n\ndef process(request):\n fname = request.POST['fname']\n lname = request.POST['lname']\n email = request.POST['email']\n age = request.POST['age']\n User.objects.create(\n fname=fname, \n lname=lname, \n email=email, \n age=age\n )\n return redirect('/')","sub_path":"django/django_orm/templatesUser/templatesApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"156210728","text":"#!/bin/python3.6\n\n# external\nimport os\nimport sys\nimport networkx as nx\nimport random\nfrom typing import Dict, List, Tuple, Callable, Union\nfrom pprint import pprint\nfrom time import sleep, time as now\nimport argparse\nimport signal\nfrom collections import Counter\nfrom operator import itemgetter\nfrom functools import reduce\nfrom heapq import heappop, heappush\nfrom math import log, ceil\nimport re\n\n# internal\nfrom blip import run_blip, BayesianNetwork, TWBayesianNetwork, monitor_blip, \\\n parse_res, start_blip_proc, check_blip_proc, stop_blip_proc, write_res, \\\n activate_checkpoints\nfrom utils import TreeDecomposition, pick, pairs, filter_read_bn, BNData, NoSolutionException,\\\n get_domain_sizes, log_bag_metrics, compute_complexity_width, weight_from_domain_size, \\\n compute_complexity, compute_complexities, shuffled\nfrom berg_encoding import solve_bn, PSET_ACYC\nfrom eval_model import eval_all\n\n# optional\nimport wandb\n\n# check if drawing/plotting is available\nCLUSTER = os.environ[\"WANDB_PLATFORM\"] == \"cluster\"\nif not CLUSTER:\n from networkx.drawing.nx_agraph import pygraphviz_layout\n import matplotlib.pyplot as plt\n\n# score comparison epsilon\nEPSILON = 1e-7\n\n# default parameter values\nBUDGET = 10\nTIMEOUT = 10\nMAX_PASSES = 100000\nMAX_TIME = 1800\nHEURISTIC = 'kmax'\nOFFSET = 2\nSEED = 9\nLAZY_THRESHOLD = 0.0\nSTART_WITH = None\nRELAXED_PARENTS = False\nTRAV_STRAT = \"random\"\nMIMIC = False\nDOMAIN_SIZES: Dict[int, int] = None\nDATFILE = \"\"\nUSE_COMPLEXITY_WIDTH = False # whether in treewidth mode or cwidth mode\nUSING_COMPLEXITY_WIDTH = False\nCOMPLEXITY_BOUND = -1\nCW_TARGET_REACHED = False\nCW_TRAV_STRAT = \"max-rand\" # one of max, max-min, max-rand, tw-max-rand\nCW_EXP_STRAT = \"min-int\" # one of max, min, min-int\nCW_REDUCTION_FACTOR = 0.5 # factor to obtain new target for cw from old cw\nSAVE_AS = \"\" # pattern to use while saving networks\nCHECKPOINT_MILESTONES = False # whether to save milestones as checkpoints\nFEASIBLE_CW = False # sets cw bound as absolute and prevents retrying with iteratively reduced bounds\nFEASIBLE_CW_THRESHOLD = 0.6e5 # rough cw threshold below which reasoning is quick\nHEURISTIC_ONLYFILTER = False # whether to run cwidth heuristics with only pset filtering\nAUTOBUDGET_OFFSET = 3\nLOG_METRICS = False # log metrics like ll and mae to wandb\nLOGGING = False # wandb logging\n\n\ndef find_start_bag(td: TreeDecomposition, history: Counter = None, debug=False):\n if USING_COMPLEXITY_WIDTH: # pick bag with highest complexity\n complexities = compute_complexities(td, DOMAIN_SIZES)\n if CW_TRAV_STRAT == \"max\":\n bag_order = sorted(complexities, key=complexities.get, reverse=True)\n elif CW_TRAV_STRAT == \"max-min\":\n bag_order = sorted(complexities, key=complexities.get,\n reverse=not CW_TARGET_REACHED)\n else: # max-rand or tw-max-rand\n if 
CW_TARGET_REACHED:\n                bag_order = shuffled(complexities.keys())\n            else:\n                bag_order = sorted(complexities, key=complexities.get, reverse=True)\n        mincount = min(history[bag_id] for bag_id in bag_order)\n        for bag_id in bag_order:\n            if history[bag_id] == mincount:\n                return bag_id\n        # maxbagidx, _ = max(complexities.items(), key=itemgetter(1))\n        # if history[maxbagidx] == 0:\n        #     return maxbagidx\n        # else:  # cw target already met, return random bag\n        #     print(\"randomly picking bag:\", end=\"\")\n        #     return pick(td.bags.keys())\n    elif TRAV_STRAT == \"random\":  # randomly pick a bag\n        return pick(td.bags.keys())\n    else:  # pick bag with least count and earliest in order\n        if TRAV_STRAT == \"post\":\n            trav_order = list(nx.dfs_postorder_nodes(td.decomp))\n        elif TRAV_STRAT == \"pre\":\n            trav_order = list(nx.dfs_preorder_nodes(td.decomp))\n        else:\n            raise ValueError(f\"invalid traversal strategy {TRAV_STRAT}\")\n        mincount = min(history.values())\n        # todo[opt]: avoid retraversing every time\n        for bag_id in trav_order:\n            if history[bag_id] == mincount:\n                return bag_id\n\n\ndef find_subtree(td: TreeDecomposition, budget: int, history: Counter = None,\n                 debug=False):\n    \"\"\"\n    finds a subtree that fits within the budget\n\n    :param td: tree decomposition in which to find the subtree\n    :param budget: max number of vertices allowed in the union of selected bags\n    :param history: tally of bags picked in previous iterations\n    :param debug: debug mode\n    :return: (selected_bag_ids, seen_vertices)\n    \"\"\"\n    start_bag_id = find_start_bag(td, history, debug)\n    selected = {start_bag_id}\n    seen = set(td.bags[start_bag_id])\n    if debug: print(f\"starting bag {start_bag_id}: {td.bags[start_bag_id]}\")\n    queue = [(0, start_bag_id)]  # (sorting metric, bag_id)\n    visited = set()\n    while queue:\n        _, bag_id = heappop(queue)\n        visited.add(bag_id)\n        bag = td.bags[bag_id]\n        if len(seen.union(bag)) > budget:\n            continue\n        else:  # can include bag in local instance\n            selected.add(bag_id)\n            seen.update(bag)\n        # add neighboring bags to queue\n        for nbr_id in td.decomp.neighbors(bag_id):\n            if nbr_id not in visited:\n                nbr_bag = td.bags[nbr_id]\n                if not USING_COMPLEXITY_WIDTH:\n                    expansion_metric = 0  # no sorting, so set same value for all bags\n                    # todo[feature]: set to random value to randomize expansion\n                else:\n                    if CW_EXP_STRAT == \"max\":\n                        expansion_metric = -compute_complexity(nbr_bag, DOMAIN_SIZES)\n                    elif CW_EXP_STRAT == \"min\":\n                        expansion_metric = compute_complexity(nbr_bag, DOMAIN_SIZES)\n                    else:\n                        expansion_metric = len(bag.intersection(nbr_bag))\n                heappush(queue, (expansion_metric, nbr_id))\n        if debug: print(f\"added bag {bag_id}: {td.bags[bag_id]}\")\n    if debug: print(f\"final seen: {seen}\")\n    return selected, seen\n\n\ndef handle_acyclicity(bn: BayesianNetwork, seen: set, leaf_nodes: set, debug=False):\n    dag = bn.dag\n    subdag = nx.subgraph_view(dag, lambda x: True,\n                              lambda x, y: not ((x in seen) and (y in seen)))\n    forced_arcs = []\n    for src, dest in pairs(leaf_nodes):\n        if nx.has_path(subdag, src, dest):\n            forced_arcs.append((src, dest))\n            if debug: print(f\"added forced {src}->{dest}\")\n        else:\n            # only check if prev path not found\n            if nx.has_path(subdag, dest, src):\n                forced_arcs.append((dest, src))\n                if debug: print(f\"added forced {dest}->{src}\")\n    return forced_arcs\n\n\ndef prepare_subtree(bn: TWBayesianNetwork, bag_ids: set, seen: set, debug=False):\n    # compute leaf nodes (based on intersection of leaf bags with outside)\n    boundary_nodes = set()\n    forced_cliques: Dict[int, frozenset] = dict()\n    for _, nbrs in 
bn.td.get_boundary_intersections(bag_ids).items():\n        for nbr_id, intersection in nbrs.items():\n            boundary_nodes.update(intersection)\n            forced_cliques[nbr_id] = intersection\n            if USING_COMPLEXITY_WIDTH and intersection:\n                clique_cw = reduce(lambda x, y: x * y, map(DOMAIN_SIZES.get, intersection))\n                if clique_cw >= COMPLEXITY_BOUND:\n                    if debug: print(\" skipping because marker clique exceeds cw bound\")\n                    SOLUTION.skipped += 1\n                    return None\n    if debug: print(\"clique sets:\", [set(c) for c in forced_cliques.values()])\n\n    # compute forced arc data for leaf nodes\n    if debug: print(\"boundary nodes:\", boundary_nodes)\n    forced_arcs = handle_acyclicity(bn, seen, boundary_nodes, debug)\n    if debug: print(\"forced arcs\", forced_arcs)\n\n    data, pset_acyc = get_data_for_subtree(bn, boundary_nodes, seen, forced_arcs)\n\n    return forced_arcs, forced_cliques, data, pset_acyc\n\n\ndef get_data_for_subtree(bn: TWBayesianNetwork, boundary_nodes: set, seen: set,\n                         forced_arcs: List[Tuple[int, int]]) -> Tuple[BNData, PSET_ACYC]:\n    # store downstream relations in a graph\n    downstream_graph = nx.DiGraph()\n    downstream_graph.add_nodes_from(boundary_nodes)\n    for node in boundary_nodes:\n        for _, successors in nx.bfs_successors(bn.dag, node):\n            downstream_graph.add_edges_from((node, succ) for succ in successors\n                                            if succ not in seen)\n    #downstream.remove_nodes_from(seen - boundary_nodes)  # ignore inner red nodes\n    assert seen.intersection(downstream_graph.nodes).issubset(boundary_nodes), \\\n        \"downstream connectivity graph contains inner nodes\"\n    downstream_graph.add_edges_from(forced_arcs)\n\n    downstream = set()\n    if not RELAXED_PARENTS:\n        downstream = set(downstream_graph.nodes())-seen\n\n    # construct score function data for local instance\n    data: BNData = {node: dict() for node in seen}\n    pset_acyc: PSET_ACYC = dict()\n    for node, psets in filter_read_bn(bn.input_file, seen).items():\n        if node in boundary_nodes:\n            if RELAXED_PARENTS:\n                downstream = set(downstream_graph.successors(node))\n            for pset, score in psets.items():\n                if pset.intersection(downstream):\n                    continue  # reject because pset contains downstream verts\n                pset_in = pset.intersection(seen)\n                if len(pset_in) < len(pset):\n                    # not all internal vertices, so check if bag exists\n                    # todo[opt]: exclude selected while searching for bag\n                    bag_id = bn.td.bag_containing(pset | {node})\n                    if bag_id == -1:\n                        continue  # reject because required bag doesn't already exist in td\n                    rem_parents = pset - pset_in\n                    req_acyc = set()\n                    for parent in rem_parents:\n                        if parent in downstream_graph:\n                            req_acyc.update(downstream_graph.predecessors(parent))\n                    pset_acyc[(node, pset)] = req_acyc\n                data[node][pset] = score\n        else:  # internal node\n            for pset, score in psets.items():\n                # internal vertices are not allowed parents outside the local instance\n                if pset.issubset(seen):\n                    data[node][pset] = score\n    return data, pset_acyc\n\n\ndef compute_max_score(data: BNData, bn: BayesianNetwork) -> float:\n    max_score = 0\n    for node in data:\n        max_score += max(data[node].values()) + bn.offsets[node]\n    return max_score\n\n\nMETRICS = (\"start_score\", \"num_passes\", \"num_improvements\", \"skipped\",\n           \"nosolution\", \"restarts\", \"start_width\")\n\n\nclass Solution(object):\n    def __init__(self, value=None, logger: Callable = None):\n        self.value: TWBayesianNetwork = value\n        # other metrics to track\n        self.data = dict.fromkeys(METRICS, 0)\n        # mute logger for code completion hack\n        self.logger = None\n        # for code completion\n        self.start_score = self.num_passes = self.num_improvements = \\\n            self.skipped = 
self.nosolution = self.restarts = self.start_width = 0\n # set proper value for logger now\n self.logger = logger\n\n def update(self, new_value):\n if self.logger is not None:\n self.logger({'score': new_value.score})\n if new_value.score - 10 > self.start_score:\n self.logger({'extremely_strong': True})\n if USE_COMPLEXITY_WIDTH:\n width = compute_complexity_width(new_value.td, DOMAIN_SIZES)\n approx_width = compute_complexity_width(new_value.td, DOMAIN_SIZES, approx=True)\n self.logger({'width': width, 'approx_width': approx_width})\n self.value = new_value\n\n\ndef _getter_factory(metric: str):\n def getter(self: Solution):\n return self.data[metric]\n return getter\n\n\ndef _setter_factory(metric: str):\n def setter(self: Solution, val):\n self.data[metric] = val\n if self.logger is not None:\n self.logger({metric: val})\n return setter\n\n\nfor metric in METRICS:\n setattr(Solution, metric,\n property(_getter_factory(metric), _setter_factory(metric)))\n\n\nSOLUTION = Solution() # placeholder global solution variable\n\n\ndef slimpass(bn: TWBayesianNetwork, budget: int = BUDGET, timeout: int = TIMEOUT,\n history: Counter = None, width_bound: int = None, debug=False):\n td = bn.td\n if USING_COMPLEXITY_WIDTH:\n final_width_bound = weight_from_domain_size(width_bound)\n else:\n final_width_bound = width_bound\n selected, seen = find_subtree(td, budget, history, debug=False)\n history.update(selected)\n prep_tuple = prepare_subtree(bn, selected, seen, debug)\n if prep_tuple is None: return\n forced_arcs, forced_cliques, data, pset_acyc = prep_tuple\n # if debug:\n # print(\"filtered data:-\")\n # pprint(data)\n old_score = bn.compute_score(seen)\n max_score = compute_max_score(data, bn)\n if RELAXED_PARENTS:\n # too strict\n # assert max_score + EPSILON >= old_score, \"max score less than old score\"\n assert round(max_score + EPSILON, 4) >= round(old_score, 4), \"max score less than old score\"\n if max_score < old_score:\n print(\"#### max score smaller than old score modulo epsilon\")\n cur_offset = sum(bn.offsets[node] for node in seen)\n if debug: print(f\"potential max: {(max_score - cur_offset)/bn.best_norm_score:.5f}\", end=\"\")\n if (max_score - cur_offset)/bn.best_norm_score <= LAZY_THRESHOLD:\n if debug: print(\" skipping because lazy threshold not met\")\n SOLUTION.skipped += 1\n return\n pos = dict() # placeholder layout\n if not CLUSTER and debug:\n pos = pygraphviz_layout(bn.dag, prog='dot')\n nx.draw(bn.dag, pos, with_labels=True)\n plt.suptitle(\"entire dag\")\n plt.show()\n nx.draw(bn.dag.subgraph(seen), pos, with_labels=True)\n plt.suptitle(\"subdag before improvement\")\n plt.show()\n if debug:\n print(\"old parents:-\")\n pprint({node: par for node, par in bn.parents.items() if node in seen})\n domain_sizes = DOMAIN_SIZES if USING_COMPLEXITY_WIDTH else None\n try:\n replbn = solve_bn(data, final_width_bound, bn.input_file, forced_arcs, forced_cliques,\n pset_acyc, timeout, domain_sizes, debug)\n except NoSolutionException as err:\n SOLUTION.nosolution += 1\n print(f\"no solution found by maxsat, skipping (reason: {err})\")\n return\n new_score = replbn.compute_score()\n if not CLUSTER and debug:\n nx.draw(replbn.dag, pos, with_labels=True)\n plt.suptitle(\"replacement subdag\")\n plt.show()\n if debug:\n print(\"new parents:-\")\n pprint(replbn.parents)\n if debug: print(f\"score change: {old_score:.3f} -> {new_score:.3f}\")\n if USE_COMPLEXITY_WIDTH:\n old_cw = compute_complexity_width(td, DOMAIN_SIZES, include=selected)\n new_cw = 
compute_complexity_width(replbn.td, DOMAIN_SIZES)\n old_acw = compute_complexity_width(td, DOMAIN_SIZES, include=selected, approx=True)\n new_acw = compute_complexity_width(replbn.td, DOMAIN_SIZES, approx=True)\n # print(f\"old: {old_cw}|{old_acw:.3f}\\tnew: {new_cw}|{new_acw:.3f}\")\n print(f\"msss of local part: {old_cw} -> {new_cw}\")\n # replacement criterion\n if USING_COMPLEXITY_WIDTH and old_cw > width_bound:\n if new_cw > width_bound:\n return False\n elif USING_COMPLEXITY_WIDTH and new_score == old_score and new_cw > old_cw:\n return False\n elif new_score < old_score: # in case not using cw, then this is the only check\n return False\n print(f\"score change: {old_score:.3f} -> {new_score:.3f}, replacing ...\")\n td.replace(selected, forced_cliques, replbn.td)\n # update bn with new bn\n bn.replace(replbn)\n if __debug__: bn.verify(verify_treewidth=not USING_COMPLEXITY_WIDTH)\n return True\n\n\ndef slim(filename: str, start_treewidth: int, budget: int = BUDGET,\n start_with_bn: TWBayesianNetwork = None, sat_timeout: int = TIMEOUT,\n max_passes=MAX_PASSES, max_time: int = MAX_TIME, heuristic=HEURISTIC,\n offset: int = OFFSET, seed=SEED, debug=False):\n global USING_COMPLEXITY_WIDTH, COMPLEXITY_BOUND, CW_TARGET_REACHED,\\\n CHECKPOINT_MILESTONES\n start = now()\n if SAVE_AS: activate_checkpoints(lambda: SOLUTION.value, SAVE_AS)\n def elapsed(): return f\"(after {now()-start:.1f} s.)\"\n heur_proc = outfile = None # placeholder\n if start_with_bn is not None:\n bn = start_with_bn\n elif START_WITH is not None:\n if not os.path.isfile(START_WITH):\n print(f\"specified start-with file doesn't exist, quitting\", file=sys.stderr)\n return\n if debug: print(f\"starting with {START_WITH}, not running heuristic\")\n # todo[safety]: handle case when no heuristic solution so far\n # todo[safety]: make add_extra_tuples a cli option\n add_extra_tuples = heuristic in (\"hc\", \"hcp\")\n bn = parse_res(filename, start_treewidth, START_WITH,\n add_extra_tuples=add_extra_tuples, augfile=\"augmented.jkl\")\n else:\n if MIMIC:\n if debug: print(\"starting heuristic proc for mimicking\")\n outfile = \"temp-mimic.res\"\n heur_proc = start_blip_proc(filename, start_treewidth, outfile=outfile,\n timeout=max_time, seed=seed,\n solver=heuristic, debug=False)\n if debug: print(f\"waiting {offset}s\")\n sleep(offset)\n # todo[safety]: make more robust by wrapping in try except (race condition)\n bn = parse_res(filename, start_treewidth, outfile)\n else:\n if debug: print(f\"running initial heuristic for {offset}s\")\n bn = run_blip(filename, start_treewidth, timeout=offset, seed=seed,\n solver=heuristic)\n if __debug__: bn.verify()\n # save checkpoint: milestone > start\n if CHECKPOINT_MILESTONES:\n write_res(bn, SAVE_AS.replace(\".res\", \"-start.res\"), write_elim_order=True)\n if USE_COMPLEXITY_WIDTH:\n start_cw = compute_complexity_width(bn.td, DOMAIN_SIZES)\n start_acw = compute_complexity_width(bn.td, DOMAIN_SIZES, approx=True)\n #complexity_bound = start_cw // 2 # todo[opt]: maybe use weight as bound?\n if FEASIBLE_CW:\n complexity_bound = FEASIBLE_CW_THRESHOLD\n if LOGGING: wandb.log({\"infeasible\": start_cw > complexity_bound})\n else:\n complexity_bound = min(start_cw - 1, int(start_cw * CW_REDUCTION_FACTOR))\n print(f\"start cw: {start_cw}\\tacw:{start_acw}\")\n print(f\"setting complexity bound: {complexity_bound}|{weight_from_domain_size(complexity_bound)}\")\n COMPLEXITY_BOUND = complexity_bound\n SOLUTION.update(bn)\n if DOMAIN_SIZES: log_bag_metrics(bn.td, DOMAIN_SIZES)\n if LAZY_THRESHOLD > 
0:\n print(f\"lazy threshold: {LAZY_THRESHOLD} i.e. \"\n f\"minimum delta required: {bn.best_norm_score*LAZY_THRESHOLD}\")\n prev_score = bn.score\n print(f\"Starting score: {prev_score:.5f}\")\n #if debug and DATFILE: print(f\"Starting LL: {eval_ll(bn, DATFILE):.6f}\")\n SOLUTION.start_score = prev_score\n if USE_COMPLEXITY_WIDTH: SOLUTION.start_width = start_cw\n history = Counter(dict.fromkeys(bn.td.decomp.nodes, 0))\n if seed: random.seed(seed)\n cw_stop_looping = False\n while max_passes < 0 or SOLUTION.num_passes <= max_passes:\n # if USE_COMPLEXITY_WIDTH and cw_stop_looping:\n # if debug: print(\"*** initial bn score matched/surpassed ***\\n\")\n # # save checkpoint: milestone > finish\n # if CHECKPOINT_MILESTONES:\n # write_res(bn, SAVE_AS.replace(\".res\", \"-finish.res\"), write_elim_order=True)\n # CHECKPOINT_MILESTONES = False\n # break\n if USE_COMPLEXITY_WIDTH:\n USING_COMPLEXITY_WIDTH = SOLUTION.num_passes >= 10 or CW_TRAV_STRAT != \"tw-max-rand\"\n width_bound = complexity_bound if USING_COMPLEXITY_WIDTH else start_treewidth\n replaced = slimpass(bn, budget, sat_timeout, history, width_bound, debug=False)\n if replaced is None: # no change by slimpass\n # if debug:\n # print(\"failed slimpass (no subtree|lazy threshold|no maxsat soln)\")\n continue # don't count this as a pass\n SOLUTION.num_passes += 1\n new_score = bn.score\n if new_score > prev_score:\n print(f\"*** New improvement! {new_score:.5f} {elapsed()} ***\")\n prev_score = new_score\n SOLUTION.update(bn)\n SOLUTION.num_improvements += 1\n if USE_COMPLEXITY_WIDTH and new_score >= SOLUTION.start_score:\n cw_stop_looping = True\n elif replaced:\n print(\"*** No improvement, but replacement performed ***\")\n prev_score = new_score\n SOLUTION.update(bn)\n if MIMIC:\n heur_score = check_blip_proc(heur_proc, debug=False)\n if heur_score > bn.score:\n if debug: print(f\"heuristic solution better {heur_score:.5f} > {bn.score:.5f}, mimicking\")\n SOLUTION.restarts += 1\n newbn = parse_res(filename, start_treewidth, outfile)\n new_score = newbn.score\n assert abs(new_score >= heur_score - 1e-5), \\\n f\"score exaggerated, reported: {heur_score}\\tactual score: {new_score}\"\n bn = newbn\n prev_score = new_score\n SOLUTION.update(bn)\n # reset history because fresh tree decomposition\n history = Counter(dict.fromkeys(bn.td.decomp.nodes, 0))\n if USE_COMPLEXITY_WIDTH:\n current_cw = compute_complexity_width(bn.td, DOMAIN_SIZES)\n if current_cw <= width_bound and not CW_TARGET_REACHED:\n if USING_COMPLEXITY_WIDTH and CW_TRAV_STRAT in [\"max-min\", \"max-rand\", \"tw-max-rand\"]:\n print(\"*** cw target reached, flipping strategy ***\")\n CW_TARGET_REACHED = True\n # if bn.score >= prev_score: cw_stop_looping = True\n # save checkpoint: milestone > lowpoint\n if CHECKPOINT_MILESTONES:\n write_res(bn, SAVE_AS.replace(\".res\", \"-lowpoint.res\"), write_elim_order=True)\n if debug and USE_COMPLEXITY_WIDTH: print(\"current msss:\", current_cw)\n if debug: print(f\"* Iteration {SOLUTION.num_passes}:\\t{bn.score:.5f} {elapsed()}\\n\")\n if now() - start > max_time:\n if debug: print(\"time limit exceeded, quitting\")\n break\n else:\n if debug: print(f\"{max_passes} passes completed, quitting\")\n if MIMIC:\n if debug: print(\"stopping heur proc\")\n stop_blip_proc(heur_proc)\n print(f\"done {elapsed()}\")\n if USE_COMPLEXITY_WIDTH and cw_stop_looping:\n return True\n\n\nclass SolverInterrupt(BaseException): pass\n\n\ndef term_handler(signum, frame):\n print(f\"#### received signal {signum}, stopping...\")\n raise 
SolverInterrupt\n\n\ndef register_handler():\n signums = [signal.SIGHUP, signal.SIGINT, signal.SIGTERM,\n signal.SIGUSR1, signal.SIGUSR2]\n for signum in signums:\n signal.signal(signum, term_handler)\n\n\ndef wandb_configure(wandb: wandb, args):\n basename, ext = os.path.splitext(os.path.basename(args.file))\n wandb.config.instance = basename\n wandb.config.treewidth = args.treewidth\n wandb.config.budget = args.budget\n wandb.config.sat_timeout = args.sat_timeout\n wandb.config.heuristic = args.heuristic\n wandb.config.offset = args.offset\n wandb.config.threshold = args.lazy_threshold\n wandb.config.SEED = args.random_seed\n wandb.config.method = args.heuristic if args.compare else f\"slim_{args.heuristic}\"\n wandb.config.traversal = args.traversal_strategy\n wandb.config.relaxed = int(args.relaxed_parents)\n wandb.config.mimic = int(args.mimic)\n wandb.config.datfile = args.datfile\n wandb.config.complexity_width = args.complexity_width\n wandb.config.cw_strategy = args.cw_strategy\n if USE_COMPLEXITY_WIDTH: wandb.config.cwbound = args.feasible_cw_threshold\n wandb.config.autobudget = args.autobudget_offset\n if args.heuristic in [\"kg\", \"kmax\"]:\n wandb.config.widthmode = \"tw\"\n elif args.heuristic.endswith(\"-mw\"):\n wandb.config.widthmode = \"mw\"\n elif args.heuristic.endswith(\"-cw\"):\n wandb.config.widthmode = \"cw\"\n\n # process config\n wandb.config.platform = \"cluster\" if CLUSTER else \"workstation\"\n wandb.config.jobid = int(os.environ.get(\"MY_JOB_ID\", -1))\n wandb.config.taskid = int(os.environ.get(\"MY_TASK_ID\", -1))\n\n\n# noinspection PyTypeChecker\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"file\", help=\"path to input file\")\nparser.add_argument(\"treewidth\", help=\"bound for treewidth (set 0 to use cwidth)\", type=int)\nparser.add_argument(\"-b\", \"--budget\", type=int, default=BUDGET,\n help=\"budget for size of local instance (set 0 for autobudget)\")\nparser.add_argument(\"-s\", \"--sat-timeout\", type=int, default=TIMEOUT,\n help=\"timeout per MaxSAT call\")\nparser.add_argument(\"-p\", \"--max-passes\", type=int, default=MAX_PASSES,\n help=\"max number of passes of SLIM to run\")\nparser.add_argument(\"-t\", \"--max-time\", type=int, default=MAX_TIME,\n help=\"max time for SLIM to run\")\nparser.add_argument(\"-u\", \"--heuristic\", default=HEURISTIC,\n choices=[\"kg\", \"ka\", \"kmax\", \"hc\", \"hcp\", \"greedy-mw\", \"max-mw\",\n \"greedy-cw\", \"max-cw\"], help=\"heuristic solver to use\")\nparser.add_argument(\"-o\", \"--offset\", type=int, default=OFFSET,\n help=\"duration after which slim takes over\")\nparser.add_argument(\"-c\", \"--compare\", action=\"store_true\",\n help=\"run only heuristic to gather stats for comparison\")\nparser.add_argument(\"-z\", \"--lazy-threshold\", type=float, default=LAZY_THRESHOLD,\n help=\"threshold below which to not try to improve local instances (set 0 to disable)\")\nparser.add_argument(\"-x\", \"--relaxed-parents\", action=\"store_true\",\n help=\"relax allowed parent sets for maxsat encoding\\n\"\n \"[warning: use at own risk, could terminate abruptly]\")\nparser.add_argument(\"-y\", \"--traversal-strategy\", default=TRAV_STRAT,\n choices=[\"random\", \"post\", \"pre\"],\n help=\"td traversal strategy\")\nparser.add_argument(\"-m\", \"--mimic\", action=\"store_true\",\n help=\"mimic heuristic if it outperforms\")\nparser.add_argument(\"-d\", \"--datfile\", help=\"path to datfile, if omitted,\"\n \"complexity-width will not be 
tracked\")\nparser.add_argument(\"-w\", \"--complexity-width\", action=\"store_true\",\n                    help=\"minimizing complexity width becomes main objective\\n\"\n                         \"[requires option -d|--datfile]\")\nparser.add_argument(\"--cw-strategy\", default=CW_TRAV_STRAT,\n                    choices=[\"max\", \"max-rand\", \"max-min\", \"tw-max-rand\"],\n                    help=\"complexity width reduction traversal strategy\\n\"\n                         \"[ignored if option -w|--complexity width not provided]\")\nparser.add_argument(\"--cw-reduction-factor\", default=CW_REDUCTION_FACTOR, type=float,\n                    help=\"factor to multiply current cw by to obtain target cw\")\nparser.add_argument(\"--checkpoint-milestones\", action=\"store_true\",\n                    help=\"save key milestone networks\")\nparser.add_argument(\"--feasible-cw\", action=\"store_true\", help=\"use feasible cw\"\n                    \" thresholding instead of iterative decrementing\")\nparser.add_argument(\"--feasible-cw-threshold\", type=int, default=FEASIBLE_CW_THRESHOLD,\n                    help=\"absolute bound for cwidth when --feasible-cw is provided\")\nparser.add_argument(\"--heuristic-onlyfilter\", action=\"store_true\",\n                    help=\"whether to run cwidth heuristic with only pset filtering \"\n                         \"(cwidth bound is only suggestive when this is enabled)\")\nparser.add_argument(\"--autobudget-offset\", type=int, default=AUTOBUDGET_OFFSET,\n                    help=\"how much to offset cwidth/tw to obtain budget value \"\n                         \"(only applicable when budget=0, i.e. autobudget mode)\")\nparser.add_argument(\"--log-metrics\", action=\"store_true\",\n                    help=\"log metrics log-likelihood and mean absolute error\")\nparser.add_argument(\"-r\", \"--random-seed\", type=int, default=SEED,\n                    help=\"random seed (set 0 for no seed)\")\nparser.add_argument(\"-l\", \"--logging\", action=\"store_true\", help=\"wandb logging\")\nparser.add_argument(\"--project-name\", default=\"twbnslim-test\", help=\"wandb project name\")\nparser.add_argument(\"--start-with\", default=None,\n                    help=\"optionally skip running heuristic and start with this solution\")\nparser.add_argument(\"--save-as\", default=\"\", help=\"filename to save final network as\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"verbose mode\")\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n    filepath = os.path.abspath(args.file)\n    LAZY_THRESHOLD = args.lazy_threshold\n    RELAXED_PARENTS = args.relaxed_parents\n    MIMIC = args.mimic\n    DATFILE = args.datfile\n    SAVE_AS = os.path.abspath(args.save_as) if args.save_as else \"\"\n    DOMAIN_SIZES = get_domain_sizes(args.datfile) if args.datfile else None\n    if args.complexity_width:\n        USE_COMPLEXITY_WIDTH = True\n        if DOMAIN_SIZES is None:\n            parser.error(\"--complexity-width requires --datfile\")\n    CW_TRAV_STRAT = args.cw_strategy\n    CW_REDUCTION_FACTOR = args.cw_reduction_factor\n    CHECKPOINT_MILESTONES = args.checkpoint_milestones\n    FEASIBLE_CW = args.feasible_cw\n    FEASIBLE_CW_THRESHOLD = args.feasible_cw_threshold\n    HEURISTIC_ONLYFILTER = args.heuristic_onlyfilter\n    if CHECKPOINT_MILESTONES and not SAVE_AS:\n        parser.error(\"--checkpoint-milestones switch requires --save-as option\")\n    LOG_METRICS = args.log_metrics\n    if LOG_METRICS and not CHECKPOINT_MILESTONES:\n        parser.error(\"--log-metrics switch requires --checkpoint-milestones option\")\n    if args.budget == 0:  # auto-budget: set to nearest multiple of 5 (10, 15, 20, 25 ... 
)\n if USE_COMPLEXITY_WIDTH:\n min_domain_size = min(DOMAIN_SIZES.values())\n equivalent_tw = ceil(log(FEASIBLE_CW_THRESHOLD, min_domain_size))\n else:\n equivalent_tw = args.treewidth\n args.budget = max(10, int(5 * ceil((equivalent_tw + args.autobudget_offset) / 5))) # nearest multiple of 5\n print(f\"autobudget set to {args.budget} (tw: {equivalent_tw})\")\n if args.budget <= args.treewidth and not args.compare:\n print(\"budget smaller than treewidth bound, quitting\", file=sys.stderr)\n sys.exit()\n print(\"not\"*__debug__, \"running optimized\")\n if args.start_with is not None:\n START_WITH = os.path.abspath(args.start_with)\n TRAV_STRAT = args.traversal_strategy\n logger = lambda x: x # no op\n # logger = lambda x: print(f\"log: {x}\") # local log\n LOGGING = args.logging\n SEED = args.random_seed\n if LOGGING:\n wandb.init(project=args.project_name)\n wandb_configure(wandb, args)\n logger = wandb.log\n SOLUTION = Solution(logger=logger)\n\n # if comparison requested, compare then exit\n if args.compare:\n outfile = SAVE_AS or \"temp.res\"\n #logger = lambda x: print(f\"log: {x}\") # local log\n common_args = dict(timeout=args.max_time, seed=SEED, outfile=outfile,\n solver=args.heuristic, datfile=args.datfile,\n save_as=SAVE_AS, debug=args.verbose)\n if USE_COMPLEXITY_WIDTH:\n if not FEASIBLE_CW:\n raise NotImplementedError(\"compare mode doesn't currently support\"\n \" iterative cwidth target reduction\")\n monitor_blip(filepath, 0, logger, cwidth=FEASIBLE_CW_THRESHOLD,\n onlyfilter=HEURISTIC_ONLYFILTER, **common_args)\n else:\n monitor_blip(filepath, args.treewidth, logger, **common_args)\n sys.exit()\n\n register_handler()\n try:\n # perform slim once and retry only if working with cw\n res = slim(filepath, args.treewidth, args.budget, None, args.sat_timeout,\n args.max_passes, args.max_time, args.heuristic, args.offset,\n SEED, args.verbose)\n while USE_COMPLEXITY_WIDTH and not FEASIBLE_CW:\n if not res:\n print(\"unable to improve complexity width\")\n break\n CW_TARGET_REACHED = False # reset target reached flag\n res = slim(filepath, args.treewidth, args.budget, SOLUTION.value,\n args.sat_timeout, args.max_passes, args.max_time,\n args.heuristic, args.offset, SEED, args.verbose)\n except SolverInterrupt:\n print(\"solver interrupted\")\n finally:\n if SOLUTION.value is None:\n print(\"terminated. 
no solution computed so far!\")\n        else:\n            # verify final bn (not required if optimizing complexity width)\n            # todo[req]: complexity width separate verification\n            SOLUTION.value.verify(verify_treewidth=not USE_COMPLEXITY_WIDTH)\n            print(\"verified\")\n            if SAVE_AS:\n                save_fname = SAVE_AS.replace(\".res\", \"-final.res\")\n                write_res(SOLUTION.value, save_fname, write_elim_order=True)\n                print(\"saving final network to\", save_fname, \"as final checkpoint\")\n                # evaluate metrics\n                if LOG_METRICS:  # checkpoint milestones guaranteed\n                    finalres = save_fname\n                    startres = SAVE_AS.replace(\".res\", \"-start.res\")\n                    print(\"evaluating metrics for\", startres)\n                    ll, maescore, maetime = eval_all(filepath, args.treewidth,\n                                                     DATFILE, startres, SEED)\n                    start_metrics = dict(start_ll=ll, start_maescore=maescore,\n                                         start_maetime=maetime)\n                    if LOGGING:\n                        wandb.log(start_metrics)\n                    else:\n                        print(start_metrics)\n                    print(\"evaluating metrics for\", finalres)\n                    ll, maescore, maetime = eval_all(filepath, args.treewidth,\n                                                     DATFILE, finalres, SEED)\n                    final_metrics = dict(final_ll=ll, final_maescore=maescore,\n                                         final_maetime=maetime)\n                    if LOGGING:\n                        wandb.log(final_metrics)\n                    else:\n                        print(final_metrics)\n            success_rate = SOLUTION.num_improvements / (SOLUTION.num_passes - SOLUTION.skipped)\n            treewidths = dict(start_tw=args.treewidth,\n                              final_tw=SOLUTION.value.td.compute_width())\n            if LOGGING:\n                wandb.log({\"success_rate\": success_rate})\n                wandb.log(treewidths)\n                if SOLUTION.num_improvements > 0:\n                    wandb.log({\"improved\": True})\n            else:\n                print(\"final metrics:\")\n                pprint(SOLUTION.data)\n                print(f\"success_rate: {success_rate:.2%}\")\n                print(f\"final score: {SOLUTION.value.score:.5f}\")\n                print(treewidths)\n            if DOMAIN_SIZES:\n                #log_bag_metrics(SOLUTION.value.td, DOMAIN_SIZES, append=True)\n                print(\"complexity-width:\", compute_complexity_width(SOLUTION.value.td,\n                                                                    DOMAIN_SIZES))\n","sub_path":"slim.py","file_name":"slim.py","file_ext":"py","file_size_in_byte":36443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529047997","text":"from app.key.model import Key\nimport unittest\nfrom tests.helpers import setUpApp, with_context, setUpDB, tearDownDB\nfrom tests.key.helpers import KeyHelper\n\n\nclass TestKeyModel(unittest.TestCase):\n\n    def setUp(self):\n        setUpApp(self)\n        setUpDB(self)\n\n    def tearDown(self):\n        tearDownDB(self)\n\n    @with_context\n    def test_get_all(self):\n        gen_key = KeyHelper._create_key()\n        gen_key.save()\n\n        retrieved_gen_key = Key.get(gen_key.id)\n\n        self.assertIn(retrieved_gen_key, Key.all())\n\n        Key.delete(gen_key.id)\n\n        self.assertNotIn(retrieved_gen_key, Key.all())\n\n    @with_context\n    def test_get_and_delete(self):\n        gen_key = KeyHelper._create_key()\n\n        retrieved_gen_key = Key.get(gen_key.id)\n        self.assertIsNone(retrieved_gen_key)\n\n        gen_key.save()\n\n        reretrieved_gen_key = Key.get(gen_key.id)\n        self.assertEqual(reretrieved_gen_key, gen_key)\n\n        Key.delete(gen_key.id)\n\n        retrieved_no_gen_key = Key.get(gen_key.id)\n\n        self.assertIsNone(retrieved_no_gen_key)\n","sub_path":"tests/key/test_key_model.py","file_name":"test_key_model.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"232874112","text":"# Check whether the input number is a prime\n\n\n\n\nprint(\"Please enter an integer number.\")\nnumber=input()\nvalue=int(number)\n\n\nif value==1:\n    print(\"1 is not a prime.\")\nelif value==2:\n    print(\"This is a prime.\")\nelse:\n    i=2\n    while i= i:\n                continue\n            L[i][j] = 
U[i][j]/U[j][j]\n            U[i] = list(map(lambda x, y: x - (L[i][j] * y), U[i], U[j]))\n\n    b = unit_matrix(row)\n    inverse_m = []\n    for i in b:\n        inverse_m.append(solve_X_by_LU(L, U, i))\n    inverse_m = tranpose_matrix(inverse_m)\n    return inverse_m\n\n\n# find the inverse of (ATA + lambda*I)\ndef linear_regression(datas, N = 2, Lambda = 1.0):\n    A, b = convert_data_Ab(datas, N)\n\n    ATA = mul_matrix(tranpose_matrix(A), A)\n    ATA_Lambda = add_matrix(ATA, matrix_scale(Lambda, unit_matrix(N)))\n    x_vector = mul_matrix(inverse_matrix(ATA_Lambda), tranpose_matrix(A))\n    x_vector = mul_matrix(x_vector, b)\n\n    return x_vector\n\n# print(tranpose_matrix(test))\n#load data and show data points\ninput_data = pd.read_csv(input_file, header = None, names = ['x','y'])\n# print(input_data)\n# plt.plot(input_data['x'], input_data['y'], 'r.')\n# plt.show()","sub_path":"Hw01/hw01.py","file_name":"hw01.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445325829","text":"from kanban_board import app, db, bcrypt\nfrom flask import render_template, url_for, flash, redirect, request, abort\nfrom kanban_board.models import Task, User\nfrom kanban_board.forms import CreateTaskForm, UpdateTaskForm, RegisterForm, LoginForm\nfrom flask_login import login_user, logout_user, current_user, login_required\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n    \"\"\"Register a new user\"\"\"\n    # If user is already logged in, redirect to homepage\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n    form = RegisterForm()\n    if form.validate_on_submit():\n        # Encrypt password\n        new_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n\n        # Create and add user\n        user = User(username=form.username.data, email=form.email.data, password=new_password)\n        db.session.add(user)\n        db.session.commit()\n        flash('Account has been created.', 'success')\n        return redirect(url_for('login'))\n    return render_template('register.html', title='Register', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    \"\"\"Validate login information and log in the user\"\"\"\n    # If user is already logged in, redirect to homepage\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        # Validate form data against data in db and log in user or prompt user to recheck\n        user = User.query.filter_by(email=form.email.data).first()\n        if user and bcrypt.check_password_hash(user.password, form.password.data):\n            login_user(user, remember=form.remember.data)\n            flash('You have been logged in', 'success')\n            return redirect(url_for('home'))\n        else:\n            flash('Login Failed. Please enter correct credentials', 'danger')\n    return render_template('login.html', form=form, title='Login')\n\n\n@app.route('/logout', methods=['GET', 'POST'])\ndef logout():\n    \"\"\"Log out user\"\"\"\n    logout_user()\n    return redirect(url_for('home'))\n\n\n@app.route('/')\n@login_required\ndef home():\n    \"\"\"Home page, only shows tasks that belong to logged in user\"\"\"\n    # Filter tasks shown by the current logged in user\n    tasks = Task.query.filter_by(creator=current_user)\n    return render_template('home.html', title='Home Page', tasks=tasks)\n\n\n@app.route('/add', methods=['GET', 'POST'])\n@login_required\ndef add():\n    \"\"\"Adding a task. 
User specific\"\"\"\n form = CreateTaskForm()\n if form.validate_on_submit():\n task = Task(title=form.title.data, task_type=form.task_type.data,\n content=form.content.data, creator=current_user)\n db.session.add(task)\n db.session.commit()\n flash('Your task has been added!', 'success')\n return redirect(url_for('home'))\n return render_template('taskadd.html', form=form, title='Add a task', legend=\"Create a new task\")\n\n\n@app.route('/task/', methods=['GET'])\n@login_required\ndef task(task_id):\n \"\"\"Viewing a task in more detail. User specific\"\"\"\n task = Task.query.get_or_404(task_id)\n # Raise a 403 status code if a user tries to access a task not created by him/her.\n if task.creator != current_user:\n abort(403)\n return render_template('task.html', task=task, title=task.title)\n\n\n@app.route('/task//update', methods=['GET', 'POST'])\n@login_required\ndef update(task_id):\n \"\"\"Update a task. User specific\"\"\"\n task = Task.query.get_or_404(task_id)\n # Raise a 403 status code if a user tries to access a task not created by him/her.\n if task.creator != current_user:\n abort(403)\n form = UpdateTaskForm()\n if form.validate_on_submit():\n task.title = form.title.data\n task.task_type = form.task_type.data\n task.content = form.content.data\n db.session.commit()\n flash('Your task has been updated!', 'success')\n return redirect(url_for('task', task_id=task.id))\n elif request.method == 'GET':\n form.title.data = task.title\n form.task_type.data = task.task_type\n form.content.data = task.content\n return render_template('taskadd.html', title='Update Task', form=form, legend=\"Update your task\")\n\n\n@app.route('/task//delete', methods=['POST'])\n@login_required\ndef delete(task_id):\n \"\"\"Delete a task. User specific\"\"\"\n task = Task.query.get_or_404(task_id)\n # Raise a 403 status code if a user tries to access a task not created by him/her.\n if task.creator != current_user:\n abort(403)\n db.session.delete(task)\n db.session.commit()\n flash('You have successfully deleted the task!', 'success')\n return redirect(url_for('home'))","sub_path":"root_dir/kanban_board/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"317942628","text":"#This is the code defining the model and training the model\n\n\nimport numpy as np \nimport csv\nimport cv2\nimport matplotlib.image as mpimg\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom random import shuffle\n\nlines = []\n\nwith open('../data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\ntrain_samples, validation_samples = train_test_split(lines, test_size = 0.2)\n\n#define a generator\ndef generator(samples, batch_size = 32):\n num_samples = len(samples)\n while 1:\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n for i in range(3):\n name = '../data/IMG/' + batch_sample[i].split('/')[-1]\n image = mpimg.imread(name)\n images.append(image)\n if i == 0:\n angle = float(batch_sample[3])\n else:\n angle = float(batch_sample[3]) + (-0.4)*i+0.6\n angles.append(angle)\n #Flip the image to get more data\n\n augmented_images, augmented_angles = [], []\n for image, angle in zip(images, angles):\n augmented_images.append(image)\n augmented_angles.append(angle)\n 
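# also append a horizontally flipped copy with the negated steering angle (data augmentation)\n                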
augmented_images.append(cv2.flip(image, 1))\n augmented_angles.append(angle*-1.0)\n\n X_train = np.array(augmented_images)\n y_train = np.array(augmented_angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n\n'''\nimages = []\nmeasurements = []\nfor line in lines:\n for i in range(3):\n source_path = line[i]\n filename = source_path.split('/')[-1]\n current_path = './data/IMG/'+filename\n image = mpimg.imread(current_path)\n images.append(image)\n if i == 0:\n measurement = float(line[3])\n else:\n measurement = float(line[3]) + (-0.4)*i+0.6\n measurements.append(measurement)\n\n\n\n\n\nX_train = np.array(images)\ny_train = np.array(measurements)\n'''\ntrain_generator = generator(train_samples, batch_size=16)\nvalidation_generator = generator(validation_samples, batch_size=16)\n\n\n#This is the model\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Dropout, Cropping2D\nfrom keras.layers.convolutional import Conv2D\n#from keras.layers.core import Activation\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras import regularizers\n\nmodel = Sequential()\n\n#Cropping image\nmodel.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160, 320, 3)))\n\n\n#Lambda layer\nmodel.add(Lambda(lambda x: (x / 255) - 0.5))\n\n#Layer 1 (90, 320, 3)\nmodel.add(Conv2D(12, kernel_size = (3, 3), padding = 'valid', activation = 'relu', \\\n kernel_regularizer = regularizers.l2(0.001)))\n#model.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Dropout(rate = 0.8))\n\n#Layer 2 (88, 318, 12)\nmodel.add(Conv2D(24, kernel_size = (3, 3), padding = 'valid', activation = 'relu', \\\n kernel_regularizer = regularizers.l2(0.001)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#Layer 3 (43, 158, 24)\nmodel.add(Conv2D(36, kernel_size = (3, 3), padding = 'valid', activation = 'relu', \\\n kernel_regularizer = regularizers.l2(0.001)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n#Layer 4 (20, 78, 36)\nmodel.add(Conv2D(48, kernel_size = (3, 3), padding = 'valid', activation = 'relu', \\\n kernel_regularizer = regularizers.l2(0.001)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#Layer 5 (9, 38, 48)\nmodel.add(Conv2D(64, kernel_size = (3, 3), padding = 'valid', activation = 'relu', \\\n kernel_regularizer = regularizers.l2(0.001)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Dropout(rate = 0.8))\n\n#Flatten (3, 18, 64)\nmodel.add(Flatten())\n\n#Layer 6, full connected layer (3456, 1)\nmodel.add(Dense(512, activation = 'relu', kernel_regularizer = regularizers.l2(0.001)))\n#model.add(Dropout(rate = 0.5))\n\n\n#Layer 7\nmodel.add(Dense(256, activation = 'relu', kernel_regularizer = regularizers.l2(0.001)))\n\n#Layer 8\nmodel.add(Dense(128, activation = 'relu', kernel_regularizer = regularizers.l2(0.001)))\n\n#Layer 9\nmodel.add(Dense(64, activation = 'relu', kernel_regularizer = regularizers.l2(0.001)))\n\n\n#Output\n\n\nmodel.add(Dense(1))\n\n#Train the model below\n\nmodel.compile(loss = 'mse', optimizer = 'adam')\n#model.fit(X_train, y_train, validation_split=0.2, shuffle = True, nb_epoch = 1)\n\n\n#visualizing the loss\n\nfrom keras.models import Model\nimport matplotlib.pyplot as plt\n\nhistory_object = model.fit_generator(train_generator, steps_per_epoch = len(train_samples), \\\n validation_data = validation_generator, validation_steps = len(validation_samples), epochs = 3, verbose = 1)\n\n#Save the model\nmodel.save('model.h5')\nprint(history_object.history.keys())\n\n#plot the training and validation loss for each 
epoch\n\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.savefig('loss.jpg')\n#plt.show()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450182211","text":"import sys\nimport io\nfrom bs4 import BeautifulSoup\nimport requests\n\n# REST: POST, GET, PUT (update, replace), PATCH (update, modify), DELETE\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n\n# Login user info\nLOGIN_INFO = {\n    'login_id':'',\n    'login_pwd':''\n}\n\n# Create a session\nwith requests.Session() as s:\n    login_req = s.post('http://domebon.com/?menuType=member&mode=json&act=login',data=LOGIN_INFO)\n    # Check the HTML source\n    #print('login_req',login_req.text)\n    # Check the headers\n    #print('headers',login_req.headers)\n\n    if login_req.status_code == 200 and login_req.ok:\n        post_one = s.get('http://domebon.com/?menuType=product&mode=view&prodCode=2018091100419')\n        post_one.raise_for_status()\n\n        soup = BeautifulSoup(post_one.text,'html.parser')\n        #print(soup.prettify())\n\n        #form > div.prodDetailWrap > div.detailInfo > ul:nth-child(1) > li.prodName\n        article = soup.select_one(\"ul > li.prodName\")\n        #print(article.)\n        for i in article:\n            if i.string is not None:\n                print(i.string)\n","sub_path":"3-4-1.py","file_name":"3-4-1.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415254419","text":"# --------------------------------\n# Name: build_static_files_for_gages.py\n# Purpose: Build static files for ET-Demands from zonal stats ETCells (basin zone from GAGES-II)\n# --------------------------------\n\nimport argparse\nimport copy\nfrom collections import defaultdict\nimport datetime as dt\nimport logging\nimport os\nimport re\nimport shutil\nimport sys\n\nimport pandas as pd\n\nimport src.prep._util as util\nfrom src.prep import _arcpy\nfrom src.config.config_prep import cfg_prep, crop_et_config\n\n\ndef build_static_files(cfg_prep_used, area_threshold=0.25, beef_cuttings=4, dairy_cuttings=5, overwrite_flag=False):\n    \"\"\"Build static text files needed to run ET-Demands model\n\n    Parameters\n    ----------\n    area_threshold : float\n        CDL area threshold [acres] (the default is 0.25 acres).\n    beef_cuttings : int\n        Initial number of beef hay cuttings (the default is 4).\n    dairy_cuttings : int\n        Initial number of dairy hay cuttings (the default is 5).\n    overwrite_flag : bool\n        If True, overwrite existing files (the default is False).\n\n    Returns\n    -------\n    None\n\n    \"\"\"\n    logging.info('\\nBuilding ET-Demands Static Files')\n\n    # Default values\n    permeability = -999\n    soil_depth = 60  # inches\n    aridity = 50\n    irrigation = 1\n\n    # Input paths\n    # DEADBEEF - For now, get cropET folder from INI file\n    # This function may eventually be moved into the main cropET code\n    config = copy.deepcopy(cfg_prep_used)\n\n    try:\n        project_ws = config.CROP_ET.project_folder\n    except:\n        logging.error('project_folder parameter must be set in the INI file, exiting')\n        return False\n    try:\n        gis_ws = config.CROP_ET.gis_folder\n    except:\n        logging.error('gis_folder parameter must be set in the INI file, exiting')\n        return False\n    try:\n        
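# cells_path: ET cell zones layer (shapefile or geodatabase feature class) set in the INI file\n        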
et_cells_path = config.CROP_ET.cells_path\n except:\n logging.error('cells_path parameter must be set in the INI file, exiting')\n return False\n try:\n crop_et_ws = config.CROP_ET.crop_et_folder\n except:\n logging.error('crop_et_folder parameter must be set in the INI file, exiting')\n return False\n try:\n template_ws = config.CROP_ET.template_folder\n except:\n logging.error('CROP_ET template_folder parameter must be set in the INI file, exiting')\n return False\n\n # Read data from geodatabase or shapefile\n # if '.gdb' in et_cells_path and not et_cells_path.endswith('.shp'):\n # _flag = False\n # _path = os.path.dirname(et_cells_path)\n # gdb_path = r'D:\\Projects\\CAT_Basins\\AltusOK\\et-demands_py\\et_demands.gdb'\n # _cells_path = os.path.join(gdb_path, 'et_cells')\n\n # Output sub-folder names\n static_ws = os.path.join(project_ws, 'static')\n\n # basin attributes fields\n basin_id_filed = 'STAID'\n basin_elev_field = 'ELEV_MEAN_M_BASIN'\n\n # ET Cell field names\n cell_lat_field = 'LAT'\n cell_lon_field = 'LON'\n cell_id_field = 'GAGE_ID'\n # cell_station_id_field = 'STATION_ID'\n # awc_field = 'AWC'\n clay_field = 'CLAY'\n sand_field = 'SAND'\n awc_in_ft_field = 'AWC_IN_FT'\n hydgrp_num_field = 'HYDGRP_NUM'\n hydgrp_field = 'HYDGRP'\n\n # huc_field = 'HUC{}'.format(huc)\n # permeability_field = 'PERMEABILITY'\n # soil_depth_field = 'SOIL_DEPTH'\n # aridity_field = 'ARIDITY'\n # dairy_cutting_field = 'DAIRY_CUTTINGS'\n # beef_cutting_field = 'BEEF_CUTTINGS'\n\n # Static file names\n cell_props_name = 'ETCellsProperties.txt'\n cell_crops_name = 'ETCellsCrops.txt'\n cell_cuttings_name = 'MeanCuttings.txt'\n crop_params_name = 'CropParams.txt'\n crop_coefs_name = 'CropCoefs.txt'\n crop_coefs_eto = 'CropCoefs_eto.txt'\n crop_coefs_etr = 'CropCoefs_etr.txt'\n eto_ratio_name = 'EToRatiosMon.txt'\n etr_ratio_name = 'ETrRatiosMon.txt'\n static_list = [crop_params_name, crop_coefs_name, crop_coefs_eto, crop_coefs_etr, cell_props_name, cell_crops_name,\n cell_cuttings_name, eto_ratio_name, etr_ratio_name]\n\n # Check input folders\n if not os.path.isdir(crop_et_ws):\n logging.critical('\\nERROR: The INI cropET folder does not exist\\n {}'.format(crop_et_ws))\n sys.exit()\n elif not os.path.isdir(project_ws):\n logging.critical('\\nERROR: The project folder does not exist\\n {}'.format(project_ws))\n sys.exit()\n elif not os.path.isdir(gis_ws):\n logging.critical('\\nERROR: The GIS folder does not exist\\n {}'.format(gis_ws))\n sys.exit()\n logging.info('\\nGIS Workspace: {}'.format(gis_ws))\n logging.info('Project Workspace: {}'.format(project_ws))\n logging.info('CropET Workspace: {}'.format(crop_et_ws))\n logging.info('Template Workspace: {}'.format(template_ws))\n\n # Check input files\n if not _arcpy.exists(et_cells_path):\n logging.critical('\\nERROR: The ET Cell shapefile does not exist\\n {}'.format(et_cells_path))\n sys.exit()\n for static_name in static_list:\n if not os.path.isfile(os.path.join(template_ws, static_name)):\n logging.error(\n '\\nERROR: The static template does not exist\\n {}'.format(os.path.join(template_ws, static_name)))\n sys.exit()\n logging.debug('ET Cells Path: {}'.format(et_cells_path))\n\n # Build output table folder if necessary\n if not os.path.isdir(static_ws):\n os.makedirs(static_ws)\n\n # Read ET Cell zonal stats\n logging.info('\\nReading ET Cell Zonal Stats')\n logging.debug(' {}'.format(et_cells_path))\n crop_field_list = sorted([f for f in _arcpy.list_fields(et_cells_path) if re.match('CROP_\\d{3}', f)])\n fields = [cell_id_field, 
cell_lat_field, cell_lon_field, awc_in_ft_field, clay_field, sand_field, hydgrp_num_field,\n hydgrp_field]\n fields = fields + crop_field_list\n logging.debug(' Fields: {}'.format(fields))\n cell_data_dict = defaultdict(dict)\n\n for fid, row in _arcpy.search_cursor(et_cells_path, fields).items():\n # Switch to cell_id_field as index (instead of FID)\n for f in fields[1:]:\n cell_data_dict[str(row[cell_id_field])][f] = row[f]\n\n # Read basin data\n logging.info('\\nReading basin attributes')\n fields = [basin_elev_field]\n logging.debug(' Fields: {}'.format(fields))\n basin_data_dict = defaultdict(dict)\n basin_topo = pd.read_csv(config.GAGES.basin_topo_file, sep=',', dtype={0: str})\n for cell_id_tmp in list(cell_data_dict.keys()):\n basin_data_dict[cell_id_tmp][basin_elev_field] = \\\n basin_topo.loc[basin_topo[basin_id_filed] == cell_id_tmp, basin_elev_field].values[0]\n # Convert elevation units if necessary\n logging.debug(' Convert station elevation from meters to feet')\n for k in basin_data_dict.keys():\n basin_data_dict[k][basin_elev_field] /= 0.3048\n\n # static files\n logging.info('\\nCopying template static files')\n for static_name in static_list:\n logging.debug(' {}'.format(static_name))\n shutil.copy(os.path.join(template_ws, static_name), static_ws)\n\n logging.info('\\nWriting static text files')\n cell_props_path = os.path.join(static_ws, cell_props_name)\n cell_crops_path = os.path.join(static_ws, cell_crops_name)\n cell_cuttings_path = os.path.join(static_ws, cell_cuttings_name)\n eto_ratio_path = os.path.join(static_ws, eto_ratio_name)\n etr_ratio_path = os.path.join(static_ws, etr_ratio_name)\n\n # Write cell properties\n logging.debug(' {}'.format(cell_props_path))\n with open(cell_props_path, 'a') as output_f:\n for cell_id, cell_data in sorted(cell_data_dict.items()):\n basin_id = cell_id\n basin_lat = '{:>9.4f}'.format(cell_data[cell_lat_field])\n basin_lon = '{:>9.4f}'.format(cell_data[cell_lon_field])\n basin_data = basin_data_dict[basin_id]\n basin_elev = '{:.2f}'.format(basin_data[basin_elev_field])\n # There is an extra/unused column in the template and excel files\n output_list = [cell_id, cell_id, basin_id, basin_lat, basin_lon, basin_elev, permeability,\n '{:.4f}'.format(cell_data[awc_in_ft_field]), soil_depth, cell_data[hydgrp_field],\n cell_data[hydgrp_num_field], aridity, '']\n output_f.write('\\t'.join(map(str, output_list)) + '\\n')\n\n del output_list\n del basin_id, basin_lat, basin_lon, basin_elev\n\n # Write cell crops\n crops = util.parse_int_set(config.USDA.cdl_crops)\n logging.debug(' {}'.format(cell_crops_path))\n with open(cell_crops_path, 'a') as output_f:\n for cell_id, cell_data in sorted(cell_data_dict.items()):\n basin_id = cell_id\n output_list = [cell_id, cell_id, basin_id, irrigation]\n crop_list = ['CROP_{:03d}'.format(i) for i in crops]\n crop_area_list = []\n for crop in crop_list:\n if crop in cell_data.keys() and cell_data[crop] is not None:\n crop_area_list.append(cell_data[crop])\n else:\n crop_area_list.append(0)\n crop_flag_list = [1 if area > area_threshold else 0 for area in crop_area_list]\n output_list = output_list + crop_flag_list\n output_f.write('\\t'.join(map(str, output_list)) + '\\n')\n\n del crop_list, crop_area_list, crop_flag_list, output_list\n\n # Write cell cuttings\n logging.debug(' {}'.format(cell_cuttings_path))\n with open(cell_cuttings_path, 'a') as output_f:\n for cell_id, cell_data in sorted(cell_data_dict.items()):\n output_list = [cell_id, cell_id, '{:>9.4f}'.format(cell_data[cell_lat_field]), 
dairy_cuttings,\n beef_cuttings]\n output_f.write('\\t'.join(map(str, output_list)) + '\\n')\n\n # Write monthly ETo ratios\n logging.debug(' {}'.format(eto_ratio_path))\n with open(eto_ratio_path, 'a') as output_f:\n for cell_id, cell_data in sorted(cell_data_dict.items()):\n basin_id = cell_id\n output_f.write('\\t'.join(map(str, [basin_id, ''] + [1.0] * 12)) + '\\n')\n\n # Write monthly ETr ratios\n logging.debug(' {}'.format(etr_ratio_path))\n with open(etr_ratio_path, 'a') as output_f:\n for cell_id, cell_data in sorted(cell_data_dict.items()):\n basin_id = cell_id\n output_f.write('\\t'.join(map(str, [basin_id, ''] + [1.0] * 12)) + '\\n')\n\n\ndef arg_parse():\n \"\"\"\"\"\"\n parser = argparse.ArgumentParser(\n description='ET-Demands Static Files',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--acres', default=0.25, type=float, help='Crop area threshold')\n parser.add_argument('--beef', default=4, type=int, help='Number of beef hay cuttings')\n parser.add_argument('--dairy', default=5, type=int, help='Number of dairy hay cuttings')\n parser.add_argument('-o', '--overwrite', default=None, action='store_true', help='Overwrite existing file')\n parser.add_argument('--debug', default=logging.INFO, const=logging.DEBUG, help='Debug level logging',\n action=\"store_const\", dest=\"loglevel\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == '__main__':\n args = arg_parse()\n\n logging.basicConfig(level=args.loglevel, format='%(message)s')\n logging.info('\\n{}'.format('#' * 80))\n logging.info('{0:<20s} {1}'.format(\n 'Run Time Stamp:', dt.datetime.now().isoformat(' ')))\n logging.info('{0:<20s} {1}'.format('Current Directory:', os.getcwd()))\n logging.info('{0:<20s} {1}'.format(\n 'Script:', os.path.basename(sys.argv[0])))\n\n # region = \"some_from_irrigation\"\n region = \"some_from_3557\"\n cfg_prep_new = crop_et_config(cfg_prep, region)\n build_static_files(cfg_prep_new, area_threshold=args.acres, dairy_cuttings=args.dairy, beef_cuttings=args.beef,\n overwrite_flag=args.overwrite)\n","sub_path":"src/prep/preprocess4gages/build_static_files_for_gages.py","file_name":"build_static_files_for_gages.py","file_ext":"py","file_size_in_byte":11929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343603806","text":"import os\nimport sys\nimport json\nimport spotipy\nimport webbrowser\nimport spotipy.util as util\nfrom json.decoder import JSONDecodeError\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport collections\nimport queue\nimport random\n\nscope = 'user-library-read'\nclient_id = \"91f224b0e56d405cb6c2c10bb9961405\"\nclient_secret = \"2c0ec20e82774410be39c1862bf6c821\"\nredirect_uri = \"http://www.google.com/\"\n#Get the username from the terminal\nclient_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\nsp.trace=False\n\ndef get_artist(name):\n results = sp.search(q='artist:' + name, type='artist')\n #items = results['artists']['items']\n #if len(items) > 0:\n # return items[0]\n #else:\n # return None\n return results\n\ndef show_recommendations_for_artist(artist):\n albums = []\n results = sp.recommendations(seed_artists = [artist['id']], limit = 3)\n for track in results['tracks']:\n print (track['name'], '-', track['artists'][0]['name'])\n\ndef show_related_artists(artist):\n results = []\n albums = []\n\n name = 
artist['artists']['items'][0]['name']\n uri = artist['artists']['items'][0]['uri']\n\n related = sp.artist_related_artists(uri)\n results.append(name)\n for related_artist in related['artists'][:3]:\n results.append(related_artist['name'])\n\n # print out the first three genres\n for idx, genre in enumerate(related_artist['genres'][:3], start = 1):\n results.append('Genre {}: {}'.format(idx, genre))\n\n top_tracks = sp.artist_top_tracks(related_artist['uri'])\n for idx, track in enumerate(top_tracks['tracks'][:3], start = 1):\n results.append('Song {}: {}'.format(idx, track['name']))\n\n #tracks = sp.recommendations(seed_artists = [related_artist['id']], limit = 3)\n #for idx, track in enumerate(tracks['tracks'], start = 1):\n # results.append('Song {}: {}'.format(idx, track['name'] + ' - ' + track['artists'][0]['name']))\n\n return results\n\nname = input(\"Who is the Artist you would like to search for? \")\n\nartist = get_artist(name)\nif artist:\n results = show_related_artists(artist)\n #print('Related artists for', results[0])\n #for artist in results[1:]:\n #print(artist)\n print()\n\nprint()\nprint()\nprint()\n\n# Loop\nwhile True:\n # Main Menu\n print()\n print(\">>> Welcome to Spotipy!\")\n print()\n print(\"0 - Search for an artist\")\n print(\"1 - exit\")\n print()\n choice = input(\"Your choice: \")\n\n if choice == \"0\":\n print()\n searchQuery = input(\"Ok, what's their name?: \")\n print()\n\n # Get search results\n searchResults = sp.search(searchQuery,1,0,\"artist\")\n\n # Artist details\n artist = searchResults['artists']['items'][0]\n print(artist['name'])\n print(str(artist['followers']['total']) + \" followers\")\n print(artist['genres'][0])\n print()\n webbrowser.open(artist['images'][0]['url'])\n artistID = artist['id']\n\n\n # Album and track details\n trackURIs = []\n trackArt = []\n z = 0\n\n # Extract album data\n albumResults = sp.artist_albums(artistID)\n albumResults = albumResults['items']\n\n for item in albumResults:\n print(\"ALBUM: \" + item['name'])\n albumID = item['id']\n albumArt = item['images'][0]['url']\n\n # Extract track data\n trackResults = sp.album_tracks(albumID)\n trackResults = trackResults['items']\n\n for item in trackResults:\n print(str(z) + \": \" + item['name'])\n trackURIs.append(item['uri'])\n trackArt.append(albumArt)\n z+=1\n print()\n\n # See album art\n while True:\n songSelection = input(\"Enter a song number to see album art and play the song (x to exit): \") # and play the song\n if songSelection == \"x\":\n break\n trackSelectionList = []\n trackSelectionList.append(trackURIs[int(songSelection)])\n sp.start_playback(device_id=None, uris=trackSelectionList) # playback requires a user-authorized token and an active device\n webbrowser.open(trackArt[int(songSelection)])\n\n if choice == \"1\":\n break\n\n # print(json.dumps(trackResults, sort_keys=True, indent=4))\n","sub_path":"trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"456061366","text":"# -*-coding:utf-8-*-\nimport pandas as pd\nfrom class_request.requests_scan import HttpRequest\n\ninfo = pd.read_excel('E:\\\\request_demo\\\\test_data.xlsx')\nitem_data = info.values\n\n\nclass RunCase:\n @staticmethod\n def run():\n for i in item_data:\n print('Running test case {0}; the results are as follows:'.format(i[0]))\n HttpRequest(). 
\\\n http_requests(url_data=i[1],\n method_info=i[3])\n\n\nif __name__ == '__main__':\n RunCase.run()\n","sub_path":"class_request/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502318434","text":"#coding=utf-8\nimport socket\nimport time\nsk = socket.socket(type=socket.SOCK_DGRAM)\n\nsk.bind(('127.0.0.1',9090))\n\nwhile 1:\n startopt,addr = sk.recvfrom(1024)\n print(addr)\n local_tm = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())\n sk.sendto(local_tm,addr)\nsk.close()\n","sub_path":"udp-server.py","file_name":"udp-server.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520463781","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2021 CERN.\n# Copyright (C) 2021 Northwestern University.\n#\n# Invenio-RDM-Records is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"DataCite based Schema for Invenio RDM Records.\"\"\"\n\nfrom edtf import parse_edtf\nfrom edtf.parser.grammar import ParseException\nfrom flask import current_app\nfrom flask_babelex import lazy_gettext as _\nfrom invenio_access.permissions import system_identity\nfrom invenio_records_resources.proxies import current_service_registry\nfrom invenio_vocabularies.proxies import current_service as vocabulary_service\nfrom marshmallow import Schema, ValidationError, fields, missing, post_dump, \\\n validate\nfrom marshmallow_utils.fields import SanitizedUnicode\nfrom marshmallow_utils.html import strip_html\n\nfrom invenio_rdm_records.resources.serializers.ui.schema import \\\n current_default_locale\n\nfrom ..utils import get_vocabulary_props\n\n\ndef get_scheme_datacite(scheme, config_name, default=None):\n \"\"\"Returns the datacite equivalent of a scheme.\"\"\"\n config_item = current_app.config[config_name]\n return config_item.get(scheme, {}).get(\"datacite\", default)\n\n\nclass PersonOrOrgSchema43(Schema):\n \"\"\"Creator/contributor common schema for v4.\"\"\"\n\n name = fields.Str(attribute=\"person_or_org.name\")\n nameType = fields.Method('get_name_type', attribute=\"person_or_org.type\")\n givenName = fields.Str(attribute=\"person_or_org.given_name\")\n familyName = fields.Str(attribute=\"person_or_org.family_name\")\n nameIdentifiers = fields.Method('get_name_identifiers')\n affiliation = fields.Method('get_affiliation')\n\n def get_name_type(self, obj):\n \"\"\"Get name type.\"\"\"\n return obj[\"person_or_org\"][\"type\"].title()\n\n def get_name_identifiers(self, obj):\n \"\"\"Get name identifier list.\"\"\"\n serialized_identifiers = []\n identifiers = obj[\"person_or_org\"].get(\"identifiers\", [])\n\n for identifier in identifiers:\n scheme = identifier[\"scheme\"]\n id_scheme = get_scheme_datacite(\n scheme, \"RDM_RECORDS_PERSONORG_SCHEMES\", default=scheme,\n )\n\n if id_scheme:\n name_id = {\n \"nameIdentifier\": identifier[\"identifier\"],\n \"nameIdentifierScheme\": id_scheme,\n }\n serialized_identifiers.append(name_id)\n\n return serialized_identifiers\n\n def get_affiliation(self, obj):\n \"\"\"Get affiliation list.\"\"\"\n affiliations = obj.get(\"affiliations\", [])\n\n if not affiliations:\n return missing\n\n serialized_affiliations = []\n ids = []\n\n for affiliation in affiliations:\n id_ = affiliation.get(\"id\")\n if id_:\n ids.append(id_)\n else:\n # if no id, name is mandatory\n 
serialized_affiliations.append(\n {\"name\": affiliation[\"name\"]}\n )\n\n if ids:\n affiliations_service = (\n current_service_registry.get(\"rdm-affiliations\")\n )\n affiliations = affiliations_service.read_many(system_identity, ids)\n\n for affiliation in affiliations:\n aff = {\n \"name\": affiliation[\"name\"],\n }\n identifiers = affiliation.get(\"identifiers\")\n if identifiers:\n # PIDS-FIXME: DataCite accepts only one, how to decide\n identifier = identifiers[0]\n id_scheme = get_scheme_datacite(\n identifier[\"scheme\"],\n \"VOCABULARIES_AFFILIATION_SCHEMES\",\n default=identifier[\"scheme\"]\n )\n\n if id_scheme:\n aff[\"affiliationIdentifier\"] = identifier[\"identifier\"]\n aff[\"affiliationIdentifierScheme\"] = id_scheme.upper()\n # upper() is fine since this field is free text. It\n # saves us from having to modify invenio-vocabularies\n # or do config overrides.\n\n serialized_affiliations.append(aff)\n\n return serialized_affiliations\n\n @post_dump(pass_many=False)\n def capitalize_name_type(self, data, **kwargs):\n \"\"\"Capitalize type.\"\"\"\n if data.get(\"nameType\"):\n data[\"nameType\"] = data[\"nameType\"].capitalize()\n\n return data\n\n\nclass CreatorSchema43(PersonOrOrgSchema43):\n \"\"\"Creator schema for v4.\"\"\"\n\n\nclass ContributorSchema43(PersonOrOrgSchema43):\n \"\"\"Contributor schema for v43.\"\"\"\n\n contributorType = fields.Method('get_role')\n\n def get_role(self, obj):\n \"\"\"Get datacite role.\"\"\"\n role = obj.get(\"role\")\n if not role:\n return missing\n\n props = get_vocabulary_props(\n 'contributorsroles', ['props.datacite'], role[\"id\"])\n return props.get('datacite', '')\n\n\nclass SubjectSchema43(Schema):\n \"\"\"Subjects schema for v43.\"\"\"\n\n subject = fields.Str(attribute=\"subject\")\n valueURI = fields.Str(attribute=\"identifier\")\n subjectScheme = fields.Str(attribute=\"scheme\")\n\n\nclass FundingSchema43(Schema):\n \"\"\"Funding schema for v43.\"\"\"\n\n funderName = fields.Str(attribute=\"funder.name\")\n funderIdentifier = fields.Str(attribute=\"funder.identifier\")\n funderIdentifierType = fields.Method('get_identifier_type')\n awardTitle = fields.Str(attribute=\"award.title\")\n awardNumber = fields.Str(attribute=\"award.number\")\n # PIDS-FIXME: URI should be processed depending on the schema\n awardURI = fields.Str(attribute=\"award.identifier\")\n\n TO_FUNDER_IDENTIFIER_TYPES = {\n \"ISNI\": \"ISNI\",\n \"GRID\": \"GRID\",\n \"ROR\": \"ROR\",\n \"CROSSREF FUNDER ID\": \"Crossref Funder ID\",\n \"OTHER\": \"Other\",\n }\n\n def get_identifier_type(self, obj):\n \"\"\"Upper case the type.\"\"\"\n # TODO: Likely has to be revisted when the form support deposit.\n id_type = obj.get(\"funder\", {}).get(\"scheme\", \"Other\")\n key = id_type.upper()\n return self.TO_FUNDER_IDENTIFIER_TYPES.get(key, \"Other\")\n\n\nclass DataCite43Schema(Schema):\n \"\"\"DataCite JSON 4.3 Marshmallow Schema.\"\"\"\n\n # PIDS-FIXME: What about versioning links and related ids\n types = fields.Method('get_type')\n titles = fields.Method('get_titles')\n creators = fields.List(\n fields.Nested(CreatorSchema43), attribute='metadata.creators')\n contributors = fields.List(\n fields.Nested(ContributorSchema43), attribute='metadata.contributors')\n publisher = fields.Str(attribute='metadata.publisher')\n publicationYear = fields.Method(\"get_publication_year\")\n subjects = fields.Method(\"get_subjects\")\n dates = fields.Method('get_dates')\n language = fields.Method('get_language')\n identifiers = fields.Method('get_identifiers')\n 
relatedIdentifiers = fields.Method('get_related_identifiers')\n sizes = fields.List(SanitizedUnicode(), attribute=\"metadata.sizes\")\n formats = fields.List(SanitizedUnicode(), attribute=\"metadata.formats\")\n version = SanitizedUnicode(attribute=\"metadata.version\")\n rightsList = fields.Method('get_rights')\n descriptions = fields.Method('get_descriptions')\n geoLocations = fields.Method(\"get_locations\")\n fundingReferences = fields.List(\n fields.Nested(FundingSchema43), attribute='metadata.funding')\n schemaVersion = fields.Constant(\"http://datacite.org/schema/kernel-4\")\n\n def get_type(self, obj):\n \"\"\"Get resource type.\"\"\"\n props = get_vocabulary_props(\n 'resourcetypes',\n ['props.datacite_general', 'props.datacite_type'],\n obj[\"metadata\"][\"resource_type\"][\"id\"],\n )\n return {\n 'resourceTypeGeneral': props.get(\"datacite_general\", \"Other\"),\n 'resourceType': props.get(\"datacite_type\", \"\"),\n }\n\n def _merge_main_and_additional(self, obj, field, default_type=None):\n \"\"\"Return merged list of main + additional titles/descriptions.\"\"\"\n result = []\n main_value = obj[\"metadata\"].get(field)\n\n if main_value:\n item = {field: strip_html(main_value)}\n if default_type:\n item[f\"{field}Type\"] = default_type\n result.append(item)\n\n additional_values = obj[\"metadata\"].get(f\"additional_{field}s\", [])\n for v in additional_values:\n item = {field: strip_html(v.get(field))}\n\n # Type\n type_id = v.get(\"type\", {}).get(\"id\")\n if type_id:\n props = get_vocabulary_props(\n f\"{field}types\", [\"props.datacite\"], type_id)\n if \"datacite\" in props:\n item[f\"{field}Type\"] = props[\"datacite\"]\n\n # Language\n lang_id = v.get(\"lang\", {}).get(\"id\")\n if lang_id:\n item[\"lang\"] = lang_id\n\n result.append(item)\n\n return result or missing\n\n def get_titles(self, obj):\n \"\"\"Get titles list.\"\"\"\n return self._merge_main_and_additional(obj, \"title\")\n\n def get_descriptions(self, obj):\n \"\"\"Get descriptions list.\"\"\"\n return self._merge_main_and_additional(\n obj, \"description\", default_type=\"Abstract\"\n )\n\n def get_publication_year(self, obj):\n \"\"\"Get publication year from edtf date.\"\"\"\n try:\n publication_date = obj[\"metadata\"][\"publication_date\"]\n parsed_date = parse_edtf(publication_date)\n return str(parsed_date.lower_strict().tm_year)\n except ParseException:\n # Should not fail since it was validated at service schema\n current_app.logger.error(\"Error parsing publication_date field for\"\n f\"record {obj['metadata']}\")\n raise ValidationError(_(\"Invalid publication date value.\"))\n\n def get_dates(self, obj):\n \"\"\"Get dates.\"\"\"\n dates = [{\n \"date\": obj[\"metadata\"][\"publication_date\"],\n \"dateType\": \"Issued\"\n }]\n\n for date in obj[\"metadata\"].get(\"dates\", []):\n date_type_id = date.get(\"type\", {}).get(\"id\")\n props = get_vocabulary_props(\n 'datetypes', [\"props.datacite\"], date_type_id)\n to_append = {\n \"date\": date[\"date\"],\n \"dateType\": props.get(\"datacite\", \"Other\")\n }\n desc = date.get(\"description\")\n if desc:\n to_append[\"dateInformation\"] = desc\n\n dates.append(to_append)\n\n return dates or missing\n\n def get_language(self, obj):\n \"\"\"Get language.\"\"\"\n languages = obj[\"metadata\"].get(\"languages\", [])\n if languages:\n # DataCite support only one language, so we take the first.\n return languages[0][\"id\"]\n\n return missing\n\n def get_identifiers(self, obj):\n \"\"\"Get (main and alternate) identifiers list.\"\"\"\n 
serialized_identifiers = []\n\n # pids go first so the DOI from the record is included\n pids = obj[\"pids\"]\n for scheme, id_ in pids.items():\n id_scheme = get_scheme_datacite(\n scheme, \"RDM_RECORDS_IDENTIFIERS_SCHEMES\", default=scheme,\n )\n\n if id_scheme:\n serialized_identifiers.append({\n \"identifier\": id_[\"identifier\"],\n \"identifierType\": id_scheme,\n })\n\n # Identifiers field\n identifiers = obj[\"metadata\"].get(\"identifiers\", [])\n for id_ in identifiers:\n scheme = id_[\"scheme\"]\n id_scheme = get_scheme_datacite(\n scheme, \"RDM_RECORDS_IDENTIFIERS_SCHEMES\", default=scheme\n )\n if id_scheme:\n # DataCite only accepts a DOI identifier that is the official\n # registered DOI - ones in the alternate identifier field are\n # dropped\n if id_scheme != 'DOI':\n serialized_identifiers.append({\n \"identifier\": id_[\"identifier\"],\n \"identifierType\": id_scheme,\n })\n\n return serialized_identifiers or missing\n\n def get_related_identifiers(self, obj):\n \"\"\"Get related identifiers.\"\"\"\n serialized_identifiers = []\n metadata = obj[\"metadata\"]\n identifiers = metadata.get(\"related_identifiers\", [])\n for rel_id in identifiers:\n relation_type_id = rel_id.get(\"relation_type\", {}).get(\"id\")\n props = get_vocabulary_props(\n \"relationtypes\", [\"props.datacite\"], relation_type_id)\n\n scheme = rel_id[\"scheme\"]\n id_scheme = get_scheme_datacite(\n scheme, \"RDM_RECORDS_IDENTIFIERS_SCHEMES\", default=scheme,\n )\n\n if id_scheme:\n serialized_identifier = {\n \"relatedIdentifier\": rel_id[\"identifier\"],\n \"relationType\": props.get(\"datacite\", \"\"),\n \"relatedIdentifierType\": id_scheme,\n }\n\n resource_type_id = rel_id.get(\"resource_type\", {}).get(\"id\")\n if resource_type_id:\n props = get_vocabulary_props(\n \"resourcetypes\",\n # Cache is on both keys so query datacite_type as well\n # even though it's not accessed.\n [\"props.datacite_general\", \"props.datacite_type\"],\n resource_type_id\n )\n serialized_identifier[\"resourceTypeGeneral\"] = props.get(\n \"datacite_general\", \"Other\")\n\n serialized_identifiers.append(serialized_identifier)\n\n return serialized_identifiers or missing\n\n def get_locations(self, obj):\n \"\"\"Get locations.\"\"\"\n locations = []\n\n loc_list = obj[\"metadata\"].get(\"locations\", {}).get(\"features\", [])\n for location in loc_list:\n place = location.get(\"place\")\n serialized_location = {}\n if place:\n serialized_location[\"geoLocationPlace\"] = place\n geometry = location.get(\"geometry\")\n if geometry:\n geo_type = geometry[\"type\"]\n # PIDS-FIXME: Scalable enough?\n # PIDS-FIXME: Implement Box and Polygon serialization\n if geo_type == \"Point\":\n serialized_location[\"geoLocationPoint\"] = {\n \"pointLatitude\": geometry[\"coordinates\"][0],\n \"pointLongitude\": geometry[\"coordinates\"][1],\n }\n\n locations.append(serialized_location)\n return locations or missing\n\n def get_subjects(self, obj):\n \"\"\"Get datacite subjects.\"\"\"\n subjects = obj[\"metadata\"].get(\"subjects\", [])\n if not subjects:\n return missing\n\n serialized_subjects = []\n ids = []\n for subject in subjects:\n sub_text = subject.get(\"subject\")\n if sub_text:\n serialized_subjects.append({\"subject\": sub_text})\n else:\n ids.append(subject.get(\"id\"))\n\n if ids:\n subjects_service = (\n current_service_registry.get(\"rdm-subjects\")\n )\n subjects = subjects_service.read_many(system_identity, ids)\n validator = validate.URL()\n for subject in subjects:\n serialized_subj = {\n \"subject\": 
subject.get(\"subject\"),\n \"subjectScheme\": subject.get(\"scheme\"),\n }\n id_ = subject.get(\"id\")\n\n try:\n validator(id_)\n serialized_subj[\"valueURI\"] = id_\n except ValidationError:\n pass\n\n serialized_subjects.append(serialized_subj)\n\n return serialized_subjects if serialized_subjects else missing\n\n def get_rights(self, obj):\n \"\"\"Get datacite rigths.\"\"\"\n rights = obj[\"metadata\"].get(\"rights\", [])\n if not rights:\n return missing\n\n serialized_rights = []\n ids = []\n for right in rights:\n _id = right.get(\"id\")\n if _id:\n ids.append(_id)\n else:\n serialized_right = {\n \"rights\": right.get(\"title\").get(\n current_default_locale()\n ),\n }\n\n link = right.get(\"link\")\n if link:\n serialized_right[\"rightsUri\"] = link\n\n serialized_rights.append(serialized_right)\n\n if ids:\n rights = vocabulary_service.read_many(\n system_identity, \"licenses\", ids\n )\n for right in rights:\n serialized_right = {\n \"rights\": right.get(\"title\").get(\n current_default_locale()\n ),\n \"rightsIdentifierScheme\": right.get(\"props\").get(\"scheme\"),\n \"rightsIdentifier\": right.get(\"id\"),\n }\n link = right.get(\"props\").get(\"url\")\n if link:\n serialized_right[\"rightsUri\"] = link\n\n serialized_rights.append(serialized_right)\n\n return serialized_rights if serialized_rights else missing\n","sub_path":"invenio_rdm_records/resources/serializers/datacite/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":17651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"143646710","text":"from __future__ import division, print_function\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nfrom extract import brain_graph\nfrom random_graph.binary_directed import source_growth\nfrom random_graph.binary_directed import source_growth_total_degree\nfrom network_plot.change_settings import set_all_text_fontsizes, set_all_colors\n\nimport brain_constants as bc\n\nimport config\nimport in_out_plot_config as cf\n\n\nMARKERSIZE = 25.\nFONTSIZE = 12.\nALPHA = 0.5\n\nL = 0.725\nGAMMAS = [1, 1.67]\nLABELSS = [['a', 'b'], ['c', 'd']]\nBRAIN_SIZE = [7., 7., 7.]\n\nfor gamma, labels in zip(GAMMAS, LABELSS):\n # create attachment and growth models\n Gbrain, _, _ = brain_graph.binary_directed()\n\n Gsg_total, _, _ = source_growth_total_degree(\n N=bc.num_brain_nodes, N_edges=bc.num_brain_edges_directed, L=L,\n gamma=gamma, brain_size=BRAIN_SIZE)\n\n # Get in- & out-degree\n indeg_brain = np.array([Gbrain.in_degree()[node] for node in Gbrain])\n outdeg_brain = np.array([Gbrain.out_degree()[node] for node in Gbrain])\n deg_brain = indeg_brain + outdeg_brain\n\n indeg_sg_total = np.array([Gsg_total.in_degree()[node] for node in Gsg_total])\n outdeg_sg_total = np.array([Gsg_total.out_degree()[node] for node in Gsg_total])\n deg_sg_total = indeg_sg_total + outdeg_sg_total\n\n\n # Calculate proportion in degree\n percent_indeg_brain = indeg_brain / deg_brain.astype(float)\n percent_indeg_sg_total = indeg_sg_total / deg_sg_total.astype(float)\n\n\n # make plots\n fig = plt.figure(figsize=(8, 4.25), facecolor='w')\n plt.subplots_adjust(hspace=0.45, wspace=0.45)\n\n left_main_ax = plt.subplot2grid(cf.subplot_divisions, cf.left_main_location,\n rowspan=cf.left_main_rowspan,\n colspan=cf.left_main_colspan,\n aspect='equal')\n\n right_main_ax = 
plt.subplot2grid(cf.subplot_divisions, cf.right_main_location,\n rowspan=cf.right_main_rowspan,\n colspan=cf.right_main_colspan)\n\n top_margin_ax = plt.subplot2grid(cf.subplot_divisions, cf.top_margin_location,\n rowspan=cf.top_margin_rowspan,\n colspan=cf.top_margin_colspan,\n sharex=left_main_ax)\n\n right_margin_ax = plt.subplot2grid(cf.subplot_divisions,\n cf.right_margin_location,\n rowspan=cf.right_margin_rowspan,\n colspan=cf.right_margin_colspan,\n sharey=left_main_ax)\n\n # Left main plot (in vs out degree)\n left_main_ax.scatter(indeg_brain, outdeg_brain,\n c=config.COLORS['brain'],\n s=MARKERSIZE, lw=0, alpha=ALPHA)\n left_main_ax.scatter(indeg_sg_total, outdeg_sg_total, c=config.COLORS['sg'],\n s=MARKERSIZE, lw=0, alpha=ALPHA, zorder=3)\n\n left_main_ax.set_xlabel('In-degree')\n left_main_ax.set_ylabel('Out-degree')\n\n left_main_ax.set_xlim([0, 125])\n left_main_ax.set_ylim([0, 125])\n left_main_ax.set_aspect('auto')\n left_main_ax.set_xticks(np.arange(0, 121, 40))\n left_main_ax.set_yticks(np.arange(0, 121, 40))\n left_main_ax.text(150, 150, labels[0], fontsize=FONTSIZE + 2, fontweight='bold')\n\n # Top marginal (in-degree)\n top_margin_ax.hist(indeg_brain, bins=cf.OUTDEGREE_BINS,\n histtype='stepfilled', color=config.COLORS['brain'],\n alpha=ALPHA, label='Brain', normed=True,\n stacked=True)\n top_margin_ax.hist(indeg_sg_total, bins=cf.OUTDEGREE_BINS, histtype='stepfilled',\n color=config.COLORS['sg'], alpha=ALPHA,\n label='SG total', normed=True, stacked=True)\n\n # Right marginal (out-degree)\n right_margin_ax.hist(outdeg_brain, bins=cf.OUTDEGREE_BINS,\n histtype='stepfilled',\n color=config.COLORS['brain'], alpha=ALPHA,\n orientation='horizontal', normed=True, stacked=True)\n right_margin_ax.hist(outdeg_sg_total, bins=cf.OUTDEGREE_BINS,\n histtype='stepfilled', color=config.COLORS['sg'],\n alpha=ALPHA, orientation='horizontal', normed=True,\n stacked=True)\n for tick in right_margin_ax.get_xticklabels():\n tick.set_rotation(270)\n\n plt.setp(right_margin_ax.get_yticklabels() + top_margin_ax.get_xticklabels(),\n visible=False)\n\n top_margin_ax.set_yticks([0, 0.05, 0.1])\n top_margin_ax.set_ylim([0, 0.1])\n right_margin_ax.set_xticks([0, 0.05, 0.1])\n right_margin_ax.set_xlim([0, 0.1025])\n\n top_margin_ax.set_ylabel('$P(k_\\mathrm{in})$', va='baseline')\n right_margin_ax.set_xlabel('$P(k_\\mathrm{out})$', va='top')\n\n # Right main plot (proportion in vs total degree)\n right_main_ax.scatter(deg_brain, percent_indeg_brain,\n s=MARKERSIZE, lw=0, c=config.COLORS['brain'],\n alpha=ALPHA, label='Connectome')\n right_main_ax.scatter(deg_sg_total, percent_indeg_sg_total, s=MARKERSIZE, lw=0,\n c=config.COLORS['sg'], alpha=ALPHA,\n label='SGPA(total, gamma = {})'.format(gamma), zorder=3)\n\n right_main_ax.xaxis.set_major_locator(plt.MaxNLocator(4))\n right_main_ax.set_yticks(np.arange(0, 1.1, .25))\n right_main_ax.set_xticks(np.arange(0, 151, 50))\n right_main_ax.set_xlabel('Total degree (in + out)')\n right_main_ax.set_ylabel('Proportion in-degree')\n right_main_ax.text(1., 1.2, labels[1], fontsize=FONTSIZE + 2, fontweight='bold',\n transform=right_main_ax.transAxes, ha='right')\n right_main_ax.set_xlim([0., 150.])\n right_main_ax.set_ylim([-0.025, 1.025])\n right_main_ax.legend(loc=(-0.35, 1.12), prop={'size': 12})\n\n for temp_ax in [left_main_ax, right_main_ax, top_margin_ax, right_margin_ax]:\n set_all_text_fontsizes(temp_ax, FONTSIZE)\n set_all_colors(temp_ax, cf.LABELCOLOR)\n temp_ax.tick_params(width=1.)\n\n fig.subplots_adjust(left=0.125, top=0.925, right=0.95, 
bottom=0.225)\n\n fig.savefig('fig5sf5{}{}.pdf'.format(*labels), dpi=300)\n fig.savefig('fig5sf5{}{}.png'.format(*labels), dpi=300)\n\n\n plt.show(block=False)\n\n\n r, p = stats.spearmanr(indeg_sg_total, outdeg_sg_total)\n print('r = {}'.format(r))\n print('p = {}'.format(p))\n","sub_path":"final_figures/fig5fs5.py","file_name":"fig5fs5.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349017330","text":"# Name: Alan Kuo\n# Date: 5/18/2020\n# Description: Text-based implementation of the board game Gess.\n\n\nclass Selection:\n \"\"\"Represents a 3x3 Selection. Includes methods for finding the footprint of a selection, and checking if\n a selection is valid (center is in-bounds). Class interacts with the GessGame class during each attempted move.\n\n :param center: The center coordinate of Selection\n :type center: tuple\n \"\"\"\n\n def __init__(self, center):\n \"\"\"Initializes Selection by setting center of selection and calculating footprint of selection.\"\"\"\n self._center = center\n self._footprint = [(self._center[0] + num1, self._center[1] + num2) for num1 in range(-1, 2) for num2 in\n range(-1, 2)]\n\n def footprint(self):\n \"\"\"Returns coordinates of spaces in footprint. Takes no parameters.\"\"\"\n return self._footprint\n\n def check_valid_selection(self):\n \"\"\"Method for checking if a selection is valid. Returns False if selection is out of bounds, otherwise True.\n Takes no parameters.\"\"\"\n # Checking if center of Selection is within bounds\n if not (0 < self._center[0] < 19 and 0 < self._center[1] < 19):\n return False\n else:\n return True\n\n\nclass Piece(Selection):\n \"\"\"Represents a 3x3 Game Piece. Inherits from Selection. Includes methods for finding coordinates of stones\n that are within the footprint of the Piece, and the allowed move directions based on the included stones.\n Class interacts with the GessGame class during each attempted move.\"\"\"\n\n def __init__(self, center, curr_player_stones):\n \"\"\"Initializes Piece. Takes center coordinate and a list of the current player's stones as parameters.\n Creates dictionary that corresponds each cardinal direction to a space in the footprint. Calculates which\n of the current player's stones are in the Piece's footprint.\"\"\"\n\n super().__init__(center)\n self._dirs = [\"DL\", \"L\", \"UL\", \"D\", \"C\", \"U\", \"DR\", \"R\", \"UR\"]\n self._dir_coords = {self._dirs[i]: self._footprint[i] for i in range(len(self._dirs))}\n self._contained_stones = [stone for stone in curr_player_stones if stone in self.footprint()]\n\n def contained_stones(self):\n \"\"\"Returns the stones contained in the Piece. Takes no parameters.\"\"\"\n return self._contained_stones\n\n def move_dirs(self):\n \"\"\"Returns the allowed move directions based on contained stones. Takes no parameters\"\"\"\n move_dirs = {key for key, value in self._dir_coords.items() for stone in self._contained_stones if\n stone == value}\n return move_dirs\n\n\nclass GessGame:\n \"\"\"Represents a game of Gess. Stores game board and game state. Includes method for making moves. Includes\n other helper methods to implement move and game functionality. Instantiates Selection and Piece classes during\n move functionality.\"\"\"\n\n # Initializes game board and stone locations\n def __init__(self):\n \"\"\"Initializes Game Board. Sets up game status, current player status, and places game pieces for both sides\n on the board. 
Takes no parameters\"\"\"\n\n # Board will be saved as a list of lists. Each nested list represents a row.\n # Coordinate system for board will be a numerical [col, row]. For example, \"a3\" will be [0,2]\n self._board = [[\"-\"] * 20 for _ in range(20)]\n self._game_state = \"UNFINISHED\"\n self._curr_player = \"BLACK\"\n self._opp_player = \"WHITE\"\n self._black_starting = [\"b3\", \"c2\", \"c3\", \"c4\", \"c7\", \"d3\", \"e2\", \"e4\", \"f3\", \"f7\", \"g2\", \"g4\", \"h2\", \"h3\",\n \"h4\", \"i2\", \"i3\", \"i4\", \"i7\", \"j2\", \"j3\", \"j4\", \"k2\", \"k3\", \"k4\", \"l2\", \"l4\", \"l7\",\n \"m2\", \"m3\", \"m4\", \"n2\", \"n4\", \"o3\", \"o7\", \"p2\", \"p4\", \"q3\", \"r2\", \"r3\", \"r4\", \"r7\",\n \"s3\"]\n\n # Converts coordinates for black stones, saves coordinate to list, and places on board\n self._black_stones = [self.coord_converter(stone) for stone in self._black_starting]\n for stone in self._black_stones:\n self._board[stone[1]][stone[0]] = \"B\"\n\n # Saves coordinates of white stones to list and places on board\n self._white_stones = [(stone[0], stone[1]+15) if stone[1] != 6 else (stone[0], 13) for stone in\n self._black_stones]\n for stone in self._white_stones:\n self._board[stone[1]][stone[0]] = \"W\"\n\n def get_black_stones(self):\n \"\"\"Returns list of black stones.\n\n :param: None\n :return: Returns list of black stones\n :rtype: list\n \"\"\"\n return self._black_stones\n\n def get_white_stones(self):\n return self._white_stones\n\n def get_curr_player(self):\n return self._curr_player\n\n def get_game_state(self):\n \"\"\"Returns current game state. Takes no parameters\"\"\"\n return self._game_state\n\n def coord_converter(self, coordinate):\n \"\"\"Converts letter-number coordinate to number-number coordinate system used in program. Takes the coordinate\n to convert as parameter and returns the converted coordinate.\"\"\"\n return ((ord(coordinate[0]) - 97), int(coordinate[1:]) - 1)\n\n def print_board(self):\n \"\"\"Function for printing game board. Used for debugging. Takes no parameters. Prints board.\"\"\"\n for i in range(19, -1, -1):\n print(self._board[i])\n print(\"\")\n\n def make_move(self, start_center, end_center):\n \"\"\"Function for making a move. Uses helper functions for checking if move is valid. If move is valid, updates\n game state and switches current/opposing players (if necessary). Takes starting and ending coordinates of move\n as parameters. Returns True if move is successful or False if move is invalid.\"\"\"\n\n # Checks if game is already over.\n if self._game_state != \"UNFINISHED\":\n return \"Game is over!\"\n\n # Sets current player stones, symbol, and initializes start/end Selection objects for proposed move.\n curr_player_stones, opp_player_stones = self.player_stone_selector()\n curr_player_sym = self._curr_player[0]\n start_sel, end_sel = Selection(start_center), Selection(end_center)\n\n # Checks if starting and ending locations are valid with check_valid_selection method.\n if not start_sel.check_valid_selection() or not end_sel.check_valid_selection():\n return \"Invalid selection - try again!\"\n\n # Checking starting footprint for any of opponent's stones. 
Also will prevent moving out of turn.\n start_footprint = start_sel.footprint()\n opp_stones_in_sel = any(stone in opp_player_stones for stone in start_footprint)\n if opp_stones_in_sel:\n return \"Invalid selection - try again!\"\n\n # Initializing Piece object that will be moving.\n moving_piece = Piece(start_center, curr_player_stones)\n\n # Getting stones contained in Piece and checking if move is allowed based on direction and range.\n contained_stones = moving_piece.contained_stones()\n move_dir_range = self.check_dir_range(start_center, end_center, moving_piece)\n if not move_dir_range:\n return \"Invalid move - try again!\"\n else:\n move_dir, move_range = move_dir_range\n\n # Gets list of current player's stones that aren't moving, and runs collision checking method.\n curr_stationary_stones = [stone for stone in curr_player_stones if stone not in contained_stones]\n stationary_stones = set(opp_player_stones + curr_stationary_stones)\n if not self.collision_checker(start_center, move_dir, move_range, stationary_stones):\n return \"Other stones in the way of move - try again!\"\n\n # Setting backup lists for restoring board if necessary.\n backup_black, backup_white = self._black_stones[:], self._white_stones[:]\n\n # Removing current player's stones from starting footprint.\n for stone in contained_stones:\n self.remove_stone(stone, curr_player_stones)\n\n # Removing both player's stones from ending footprint.\n for stone in end_sel.footprint():\n if stone in curr_stationary_stones:\n self.remove_stone(stone, curr_player_stones)\n elif stone in opp_player_stones:\n self.remove_stone(stone, opp_player_stones)\n\n # Uses stone_mover method to place stones at new location in proper place.\n self.stone_mover(end_center, moving_piece.move_dirs(), curr_player_stones, curr_player_sym)\n\n # If move results in current player losing their last ring, restores board to state from before attempted move.\n if not self.ring_checker(curr_player_stones):\n self.restore_board(backup_black, backup_white)\n return \"Move leaves you without a ring - try again!\"\n\n # If move results in opponent losing their last ring, changes game state to reflect win, switches players, and\n # returns True.\n if not self.ring_checker(opp_player_stones):\n self._game_state = self._curr_player + \"_WON\"\n return True\n\n # Move is successful. Switches players for next move and returns True.\n self._curr_player, self._opp_player = self._opp_player, self._curr_player\n return True\n\n def player_stone_selector(self):\n \"\"\"Helper function for assigning the correct list of stones to the current and opposing player. Takes no\n parameters and returns lists of stones for the current player and opposing player as a tuple.\n\n :param: None\n :return: Returns lists of stones for current and opposing players.\n :rtype: tuple\n \"\"\"\n if self._curr_player == \"BLACK\":\n return self._black_stones, self._white_stones\n else:\n return self._white_stones, self._black_stones\n\n def check_dir_range(self, start_center, end_center, piece):\n \"\"\"Function for checking if direction and range of move are allowed based on the stones contained in the Piece.\n Takes starting and ending coordinates of move and Piece object as parameters. 
Returns False if move is invalid,\n otherwise returns the direction and range of move as a tuple.\"\"\"\n\n # Getting valid move directions based on contained stones with the move_dirs method.\n valid_dirs = piece.move_dirs()\n\n # if only center stone is present, piece cannot move\n if valid_dirs == {\"C\"}:\n return False\n\n # setting range for move, range is unlimited if center space is filled, otherwise range is 3 squares\n elif \"C\" in valid_dirs:\n move_range = 99\n else:\n move_range = 3\n\n # Finding the direction and range of the attempted move with the find_dir_range method.\n move_dir_range = self.find_dir_range(start_center, end_center)\n if not move_dir_range:\n return False\n elif move_dir_range[0] in valid_dirs and move_dir_range[1] <= move_range:\n return move_dir_range[0], move_dir_range[1]\n else:\n return False\n\n def find_dir_range(self, start_center, end_center):\n \"\"\"Helper function for finding the direction and range of a move. Used by check_dir_range method. Takes the\n starting and ending coordinates of move as parameters. If move direction corresponds to one of the allowed\n directions, returns the move direction and distance as a tuple. Otherwise, returns False\"\"\"\n\n # Finding the change in x and y values\n x_delta, y_delta = end_center[0] - start_center[0], end_center[1] - start_center[1]\n\n # Returns direction and distance of move as a tuple\n if x_delta == y_delta != 0:\n if x_delta > 0:\n return \"UR\", x_delta\n elif x_delta < 0:\n return \"DL\", abs(x_delta)\n\n elif x_delta == - y_delta != 0:\n if x_delta > 0:\n return \"DR\", x_delta\n elif x_delta < 0:\n return \"UL\", abs(x_delta)\n\n elif x_delta == 0:\n if y_delta > 0:\n return \"U\", y_delta\n elif y_delta < 0:\n return \"D\", abs(y_delta)\n\n elif y_delta == 0:\n if x_delta > 0:\n return \"R\", x_delta\n elif x_delta < 0:\n return \"L\", abs(x_delta)\n\n # Returns False if move is not in an allowed direction or if position has not changed.\n else:\n return False\n\n def collision_checker(self, start_center, move_dir, move_range, stationary_stones):\n \"\"\"Function for checking if premature collision occurs during attempted move. Takes the start location for move,\n move direction, move range, and list of stones not involved in move as parameters. Returns False if move results\n in a premature collision, otherwise returns True.\"\"\"\n\n # Gets corresponding offset for given move direction from dir_offsets method\n offsets = self.dir_offsets(move_dir)\n\n # Iterates through intermediate moves to check for collisions\n for i in range(1, move_range):\n new_center = (start_center[0] + i * offsets[0], start_center[1] + i * offsets[1])\n test_footprint = Selection(new_center).footprint()\n if any(stone in stationary_stones for stone in test_footprint):\n return False\n return True\n\n def dir_offsets(self, move_dir):\n \"\"\"Helper function that returns x and y offsets for a 1 unit move in any direction. Used during collision\n checking. Takes the desired move direction as a parameter and returns the correct x,y offset as a tuple.\"\"\"\n\n offsets = {\"DL\": (-1, -1), \"L\": (-1, 0), \"UL\": (-1, 1), \"D\": (0, -1), \"U\": (0, 1), \"DR\": (1, -1), \"R\": (1, 0),\n \"UR\": (1, 1)}\n return offsets[move_dir]\n\n def stone_mover(self, center, directions, player_list, symbol):\n \"\"\"Function for moving stones to new location during a move. 
Takes the center of new location, a list of\n directional positions of the stones to be moved, the current player's list of stones, and correct symbol\n for the stones.\"\"\"\n\n # Dictionary containing x,y offsets for each directional position\n offsets = {\"DL\": (-1, -1), \"L\": (-1, 0), \"UL\": (-1, 1), \"D\": (0, -1), \"U\": (0, 1), \"DR\": (1, -1), \"R\": (1, 0),\n \"UR\": (1, 1), \"C\": (0, 0)}\n\n # Calculates new x,y coordinate for each stone based on their directional position.\n for direction in directions:\n new_col, new_row = center[0] + offsets[direction][0], center[1] + offsets[direction][1]\n if 0 < new_col < 19 and 0 < new_row < 19:\n self.place_stone((new_col, new_row), player_list, symbol)\n\n def place_stone(self, coord, stone_list, symbol):\n \"\"\"Places stone on board and appends to each player's list of stones. Due to nested list structure of board,\n stone is placed on board as [col][row]. Takes coordinate of new stone to be placed, the list where the\n coordinate of the new stone is to be placed, and the specific symbol ('B' or 'W') to be placed on the\n board as parameters.\"\"\"\n\n self._board[coord[1]][coord[0]] = symbol\n stone_list.append(coord)\n\n def remove_stone(self, coord, stone_list):\n \"\"\"Removes stone from board and removes from each player's list of stones. Due to list structure of board,\n stone coordinates are [col][row]. Takes coordinate of stone to be removed and the list where the coordinate of\n the stone is to be removed from as parameters.\"\"\"\n\n self._board[coord[1]][coord[0]] = \"-\"\n stone_list.remove(coord)\n\n def ring_checker(self, stone_list):\n \"\"\"Function for checking if player has a valid ring by iterating through a list of stones and checking if the\n other stones in the ring are present. Each stone will be checked as if it was the lower left stone in the ring.\n Takes list of stones to check as a parameter. Returns True if ring is present, otherwise False.\"\"\"\n\n # Offsets for other coordinates in the ring compared to the lower left stone in ring\n offsets = {(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1), (2, 2)}\n\n # Iterates through each stone in the list and checks that all other stones required for ring are in the list.\n for stone in stone_list:\n other_o_stones = {(stone[0] + offset[0], stone[1] + offset[1]) for offset in offsets}\n if other_o_stones.issubset(stone_list) and (stone[0] + 1, stone[1] + 1) not in stone_list:\n return True\n return False\n\n def restore_board(self, black_list, white_list):\n \"\"\"Restores game board from backup lists of stone locations. Used by make_move method and takes lists of stones\n for the two players as parameters.\"\"\"\n self._board = [[\"-\"] * 20 for _ in range(20)]\n self._black_stones, self._white_stones = [], []\n for stone in black_list:\n self.place_stone(stone, self._black_stones, \"B\")\n for stone in white_list:\n self.place_stone(stone, self._white_stones, \"W\")\n\n def resign_game(self):\n \"\"\"Function for current player to resign the game and give the opposing player the win. 
Takes no parameters.\"\"\"\n self._game_state = self._opp_player + \"_WON\"\n","sub_path":"GessBackend.py","file_name":"GessBackend.py","file_ext":"py","file_size_in_byte":17531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35037876","text":"#!/usr/bin/python3\n\"\"\"\nStart Flask web app, listening on 0.0.0.0, port 5000, display Hello HBNB!\n\"\"\"\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.city import City\nfrom models.state import State\n\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.teardown_appcontext\ndef close(self):\n \"\"\" close storage \"\"\"\n storage.close()\n\n\n@app.route('/cities_by_states')\ndef cities_by_states():\n \"\"\" fetch data from the storage engine\"\"\"\n states = storage.all(State)\n return render_template('8-cities_by_states.html', states=states)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"web_flask/8-cities_by_states.py","file_name":"8-cities_by_states.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"448316855","text":"#!/usr/bin/env python3\nimport time\nfrom multiprocessing import cpu_count\nfrom typing import Union, NamedTuple\n\nimport torch\nimport torch.backends.cudnn\nimport numpy as np\nfrom torch import nn, optim\nfrom torch.nn import functional as F\n\n\n\ntorch.backends.cudnn.benchmark = True\n\n\nclass ImageShape(NamedTuple):\n height: int\n width: int\n channels: int\n\nclass CNN(nn.Module):\n def __init__(self, height: int, width: int, channels: int):\n super().__init__()\n self.input_shape = ImageShape(height=96, width=96, channels=3)\n\n #self.class_count = class_count\n ### convolution 1 and max pool ###\n self.conv1 = nn.Conv2d(\n in_channels=self.input_shape.channels,\n out_channels=32,\n kernel_size=(5, 5),\n padding=(2,2),\n stride=1,\n )\n self.initialise_layer(self.conv1)\n # #self.batchNorm= nn.BatchNorm2d(num_features=32)\n self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)\n #### convolution 1 and max pool ###\n\n ### convolution 2 and max pool ###\n self.conv2= nn.Conv2d(\n in_channels=self.conv1.out_channels,\n out_channels=64,\n kernel_size=(3,3),\n padding=(1,1),\n stride=1,\n )\n self.initialise_layer(self.conv2)\n #self.batchNorm2= nn.BatchNorm2d(num_features=64)\n self.pool2 = nn.MaxPool2d(kernel_size=(3, 3), stride=2)\n ### convolution 2 and max pool ###\n\n\n ### convolution 3 and max pool ###\n self.conv3= nn.Conv2d(\n in_channels= self.conv2.out_channels,\n out_channels=128,\n kernel_size=(3,3),\n stride=1,\n )\n self.initialise_layer(self.conv3)\n # #self.batchNorm2= nn.BatchNorm2d(num_features=64)\n self.pool3 = nn.MaxPool2d(kernel_size=(3, 3), stride=2)\n ### convolution 3 and max pool ###\n\n\n self.fc1 = nn.Linear(12800,4608)\n self.initialise_layer(self.fc1)\n #self.batchNormFC= nn.BatchNorm1d(1024)\n\n ## TASK 6-1: Define the last FC layer and initialise its parameters\n self.fc2= nn.Linear(2304,2304)\n self.initialise_layer(self.fc2)\n\n\n\n def forward(self, images: torch.Tensor) -> torch.Tensor:\n\n x = self.conv1(images)\n x = self.pool1(x)\n\n x = self.conv2(x)\n x = self.pool2(x)\n\n x = self.conv3(x)\n x = self.pool3(x)\n\n x = torch.flatten(x,start_dim=1)\n x = self.fc1(x)\n\n x1, x2 = torch.split(x, 2304, dim=1)\n x= torch.max(x1,x2)\n #x= self.fc2(x)\n x= torch.sigmoid(self.fc2(x))\n #x= torch.nn.functional.sigmoid(torch.reshape(x, 
(128,1,48,48)))\n\n return x\n\n @staticmethod\n def initialise_layer(layer):\n if hasattr(layer, \"bias\"):\n nn.init.zeros_(layer.bias)\n if hasattr(layer, \"weight\"):\n nn.init.kaiming_normal_(layer.weight)","sub_path":"cw/shallow_net.py","file_name":"shallow_net.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"525313060","text":"import random\r\nimport math\r\nimport csv\r\n### ---------------------------------------\r\n### CSCI 127, Joy and Beauty of Data\r\n### Program 3: Music CSV Library\r\n### Justin Guerrero\r\n### October 14 2018 \r\n### ---------------------------------------\r\n### This program is designed to open a music file and\r\n### be able to find the longest song, the number of songs\r\n### in a given year, and identify all songs by an artist.\r\n### ---------------------------------------\r\n##\r\n### --------------------------------------\r\n\r\n\r\n#-------------------------------#\r\n#The menu will print the choices#\r\n#_______________________________#\r\ndef menu():\r\n print()\r\n print(\"1. Identify longest song.\")\r\n print(\"2. Identify number of songs in a given year.\")\r\n print(\"3. Identify all songs by a given artist.\")\r\n print(\"4. Have me choose a song for you.\")\r\n print(\"5. Quit.\")\r\n\r\nf = open(\"music.csv\", \"r\")\r\n#___________________Define the function for longest song\r\ndef longest_song():\r\n f = open('music.csv' , 'r')\r\n duration_max = 0\r\n line_count = 0\r\n song_name_duration = \"\"\r\n ##make a for loop to iterate over the lines (i) in your file\r\n for i in f:\r\n songs = i.split(\",\")\r\n if line_count != 0:\r\n if float(songs[9]) > duration_max:\r\n duration_max = float(songs[9])\r\n song_name_duration = songs[-2]\r\n line_count += 1\r\n##We use the line_count counter to skip over the header line in the csv file so the float conversion works\r\n \r\n print(\"The longest song is\", song_name_duration) \r\n print(\"with a length of\", round(duration_max))\r\n \r\n\r\n\r\ndef songs_by_year(year):\r\n f = open('music.csv' , 'r')\r\n yeart = 0\r\n songs = []\r\n ## create a song list for us to search through, and then make a for loop again to search through lines\r\n ## split the lines by comma, strip will take away the white spaces allowing code to read\r\n ## then do yeart += 1 to keep count and add songs to the count.\r\n for i in f:\r\n songs = i.split(\",\") \r\n if songs[-1].strip() == str(year):\r\n yeart += 1\r\n \r\n print(\"The number of songs from that year is: \" + str(yeart))\r\n\r\n \r\ndef all_songs_by_artist(artist):\r\n f = open('music.csv' , 'r')\r\n artist_songs = []\r\n ## create your song list to collect matches. 
initiate for loop as we've done above\r\n ## next we will create our if statements to give equalities.\r\n ## then you'll set the song_name variable equal to the row and song column\r\n for i in f:\r\n artist_row = i.split(\",\")\r\n if artist_row[2].lower() == str(artist):\r\n song_name = artist_row[-2]\r\n artist_songs.append(song_name)\r\n ## next thing you'll see is where we sort the songs into alphabetical order\r\n ## followed by printing our statements\r\n artist_songs.sort()\r\n print(\"\\n\",\"The songs from this Artist in Alphabetical order\", \"\\n\" ,\r\n \"------------------------------------------------\")\r\n ## if no songs were collected, the artist is not in the library\r\n \r\n if not artist_songs:\r\n print(\" There are no songs by this artist\", \"\\n\" ,\r\n \"---------------------------------\")\r\n ## this counter will make the 1, 2, 3, 4, etc for the track ids\r\n count = 0\r\n for i in artist_songs:\r\n print(count + 1, i)\r\n count += 1\r\n \r\n \r\n\r\ndef random_song():\r\n f = open(\"music.csv\", 'r')\r\n random_number = random.randint(1,10000)\r\n line_count = 1\r\n ## above you'll see that we used random_number because random cannot be used as a variable name\r\n ## then you'll set your parameters (we used rows)\r\n ## then we set the line count - equal to the random number, so when they match it prints a song\r\n \r\n for i in f:\r\n songs = i.split(\",\")\r\n if line_count == random_number:\r\n print(\"\\n\", \"Listen to this track! \" , songs[-2],\"by\", songs[2])\r\n line_count += 1\r\n ## you'll also make a line counter here to make sure the counter is iterating through\r\n ## the lines\r\n \r\n\r\n\r\n \r\n#----------------------------------------------------\r\ndef main():\r\n choice = 0\r\n while (choice != 5):\r\n menu()\r\n choice = int(input(\"Enter your choice: \"))\r\n if (choice == 1):\r\n longest_song()\r\n elif (choice == 2):\r\n year = int(input(\"Enter desired year: \"))\r\n songs_by_year(year)\r\n elif (choice == 3):\r\n artist = input(\"Enter name of artist: \").lower()\r\n all_songs_by_artist(artist)\r\n elif (choice == 4):\r\n random_song()\r\n elif (choice != 5):\r\n print(\"That is not a valid option. 
Please try again.\")\r\n\r\nmain()\r\nf.close()\r\n","sub_path":"CSVMusicFiles.py","file_name":"CSVMusicFiles.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"431192263","text":"import RankCalculator\nimport csv\n\nschoolslink = \"https://docs.google.com/spreadsheets/d/e/2PACX-1vR4d8JvuxteLJ7NqAvZhjYzRggjV_ptKUQCNNsQAVrblK9r2h3CFovSODtSpg7Jp7_xt0lFdLjxUedQ/pub?output=csv\"\ncoedRegattaLink = \"https://docs.google.com/spreadsheets/d/e/2PACX-1vRYuBfRn534EQy6tXw957Ree0IPLUxyaFri25OQtd_0n5SyG3J-5ELfpUPgwUKGxT_qfDmzrmtds8Y2/pub?output=csv\"\nwomensRegattaLink = \"https://docs.google.com/spreadsheets/d/e/2PACX-1vRrN9Sdev0TpLnAzuUgPQFwEa_VXcmsXX9CKoR3Y5p4fFydyh8WwdM1Xx-yaOWobLlNYPYWUMjGql1w/pub?output=csv\"\n\ncoedRankingsOutputFile = \"rankings.csv\"\ncoedComponentScoresFile = \"component_scores.csv\"\n\nwomensRankingOutputFile = \"womensrankings.csv\"\nwomensComponentScoresFile = \"womens_component_scores.csv\"\n\n################## COED ######################\nranks, schoolobjects = RankCalculator.calculateRanks(coedRegattaLink, schoolslink)\n\nf = open(coedRankingsOutputFile, \"w\")\nf.truncate()\nf.close()\n\nwith open(coedRankingsOutputFile, 'w') as result:\n writer = csv.writer(result, delimiter=\",\")\n writer.writerow(('School', 'Score'))\n for row in ranks:\n row = (row[0], str(row[1]))\n writer.writerow(row)\n\n\nf = open(coedComponentScoresFile, \"w\")\nf.truncate()\nf.close()\n\nwith open(coedComponentScoresFile, 'w') as result:\n writer = csv.writer(result, delimiter=\",\")\n writer.writerow(('School', 'Counted Scores Regular Regattas', 'Championship Score'))\n for school in schoolobjects:\n obje = schoolobjects[school]\n row = (obje.name, obje.countedPoints, obje.SRegattaScore)\n writer.writerow(row)\n######################################################\n\n\n################### WOMENS ###########################\nranks, schoolobjects = RankCalculator.calculateRanks(womensRegattaLink, schoolslink)\n\nf = open(womensRankingOutputFile, \"w\")\nf.truncate()\nf.close()\n\nwith open(womensRankingOutputFile, 'w') as result:\n writer = csv.writer(result, delimiter=\",\")\n writer.writerow(('School', 'Score'))\n for row in ranks:\n row = (row[0], str(row[1]))\n writer.writerow(row)\n\n\nf = open(womensComponentScoresFile, \"w\")\nf.truncate()\nf.close()\n\nwith open(womensComponentScoresFile, 'w') as result:\n writer = csv.writer(result, delimiter=\",\")\n writer.writerow(('School', 'Counted Scores Regular Regattas', 'Championship Score'))\n for school in schoolobjects:\n obje = schoolobjects[school]\n row = (obje.name, obje.countedPoints, obje.SRegattaScore)\n writer.writerow(row)\n","sub_path":"Runner.py","file_name":"Runner.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"549116050","text":"# coding: utf-8\n#\n# This file is part of VIRL2\n# Cisco (c) 2020\n#\n\nimport logging\nimport re\nimport pytest\n\nfrom virl2_client import ClientLibrary\nfrom virl2_client.models.cl_pyats import ClPyats\n\nlogger = logging.getLogger(\"__main__\")\nlogging.basicConfig(level=logging.INFO)\n\n# pytestmark = [pytest.mark.integration]\n\nRE1 = r\"(\\d+) packets transmitted, (\\d+) packets received, (\\d+)% packet loss\"\nRE2 = r\"round-trip min/avg/max = [\\d\\.]+/([\\d\\.]+)/[\\d\\.]+ ms\"\n\n\ndef check_result(result, has_loss, min_avg, max_avg):\n print(result)\n rm = re.search(RE1, result, 
re.MULTILINE)\n assert len(rm.groups()) == 3\n transmitted, received, loss = [int(a) for a in rm.groups()]\n # print(transmitted, received, loss)\n if has_loss:\n assert transmitted != received\n assert loss > 0\n else:\n assert transmitted == received\n assert loss == 0\n\n rm = re.search(RE2, result, re.MULTILINE)\n assert len(rm.groups()) == 1\n avg = float(rm.group(1))\n # print(\"avg:\", avg)\n assert min_avg <= avg <= max_avg\n\n\n@pytest.mark.integration\n@pytest.mark.nomock\ndef test_link_conditioning(register_licensing, client_library_keep_labs: ClientLibrary):\n lab = client_library_keep_labs.create_lab()\n\n alpine = lab.create_node(\"alpine-0\", \"alpine\", 0, 0)\n ums = lab.create_node(\"unmanaged-switch-0\", \"unmanaged_switch\", 100, 0)\n ext = lab.create_node(\"ext\", \"external_connector\", 200, 0)\n\n lab.connect_two_nodes(alpine, ums)\n lab.connect_two_nodes(ums, ext)\n\n lab.start(wait=True)\n\n alpine = lab.get_node_by_label(\"alpine-0\")\n ums = lab.get_node_by_label(\"unmanaged-switch-0\")\n link = lab.get_link_by_nodes(alpine, ums)\n\n pylab = ClPyats(lab)\n pylab.sync_testbed(\"cml2\", \"cml2cml2\")\n\n # ensure there's no link condition\n result = link.get_condition()\n assert result is None\n\n # remove, just to be sure\n link.remove_condition()\n result = pylab.run_command(\"alpine-0\", \"time ping -Aqc100 192.168.255.1\")\n check_result(result, False, 0.0, 10.0)\n\n # link.set_condition_by_name(\"dsl1\")\n\n # 2mbps, 50ms delay, 0ms jitter, 5.1% loss)\n # 5.1 to ensure that the float is understood and returned\n link.set_condition(2000, 50, 0, 5.1)\n\n result = link.get_condition()\n assert result == {\"bandwidth\": 2000, \"latency\": 50, \"loss\": 5.1, \"jitter\": 0}\n\n result = pylab.run_command(\"alpine-0\", \"time ping -Aqc100 192.168.255.1\")\n check_result(result, True, 90.0, 110.0)\n\n link.remove_condition()\n result = pylab.run_command(\"alpine-0\", \"time ping -Aqc100 192.168.255.1\")\n check_result(result, False, 0.0, 10.0)\n\n lab.stop()\n lab.wipe()\n lab.remove()\n","sub_path":"tests/test_link_conditioning.py","file_name":"test_link_conditioning.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"165132754","text":"import sys\n\nfrom pyspark import SparkContext, SparkConf\nfrom flask import Flask\nfrom app import create_app\n\n\n# This initialises the spark context.\ndef init_spark_context():\n # load spark context\n conf = SparkConf().setAppName(\"movie_recommendation-server\")\n sc = SparkContext(conf=conf, pyFiles=['models.py', 'app.py'])\n return sc\n\n\n# This is the main class\nif __name__ == \"__main__\":\n args = sys.argv[1:]\n port = 8080\n small = False\n ml = False\n try:\n port = int(args[0])\n small = True if args[1] == \"T\" else False \n ml = True if args[2] == \"T\" else False \n except: \n port = 8080\n # Init spark context and load libraries\n sc = init_spark_context()\n # dataset_path = os.path.join('datasets', 'ml-latest')\n app = create_app(sc, small, ml)\n app.run(port=port)\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"124776230","text":"from django import template\nfrom django.core.urlresolvers import reverse\n# from django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n@register.simple_tag(takes_context=True)\ndef active(context, pattern):\n \"\"\" Try to 
determine whether a link is active \"\"\"\n request = context.get('request', None)\n if request is not None:\n if pattern == request.path:\n return u'active'\n elif pattern != '/':\n import re\n # Use \"match\" instead of \"search\" to find from beginning\n if re.match(pattern, request.path):\n return u'active'\n return u''\n\n@register.simple_tag(takes_context=True)\ndef make_crumbs(context, *args, **kwargs):\n \"\"\" Make a first-level breadcrumb trail with the given title. \"\"\"\n request = context.get('request', None)\n if request is not None and len(args) > 0:\n if len(args) == 1:\n request.breadcrumbs(args[0], request.path_info)\n elif len(args) == 2:\n title = args[0]\n url = args[1]\n request.breadcrumbs(title, url)\n elif len(args) > 2:\n title = args[0]\n url = args[1]\n request.breadcrumbs(title, reverse(url, args=args[2:], kwargs=kwargs))\n # request.breadcrumbs(mark_safe(''.join(args)), request.path_info)\n return ''\n\n@register.filter(name='is_checkbox')\ndef is_checkbox(field):\n \"\"\" From http://stackoverflow.com/questions/3927018/django-how-to-check-if-field-widget-is-checkbox-in-the-template \"\"\"\n from django.forms import CheckboxInput\n return field.field.widget.__class__.__name__ == CheckboxInput().__class__.__name__\n","sub_path":"core/templatetags/core_tags.py","file_name":"core_tags.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"291142596","text":"from django.http import HttpResponse\nfrom django.template import loader\nfrom django.shortcuts import get_object_or_404\n\nfrom .models import Bet\n\ndef index(request):\n bets = Bet.objects.order_by('created_at')\n template = loader.get_template('bets/index.html')\n context = {\n 'bets': bets,\n }\n return HttpResponse(template.render(context, request))\n\ndef detail(request, bet_id):\n bet = get_object_or_404(Bet, pk=bet_id)\n template = loader.get_template('bets/detail.html')\n context = {\n 'bet': bet,\n }\n return HttpResponse(template.render(context, request))\n","sub_path":"bets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"92039898","text":"# Copyright (c) 2015 by the parties listed in the AUTHORS file.\n# All rights reserved. 
Use of this source code is governed by \n# a BSD-style license that can be found in the LICENSE file.\n\n\nimport sys\nimport os\n\nif 'TOAST_NO_MPI' in os.environ.keys():\n from toast import fakempi as MPI\nelse:\n from mpi4py import MPI\n\nimport numpy as np\n\nfrom toast.tod.tod import *\nfrom toast.tod.pointing import *\nfrom toast.tod.noise import *\nfrom toast.tod.sim_noise import *\nfrom toast.tod.sim_detdata import *\nfrom toast.tod.sim_tod import *\n\nfrom toast.mpirunner import MPITestCase\n\nfrom toast.fod import autocov_psd\n\n\nclass PSDTest(MPITestCase):\n\n\n def setUp(self):\n self.outdir = \"tests_output\"\n if self.comm.rank == 0:\n if not os.path.isdir(self.outdir):\n os.mkdir(self.outdir)\n\n # Note: self.comm is set by the test infrastructure\n self.worldsize = self.comm.size\n if (self.worldsize >= 2):\n self.groupsize = int( self.worldsize / 2 )\n self.ngroup = 2\n else:\n self.groupsize = 1\n self.ngroup = 1\n self.toastcomm = Comm(world=self.comm, groupsize=self.groupsize)\n self.data = Data(self.toastcomm)\n\n self.dets = [\"f1a\", \"f1b\", \"f2a\", \"f2b\", \"white\", \"high\"]\n self.fp = {}\n for d in self.dets:\n self.fp[d] = np.array([0.0, 0.0, 1.0, 0.0])\n\n self.rate = 20.0\n\n self.rates = {}\n self.fmin = {}\n self.fknee = {}\n self.alpha = {}\n self.NET = {}\n\n self.rates[\"f1a\"] = self.rate\n self.fmin[\"f1a\"] = 1.0e-5\n self.fknee[\"f1a\"] = 20.00\n self.alpha[\"f1a\"] = 2.0\n self.NET[\"f1a\"] = 10.0\n\n self.rates[\"f1b\"] = self.rate\n self.fmin[\"f1b\"] = 1.0e-5\n self.fknee[\"f1b\"] = 0.1\n self.alpha[\"f1b\"] = 1.0\n self.NET[\"f1b\"] = 10.0\n\n self.rates[\"f2a\"] = self.rate\n self.fmin[\"f2a\"] = 1.0e-5\n self.fknee[\"f2a\"] = 0.05\n self.alpha[\"f2a\"] = 1.0\n self.NET[\"f2a\"] = 10.0\n\n self.rates[\"f2b\"] = self.rate\n self.fmin[\"f2b\"] = 1.0e-5\n self.fknee[\"f2b\"] = 0.001\n self.alpha[\"f2b\"] = 1.0\n self.NET[\"f2b\"] = 10.0\n\n self.rates[\"white\"] = self.rate\n self.fmin[\"white\"] = 0.0\n self.fknee[\"white\"] = 0.0\n self.alpha[\"white\"] = 1.0\n self.NET[\"white\"] = 10.0\n\n self.rates[\"high\"] = self.rate\n self.fmin[\"high\"] = 1.0e-5\n self.fknee[\"high\"] = 2.0\n self.alpha[\"high\"] = 1.0\n self.NET[\"high\"] = 10.0\n\n self.totsamp = 100000\n\n self.oversample = 2\n\n self.MC = 100\n\n # in order to make sure that the noise realization is reproducible\n # all all concurrencies, we set the chunksize to something independent\n # of the number of ranks.\n\n nchunk = 2\n chunksize = int(self.totsamp / nchunk)\n chunks = np.ones(nchunk, dtype=np.int64)\n chunks *= chunksize\n remain = self.totsamp - (nchunk * chunksize)\n for r in range(remain):\n chunks[r] += 1\n\n self.chunksize = chunksize\n\n # Construct an empty TOD (no pointing needed)\n\n self.tod = TODHpixSpiral(mpicomm=self.toastcomm.comm_group, detectors=self.fp, samples=self.totsamp, firsttime=0.0, rate=self.rate, nside=512, sizes=chunks)\n\n # construct an analytic noise model\n\n self.nse = AnalyticNoise(\n rate=self.rates, \n fmin=self.fmin, \n detectors=self.dets, \n fknee=self.fknee, \n alpha=self.alpha, \n NET=self.NET\n )\n\n ob = {}\n ob['name'] = 'noisetest-{}'.format(self.toastcomm.group)\n ob['id'] = 0\n ob['tod'] = self.tod\n ob['intervals'] = None\n ob['baselines'] = None\n ob['noise'] = self.nse\n\n self.data.obs.append(ob)\n\n #data\n #self.nsamp = 100000\n self.stationary_period = self.totsamp\n self.lagmax = self.totsamp / 10\n #self.fsample = 4.0\n #self.times = np.arange(self.nsamp) / self.fsample\n #self.sigma = 10.\n #self.signal = 
np.random.randn(self.nsamp) * self.sigma\n #self.flags = np.zeros(self.nsamp, dtype=np.bool)\n #self.flags[int(self.nsamp/4):int(self.nsamp/2)] = True\n\n\n def test_autocov_psd(self):\n start = MPI.Wtime()\n\n ob = self.data.obs[0]\n tod = ob['tod']\n nse = ob['noise']\n\n ntod = self.totsamp\n\n r = 0 # noise realization\n op = OpSimNoise(realization=r)\n op.exec(self.data)\n\n # this replicates the calculation in sim_noise_timestream()\n\n fftlen = 2\n half = 1\n while fftlen <= (self.oversample * self.chunksize):\n fftlen *= 2\n half *= 2\n\n freqs = {}\n psds = {}\n psdnorm = {}\n todvar = {}\n\n for idet, det in enumerate( tod.local_dets ):\n fsamp = nse.rate(det)\n cutoff = 0.95 * (fsamp / 2.0)\n indx = np.where(nse.freq(det) > cutoff)\n\n NET = nse.NET(det)\n knee = nse.fknee(det)\n avg = np.mean(nse.psd(det)[indx])\n NETsq = NET*NET\n\n df = nse.rate(det) / float(fftlen)\n\n (temp, freqs[det], psds[det]) = sim_noise_timestream(0, 0, 0, 0, idet, nse.rate(det), 0, self.chunksize, self.oversample, nse.freq(det), nse.psd(det))\n\n if False:\n psdfreq = freqs[det]\n psd = psds[det]\n\n nn = 2\n while nn < ntod: nn *= 2\n freq = np.fft.rfftfreq( nn, 1/fsamp )\n fnn = freq.size\n psd_interp = np.interp( freq, psdfreq, psd )\n fnoisetod = np.random.randn(fnn) + 1j*np.random.randn(fnn)\n fnoisetod *= np.sqrt(psd_interp * fsamp) * np.sqrt(nn) / np.sqrt(2)\n noisetod = np.fft.irfft( fnoisetod )[:ntod]\n\n #noisetod[::2] = 1\n #noisetod[1::2] = -1\n #print(noisetod[:100])\n else:\n noisetod = tod.cache.reference(\"noise_{}\".format(det))\n\n noisetod2 = noisetod.copy()\n for i in range(1,noisetod.size):\n noisetod[i] = .999*( noisetod[i-1] + noisetod2[i] - noisetod2[i-1] )\n \n autocovs = autocov_psd(np.arange(ntod)/fsamp, noisetod, np.zeros(ntod,dtype=np.bool), self.lagmax, self.stationary_period, fsamp, comm=self.comm)\n #autocovs = autocov_psd(np.arange(ntod)/fsamp, noisetod, np.zeros(ntod,dtype=np.bool), 10, self.stationary_period, fsamp, comm=self.comm)\n\n if self.comm.rank == 0:\n import matplotlib.pyplot as plt\n\n nn = 2\n while nn*2 < noisetod.size: nn *= 2\n fnoise = np.abs( np.fft.rfft( noisetod[:nn] ) )**2 / nn / fsamp\n ffreq = np.fft.rfftfreq( nn, 1/fsamp )\n\n nbin = 300\n fnoisebin, hits = log_bin( fnoise, nbin=nbin )\n ffreqbin, hits = log_bin( ffreq, nbin=nbin )\n fnoisebin = fnoisebin[ hits != 0 ]\n ffreqbin = ffreqbin[ hits != 0 ]\n\n fig = plt.figure(figsize=(12,8), dpi=72)\n ax = fig.add_subplot(1, 1, 1, aspect='auto')\n for i in range(len(autocovs)):\n t0, t1, freq, psd = autocovs[i]\n bfreq, hits = log_bin( freq, nbin=nbin )\n bpsd, hits = log_bin( psd, nbin=nbin )\n ax.loglog( freq, psd, '.', color='magenta', label='autocov PSD' )\n ax.loglog( bfreq, bpsd, '-', color='red', label='autocov PSD (binned)' )\n #ax.loglog( ffreq, fnoise, '.', color='green', label='FFT of the noise' )\n ax.loglog( ffreqbin, fnoisebin, '.', color='green', label='FFT of the noise' )\n #ax.loglog(freqs[det], psds[det], marker='+', c=\"blue\", label='{}: rate={:0.1f} NET={:0.1f} fknee={:0.4f}, fmin={:0.4f}'.format(det, self.rates[det], self.NET[det], self.fknee[det], self.fmin[det]))\n ax.loglog(nse.freq(det), nse.psd(det), '-b', lw=2, label='{}: rate={:0.1f} NET={:0.1f} fknee={:0.4f}, fmin={:0.4f}'.format(det, self.rates[det], self.NET[det], self.fknee[det], self.fmin[det]))\n cur_ylim = ax.get_ylim()\n ax.set_xlim([1e-5, fsamp/2])\n ax.set_ylim([0.001*(nse.NET(det)**2), 10.0*cur_ylim[1]])\n ax.legend(loc=1)\n plt.title(\"Simulated PSD from toast.AnalyticNoise\")\n\n savefile = 
os.path.join(self.outdir, \"out_test_psd_math_rawpsd_{}.png\".format(det))\n plt.savefig(savefile)\n plt.close()\n\n \"\"\"\n autocovs = autocov_psd(self.times, self.signal, self.flags, self.lagmax, self.stationary_period, self.fsample, comm=self.comm)\n\n for i in range(len(autocovs)):\n t0, t1, freq, psd = autocovs[i]\n\n n = len(psd)\n mn = np.mean( np.abs( psd ) )\n err = np.std( np.abs( psd ) )\n\n ref = self.sigma**2 / self.fsample\n if np.abs(mn - ref) > err / np.sqrt(n) * 4.:\n raise RuntimeError('White noise input failed to produce a properly normalized white noise spectrum')\n \"\"\"\n return\n\n\ndef log_bin( data, nbin=100 ):\n\n # Take a regularly sampled, ascending vector of values and bin it to\n # logaritmically narrowing bins\n\n # To get the bin positions, you must call log_bin twice: first with x and then y vectors\n n = len(data)\n\n ind = np.arange(n)+1\n\n bins = np.logspace(\n np.log(ind[0]), np.log(ind[-1]), num=nbin+1, endpoint=True, base=np.e\n )\n bins[-1] *= 1.01 # Widen the last bin not to have a bin with one entry\n\n locs = np.digitize(ind, bins)\n\n hits = np.zeros( nbin+2, dtype=np.int )\n binned = np.zeros( nbin+2, dtype=data.dtype )\n\n for i, ibin in enumerate(locs):\n hits[ibin] += 1\n binned[ibin] += data[i]\n\n ind = hits > 0\n binned[ind] /= hits[ind]\n\n return binned[ind], hits[ind]\n\n\n","sub_path":"tests/test_psd_math.py","file_name":"test_psd_math.py","file_ext":"py","file_size_in_byte":10095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"163057254","text":"import socket\nimport cv2\nimport pickle\nimport struct\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('/home/pi/Desktop/output.avi', fourcc, 30, (640, 480))\nclass server():\n def __init__(self, host_ip, port):\n socket_address = (host_ip, port)\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n self.server_socket.bind(socket_address)\n self.server_socket.listen(5)\n\n def Main(self):\n while True:\n client_socket, addr = self.server_socket.accept()\n print('GOT CONNECTION FROM:', addr)\n if client_socket:\n vid = cv2.VideoCapture(0)\n frame_id = 0\n while (vid.isOpened()):\n img, frame = vid.read()\n a = pickle.dumps(frame)\n frame_width = 640\n frame_height = 480\n frame_id += 1\n left_x_up = int(frame_width / frame_id)\n left_y_up = int(frame_height / frame_id)\n # 文字坐标\n word_x = left_x_up + 5\n word_y = left_y_up + 25\n name = 'opencv monitor'\n cv2.putText(frame, '%s' % name, (word_x, word_y), cv2.FONT_HERSHEY_SIMPLEX, 1, (55, 255, 155), 2)\n out.write(frame)\n message = struct.pack(\"Q\", len(a)) + a\n client_socket.sendall(message)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n client_socket.close()\nif __name__ == '__main__':\n try:\n work = server('192.168.68.101', 9999)\n work.Main()\n except ConnectionResetError:\n print('连接中断')\n\n\n","sub_path":"linux_project/test/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"20571261","text":"class Job: \r\n def __init__(self, start, finish, profit, path):\r\n self.start = start \r\n self.finish = finish \r\n self.profit = profit\r\n self.path = path\r\n\r\n########## DIJKSTRA ###############\r\n\r\ndef dijkstra(graph, start, end):\r\n queue,seen = [(0, start, [])], set()\r\n while True:\r\n (cost, v, path) = 
heapq.heappop(queue)\r\n if v not in seen:\r\n path = path + [v]\r\n seen.add(v)\r\n if v == end:\r\n return cost, path\r\n for (next, c) in graph[v].items():\r\n heapq.heappush(queue, (cost + c, next, path))\r\n\r\n##### MENORES CAMINHOS #####\r\n\r\ndef menores_caminhos(entregas,graph,job):\r\n inicio = list(graph.keys())[0];\r\n \r\n for destino in entregas: #O(e) * tudo abaixo\r\n try:\r\n tempo_ida, caminho_ida = dijkstra(graph,inicio, destino) #O((n+m)*logn)\r\n \r\n tempo_volta, caminho_volta = dijkstra(graph,destino, inicio)#O(idem)\r\n tempo_final = tempo_ida + tempo_volta#O(1)\r\n tempo_inicial = int(entregas[destino][0])#O(1)\r\n lucro_entrega = int(entregas[destino][1])#O(1)\r\n caminho = list([caminho_ida] + [caminho_volta])#O(1)\r\n job.append(Job(tempo_inicial, tempo_inicial + tempo_final, lucro_entrega, caminho))#O(1)\r\n except:\r\n print(\"Não há caminho para a entrega : \",destino,\"\\n\")\r\n return job\r\n\r\n##### PREDECESSOR ###########\r\n\r\ndef Encontrar_Predecessor(job,start_index): # O(nlogn) ou O(n2)\r\n escolhido = 0\r\n for i in reversed(range(0,start_index)):\r\n #print(\":\",i)\r\n if job[i].finish <= job[start_index].start:\r\n return i\r\n return escolhido\r\n\r\n########## WIS ###############\r\n\r\ndef schedule(job): \r\n\r\n job = merge_sort(job) #OK O(e log e)\r\n for j in job: #O(e)\r\n print(\"Start :\",j.start,\" Finish :\",j.finish,\" Profit : \",j.profit)\r\n pre=0\r\n n = len(job)\r\n table_pre = [0 for _ in range(n)] #p(J)\r\n table_lucro = [0 for _ in range(n)] #v(J)\r\n table_max = [0 for _ in range(n)] #M[J]\r\n\r\n ### esse eh o wis ###\r\n for i in range(1,n): #O(e)\r\n table_lucro[i] = job[i].profit\r\n pre = Encontrar_Predecessor(job,i) #O(?)\r\n if pre != 0:\r\n table_pre[i] = pre\r\n table_max[i] = max(table_lucro[i] + table_max[table_pre[i]], table_max[i - 1]) #O(1)\r\n\r\n print(\"Table lucro,Tabela pre, Tabela Max :\")\r\n print(table_lucro)\r\n print(table_pre)\r\n print(table_max)\r\n \r\n #lucro_max,solution_list = Find_Solution(n-1,table_pre,table_lucro,table_max,job) #O(no slide)\r\n lucro_max=0\r\n lista_lucro = Find_Solution(n-1,table_pre,table_lucro,table_max,[])\r\n for indice in lista_lucro:\r\n lucro_max += int(job[indice].profit)\r\n\r\n return lucro_max,lista_lucro,job\r\n\r\n#versão do prof. 
recursivo\r\n########## FIND SOLUTION ###########\r\n\r\ndef Find_Solution(j,table_pre,table_lucro,table_max,lista_lucro): #O(e)\r\n if (j == 0):# or cont >= j):\r\n print(\"fim\")\r\n return lista_lucro\r\n elif ((table_lucro[j] + table_max[table_pre[j]]) > table_max[j-1]):\r\n #print(j)\r\n lista_lucro.append(j)\r\n return Find_Solution(table_pre[j],table_pre,table_lucro,table_max,lista_lucro)\r\n else: \r\n return Find_Solution(j-1,table_pre,table_lucro,table_max,lista_lucro)\r\n\r\n########## MERGE SORT ###############\r\n\r\ndef merge(llist, rlist):\r\n final = []\r\n while llist or rlist:\r\n # This verification is necessary for not try to compare\r\n # a NoneType with a valid type.\r\n if len(llist) and len(rlist):\r\n if llist[0].finish < rlist[0].finish:\r\n final.append(llist.pop(0))\r\n else:\r\n final.append(rlist.pop(0))\r\n if not len(llist):\r\n if len(rlist): final.append(rlist.pop(0))\r\n\r\n if not len(rlist):\r\n if len(llist): final.append(llist.pop(0))\r\n\r\n return final\r\n\r\ndef merge_sort(list):\r\n if len(list) < 2: return list\r\n mid = len(list) // 2\r\n return merge(merge_sort(list[:mid]), merge_sort(list[mid:]))\r\n\r\nif __name__ == \"__main__\":\r\n import heapq\r\n import ler_arquivo2\r\n while(1):\r\n arquivo = ''\r\n print(\"Digite o caminho para seu arquivo : \")\r\n arquivo = input(arquivo)\r\n print(arquivo)\r\n try:\r\n graph,entregas = ler_arquivo2.ler_arquivo(arquivo) #O(n2)\r\n except:\r\n print(\"Nao foi possivel manipular o arquivo\")\r\n continue\r\n print(\"Grafo\\n\", graph)\r\n print(\"Entregas\\n\", entregas,\"\\n\")\r\n job = []\r\n job = menores_caminhos(entregas,graph,job) #O(e)*(2*(O((n+m)*logn))\r\n job.append(Job(0,0,0,[]))\r\n #(start, finish, profit, path)\r\n \r\n lucro_max, lucro_list, job = schedule(job)\r\n\r\n print(\"Entregas realizadas : \", len(lucro_list))\r\n caminhos_lucrativos = ''\r\n for indice in lucro_list:\r\n caminhos_lucrativos += str(job[indice].path)\r\n print(\"Para :\", job[indice].path[0][-1], \"Path : \", job[indice].path[0], \"Com lucro = \", job[indice].profit)\r\n print(\"Tempo de inicio : \",job[indice].start, \" e tempo final \", job[indice].finish)\r\n print(\"Totalizando : \", lucro_max,\" de lucro\")\r\n print(caminhos_lucrativos)\r\n graph.clear()\r\n entregas.clear()\r\n '''\r\n #print(Mod(graph).gr)\r\n '''\r\n\r\n","sub_path":"AnalisedeAlgoritmos/Trabalho_Final/grafo6.py","file_name":"grafo6.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"443236234","text":"import sqlalchemy as sa\nfrom sqlalchemy.sql.expression import func\nfrom sqlalchemy import or_, and_\nfrom sqlalchemy.sql.functions import array_agg\nfrom sqlalchemy import Text\nfrom sqlalchemy.types import ARRAY\nfrom datetime import datetime, timedelta\n\nfrom .. import Chuzzon\nfrom .. import Poll\nfrom .. import PollTopic\nfrom .. import Answer\nfrom .. import Question\nfrom .. import PollCountry\nfrom .. 
import Topic\n\nPAGE_SIZE = 20\nPROMOTED_POLL_SIZE = 2\n\n\nclass ChuzzonQuery:\n    def __init__(self, session):\n        self.session = session\n\n    @property\n    def query(self):\n        q = self.session.query(Chuzzon)\n        return q\n\n    def find_by_id(self, id):\n        return self.query.filter_by(id=id).first()\n\n    def paginate(self, query, page, page_size=PAGE_SIZE,\n                 first_page_size=PAGE_SIZE):\n        if page == 1:\n            return query.limit(first_page_size)\n        else:\n            return query.limit(page_size).offset(\n                (page - 2) * page_size + first_page_size)\n\n    def filter_by_active(self, query):\n        return query.filter(Chuzzon.is_active.is_(True))\n    \n\n    def filter_by_active_and_not_expired(self, query):\n        today = datetime.now()\n        return self.filter_by_active(query)\\\n            .filter(Chuzzon.start_time <= today)\\\n            .filter(Chuzzon.end_time >= today - timedelta(days=1) + timedelta(minutes=1))\n\n    def find_by_celebrity_owner_ids(self, user_id, celebrities_owner_ids, page=1):\n        \n        query = self.session.query(Chuzzon)\n        query = self.filter_by_active_and_not_expired(query)\n        query = query.filter(Chuzzon.user_chuzzon_id.in_(celebrities_owner_ids))\n        query = query.group_by(Chuzzon.id, Chuzzon._database_id, Chuzzon._owner_id)\n        query = query.order_by(Chuzzon.start_time.desc())\n        query = self.paginate(query, page)\n        return query.all()\n\n","sub_path":"plugin/models/queries/chuzzon_query.py","file_name":"chuzzon_query.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"629595869","text":"import numpy as np\nimport scipy as sp\nimport scipy.sparse\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.path\nplt.ion()\nimport pybie2d\n\n\"\"\"Demonstrate how to use the pybie2d package to solve an interior Modified Helmholtz problem\nOn a complicated domain using a global quadrature\n\nThis example demonstrates how to do this entirely using low-level routines,\nTo demonstrate both how to use these low level routines\nAnd to give you an idea what is going on under the hood in the\n\thigher level routines\n\"\"\"\n\nNG = 100\nh_max = 0.01\nhelmholtz_k = 0.5\n\n# extract some functions for easy calling\nsquish = pybie2d.misc.curve_descriptions.squished_circle\nPPB = pybie2d.boundaries.panel_polygon_boundary.panel_polygon_boundary.Panel_Polygon_Boundary\nGrid = pybie2d.grid.Grid\nPointSet = pybie2d.point_set.PointSet\nModified_Helmholtz_Layer_Form = pybie2d.kernels.high_level.modified_helmholtz.Modified_Helmholtz_Layer_Form\nModified_Helmholtz_Layer_Apply = pybie2d.kernels.high_level.modified_helmholtz.Modified_Helmholtz_Layer_Apply\nk0 = pybie2d.misc.numba_special_functions.numba_k0\nk1 = pybie2d.misc.numba_special_functions.numba_k1\n\n################################################################################\n# define problem\n\n# boundary\nboundary = PPB([0,1,1,0], [0,0,1,1], [h_max]*4, [True]*4, dyadic_levels=24, dyadic_base=3)\n# solution\ndef _solution_func(x, y):\n\tdx = x - (-0.1)\n\tdy = y - (0.5)\n\tr = np.sqrt(dx**2 + dy**2)\n\treturn k0(helmholtz_k*r)/(2*np.pi)\ndef _solution_func_dn(x, y, nx, ny):\n\tdx = x - (-0.1)\n\tdy = y - (0.5)\n\tr = np.sqrt(dx**2 + dy**2)\n\tdd = helmholtz_k*k1(helmholtz_k*r)/(2*np.pi*r)\n\treturn (nx*dx+ny*dy)*dd\nbc = _solution_func_dn(boundary.x, boundary.y, boundary.normal_x, boundary.normal_y)\nbcmax = np.abs(bc).max()\nbc /= bcmax\ndef solution_func(x, y):\n\treturn _solution_func(x, y)/bcmax\n\ndef err_plot(u):\n\t# compute the error\n\terror = u - solution_func(gridp.xg, 
gridp.yg)\n\tdigits = -np.log10(np.abs(error)+1e-16)\n\tmdigits = np.ma.array(digits)\n\n\t# plot the error as a function of space (only good in interior)\n\tfig, ax = plt.subplots(1,1)\n\tclf = ax.imshow(mdigits[:,::-1].T, extent=[0,1,0,1],\n\t\t\t\t\t\t\t\t\t\t\t\tcmap=mpl.cm.viridis_r)\n\tax.set_aspect('equal')\n\tfig.colorbar(clf)\n\n\tprint('Error: {:0.2e}'.format(np.abs(error).max()))\n\n################################################################################\n##### solve problem the hard way ###############################################\n################################################################################\n\n################################################################################\n# find physical region\n# (this implements a fast way to tell if points are in or out of the boundary)\n# (and of course, for the squish boundary, we could easily figure out something\n# faster, but this illustrates a general purpose routine)\n\ngridp = Grid([0,1], NG, [0,1], NG, x_endpoints=[False,False], y_endpoints=[False,False])\n\n################################################################################\n# solve for the density\n\nDLP = Modified_Helmholtz_Layer_Form(boundary, k=helmholtz_k, ifdipole=True)\nSLPp = -(DLP/boundary.weights).T*boundary.weights\nA = -0.5*np.eye(boundary.N) + SLPp\ntau = np.linalg.solve(A, bc)\n\n################################################################################\n# naive evaluation\n\nu = Modified_Helmholtz_Layer_Apply(boundary, gridp, k=helmholtz_k, charge=tau)\nu = gridp.reshape(u)\nerr_plot(u)\n\n################################################################################\n# oversampled\n\nhmax = gridp.xg[1,0] - gridp.xg[0,0]\nfbdy, IMAT = boundary.prepare_oversampling(hmax/6.0)\nIMAT = sp.sparse.csr_matrix(IMAT)\nftau = IMAT.dot(tau)\nu = Modified_Helmholtz_Layer_Apply(fbdy, gridp, k=helmholtz_k, charge=ftau)\nu = gridp.reshape(u)\nerr_plot(u)\n\nua = solution_func(gridp.xg, gridp.yg)\n","sub_path":"examples/solvers using low level utilities/interior_modified_helmholtz__neumann_panel_polygon.py","file_name":"interior_modified_helmholtz__neumann_panel_polygon.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"141242279","text":"def detect_all_cameras(self):\n self.disconnect_all()\n\n with self._gp_lock:\n cameras_name_and_port = gp.check_result(gp.gp_camera_autodetect())\n\n port_info_list = gp.PortInfoList()\n port_info_list.load()\n\n for name, port in cameras_name_and_port:\n gp_camera = gp.Camera()\n idx = port_info_list.lookup_path(port)\n port_info = port_info_list[idx]\n gp_camera.set_port_info(port_info)\n\n camera = GpCamera(name, port, gp_camera)\n self._cameras_dict[camera.id] = camera","sub_path":"src/gp_detect_all.py","file_name":"gp_detect_all.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"557408777","text":"#import socket module\r\n#tcp server (sock STREAM instead of SOCK_DGRAM\r\nfrom socket import *\r\nimport sys\r\nHOST = '172.19.117.129' #local IP of host \r\nPORT = 9001 #arbitrary number \r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\n#Prepare a sever socket\r\nserverSocket.bind((HOST, PORT))\r\nwhile True: #while the socket/port is open\r\n #Establish the connection\r\n print('Ready to serve...')\r\n serverSocket.listen(5) #serves 5 connections before refusing anymore incoming connections\r\n 
connectionSocket, addr = serverSocket.accept() #accepting any incoming connections to the socket\r\n    try:\r\n        message = connectionSocket.recv(1024)#receiving and storing data from the socket, max size is 1024 bytes, this is the url\r\n        print(\"--------MESSAGE---------\")\r\n        print(message)\r\n        #message is just the header of the connection\r\n        print(\"--------FILENAME---------\")\r\n        filename = message.split()[1] #splits the url and finds the selected file identified by [1]\r\n        print(filename)\r\n        #filename is just /index.html\r\n        print(\"--------F---------\")\r\n        f = open(filename[1:]) #opens the requested file (index.html)\r\n        print(f)\r\n        #this is the command to open the file at the selected location\r\n        print(\"--------OUTPUTDATA---------\")\r\n        outputdata = f.read(1024) #storing the information from the selected file, reading 1024 bytes\r\n        print(outputdata)\r\n        #output data is just the html code from index.html\r\n        print(\"--------END LINE---------\")\r\n        #Send one HTTP header line into socket\r\n        connectionSocket.send('HTTP/1.1 200 OK\\nContent-Type: text/html\\r\\n\\r\\n') #this is sending this line to the client\r\n        #Send the content of the requested file to the client\r\n        for i in range(0, len(outputdata)): #sending the data from the html file to the client that is connected\r\n            connectionSocket.send(outputdata[i].encode())\r\n        connectionSocket.send(\"\\r\\n\".encode())\r\n        \r\n        connectionSocket.close()\r\n    except IOError: #for error handling, if selected file isn't found then show this\r\n        #Send response message for file not found\r\n        connectionSocket.send(\"HTTP/1.1 404 Not Found\\r\\n\\r\\n\") #sending the error message in header format\r\n        connectionSocket.send(\"
<html><head></head><body><h1>404 Not Found</h1></body></html>
    \\r\\n\") #showing the error message on the webpage\r\n #Close client socket\r\n connectionSocket.close()\r\nserverSocket.close()\r\nsys.exit() #Terminate the program \r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"136968070","text":"# Copyright 2017 QuantRocket - All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport webbrowser\nfrom quantrocket.houston import houston\nfrom quantrocket.exceptions import UnavailableInsideJupyter\nfrom quantrocket.cli.utils.output import json_to_cli\n\ndef list_gateway_statuses(exchanges=None, sec_type=None, research_vendors=None, status=None,\n gateways=None):\n \"\"\"\n Query statuses of IB Gateway services.\n\n Parameters\n ----------\n exchanges : list of str, optional\n limit to IB Gateway services with market data permission for these exchanges\n\n sec_type : str, optional\n limit to IB Gateway services with market data permission for this securitiy type (useful for disambiguating permissions for exchanges that trade multiple asset classes). Possible choices: STK, FUT, CASH, OPT\n\n research_vendors : list of str, optional\n limit to IB Gateway services with permission for these research vendors (choices: reuters, wsh)\n\n status : str, optional\n limit to IB Gateway services in this status. Possible choices: running, stopped, error\n\n gateways : list of str, optional\n limit to these IB Gateway services\n\n Returns\n -------\n dict of gateway:status (if status arg not provided), or list of gateways (if status arg provided)\n \"\"\"\n params = {}\n if sec_type:\n params[\"sec_type\"] = sec_type\n if exchanges:\n params[\"exchanges\"] = exchanges\n if research_vendors:\n params[\"research_vendors\"] = research_vendors\n if gateways:\n params[\"gateways\"] = gateways\n if status:\n params[\"status\"] = status\n\n response = houston.get(\"/launchpad/gateways\", params=params)\n houston.raise_for_status_with_json(response)\n return response.json()\n\ndef _cli_list_gateway_statuses(*args, **kwargs):\n return json_to_cli(list_gateway_statuses, *args, **kwargs)\n\ndef start_gateways(exchanges=None, sec_type=None, research_vendors=None, gateways=None, wait=False):\n \"\"\"\n Start one or more IB Gateway services.\n\n Parameters\n ----------\n exchanges : list of str, optional\n limit to IB Gateway services with market data permission for these exchanges\n\n sec_type : str, optional\n limit to IB Gateway services with market data permission for this securitiy type (useful for disambiguating permissions for exchanges that trade multiple asset classes). 
Possible choices: STK, FUT, CASH, OPT\n\n research_vendors : list of str, optional\n limit to IB Gateway services with permission for these research vendors (choices: reuters, wsh)\n\n gateways : list of str, optional\n limit to these IB Gateway services\n\n wait: bool\n wait for the IB Gateway services to start before returning (default is to start the gateways asynchronously)\n\n Returns\n -------\n dict\n status message\n \"\"\"\n params = {\"wait\": wait}\n if sec_type:\n params[\"sec_type\"] = sec_type\n if exchanges:\n params[\"exchanges\"] = exchanges\n if research_vendors:\n params[\"research_vendors\"] = research_vendors\n if gateways:\n params[\"gateways\"] = gateways\n\n response = houston.post(\"/launchpad/gateways\", params=params, timeout=120)\n houston.raise_for_status_with_json(response)\n return response.json()\n\ndef _cli_start_gateways(*args, **kwargs):\n return json_to_cli(start_gateways, *args, **kwargs)\n\ndef stop_gateways(exchanges=None, sec_type=None, research_vendors=None, gateways=None, wait=False):\n \"\"\"\n Stop one or more IB Gateway services.\n\n Parameters\n ----------\n exchanges : list of str, optional\n limit to IB Gateway services with market data permission for these exchanges\n\n sec_type : str, optional\n limit to IB Gateway services with market data permission for this securitiy type (useful for disambiguating permissions for exchanges that trade multiple asset classes). Possible choices: STK, FUT, CASH, OPT\n\n research_vendors : list of str, optional\n limit to IB Gateway services with permission for these research vendors (choices: reuters, wsh)\n\n gateways : list of str, optional\n limit to these IB Gateway services\n\n wait: bool\n wait for the IB Gateway services to stop before returning (default is to stop the gateways asynchronously)\n\n Returns\n -------\n dict\n status message\n \"\"\"\n params = {\"wait\": wait}\n if sec_type:\n params[\"sec_type\"] = sec_type\n if exchanges:\n params[\"exchanges\"] = exchanges\n if research_vendors:\n params[\"research_vendors\"] = research_vendors\n if gateways:\n params[\"gateways\"] = gateways\n\n response = houston.delete(\"/launchpad/gateways\", params=params, timeout=45)\n houston.raise_for_status_with_json(response)\n return response.json()\n\ndef _cli_stop_gateways(*args, **kwargs):\n return json_to_cli(stop_gateways, *args, **kwargs)\n\ndef load_launchpad_config(filename):\n \"\"\"\n Uploads a new config.\n\n Parameters\n ----------\n filename : str, required\n the config file to upload to the launchpad service\n\n Returns\n -------\n dict\n status message\n \"\"\"\n with open(filename) as file:\n response = houston.put(\"/launchpad/config\", data=file.read())\n houston.raise_for_status_with_json(response)\n return response.json()\n\ndef get_launchpad_config():\n \"\"\"\n Returns the current config.\n\n Returns\n -------\n dict\n the config as a dict\n \"\"\"\n response = houston.get(\"/launchpad/config\")\n houston.raise_for_status_with_json(response)\n # It's possible to get a 204 empty response\n if not response.content:\n return {}\n return response.json()\n\ndef _cli_load_or_show_config(filename=None):\n if filename:\n return json_to_cli(load_launchpad_config, filename)\n else:\n return json_to_cli(get_launchpad_config)\n\ndef open_ibg_gui(gateways=None):\n \"\"\"\n Access the IB Gateway GUI in a web browser.\n\n Note: IB Gateway must already be running.\n\n Parameters\n ----------\n gateways : list of str, optional\n limit to these IB Gateway services (default all IB Gateway services)\n\n 
Returns\n -------\n None\n \"\"\"\n if os.environ.get(\"YOU_ARE_INSIDE_JUPYTER\", False):\n raise UnavailableInsideJupyter(\"\"\"Cannot open GUI inside Jupyter\n\nPlease use the Jupyter commands menu to open the IB Gateway GUI\n(Commands > QuantRocket > IB Gateway GUI)\n\"\"\")\n\n if not gateways:\n gateways = sorted(list_gateway_statuses())\n for gateway in gateways:\n url = \"{0}/{1}/vnc\".format(houston.base_url, gateway)\n webbrowser.open(url)\n\ndef _cli_open_ibg_gui(*args, **kwargs):\n return json_to_cli(open_ibg_gui, *args, **kwargs)","sub_path":"quantrocket/launchpad.py","file_name":"launchpad.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"122482269","text":"import math\nf = open('input8')\nn, k = map(int, f.readline().split()) # количество всех и искомых точек\npoints = dict() # словарь вида : расстояние - координаты точки [x, y]\nfor i in range(n):\n x, y = map(int, f.readline().split())\n d = math.sqrt(x**2 + y**2)\n points[d] = [x, y]\n# словарь не изменяемый так что отсортировать нельзя\n# создадим список из ключей и отсортируем а потом будем через них\n# обащаться к значениям в словаре\nlist_keys = list(points.keys())\nlist_keys.sort() # любая сортировка( например quicksort )\nfile = open('output8', 'w')\nfor i in range(k):\n file.write(str(points[list_keys[i]])+',')\nf.close()","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"66975280","text":"import os\r\n\r\ntour = int(input())\r\n\r\nfor case in range(1, tour+1):\r\n\ti = 1\r\n\tlistall = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n\tlistdigit = [-1] * 10\r\n\tn = int(input())\r\n\tif n == 0:\r\n\t\tlast = \"INSOMNIA\"\r\n\t\tlistdigit = listall\r\n\twhile listdigit != listall:\r\n\t\tlast = n*i\r\n\t\tx = n*i\r\n\t\twhile x > 0:\r\n\t\t\tdigit = x % 10\r\n\t\t\tif digit not in listdigit:\r\n\t\t\t\tlistdigit[digit] = digit\r\n\t\t\tx = int(x/10)\r\n\r\n\t\ti += 1\r\n\r\n\tprint(\"Case #\" + str(case) + \": \" + str(last))\r\n\r\nos.system(\"pause\")","sub_path":"codes/CodeJamCrawler/16_0_1/ommanu13/rappel.py","file_name":"rappel.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427197003","text":"from PIL import Image\nimport numpy as np\nimport cv2\nimport os\nimport pickle\n\n\ndef color_map(root, all_file, result_pkl_name=\"data/color_map.pkl\"):\n classes = list(range(0, 21))\n classes.append(255)\n\n map_dict = dict()\n for file in all_file:\n now_file = os.path.join(root, file)\n png = np.asarray(Image.open(now_file))\n rbg = cv2.imread(now_file)\n for i in classes:\n idx = np.where(png == i)\n if len(idx[0]) == 0:\n continue\n else:\n color = rbg[idx[0][0], idx[1][0], :]\n map_dict[str(i)] = color\n if len(map_dict) == 22:\n break\n with open(result_pkl_name, \"wb\") as f:\n pickle.dump(map_dict, f)\n pass\n\n\ndef test(pkl_file_name, file_name, result_image_name=\"data/tmp.png\"):\n with open(pkl_file_name, \"rb\") as f:\n d = pickle.load(f)\n\n img = np.array(Image.open(file_name))\n color_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n color_img[i, j, :] = d[str(img[i][j])]\n Image.fromarray(color_img).convert(\"RGB\").save(result_image_name)\n pass\n\n\nif __name__ == '__main__':\n root = 
'C:\\\\ALISURE\\\\Data\\\\voc\\\\VOCdevkit\\\\VOC2012\\\\SegmentationClass\\\\'\n result_pkl_name = \"data/color_map.pkl\"\n all_file = os.listdir(root)\n color_map(root, all_file, result_pkl_name)\n test(result_pkl_name, os.path.join(root, all_file[0]))\n","sub_path":"build_color_map.py","file_name":"build_color_map.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"130457655","text":"\"\"\"\nDQN benchmark on lunarlander\n\"\"\"\nfrom baconian.benchmark.dqn_benchmark.lunarlander_conf import *\nfrom baconian.algo.dqn import DQN\nfrom baconian.core.core import EnvSpec\nfrom baconian.envs.gym_env import make\nfrom baconian.algo.value_func.mlp_q_value import MLPQValueFunction\nfrom baconian.core.agent import Agent\nfrom baconian.algo.misc import EpsilonGreedy\nfrom baconian.core.experiment import Experiment\nfrom baconian.core.flow.train_test_flow import TrainTestFlow\nfrom baconian.config.global_config import GlobalConfig\nfrom baconian.common.schedules import LinearScheduler\nfrom baconian.core.status import get_global_status_collect\n\n\ndef lunarlander_task_fn():\n exp_config = LUNARLANDER_BENCHMARK_CONFIG_DICT\n GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',\n exp_config['DEFAULT_EXPERIMENT_END_POINT'])\n\n env = make('LunarLander-v2')\n name = 'benchmark'\n env_spec = EnvSpec(obs_space=env.observation_space,\n action_space=env.action_space)\n\n mlp_q = MLPQValueFunction(env_spec=env_spec,\n name_scope=name + '_mlp_q',\n name=name + '_mlp_q',\n **exp_config['MLPQValueFunction'])\n dqn = DQN(env_spec=env_spec,\n name=name + '_dqn',\n value_func=mlp_q,\n **exp_config['DQN'])\n agent = Agent(env=env, env_spec=env_spec,\n algo=dqn,\n name=name + '_agent',\n exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,\n prob_scheduler=LinearScheduler(\n t_fn=lambda: get_global_status_collect()(\n 'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),\n **exp_config['EpsilonGreedy']['LinearScheduler']),\n **exp_config['EpsilonGreedy']['config_or_config_dict']))\n flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),\n config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],\n func_dict={\n 'test': {'func': agent.test,\n 'args': list(),\n 'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),\n },\n 'train': {'func': agent.train,\n 'args': list(),\n 'kwargs': dict(),\n },\n 'sample': {'func': agent.sample,\n 'args': list(),\n 'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],\n env=agent.env,\n in_which_status='TRAIN',\n store_flag=True),\n },\n })\n\n experiment = Experiment(\n tuner=None,\n env=env,\n agent=agent,\n flow=flow,\n name=name\n )\n experiment.run()\n","sub_path":"baconian/benchmark/dqn_benchmark/lunarlander.py","file_name":"lunarlander.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627157097","text":"import data_generator as dg\nimport pandas as pd\nimport cv2\ninput_file = './deepdrivedb/deepdrive/linux_recordings/2018-01-18__05-14-48PM'\noutput_file = '.'\n\n\nfiles = dg.get_hdf5_file_names(input_file)\n\n\ngen = dg.generator(files)\nprint(gen())\n\ndf = pd.DataFrame(columns=('name', 'steering'))\n\nprint('Starting time')\nimport time\nt = time.time()\nindex = 0\nfor tpl in gen():\n\n name = str(index) + '.png'\n cv2.imwrite('images/' + name, tpl[0])\n 
df.loc[index] = [name, tpl[1]]\n print(tpl[1])\n index += 1\n\nprint('Process time: ' + str(time.time() - t))\ndf.to_csv('a.cvs', index=False)\n","sub_path":"nvidiaDave2/hd5files_to_jpg.py","file_name":"hd5files_to_jpg.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397000029","text":"from __future__ import division\nimport os, csv, time\nfrom datetime import datetime\nimport datetime as dt\nimport cPickle as pickle\nfrom matplotlib import pyplot as plt\n\ncwd = '/home/cwp/EMC/data/data/'\ncwd2 = '/home/cwp/EMC/data/authors/'\nbasedir = '/home/cwp/EMC/lib/analysis/variation/temporal/'\n\nallData = {}\nauthors = []\nallCounts = {}\n\nwith open(basedir+'Nobservers.txt', 'r') as f:\n lines = f.readlines()\n for line in lines:\n authors.append(line.split('\\n')[0])\n\ncount = 0\nfor year in sorted(os.listdir(cwd)):\n print('======='+year+'=======')\n for month in sorted(os.listdir(os.path.join(cwd,year))):\n print(year+'-'+month)\n newAuthors = os.listdir(os.path.join(cwd,year,month))\n for author in newAuthors:\n path = os.path.join(cwd,year,month,author)\n if author[:-4] in authors:\n with open(path, 'r') as f:\n monthData = list(csv.reader(f))[1:]\n f.close()\n\n for day in monthData:\n if (len(set(day[1:]))==1) and (list(set(day[1:]))[0]=='-1'):\n pass\n else:\n try:\n for i in range(1,len(day)):\n if day[i] not in ['-1', '0', '\\r']:\n dateString = year+'-'+month+'-'+day[0]+':'\n if i < 24:\n dateString += '{0:02d}'.format(i)\n dateObject=datetime.strptime(dateString,\\\n \"%Y-%m-%d:%H\")\n else:\n dateString += '23'\n dateObject=datetime.strptime(dateString,\\\n \"%Y-%m-%d:%H\")\n dateObject+=dt.timedelta(hours=1)\n\n if dateObject not in allData.keys():\n allData[dateObject] = [int(day[i])]\n else:\n allData[dateObject].append(int(day[i]))\n except Exception as e:\n print(e)\n print(year, month, author)\n print(day)\n\nkeys = sorted(allData.keys())\nplotData = []\nplotTimes = []\n\nfor key in keys:\n plotData.append(allData[key])\n plotTimes.append(key)\n\nwith open('/home/cwp/EMC/lib/analysis/variation/temporal/NS/NplotData.txt', 'w') as f:\n for item in plotData:\n f.write(','.join([str(x) for x in item]))\n f.write('\\n')\n f.close()\n\nwith open('/home/cwp/EMC/lib/analysis/variation/temporal/NS/NplotTimes.txt', 'w') as f:\n for item in plotTimes:\n f.write(str(item)+'\\n')\n f.close()\n","sub_path":"lib/analysis/variation/temporal/NS/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377943751","text":"\"\"\"\nSuccess\nDetails \nRuntime: 40 ms, faster than 76.12% of Python3 online submissions for Increasing Triplet Subsequence.\nMemory Usage: 13.6 MB, less than 6.80% of Python3 online submissions for Increasing Triplet Subsequence.\n\"\"\"\nclass Solution:\n def increasingTriplet(self, nums: 'List[int]') -> 'bool':\n if len(nums) < 3:\n return False\n i = float('inf') \n j = float('inf') \n ptr = 0\n while ptr < len(nums):\n if nums[ptr] < i:\n i = nums[ptr]\n elif i < nums[ptr] < j:\n j = nums[ptr]\n elif nums[ptr] > j:\n return True\n ptr += 1\n return False\n\n\ns = 
Solution()\nprint(s.increasingTriplet([1,2,3,4,5]))\nprint(s.increasingTriplet([5,4,3,2,1]))\nprint(s.increasingTriplet([5,1,5,5,2,5,4]))\n","sub_path":"M_334_increasingTriplet.py","file_name":"M_334_increasingTriplet.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"442451270","text":"'''\nAuthor @ Subhamoy Karmakar\n\nThis is the Module where the user is able to view the compliance target.\n\nInput:\n\nOutput:\n'''\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nimport NewContextDBOps as dbop\nimport sqlOperations\n\n\nclass contextViewContext(QWidget):\n def __init__(self):\n super(contextViewContext, self).__init__()\n self.configTypes = {}\n self.configDict = {}\n\n # contents\n self.tree = QTreeWidget()\n header = QTreeWidgetItem([\"Entry\", \"Type\"])\n self.tree.setHeaderItem(header)\n self.contextList = QComboBox()\n self.versionList = QComboBox()\n self.configVersion = QLineEdit()\n self.configVersion.setReadOnly(True)\n\n # listners\n self.contextList.currentIndexChanged.connect(self.contextChanged)\n self.versionList.currentIndexChanged.connect(self.loadContext)\n\n # layout\n vLayout = QVBoxLayout()\n hLayout = QHBoxLayout()\n hConfigLayout = QHBoxLayout()\n namelabel = QLabel('Select Compliance Target Name')\n namelabel.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n hLayout.addWidget(namelabel)\n hLayout.addWidget(self.contextList)\n versionlabel = QLabel('Select Version')\n versionlabel.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n hLayout.addWidget(versionlabel)\n hLayout.addWidget(self.versionList)\n configLabel = QLabel('Configuration Version')\n configLabel.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n hConfigLayout.addWidget(configLabel)\n hConfigLayout.addWidget(self.configVersion)\n vLayout.addLayout(hLayout)\n vLayout.addLayout(hConfigLayout)\n\n vLayout.addWidget(self.tree)\n\n self.setLayout(vLayout)\n\n def splitProc(self, idChain):\n if (idChain == ''):\n return None\n return idChain.split('/')[1:]\n\n def groupProc(self, splitProced):\n result = []\n if (len(splitProced) > 0):\n max_len = len(splitProced[-1][1])\n else:\n max_len = 0\n for i in range(max_len):\n tmpRes = []\n for item in splitProced:\n if (len(item[1]) > i + 1):\n break\n if (len(item[1]) == i + 1):\n tmpRes.append(item)\n if (len(tmpRes) > 0):\n result.append(tmpRes)\n return result\n\n def loadContextNames(self):\n contextNames = sqlOperations.getContextNames()\n self.contextList.clear()\n for contextName in contextNames:\n self.contextList.addItem(contextName)\n\n def loadVersions(self):\n versions = sqlOperations.getContextVersions(str(self.contextList.currentText()))\n self.versionList.clear()\n for versionNo in versions:\n self.versionList.addItem(versionNo)\n\n def loadContext(self):\n name = str(self.contextList.currentText())\n if name != '':\n self.configTypes = []\n version = str(self.versionList.currentText())\n configPresentVersion = sqlOperations.getContextConfigVersion(name, version)\n self.configVersion.setText(configPresentVersion)\n\n result = dbop.getCTEntryID(name, version, configPresentVersion)\n if result.__len__() < 0:\n return\n ctIds = result.keys()\n self.tree.clear()\n length = 2\n cnt = 0\n rKeys = sorted(result)\n dbop.loadTempContext(rKeys, result)\n self.insertIntoTree(rKeys, result, 0, self.tree, {})\n\n def insertIntoTree(self, rKey, result, currentId, treeParent, oldIDs):\n nextIDs = 
dbop.getChildNodes(currentId)\n if nextIDs.keys().__len__() == 0:\n return\n else:\n for id in nextIDs.keys():\n parent = QTreeWidgetItem(treeParent)\n parent.setText(0, nextIDs[id][0])\n parent.setText(1, nextIDs[id][1])\n parent.setExpanded(True)\n self.insertIntoTree(rKey, result, id, parent, nextIDs)\n\n def contextChanged(self):\n self.loadVersions()\n self.loadContext()\n\n def addChild(self, parent, column, title, data):\n item = QTreeWidgetItem(parent, [title])\n item.setData(column, Qt.UserRole, data)\n return item\n\n def addChildType(self, item, column, type):\n combo = QLabel(type)\n self.tree.setItemWidget(item, column, combo)\n return combo\n\n def refreshContent(self):\n self.loadContextNames()\n self.loadVersions()\n self.loadContext()\n","sub_path":"PHASE-1.1_IMPROVEMENT_PHASE/POLICOMP_TOOL/contextViewContext.py","file_name":"contextViewContext.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"575349745","text":"from lib.node import Node\nfrom lib.stack import Stack\n\nclass MinStack:\n def __init__(self):\n self.mins = Stack()\n self.items = Stack()\n\n def push(self, item):\n min = self.mins.peek()\n if min == None or item < min:\n self.mins.push(item)\n\n return self.items.push(item)\n\n def pop(self):\n item = self.items.pop()\n if item == self.mins.peek():\n self.mins.pop()\n\n return item\n\n def min(self):\n return self.mins.peek()\n\nstack = MinStack()\nstack.push(1)\nstack.push(2)\nstack.push(3)\nassert stack.min() == 1\nstack.push(-100)\nstack.push(4)\nassert stack.min() == -100\nstack.pop()\nstack.pop()\nassert stack.min() == 1\n","sub_path":"section3/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337115386","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 23 08:39:29 2020\n\n@author: tvanzyl\n\"\"\"\n\nimport pandas as pd\n\n\nfrom itertools import combinations\nimport pandas as pd\nfrom numba import jit\n\nf_sc = open('StripeCodes.txt')\nstr_sc = f_sc.readlines()\nf_sc.close()\n\nanimalsc = {}\nanimalsl = {}\n\n# Animal loop\nfor line in str_sc:\n if line.startswith(\"ANIMAL\"):\n animal = line.split()[1]\n code = line.split()[2]\n animalsc[animal] = animalsc.get(animal, {})\n animalsc[animal][code] = [[],[],[],[],[],[],[],[],[],[]]\n animalsl[animal] = animalsl.get(animal, {})\n animalsl[animal][code] = [[],[],[],[],[],[],[],[],[],[]]\n elif line.startswith(\"STRIPECODE\"):\n code_length = int(line.split()[1])\n elif line.startswith(\"stripestring\"):\n stripestring = int(line.split()[1])\n animalsc[animal][code][stripestring] = []\n animalsl[animal][code][stripestring] = []\n elif line.startswith(\"#\"):\n animalsc[animal][code][stripestring].append( int(float(line.split()[1])/255) )\n animalsl[animal][code][stripestring].append( float(line.split()[-1]) )\n\ndfc = pd.DataFrame(animalsc)\ndfc = dfc.unstack().dropna().reset_index()\ndfc = dfc.drop(columns=['level_1'])\n\ndfl = pd.DataFrame(animalsl)\ndfl = dfl.unstack().dropna().reset_index()\ndfl = dfl.drop(columns=['level_1'])\n\ndf = pd.read_csv('./stripecode_dist.csv',)\ndf.rename(columns={'Unnamed: 0':'idx','0':'a1','1':'c1','2':'a2','3':'c2','4':'dist'},inplace=True)\ndf.drop(columns=['idx'],inplace=True)\n\nimport numpy as np\n\nnp.random.seed(0)\nfor k in (1,5,10,20,50,100):\n tot = 0\n for l in range(30):\n p_idx = 
np.random.permutation(pd.unique(df.c1))\n test_idx = p_idx[:int(len(p_idx)*0.25)]\n train_idx = p_idx[int(len(p_idx)*0.25):]\n df_Q = df.loc[df.c1.isin(test_idx) & df.c2.isin(train_idx)]\n for i in test_idx:\n Q = df_Q.loc[df_Q.c1 == i].sort_values('dist')[0:k]\n tot += np.any(Q.a1 == Q.a2)\n\n print(\"top-\",k, \" accuracy \", tot/len(test_idx)/30)\n\nnp.random.seed(0)\ntot = 0\nfor l in range(30):\n p_idx = np.random.permutation(pd.unique(df.c1))\n test_idx = p_idx[:int(len(p_idx)*0.25)]\n train_idx = p_idx[int(len(p_idx)*0.25):]\n df_Q = df.loc[df.c1.isin(test_idx) & df.c2.isin(train_idx)]\n for i in test_idx:\n Q = df_Q.loc[df_Q.c1 == i].sort_values('dist')\n tot += 1/(np.argmin(Q.a1 != Q.a2)+1)\n\nprint(\"MRR \", tot/len(test_idx)/30)\n\nnp.random.seed(0)\nfor k in (1,5,10,20):\n tot = 0\n for l in range(100):\n p_idx = np.random.permutation(pd.unique(df.c1))\n test_idx = p_idx[:int(len(p_idx)*0.25)]\n train_idx = p_idx[int(len(p_idx)*0.25):]\n df_Q = df.loc[df.c1.isin(test_idx) & df.c2.isin(train_idx)]\n for i in test_idx:\n Q = df_Q.loc[df_Q.c1 == i][0:k]\n tot += np.any(Q.a1 == Q.a2)\n\n print(\"top-\",k, \" accuracy \", tot/len(test_idx)/100)\n\n\nnp.random.seed(0)\ntot = 0\nfor l in range(100):\n p_idx = np.random.permutation(pd.unique(df.c1))\n test_idx = p_idx[:int(len(p_idx)*0.25)]\n train_idx = p_idx[int(len(p_idx)*0.25):]\n df_Q = df.loc[df.c1.isin(test_idx) & df.c2.isin(train_idx)]\n for i in test_idx:\n Q = df_Q.loc[df_Q.c1 == i]\n tot += 1/(np.argmin(Q.a1 != Q.a2)+1)\n\nprint(\"MRR \", tot/len(test_idx)/100)\n\ns = 1945\nind = 237\nr = 8.21\n\nnp.random.seed(0)\nfor k in (1,5,10,20,50,100):\n tot = 0\n for l in range(100):\n test_idx = np.random.permutation( np.random.choice(range(ind),1945) )\n for i in test_idx:\n Q = test_idx[0:k]\n tot += np.isin(i, Q)\n\n print(\"top-\",k, \" accuracy \", tot/len(test_idx)/100)\n\n\nnp.random.seed(0)\ntot = 0\nfor l in range(100):\n test_idx = np.random.permutation( np.random.choice(range(ind),1945) )\n for i in test_idx:\n Q = test_idx\n tot += 1/(np.argmin(Q != i)+1)\n\nprint(\"MRR \", tot/len(test_idx)/100)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"stripe_codes/StripeCodes_MRR.py","file_name":"StripeCodes_MRR.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"317892273","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.paginator import Paginator\nfrom django.http import Http404\nfrom django.template.response import TemplateResponse\n\nfrom wagtail.admin.views import generic\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\n\ndef content_type_use(request, content_type_app_name, content_type_model_name):\n try:\n content_type = ContentType.objects.get_by_natural_key(\n content_type_app_name, content_type_model_name\n )\n except ContentType.DoesNotExist:\n raise Http404\n\n page_class = content_type.model_class()\n\n # page_class must be a Page type and not some other random model\n if not issubclass(page_class, Page):\n raise Http404\n\n pages = page_class.objects.all().specific(defer=True)\n\n paginator = Paginator(pages, per_page=10)\n pages = paginator.get_page(request.GET.get(\"p\"))\n\n return TemplateResponse(\n request,\n \"wagtailadmin/pages/content_type_use.html\",\n {\n \"pages\": pages,\n \"app_name\": content_type_app_name,\n \"content_type\": content_type,\n \"page_class\": page_class,\n },\n )\n\n\nclass 
UsageView(generic.UsageView):\n model = Page\n pk_url_kwarg = \"page_id\"\n header_icon = \"doc-empty-inverse\"\n\n def dispatch(self, request, *args, **kwargs):\n user_perms = UserPagePermissionsProxy(request.user)\n if not user_perms.for_page(self.object).can_edit():\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n","sub_path":"wagtail/admin/views/pages/usage.py","file_name":"usage.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"537825240","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n#Open file\r\nwith open(\"activity.csv\", \"r\") as file:\r\n csv_lines = file.read().split(\"\\n\")\r\n data = []\r\n for line in csv_lines[1:-1]:\r\n data.append(line.split(\",\"))\r\n\"\"\"PART C\"\"\"\r\ndef steps_per_day_func(data):\r\n #Finding the total steps per day\r\n current_day = \"\"\r\n total = 0\r\n total_per_day = []\r\n for i in data:\r\n if i[1] != current_day:\r\n total_per_day.append([current_day, total])\r\n current_day = i[1]\r\n total = 0\r\n if i[0] != \"NA\":\r\n total += int(i[0])\r\n # Append the final day's total, which the loop itself never reaches\r\n total_per_day.append([current_day, total])\r\n total_per_day = total_per_day[1:]\r\n return total_per_day\r\n\r\ndef mean_and_median(steps_per_day):\r\n #Mean and Median\r\n total_steps = 0\r\n for i in steps_per_day:\r\n total_steps += i[1]\r\n mean = total_steps / len(steps_per_day)\r\n median = sorted(steps_per_day, key = lambda x:x[1])[len(steps_per_day) // 2]\r\n return mean, median[1]\r\n\r\ndef histogram(steps_per_day):\r\n #Histogram\r\n day = []\r\n total_steps = []\r\n for steps in steps_per_day:\r\n day.append(steps[0])\r\n total_steps.append(steps[1])\r\n\r\n hist_df = pd.DataFrame({\r\n 'days': day, \r\n 'total steps': total_steps\r\n })\r\n draw = hist_df.hist()\r\n \r\ndef counting_nas(data):\r\n #Calculating the number of NAs\r\n total_nas = 0\r\n for i in data:\r\n if i[0] == \"NA\":\r\n total_nas += 1\r\n return total_nas\r\n\r\n\r\ndef fix_dataset(data):\r\n #We will fill in the NAs with the average steps taken on that interval on other days instead\r\n for i in range(len(data)):\r\n if data[i][0] == \"NA\":\r\n total_steps_same_interval = 0\r\n same_interval_counter = 0\r\n for j in range(len(data)):\r\n if data[j][0] != \"NA\":\r\n if data[j][2] == data[i][2]:\r\n total_steps_same_interval += int(data[j][0])\r\n same_interval_counter += 1\r\n try:\r\n data[i][0] = total_steps_same_interval // same_interval_counter\r\n except:\r\n data[i][0] = 0\r\n print(data[i])\r\n\r\nprint(\"NUMBER OF NAs\")\r\nprint(counting_nas(data))\r\n\r\nprint(\"\\n\\nFIXING NAs\")\r\nprint(fix_dataset(data))\r\n\r\nsteps_per_day = steps_per_day_func(data)\r\n\r\nprint(\"\\n\\nMEAN AND MEDIAN\")\r\nnew_mean_n_median = mean_and_median(steps_per_day)\r\nprint(\"mean: {} \\nmedian: {}\".format(new_mean_n_median[0], new_mean_n_median[1]))\r\n\r\nprint(\"\\n\\nHISTOGRAM\")\r\nhistogram(steps_per_day)\r\n","sub_path":"partC.py","file_name":"partC.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"213702629","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Benjamin F Jones'\nSITENAME = u'Benjamin F Jones'\nSITEURL = 'http://benjaminfjones.github.io'\n\nTIMEZONE = 'America/Los_Angeles'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when 
developing\n#FEED_ALL_ATOM = None\n#CATEGORY_FEED_ATOM = None\n#TRANSLATION_FEED_ATOM = None\n\nDEFAULT_PAGINATION = False\nRELATIVE_URLS = False\nFEED_ALL_ATOM = 'feeds/all.atom.xml'\nCATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'\n\n# Social widget\nSOCIAL = (('Twitter', 'https://twitter.com/BenjaminFJones'),\n ('Tumblr', 'http://benjaminfjones.tumblr.com/'),)\n\n# Static content directories\nSTATIC_PATHS = ['images', 'pdf', 'talks']\n\n# Theme & theme settings\nTHEME = 'themes/chunk'\n\nSITESUBTITLE = 'e7d79f27ced664077a6a4792e53023389e0a48b4'\nFOOTER_TEXT = 'This site powered by electrons'\nDISPLAY_CATEGORIES_ON_MENU = False\nGOOGLE_ANALYTICS = 'UA-40935706-1'\nDISQUS_SITENAME = 'benjaminfjones'\nTWITTER_USERNAME = 'Benjaminfjones'\nGITHUB_URL = 'https://github.com/benjaminfjones'\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"470121406","text":"#! /usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n# This file is part of the desktop management solution opsi\n# (open pc server integration) http://www.opsi.org\n\n# Copyright (C) 2010-2019 uib GmbH \n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nopsi python library - setup file\n\n@copyright:\tuib GmbH \n@author: Christian Kampka \n@author: Niko Wenselowski \n@license: GNU Affero General Public License version 3\n\"\"\"\n\nfrom setuptools import setup, find_packages\nimport codecs\nimport os\n\nLANGUAGES = ['da', 'de', 'es', 'en', 'fr', 'it', 'nl', 'pl', 'ru']\n\nwith codecs.open(os.path.join(\"debian\", \"changelog\"), 'r', 'utf-8') as changelog:\n\tVERSION = changelog.readline().split('(')[1].split('-')[0]\n\nif not VERSION:\n\traise ValueError(u\"Failed to get version info\")\n\n# Always set __version__ in OPSI.__init__.py to the version found in\n# the changelog to make sure the version is always up-to-date\n# and nobody needs to manually update it.\ninitFilePath = os.path.join('OPSI', '__init__.py')\nnewInitLines = []\nwith open(initFilePath) as originalFile:\n\tfor line in originalFile:\n\t\tif line.startswith('__version__'):\n\t\t\tnewInitLines.append(\"__version__ = '{0}'\\n\".format(VERSION))\n\t\t\tcontinue\n\n\t\tnewInitLines.append(line)\n\nwith open(initFilePath, 'w') as newInitFile:\n\tnewInitFile.writelines(newInitLines)\nprint(\"Patched version {1!r} from changelog into {0}\".format(initFilePath, VERSION))\n\ndata_files = [\n\t(\n\t\t'/etc/opsi/backendManager',\n\t\t[\n\t\t\t'data/backendManager/acl.conf.example',\n\t\t\t'data/backendManager/dispatch.conf.example'\n\t\t]\n\t),\n\t(\n\t\t'/etc/opsi/backendManager/extend.d',\n\t\t[\n\t\t\t'data/backendManager/extend.d/10_opsi.conf',\n\t\t\t'data/backendManager/extend.d/10_wim.conf',\n\t\t\t'data/backendManager/extend.d/20_legacy.conf',\n\t\t\t'data/backendManager/extend.d/20_easy.conf',\n\t\t\t'data/backendManager/extend.d/30_kiosk.conf',\n\t\t\t'data/backendManager/extend.d/30_sshcommands.conf',\n\t\t\t'data/backendManager/extend.d/40_admin_tasks.conf',\n\t\t\t'data/backendManager/extend.d/40_groupActions.conf',\n\t\t\t'data/backendManager/extend.d/45_deprecated.conf',\n\t\t\t'data/backendManager/extend.d/70_dynamic_depot.conf',\n\t\t\t'data/backendManager/extend.d/70_wan.conf',\n\t\t]\n\t),\n\t(\n\t\t'/etc/opsi/backends/',\n\t\t[\n\t\t\t'data/backends/dhcpd.conf',\n\t\t\t'data/backends/file.conf',\n\t\t\t'data/backends/jsonrpc.conf',\n\t\t\t'data/backends/mysql.conf',\n\t\t\t'data/backends/sqlite.conf',\n\t\t\t'data/backends/hostcontrol.conf',\n\t\t\t'data/backends/opsipxeconfd.conf'\n\t\t]\n\t),\n\t(\n\t\t'/etc/opsi/',\n\t\t[\n\t\t\t'data/server_commands_default.conf',\n\t\t\t'data/opsi.conf'\n\t\t]\n\t),\n\t(\n\t\t'/etc/opsi/hwaudit/',\n\t\t['data/hwaudit/opsihwaudit.conf']\n\t),\n\t(\n\t\t'/etc/opsi/hwaudit/locales',\n\t\t[\n\t\t\t'data/hwaudit/locales/da_DA',\n\t\t\t'data/hwaudit/locales/de_DE',\n\t\t\t'data/hwaudit/locales/en_US',\n\t\t\t'data/hwaudit/locales/es_ES',\n\t\t\t'data/hwaudit/locales/fr_FR',\n\t\t\t'data/hwaudit/locales/nl_NL',\n\t\t\t'data/hwaudit/locales/ru_RU',\n\t\t]\n\t),\n]\n\nfor language in LANGUAGES:\n\tlanguageFile = os.path.join('gettext', 'python-opsi_{0}.po'.format(language))\n\tif not os.path.exists(languageFile):\n\t\tprint(\"Can't find localisation file {0}. 
Skipping.\".format(languageFile))\n\t\tcontinue\n\n\toutput_path = os.path.join('locale', language, 'LC_MESSAGES')\n\tif not os.path.exists(output_path):\n\t\tos.makedirs(output_path)\n\n\ttarget_file = os.path.join(output_path, 'python-opsi.mo')\n\texitCode = os.system(\n\t\t'msgfmt --output-file {outputfile} {langFile}'.format(\n\t\t\tlangFile=languageFile,\n\t\t\toutputfile=target_file\n\t\t)\n\t)\n\tif not exitCode:\n\t\tdata_files.append(\n\t\t\t('/usr/share/locale/%s/LC_MESSAGES' % language, [target_file])\n\t\t)\n\telse:\n\t\tprint('Generating locale for \"{0}\" failed. Is gettext installed?'.format(language))\n\n\nsetup(\n\tname='python-opsi',\n\tversion=VERSION,\n\tlicense='AGPL-3',\n\turl=\"http://www.opsi.org\",\n\tdescription='The opsi python library',\n\tpackages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n\tdata_files=data_files,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119501038","text":"#!/usr/bin/env python\n\n'''Demonstrates basic use of IncrementalTextLayout and Caret.\n\nA simple widget-like system is created in this example supporting keyboard and\nmouse focus.\n'''\n\n__docformat__ = 'restructuredtext'\n__version__ = '$Id: $'\n\nimport pyglet\n\nclass Rectangle(object):\n '''Draws a rectangle into a batch.'''\n def __init__(self, x1, y1, x2, y2, batch):\n self.vertex_list = batch.add(4, pyglet.gl.GL_QUADS, None,\n ('v2i', [x1, y1, x2, y1, x2, y2, x1, y2]),\n ('c4B', [200, 200, 220, 255] * 4)\n )\n\nclass TextWidget(object):\n def __init__(self, text, x, y, width,height, batch):\n self.document = pyglet.text.document.UnformattedDocument(text)\n self.document.set_style(0, len(self.document.text), \n dict(color=(0, 0, 0, 255))\n )\n font = self.document.get_font()\n self.layout = pyglet.text.layout.IncrementalTextLayout(\n self.document, width, height, multiline=True, batch=batch)\n self.caret = pyglet.text.caret.Caret(self.layout)\n\n self.layout.x = x\n self.layout.y = y\n\n #Rectangular outline\n pad = 2\n self.rectangle = Rectangle(x - pad, y - pad, \n x + width + pad, y + height + pad, batch)\n\n \n","sub_path":"User Interface/Pre-13thMarch/older_versions/bciuiv7/user_input.py","file_name":"user_input.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"572889390","text":"from django.shortcuts import render, redirect\nimport Game\nimport Game_data\nimport random\nfrom django.http import Http404\nfrom django.conf import settings\n\ndef titlescreen(request):\n\treturn render(request, \"titlescreen.html\",\n\t{'commands':{'btn_a':'worldmap/?key=new_game', 'btn_b':'options/load_game/', 'btn_start':'#', 'btn_select':'#',\n\t\t'btn_up':'#', 'btn_down':'#', 'btn_left':'#', 'btn_right':'#'\n\t\t}})\n\n\ndef location(pos):\n\tif pos is None:\n\t\treturn '#'\n\telse:\n\t\treturn \"/worldmap?x={}&y={}&\".format(pos[0], pos[1])\n\ndef worldmap(request):\n\tkey = request.GET.get('key', '')\n\tx = int(request.GET.get('x', -1))\n\ty = int(request.GET.get('y', -1))\n\n\tball_got = False\n\tmovie_got = False\n\tbtn_a = '#'\n\tmoviemon_id = ''\n\tmy_info = Game.Game()\n\n\tif key == 'new_game':\n\t\tmy_info.load_default_settings()\n\t\tx, y = my_info.player.position()\n\telse:\n\t\tmy_info.load_data(my_info.load_cache())\n\n\t\tif x == -1 and y == -1:\n\t\t\tx, y = my_info.player.position()\n\t\t\tx = 
my_info.player.x_position()\n\t\t\ty = my_info.player.y_position()\n\n\t\tif x != my_info.player.x_position() or y != my_info.player.y_position():\n\t\t\tif my_info.battle_status() == True:\n\t\t\t\tmy_info.battle_end()\n\t\t\tevent = random.randint(0, 5)\n\t\t\tif event == 0 or event == 1:\n\t\t\t\tmy_info.movie_balls += 1\n\t\t\t\tball_got = True\n\t\t\telif event == 2 and len(my_info.movie.moviedex) > len(my_info.movie.captured):\n\t\t\t\tmy_info.battle_start()\n\t\tif my_info.battle_status() == True:\n\t\t\tmoviemon_id = my_info.get_random_movie()\n\t\t\tbtn_a = '../battle/' + moviemon_id[0]\n\t\t\tmovie_got = True\n\n\t\tmy_info.player.pos_x = x\n\t\tmy_info.player.pos_y = y\n\n\tmy_info.dump_cache(my_info.dump_data())\n\n\treturn render(request, \"worldmap.html\",\n\t{'commands':{\n\t\t'btn_a':btn_a, \n\t\t'btn_b':'#', \n\t\t'btn_start':'../options/', \n\t\t'btn_select':'../moviedex/',\n\t\t'btn_up':location(my_info.move_player('Up')), \n\t\t'btn_down':location(my_info.move_player('Down')),\n\t\t'btn_left':location(my_info.move_player('Left')), \n\t\t'btn_right':location(my_info.move_player('Right'))\n\t\t},\n\t\t'my_location_x':my_info.player.x_position(), \n\t\t'my_location_y':my_info.player.y_position(),\n\t\t'map_size_x':range(0, my_info.world.grid_x), \n\t\t'map_size_y':range(0, my_info.world.grid_y),\n\t\t'ball_count':my_info.movie_balls, \n\t\t'ball_got':ball_got, \n\t\t'movie_got':movie_got\n\t\t})\n\ndef battle(request, moviemon_id):\n\tif not moviemon_id in settings.MOVIES or moviemon_id == None:\n\t\traise Http404(\"Moviemon_id Error\")\n\n\tmy_info = Game.Game()\n\tmy_info.load_data(my_info.load_cache())\n\n\tresult = request.GET.get('result', 'first')\n\tbtn_a = '#'\n\tmovie_info = my_info.get_movie(moviemon_id)\n\n\tif result == 'first':\n\t\tmention_A = 'Press A! 
You Can catch that!'\n\t\tmention_C = ''\n\t\tresult = None\n\t\tbtn_a = './' + moviemon_id + '?result=' + str(result)\n\telif result == 'gotcha':\n\t\tmention_A = ''\n\t\tmention_C = 'Gotcha!'\n\telse:\n\t\tresult = my_info.player_Attack(moviemon_id)\n\t\tif result == None:\n\t\t\tmention_A = 'Go Away!'\n\t\t\tmention_C = 'Your Ball is empty.'\n\t\t\tbtn_a = './' + moviemon_id + '?result=' + str(result)\n\t\telif result == True:\n\t\t\t# mention_A = ''\n\t\t\t# mention_C = 'Gotcha!'\n\t\t\t# btn_a = './' + moviemon_id + '?result=gotcha'\n\t\t\tmy_info.battle_end()\n\t\t\tmy_info.dump_cache(my_info.dump_data())\n\t\t\treturn redirect('./' + moviemon_id + '?result=gotcha')\n\t\telif result == False:\n\t\t\tmention_A = 'A - Launch Movieball'\n\t\t\tmention_C = 'Unfortunately missed!'\n\t\t\tbtn_a = './' + moviemon_id + '?result=' + str(result)\n\n\tmy_info.battle_end()\n\tmy_info.dump_cache(my_info.dump_data())\n\n\treturn render(request, \"battle.html\",\n\t{'commands':{'btn_a':btn_a, \n\t'btn_b':'../worldmap', \n\t'btn_start':'#', \n\t'btn_select':'#',\n\t\t'btn_up':'#', \n\t\t'btn_down':'#', \n\t\t'btn_left':'#', \n\t\t'btn_right':'#'\n\t\t},'mention_A':mention_A, \n\t\t'mention_C':mention_C, \n\t\t'balls' : my_info.movie_balls,\n\t\t'player_strength':my_info.get_strength(), \n\t\t'movie_strength':int(float(movie_info['imdbRating']) * 10),\n\t\t'rate':my_info.player.percentage(float(movie_info['imdbRating'])),\n\t\t'image':movie_info['Poster'], \n\t\t'title':movie_info['Title'], \n\t\t'imdbRating':movie_info['imdbRating'],\n\t\t})\n\ndef moviedex(request):\n\tmy_info = Game.Game()\n\tmy_info.load_data(my_info.load_cache())\n\tkey = int(request.GET.get('key', 0))\n\tpost_list = []\n\ttitle_list = []\n\tshow_list = []\n\tmoviemon_id = ''\n\tbtn_a = 'detail/'\n\n\tprint(my_info.cache['moviedex'])\n\n\tmovie_count = len(my_info.movie.captured)\n\n\tif movie_count == 0:\n\t\tkey = 0\n\t\tbtn_a = '#'\n\telif movie_count == 1:\n\t\tshow_list = [0]\n\t\tkey = 0\n\telif movie_count == 2:\n\t\tkey %= 2\n\t\tif key == 0:\n\t\t\tshow_list = [0, 1]\n\t\telse:\n\t\t\tshow_list = [1, 0]\n\telif movie_count >= 3:\n\t\tfor i in range(key-1, key+2):\n\t\t\tif i < 0:\n\t\t\t\ti += movie_count\n\t\t\telif i >= movie_count:\n\t\t\t\ti -= movie_count\n\t\t\tshow_list.append(i)\n\t\tif key < 0:\n\t\t\tkey += movie_count\n\t\telif key >= movie_count:\n\t\t\tkey -= movie_count\n\tfor i in show_list:\n\t\tid = my_info.movie.captured[i]\n\t\tpost_list.append(my_info.movie.moviedex[id]['Poster'])\n\t\ttitle_list.append(my_info.movie.moviedex[id]['Title'])\n\tif movie_count > 0:\n\t\tmoviemon_id = my_info.movie.captured[key]\n\n\treturn render(request, \"moviedex.html\",\n\t{'commands':{'btn_a':btn_a + moviemon_id + '?key=' + str(key), 'btn_b':'#', 'btn_start':'#', 'btn_select':'../worldmap/',\n\t\t'btn_up':'#', 'btn_down':'#', 'btn_left':'./?key=' + str(key - 1), 'btn_right':'./?key=' + str(key + 1)\n\t\t},'post_list':post_list, 'title_list':title_list, 'movie_count':movie_count\n\t\t})\n\ndef detail(request, moviemon_id):\n\tif not moviemon_id in settings.MOVIES or moviemon_id == None:\n\t\traise Http404(\"Moviemon_id Error\")\n\n\tkey = int(request.GET.get('key', 0))\n\tmy_info = Game.Game()\n\tmovie_info = my_info.get_movie(moviemon_id)\n\n\treturn render(request, \"detail.html\",\n\t{'commands':{'btn_a':'#', 'btn_b':'../?key='+ str(key), 'btn_start':'#', 'btn_select':'#', 'btn_up':'#', 'btn_down':'#', 'btn_left':'#', 'btn_right':'#'\n\t\t},'image':movie_info['Poster'], 'title':movie_info['Title'], 
'year':movie_info['Year'],\n\t\t'genre':movie_info['Genre'],'imdbRating':float(movie_info['imdbRating']),'plot':movie_info['Plot'],\n\t\t'director':movie_info['Director'], 'actors':movie_info['Actors']\n\t\t})\n\ndef option(request):\n\treturn render(request, \"option.html\",\n\t{'commands':{'btn_a':'save_game/', \n\t'btn_b':'../', \n\t'btn_start':'../worldmap', \n\t'btn_select':'#',\n\t\t'btn_up':'#', \n\t\t'btn_down':'#', \n\t\t'btn_left':'#', \n\t\t'btn_right':'#'\n\t\t}})\n\ndef save(request):\n\tindicator = request.GET.get('indicator', None)\n\tkey = request.GET.get('key', None)\n\tsave_slot = bool(request.GET.get('save_slot', False))\n\n\tif indicator == None:\n\t\tindicator = 'A'\n\telif indicator == 'B' and key == 'Up' or indicator == 'C' and key == 'Down':\n\t\tindicator = 'A'\n\telif indicator == 'C' and key == 'Up' or indicator == 'A' and key == 'Down':\n\t\tindicator = 'B'\n\telif indicator == 'A' and key == 'Up' or indicator == 'B' and key == 'Down':\n\t\tindicator = 'C'\n\n\tif save_slot == True:\n\t\tGame_data.GameData.save(indicator)\n\n\tsav_lst = Game_data.GameData.get_save_list()\n\n\treturn render(request, \"save.html\",\n\t{'commands':{'btn_a':'./?save_slot=True&indicator=' + indicator, \n\t'btn_b':'../', \n\t'btn_start':'#', \n\t'btn_select':'#',\n\t\t'btn_up':'./?key=Up&indicator=' + indicator,\n\t\t'btn_down':'./?key=Down&indicator=' + indicator, \n\t\t'btn_left':'#', \n\t\t'btn_right':'#'\n\t\t},'indicator':indicator, \n\t\t'sav_lst':sav_lst\n\t\t})\n\ndef load(request):\n\tindicator = request.GET.get('indicator', None)\n\tkey = request.GET.get('key', None)\n\tload_slot = bool(request.GET.get('load_slot', False))\n\n\tif indicator == None:\n\t\tindicator = 'A'\n\telif indicator == 'B' and key == 'Up' or indicator == 'C' and key == 'Down':\n\t\tindicator = 'A'\n\telif indicator == 'C' and key == 'Up' or indicator == 'A' and key == 'Down':\n\t\tindicator = 'B'\n\telif indicator == 'A' and key == 'Up' or indicator == 'B' and key == 'Down':\n\t\tindicator = 'C'\n\n\tsav_lst = Game_data.GameData.get_save_list()\n\n\tif sav_lst[ord(indicator) - ord('A')] == 'Free':\n\t\tbtn_a = './?indicator=' + indicator\n\telse:\n\t\tbtn_a = './?load_slot=True&indicator=' + indicator\n\n\tif load_slot == True:\n\t\tif Game_data.GameData.load(indicator) == True:\n\t\t\tbtn_a = '/worldmap/?key=load_game'\n\n\n\n\treturn render(request, \"load.html\",\n\t{'commands':{'btn_a':btn_a, 'btn_b':'/', \n\t'btn_start':'#', \n\t'btn_select':'#',\n\t\t'btn_up':'./?key=Up&indicator=' + indicator, \n\t\t'btn_down':'./?key=Down&indicator=' + indicator,\n\t\t'btn_left':'#', \n\t\t'btn_right':'#'\n\t\t},'indicator':indicator, \n\t\t'load_slot':load_slot, \n\t\t'sav_lst':sav_lst\n\t\t})\n\ndef error404(request, exception):\n\treturn render(request, \"404.html\", {'exception':exception})\n","sub_path":"moviemon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507968249","text":"# ***********************************************************************************************\n# ***********************************************************************************************\n#\n#\t\tName : \t\tmakewordfile.py\n#\t\tPurpose : \tCopy word files to temp, adding @macro and @word processing.\n#\t\tAuthor :\tPaul Robson (paul@robsons.org.uk)\n#\t\tCreated : \t21st October 2018\n#\n# ***********************************************************************************************\n# 
***********************************************************************************************\n\nimport os,sys,re\n\nprint(\"Constructing word files.\")\n#\n#\tFind all source files\n#\nsources = []\nfor root,dirs,files in os.walk(\"words\"):\n\tfor f in [x for x in files if x[-4:] == \".asm\"]:\n\t\tsources.append([root,f])\n#\n#\tCopy each file in turn, looking for words/macros\n#\nincludes = []\nfor src in sources:\n\tsourceFile = src[0]+os.sep+src[1]\n\ttargetFile = \"temp\"+os.sep+src[1]\n\t#print(\"\\t\"+sourceFile+\" => \"+targetFile)\n\tincludes.append(src[1])\n\th = open(targetFile,\"w\")\n\tfor l in open(sourceFile).readlines():\n\t\tl = l.rstrip()\n\t\tif l != \"\" and l[0] == \";\" and l.find(\"@\") > 0:\n\t\t\tm = re.match(\"^\\;\\s+\\@(\\w+)\\s*(.*)$\",l)\n\t\t\tif m is not None:\n\t\t\t\twtype = m.group(1).lower()\n\t\t\t\tname = m.group(2).lower()\n\t\t\t\tassert wtype==\"forth\" or wtype==\"macro\"\n\t\t\t\tif wtype == \"macro\":\n\t\t\t\t\tname = name + \"/macro\"\n\t\t\t\tscramblename = \"__cfdefine_\"+\"_\".join([\"{0:02x}\".format(ord(c)) for c in name])\n\t\t\t\th.write(scramblename+\":\\n\")\t\t\t\n\t\t\t\th.write(l+\"\\n\")\n\t\telse:\n\t\t\th.write(l+\"\\n\")\n\th.close()\n#\n#\t\tCreate include file\n#\nincludes.sort()\nh = open(\"temp\"+os.sep+\"include.asm\",\"w\")\nh.write(\"\".join([\"\\tinclude \\\"{0}\\\"\\n\".format(x) for x in includes]))\nh.close()\n\n","sub_path":"old-flat-color-forth/kernel/makewordfiles.py","file_name":"makewordfiles.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"55886615","text":"from typing import List\nclass Solution:\n def checkPossibility(self, nums: List[int]) -> bool:\n lenNums = len(nums)\n if(lenNums<=2):\n return True\n excep = False\n if(nums[0] > nums[1]):\n excep = True\n for i in range(2,lenNums):\n if(nums[i] < nums[i-1]):\n if(excep): return False\n elif(i == lenNums - 1): return True\n else:\n excep = True\n if(nums[i-2] > nums[i] and nums[i+1] < nums[i-1]):\n return False\n return True\n\n'''\n\nGiven an integer array of length n, determine whether the array can become non-decreasing by changing at most 1 element.\n\nWe define a non-decreasing array as follows: for every i in the array (0 <= i <= n-2), nums[i] <= nums[i + 1] always holds.\n\n \n\nExample 1:\n\nInput: nums = [4,2,3]\nOutput: true\nExplanation: You could change the first 4 to 1 to make it a non-decreasing array.\nExample 2:\n\nInput: nums = [4,2,1]\nOutput: false\nExplanation: You cannot make it a non-decreasing array by changing only one element.\n \n\nConstraints:\n\n1 <= n <= 10 ^ 4\n- 10 ^ 5 <= nums[i] <= 10 ^ 5\n'''","sub_path":"665. 非递减数列/pythonCode.py","file_name":"pythonCode.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"402129441","text":"import discord, re, datetime\nfrom math import floor\nfrom services.config import IGNORE_MESSAGE_TIME, STRIP\n\nclass UserError(Exception):\n def __init__(self, message=\"Invalid Input\", no_cooldown=False):\n self.message = message\n # No cooldown, this usually doesn't matter but can be used to say that an error can be used to stop cooldown. Must be implemented when relevant.\n self.no_cooldown = no_cooldown\n\n# The regex used to recognize if a word is an external emoji\nparseEmojis = re.compile(r\"(<:[\\w]+:\\d+>)\")\n\n\ndef parse_time_to_seconds(raw_time):\n r\"\"\"Parses a time string to seconds. 
Takes a form such as \"6h\".\n Returns an integer representing the total time in seconds.\n Parameters\n ----------\n raw_time : string\n Should be in form: \\d+[smhd], an integer followed by a unit, either s, m, h, or d.\n \"\"\"\n units = (\"d\", \"h\", \"m\", \"s\")\n try:\n seconds = int(raw_time[:-1])\n except ValueError:\n raise UserError(\"Invalid time duration.\")\n unit = raw_time[-1].lower()\n if unit not in units:\n raise UserError(\"Invalid time unit\")\n if unit == \"d\":\n seconds *= 86400\n elif unit == \"h\":\n seconds *= 3600\n elif unit == \"m\":\n seconds *= 60\n return seconds\n\ndef parse_seconds_to_time(raw_seconds, show_seconds=False):\n r\"\"\"Parses an integer into a human-readable string of days, hours, minutes, and optionally seconds.\n If the time given is less than a minute then seconds are shown anyway.\n Returns a string.\n Parameters\n ----------\n raw_seconds : integer\n The seconds to parse\n show_seconds : bool, default=False\n If seconds should be displayed\n \"\"\"\n output = \"\"\n time_added = False\n if raw_seconds < 0:\n output += \"-\"\n raw_seconds *= -1\n elif raw_seconds < 1:\n return \"No time\"\n \n if raw_seconds > 86400:\n time_added = True\n unit = floor(raw_seconds / 86400)\n output += str(unit) + \" day\"\n if unit > 1:\n output += \"s, \"\n else:\n output += \", \"\n raw_seconds %= 86400\n if raw_seconds > 3600:\n time_added = True\n unit = floor(raw_seconds / 3600)\n output += str(unit) + \" hour\"\n if unit > 1:\n output += \"s, \"\n else:\n output += \", \"\n raw_seconds %= 3600\n if raw_seconds > 60:\n time_added = True\n unit = floor(raw_seconds / 60)\n output += str(unit) + \" minute\"\n if unit > 1:\n output += \"s, \"\n else:\n output += \", \"\n raw_seconds %= 60\n if show_seconds or not time_added:\n unit = round(raw_seconds)\n output += str(unit) + \" second\"\n if unit > 1:\n output += \"s, \"\n else:\n output += \", \"\n return output[:-2]\n\ndef parse_bool(in_bool):\n r\"\"\"Parses a string to decide if it is true or false.\n Defaults to true unless input matches \"false\", \"0\", \"no\".\n Case insensitive.\n Parameters\n ----------\n in_bool : string\n The string to be parsed to see if it is true or false.\n \"\"\"\n falseValues = (\"false\", \"0\", \"no\")\n return in_bool.lower() not in falseValues\n\nasync def collect_messages(\n ctx, one_channel, timestamp, stopwords, case_insensitive = True, until_last_user_msg = False\n):\n \"\"\"Collects messages from a discord server from within a time period.\n Returns a frequency dictionary with its findings.\n Parameters\n ----------\n ctx : discord.ext.commands.Context\n The context that the command is being run from.\n Is used to get the current channel, and server if necessary.\n one_channel : bool\n Determines if only a single channel's history should be grabbed,\n or every channel in the server that the bot can access.\n timestamp : datetime.datetime\n The datetime that the bot should look forward from\n stopwords : list[string] or set[string]\n A list of words that should be left out of the word count if matched.\n case_insensitive : bool, default=True\n If the messages should be case sensitive or not.\n until_last_user_msg : bool, default=False\n If the message collection should end when the next message from the user is found\n \"\"\"\n # Getting the channels that should be grabbed from\n if one_channel or ctx.guild is None: # If the message isn't in a server just grab current channel\n histories = [ctx.history]\n else:\n histories = [i.history for i in list(filter(\n 
lambda i:type(i) is discord.TextChannel and i.permissions_for(ctx.me).read_messages,\n ctx.guild.channels))]\n words = dict()\n time_now = datetime.datetime.utcnow()\n # Default time_back of 0\n # This will be set to a larger value only if until_last_user_msg is True\n time_back = datetime.timedelta()\n for hist in histories:\n async for msg in hist(limit=None, after=timestamp, oldest_first=False):\n if msg.author is not ctx.me:\n # Since I can't tell when the last message will be this is calculated for every\n # message. Efficient.\n # If only looking until the user's last message, stop looking if they're the author\n if(\n until_last_user_msg and\n msg.author == ctx.message.author and\n msg.created_at < time_now - datetime.timedelta(\n seconds = parse_time_to_seconds(IGNORE_MESSAGE_TIME)\n )\n ):\n time_back = time_now - msg.created_at\n break\n else:\n # clean_content parses @'s and #'s to be readable names, while content doesn't.\n add_frequency(words, msg.clean_content, stopwords, case_insensitive)\n if until_last_user_msg:\n return (words, time_back)\n else:\n return words\n\ndef add_frequency(freq_dict, text, stopwords, case_insensitive):\n r\"\"\"Adds the frequency of words inside the given string to a dict.\n Strips characters at the start and end as defined by\n config.STRIP\n Ignores words longer than 20 characters unless they're of the emoji format.\n Parameters\n ----------\n freq_dict : dict\n The dictionary that these values should be added to.\n text : string\n The string that should be parsed\n stopwords : list[string] or set[string]\n A list of words that should be left out of the word count if matched.\n case_insensitive : bool\n If the frequency should be case sensitive or not\n \"\"\"\n MAXLEN = 20\n # A dictionary of words, each word having an integer value of its frequency\n # Adds the frequency to an existing set, pass an empty dict() to start with.\n if not text.startswith(\".\"):\n for word in text.split():\n if case_insensitive:\n word = word.lower()\n word = word.strip(STRIP)\n # Testing if the word is emojis\n emojis = parseEmojis.findall(word)\n if len(emojis) > 0:\n for emoji in emojis:\n add_dict(freq_dict, emoji)\n elif word not in stopwords and len(word) <= MAXLEN:\n add_dict(freq_dict, word)\n\ndef add_dict(freq_dict, word):\n \"\"\"Adds to a frequency dictionary\n Used by add_frequency, but add_frequency has all the logic to only add good words\n \"\"\"\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\ndef check_perms(ctx, perms):\n \"\"\"Checks if the discord bot has all the required permissions in the given channel.\n Returns true if the bot has all permissions required,\n or if the context takes place within DM's where permissions don't apply.\n Parameters\n ----------\n ctx : discord.ext.commands.Context\n The context that the command is being run in.\n Used to get the channel, and check if the command is being run in a server.\n perms : discord.Permissions\n The set of permissions that the bot requires.\n Only values explicitly defined are checked.\n \"\"\"\n # Checks that all permissions are present in context's channel, if the channel is part of a guild (server)\n return type(ctx.me) is not discord.Member or ctx.channel.permissions_for(ctx.me).is_superset(perms)\n\ndef merge_dicts(dict_one, dict_two):\n \"\"\"Merges two dicts\n Recursively merges sub-dicts, rather than overwriting them at the top level as update() does.\n Parameters\n ----------\n dict_one : dict\n The dictionary for values to be merged into. 
This dictionary is written to.\n dict_two : dict\n The dictionary for values to be read from.\n \"\"\"\n # Merges dict_two into dict_one, merging dicts and only overwriting values with the same name:\n for key, val in dict_two.items():\n if type(val) is dict and key in dict_one and type(dict_one[key]) is dict:\n merge_dicts(dict_one[key], val)\n else:\n dict_one[key] = val\n \n ","sub_path":"services/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"420674885","text":"#!/usr/bin/env python3\nimport os\nimport argparse\nimport shutil\nimport sys\nimport zipfile\n\nparser = argparse.ArgumentParser(description='Compile Java source files with special tags into a version with new content')\nparser.add_argument('indir', metavar='IN',\n help='project directory containing the source files')\nparser.add_argument('outdir', metavar='OUT',\n help='output folder')\nparser.add_argument('-n', '--name', dest=\"name\", action=\"store\",\n help='name for the project. default is the basename of IN')\nparser.add_argument('-c', '--clear', dest=\"clear\", action=\"store_true\",\n help='delete OUT before creating project versions')\nparser.add_argument('-to', '--tag-open', dest=\"tag_open\", action=\"store\", default='jml*',\n help='opening tag for jml comments')\nparser.add_argument('-tc', '--tag-close', dest=\"tag_close\", action=\"store\", default='*jml',\n help='closing tag for jml comments')\nparser.add_argument('-mlo', '--ml-open', dest=\"ml_open\", action=\"store\",\n help='opening tag for jml solution comments')\nparser.add_argument('-mlc', '--ml-close', dest=\"ml_close\", action=\"store\",\n help='closing tag for jml solution comments')\nparser.add_argument('-mls', '--ml-suffix', dest=\"ml_suffix\", action=\"store\", default=\"ML\",\n help='suffix for the solution version')\nparser.add_argument('-vs', '--ver-sep', dest=\"sep\", action=\"store\", default=\"_\",\n help='separator between project name and version number')\nparser.add_argument('-i', '--include', dest=\"include\", action=\"extend\", nargs=\"+\",\n default=['java'],\n help='list of file extensions to parse for jml comments')\nparser.add_argument('-e', '--exclude', dest=\"exclude\", action=\"extend\", nargs=\"+\",\n default=['class','ctxt','Thumbs.db','.DS_Store'],\n help='list of file extensions to exclude from processing')\nparser.add_argument('-v', '--versions', dest=\"versions\", action=\"extend\", type=str,\n help='list of versions to process')\nparser.add_argument('--preset', dest=\"preset\",\n choices=['xml', 'de', 'de-xml'],\n help='use a preset tag style')\nparser.add_argument('--project-root', dest=\"root\", action='store',\n help='if set to a prefix of IN, the folder structure in OUT will reflect the structure of the project root. For IN=/projects/foo/bar/my-project OUT=/projects/out project-root=/projects the resulting versions will be written to /projects/out/foo/bar')\nparser.add_argument('--keep-empty', dest=\"keep_empty\", action='store_true',\n help='By default empty files are omitted. 
This will keep those files in the output.')\nparser.add_argument('--encoding', dest=\"encoding\", action='store', default=\"utf-8\",\n help='Set the encoding for project files. Default: utf-8.')\nparser.add_argument('-z', '--zip', dest=\"create_zip\", action='store_true', default=False,\n help='Create additional zip files of the output folders.')\nparser.add_argument('--no-ml', dest=\"delete_ml\", action='store_true',\n help='Skip generating a ml version of the project. Note that the ml version is always generated, but this will delete the folder afterwards.')\n\nparser.add_argument('--debug', dest=\"debug\", action='store_true',\n help='Enable more verbose debug output.')\n\nargs = parser.parse_args()\n\nif args.preset:\n if args.preset == 'xml':\n args.tag_open = ''\n args.tag_close = ''\n elif args.preset == 'de':\n args.tag_open = 'aufg*'\n args.tag_close = '*aufg'\n args.ml_open = 'ml*'\n args.ml_close = '*ml'\n elif args.preset == 'de-xml':\n args.tag_open = ''\n args.tag_close = ''\n args.ml_open = ''\n args.ml_close = ''\n\nif args.tag_open == args.tag_close:\n print(\"You can't use the same tag for opening and closing jml comments.\")\n print(\"Please set distinct tags. e.g. @jml and lmj@\")\n quit()\n\nif args.ml_open and args.ml_open == args.ml_close:\n print(\"You can't use the same tag for opening and closing jml solution comments.\")\n print(\"Please set distinct tags. e.g. @jml and lmj@\")\n quit()\n\nif not args.ml_open:\n args.ml_open = args.tag_open\nif not args.ml_close:\n args.ml_close = args.tag_close\n\ndef debug(msg):\n if args.debug:\n print(msg)\n\ndef test_version(version1, version2):\n \"\"\" Compares a version with a version string and checks if the first\n is in the range defined by the second. The second version can be\n prefixed by one of =, <, >, >=, <= or != to compare with a range of\n versions.\n \"\"\"\n version1 = int(version1)\n version2 = str(version2)\n\n ver1 = version1\n ver2 = int(version2.lstrip('<>=!'))\n op = version2.rstrip('0123456789')\n\n if len(op) == 0 or op == '=':\n return ver1 == ver2\n else:\n return eval(f'{ver1}{op}{ver2}')\n\ndef create_zip(dir, args):\n with zipfile.ZipFile(f'{dir}.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:\n for root, dirs, files in os.walk(dir):\n for file in files:\n filepath = os.path.join(root, file)\n relpath = os.path.relpath(filepath, start=dir)\n zipf.write(filepath, arcname=relpath)\n\ndef create_version(version, args):\n if version == 0:\n ver_name = args.name\n else:\n ver_name = f'{args.name}{args.sep}{version}'\n outdir = os.path.join(args.outdir, ver_name)\n\n # prepare output folders\n if os.path.isdir(outdir) and args.clear:\n shutil.rmtree(outdir)\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n\n for root, dirs, files in os.walk(args.indir):\n subpath = root[len(args.indir)+1:]\n outroot = os.path.join(outdir, subpath)\n\n os.makedirs(outroot, exist_ok=True)\n\n for file in files:\n fullpath = os.path.join(root,file)\n fulloutpath = os.path.join(outroot, file)\n\n _, ext = os.path.splitext(file)\n ext = ext[1:]\n\n if ext in args.exclude:\n continue\n elif ext in args.include:\n is_empty = True\n with open(fullpath, 'r', encoding=args.encoding) as inf:\n with open(fulloutpath, 'w', encoding=args.encoding) as outf:\n skip = False\n line = inf.readline()\n #if args.encoding != 'utf-8':\n # line = line.decode(args.encoding).encode()\n while line:\n lline = line.lstrip()\n if lline.startswith(f'//{args.ml_close}'):\n skip = False\n elif lline.startswith(f'{args.tag_close}*/'):\n skip = 
False\n elif lline.startswith(f'//{args.ml_open}'):\n skip = True\n elif lline.startswith(f'/*{args.tag_open}'):\n parts = lline.split()\n if len(parts) > 1:\n skip = not test_version(version, parts[1])\n else:\n skip = False\n elif skip:\n pass\n else:\n outf.write(line)\n is_empty = False\n line = inf.readline()\n if is_empty and not args.keep_empty:\n os.remove(fulloutpath)\n else:\n shutil.copy(fullpath, fulloutpath)\n\n if args.create_zip:\n create_zip(outdir, args)\n\ndef create_ml(args):\n versions = set()\n\n ver_name = f'{args.name}{args.sep}{args.ml_suffix}'\n outdir = os.path.join(args.outdir, ver_name)\n\n # prepare output folders\n if os.path.isdir(outdir) and args.clear:\n shutil.rmtree(outdir)\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n\n for root, dirs, files in os.walk(args.indir):\n subpath = root[len(args.indir)+1:]\n outroot = os.path.join(outdir, subpath)\n\n os.makedirs(outroot, exist_ok=True)\n\n for file in files:\n fullpath = os.path.join(root,file)\n fulloutpath = os.path.join(outroot, file)\n\n _, ext = os.path.splitext(file)\n ext = ext[1:]\n\n debug(f'working on {fullpath}')\n\n if ext in args.exclude:\n continue\n elif ext in args.include:\n is_empty = True\n with open(fullpath, 'r', encoding=args.encoding) as inf:\n with open(fulloutpath, 'w', encoding=args.encoding) as outf:\n skip = False\n line = inf.readline()\n while line:\n lline = line.lstrip()\n if lline.startswith(f'//{args.ml_close}'):\n pass\n elif lline.startswith(f'//{args.ml_open}'):\n pass\n elif lline.startswith(f'{args.tag_close}*/'):\n skip = False\n elif lline.startswith(f'/*{args.tag_open}'):\n parts = lline.split()\n if len(parts) > 1:\n ver = parts[1].lstrip('<>!=')\n versions.add(ver)\n skip = True\n elif skip:\n pass\n else:\n outf.write(line)\n is_empty = False\n line = inf.readline()\n if is_empty and not args.keep_empty:\n os.remove(fulloutpath)\n else:\n shutil.copy(fullpath, fulloutpath)\n\n if args.delete_ml:\n shutil.rmtree(outdir)\n elif args.create_zip:\n create_zip(outdir, args)\n\n if not versions:\n versions.add(0)\n return versions\n\nif os.path.isdir(args.indir):\n if not args.name:\n args.name = os.path.basename(args.indir)\n\n if args.root and os.path.commonprefix([args.root, args.indir]) == args.root:\n args.outdir = os.path.dirname(os.path.join(args.outdir, args.indir[len(args.root)+1:]))\n\n debug(f'Compiling project <{args.name}>\\n from <{args.indir}>\\n to <{args.outdir}>')\n debug('Creating ML version:')\n versions = create_ml(args)\n\n if not args.versions:\n args.versions = versions\n for ver in versions:\n if any(test_version(ver, v) for v in args.versions):\n debug(f'Creating version {ver}:')\n create_version(ver, args)\nelse:\n print(f'{args.indir} does not exist')\n","sub_path":"jml.py","file_name":"jml.py","file_ext":"py","file_size_in_byte":11207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"220141938","text":"# content of conftest.py\nimport os\nimport pytest\nimport random\nfrom time import sleep, time\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\n\nfrom loguru import logger\nfrom pytezos import pytezos\n\nfrom src.accounts import Accounts\nfrom src.config import Config\nfrom src.compile import launch_sandbox, stop_sandbox\nfrom src.deploy import deploy_market, deploy_stablecoin\nfrom src.market import Market\nfrom src.stablecoin import Stablecoin\nfrom src.utils import *\n\nmarket_pool = []\nreserved = []\n\nlogger.add(\"tests/file_{time}.log\", 
level='DEBUG')\nlogger = logger.opt(colors=True)\n\n\ntest_accounts = [\n {\"name\": \"donald\", \"key\": \"tz1VWU45MQ7nxu5PGgWxgDePemev6bUDNGZ2\", \"status\": \"created\"},\n {\"name\": \"mala\", \"key\": \"tz1azKk3gBJRjW11JAh8J1CBP1tF2NUu5yJ3\", \"status\": \"created\"},\n {\"name\": \"marty\", \"key\": \"tz1Q3eT3kwr1hfvK49HK8YqPadNXzxdxnE7u\", \"status\": \"created\"},\n {\"name\": \"palu\", \"key\": \"tz1LQn3AuoxRVwBsb3rVLQ56nRvC3JqNgVxR\", \"status\": \"created\"},\n {\"name\": \"rimk\", \"key\": \"tz1PMqV7qGgWMNH2HR9inWjSvf3NwtHg7Xg4\", \"status\": \"created\"},\n {\"name\": \"tang\", \"key\": \"tz1MDwHYDLgPydL5iav7eee9mZhe6gntoLet\", \"status\": \"created\"},\n {\"name\": \"patoch\", \"key\": \"tz1itzGH43N8Y9QT1UzKJwJM8Y3qK8uckbXB\", \"status\": \"created\"},\n {\"name\": \"marco\", \"key\": \"tz1UbdPPEVcyT5tC34yh7LweQ1tTWk8vHVXk\", \"status\": \"created\"},\n {\"name\": \"carl\", \"key\": \"tz1XvHb5KDrui5S8WJP6txdAG7Qu5WHbfw1Q\", \"status\": \"created\"},\n {\"name\": \"siri\", \"key\": \"tz1MT1ZfNoDXzWvUj4zJg8cVq7tt7a6QcC58\", \"status\": \"created\"},\n {\"name\": \"clara\", \"key\": \"tz1dWWkzwEKWg9S7sA3A2gFcZXzyz3ekHNRE\", \"status\": \"created\"},\n {\"name\": \"lisa\", \"key\": \"tz1SVMjM4BcELyhBYhqUVe5PaV2ZgckBd8bG\", \"status\": \"created\"},\n {\"name\": \"laura\", \"key\": \"tz1P6AVBYU3SAz6o9gCAxpgLG3TsF7mSLTk7\", \"status\": \"created\"},\n {\"name\": \"anna\", \"key\": \"tz1UZFiTE2G5vxRicUeiyov5yt6Zku6fC3dt\", \"status\": \"created\"},\n {\"name\": \"maria\", \"key\": \"tz1NY2EdxiVNyyB5kLAiZCC1UrA6CagM5Zek\", \"status\": \"created\"},\n {\"name\": \"penny\", \"key\": \"tz1UGtuFY8R3fQp6Exi9Bb6AMppVubunFj2p\", \"status\": \"created\"},\n {\"name\": \"amy\", \"key\": \"tz1TXa1B9CfMmvWPH7uWX3rsjNcqzLZ6af5U\", \"status\": \"created\"},\n {\"name\": \"astrid\", \"key\": \"tz1SopWRnn116FzCevsZxL9rzpJq4A6hFWRv\", \"status\": \"created\"},\n {\"name\": \"cathalina\", \"key\": \"tz1hNY9Fv11hNZBwmjRe4VtTwkeevgW6tGq9\", \"status\": \"created\"},\n {\"name\": \"romina\", \"key\": \"tz1iFcpVuaBCaffFNG8LZKJrQdzzj7c5uW3t\", \"status\": \"created\"},\n {\"name\": \"xenia\", \"key\": \"tz1L5nZbXTV46KGVB4gRVNQ4RWxYoq3jLn1R\", \"status\": \"created\"},\n {\"name\": \"eva\", \"key\": \"tz1dAPf4ivAgAQbp7Lua2M7kecVu6d9t4oYJ\", \"status\": \"created\"},\n {\"name\": \"alexa\", \"key\": \"tz1Vb3PvQAHTgyp56rXqSpkcaUc5zdEcFfbD\", \"status\": \"created\"},\n {\"name\": \"mia\", \"key\": \"tz1Lc4SKZLFriSm8ouwyUo3Hxkbv35VxwBP1\", \"status\": \"created\"},\n {\"name\": \"robin\", \"key\": \"tz1XRcA64rrepjkDgJudqjk7Z5HyALk8u9iU\", \"status\": \"created\"},\n {\"name\": \"hannah\", \"key\": \"tz1hGsWjVurdQMR7U9EC8TYwhgubfioTSf28\", \"status\": \"created\"},\n {\"name\": \"emma\", \"key\": \"tz1QahWZZHgrREnjThcxKXdmQMuqAWwYWoB9\", \"status\": \"created\"},\n {\"name\": \"lily\", \"key\": \"tz1bCwYgZcTQwLERvXS9UZghffENphBsEcho\", \"status\": \"created\"},\n {\"name\": \"madonna\", \"key\": \"tz1RncjhUDusSeNoLT11z35wmdRRaMsq5fsp\", \"status\": \"created\"},\n {\"name\": \"nina\", \"key\": \"tz1ZJARU2TodXW8cFhYuai3VmLpY1qqkHq9B\", \"status\": \"created\"},\n {\"name\": \"robert\", \"key\": \"tz1ghjxBNM1ic25Lzq33Eq7z5RiXTQhiaPDT\", \"status\": \"created\"},\n {\"name\": \"tasos\", \"key\": \"tz1XdPirP3FxZDNGZMhw7Nk2hDAfSiCVGWF9\", \"status\": \"created\"},\n {\"name\": \"sergio\", \"key\": \"tz1gK1rZy2Biut8hcJiyEufbtXQ9rkNvToub\", \"status\": \"created\"},\n {\"name\": \"stavros\", \"key\": \"tz1iPFr4obPeSzknBPud8uWXZC7j5gKoah8d\", \"status\": \"created\"},\n {\"name\": \"leonidas\", \"key\": 
\"tz1ZrWi7V8tu3tVepAQVAEt8jgLz4VVEEf7m\", \"status\": \"created\"}\n]\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef contract_id():\n id = deploy_market()\n logger.info(f\"Binary prediction contract deployed at address {id}\")\n return id\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef mock_get_tezos_client_path():\n return os.path.join('tests/users', 'secret_keys')\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef endpoint():\n endpoint = 'http://localhost:20000'\n logger.info(f'Endpoint is : {endpoint}')\n return endpoint\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef mock_functions(monkeypatch):\n print(\"mock_function\")\n monkeypatch.setattr(\n 'src.utils.get_tezos_client_path',\n lambda: os.path.join('tests/users', 'secret_keys')\n )\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef stablecoin_id(endpoint):\n id = deploy_stablecoin(shell=endpoint)\n logger.info(f\"Stablecoin contract deployed at = {id}\")\n return id\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef config(contract_id, stablecoin_id, endpoint):\n config = Config(config_file=\"tests/cli.ini\", contract=contract_id, stablecoin=stablecoin_id, endpoint=endpoint)\n logger.info(f\"account originator = {config['admin_priv_key']}\")\n return config\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef market(config, get_accounts):\n new_market = Market(get_accounts, config)\n return new_market\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef stablecoin(config, get_accounts):\n new_stablecoin = Stablecoin(get_accounts, config)\n return new_stablecoin\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef client(config):\n client = pytezos.using(\n shell=config[\"endpoint\"],\n key=\"edsk3QoqBuvdamxouPhin7swCvkQNgq4jP5KZPbwWNnwdZpSpJiEbq\"\n )\n return client\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef get_accounts(config):\n accounts = Accounts(endpoint=config[\"endpoint\"])\n accounts.import_from_folder(\"tests/users\")\n return accounts\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef financed_accounts(client, config: Config, stablecoin_id: str):\n money_seeding = []\n stablecoin_seeding = []\n accounts_to_finance = random.choices(test_accounts, k=30)\n for account in test_accounts:\n if account in accounts_to_finance:\n money_seed = client.transaction(\n account['key'], amount=Decimal(10)\n )\n account['status'] += ',tezzed'\n money_seeding.append(money_seed)\n stablecoin = get_stablecoin(config['admin_account'], stablecoin_id)\n stablecoin_seed = stablecoin.transfer({\n 'from': get_public_key(config['admin_account']),\n 'to': account['key'],\n 'value': 2 ** 42\n })\n account['status'] += ',financed'\n stablecoin_seeding.append(stablecoin_seed.as_transaction())\n\n bulk_transactions = config[\"admin_account\"].bulk(*stablecoin_seeding)\n submit_transaction(bulk_transactions, error_func=print_error)\n bulk_transactions = config[\"admin_account\"].bulk(*money_seeding)\n submit_transaction(bulk_transactions, error_func=print_error)\n return accounts_to_finance\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef financed_revealed_accounts(financed_accounts, config, get_accounts):\n accounts_obj = get_accounts\n accounts_to_reveal = random.choices(financed_accounts, k=20)\n #non_financed_accounts_to_reveal = [x in test_accounts if 'financed' not in x['status']]\n #accounts_to_reveal += non_financed_account\n for account in financed_accounts:\n if account in accounts_to_reveal:\n 
accounts_obj.activate_account(account['name'])\n try:\n accounts_obj.reveal_account(account['name'])\n account[\"status\"] += \",revealed\"\n except:\n continue\n return accounts_to_reveal\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef accounts_who_minted(config, market, financed_revealed_accounts, gen_cleared_markets):\n accounts_who_mint = random.choices(financed_revealed_accounts, k=15)\n market_with_minted_token = random.choices(gen_cleared_markets, k=15)\n transactions = []\n for account in test_accounts:\n if account in accounts_who_mint:\n for ma in market_pool:\n if ma in market_with_minted_token:\n try:\n transaction = market.mint(\n ma['id'],\n account['name'],\n 2**16\n )\n submit_transaction(transaction, error_func=print_error)\n if 'minted' not in ma['status']:\n ma['status'] += ',minted'\n if 'minted' not in account['status']:\n account['status'] += ',minted'\n except:\n continue\n return accounts_who_mint\n\n\"\"\"\n@pytest.fixture(scope=\"session\", autouse=True)\ndef accounts_with_liquidity(config, market, financed_revealed_accounts, gen_cleared_markets):\n accounts_whith_liquidity = random.choices(financed_revealed_accounts, k=15)\n market_with_minted_token = random.choices(gen_cleared_markets, k=15)\n transactions = []\n for account in test_accounts:\n if account in accounts_whith_liquidity:\n for ma in market_pool:\n if ma in market_with_minted_token:\n try:\n transaction = market.update_liquidity(\n ma['id'],\n account['name'],\n 'payIn',\n 2**16\n )\n submit_transaction(transaction, error_func=print_error)\n if 'liquid' not in ma['status']:\n ma['status'] += ',liquid'\n if 'sprayer' not in account['status']:\n account['status'] += ',sprayer'\n except:\n continue\n logger.error(accounts_whith_liquidity)\n return accounts_whith_liquidity\n\"\"\"\n\n@pytest.fixture(scope=\"function\")\ndef revealed_account(financed_revealed_accounts, stablecoin, get_accounts):\n selected_account = random.choice(financed_revealed_accounts)\n stablecoin_balance = stablecoin.get_balance(selected_account[\"name\"])\n tez_balance = get_accounts[selected_account['name']].balance()\n logger.info(f\"account used for the call: {selected_account}\")\n logger.info(f\"account stablecoin balance before call: {stablecoin_balance}\")\n logger.info(f\"account tez balance before call: {tez_balance}\")\n yield selected_account\n # Re-read the balances so the post-test log shows current values\n stablecoin_balance = stablecoin.get_balance(selected_account[\"name\"])\n tez_balance = get_accounts[selected_account['name']].balance()\n logger.info(f\"account stablecoin balance after call: {stablecoin_balance}\")\n logger.info(f\"account tez balance after call: {tez_balance}\")\n\n\n@pytest.fixture(scope=\"function\")\ndef financed_account(financed_accounts, stablecoin, get_accounts):\n selected_account = random.choice(financed_accounts)\n stablecoin_balance = stablecoin.get_balance(selected_account[\"name\"])\n tez_balance = get_accounts[selected_account['name']].balance()\n logger.info(f\"account used for the call: {selected_account}\")\n logger.info(f\"account stablecoin balance before call: {stablecoin_balance}\")\n logger.info(f\"account tez balance before call: {tez_balance}\")\n yield selected_account\n # Re-read the balances so the post-test log shows current values\n stablecoin_balance = stablecoin.get_balance(selected_account[\"name\"])\n tez_balance = get_accounts[selected_account['name']].balance()\n logger.info(f\"account stablecoin balance after call: {stablecoin_balance}\")\n logger.info(f\"account tez balance after call: {tez_balance}\")\n\n\n@pytest.fixture(scope=\"function\")\ndef non_financed_account(stablecoin, get_accounts):\n selection = [x for x in test_accounts if 'financed' not in x['status']]\n selected_account = random.choice(selection)\n stablecoin_balance = stablecoin.get_balance(selected_account[\"name\"])\n tez_balance = get_accounts[selected_account['name']].balance()\n 
logger.info(f\"acount used for the call: {selected_account}\")\n try:\n get_accounts.activate_account(account_name=selected_account['name'])\n get_accounts.reveal_account(account_name=selected_account['name'])\n except:\n logger.info(f\"Non financed account: {selected_account} already available on the network\")\n logger.info(f\"account stablecoin balance before call: {stablecoin_balance}\")\n logger.info(f\"account tez balance before call: {tez_balance}\")\n yield selected_account\n logger.info(f\"account stablecoin balance after call: {stablecoin_balance}\")\n logger.info(f\"account tez balance before call: {tez_balance}\")\n\n\n@pytest.fixture(scope=\"function\")\ndef minter_account():\n selection = [x for x in test_accounts if 'minted' in x['status']]\n account = random.choice(selection)\n return account\n\n\n@pytest.fixture(scope=\"function\")\ndef sprayer_account():\n selection = [x for x in test_accounts if 'sprayer' in x['status']]\n account = random.choice(selection)\n return account\n\n\n@pytest.fixture(scope=\"session\", autouse=\"True\")\ndef gen_markets(financed_revealed_accounts, config, market, stablecoin_id):\n transactions = []\n for i in range(2):\n for index in range(40):\n quantity = random.randint(0, 900)\n rate = random.randint(0, 2 ** 63)\n end_delay = random.uniform(0.05, 0.15)\n end = datetime.now() + timedelta(minutes=end_delay)\n caller = random.choice(financed_revealed_accounts)\n market_id, transaction = market.ask_question(\n id_generator(),\n id_generator(),\n caller['name'],\n quantity,\n rate,\n id_generator(),\n auction_end_date=end.timestamp(),\n token_contract=stablecoin_id\n )\n if market_id not in reserved and market_id != 1:\n reserved.append(market_id)\n transactions.append(transaction)\n market_pool.append({\n 'id': int(market_id),\n 'caller': caller,\n 'end': end.timestamp(),\n 'status': 'created'\n })\n bulk_transactions = config[\"admin_account\"].bulk(*transactions)\n res = submit_transaction(bulk_transactions, error_func=print_error)\n logger.info(\"-------------------------------RESULT OF THE OPERATION--------------------------------------\")\n logger.info(res)\n logger.info(\"-------------------------------RESULT OF THE OPERATION--------------------------------------\")\n logger.info(\"-------------------------------LIST OF GENERATED MARKETS------------------------------------\")\n for ma in market_pool:\n logger.info(ma)\n logger.info(\"-------------------------------LIST OF GENERATED MARKETS------------------------------------\")\n transactions.clear()\n sleep(80)\n return market_pool\n\n\n@pytest.fixture(scope=\"session\", autouse=\"True\")\ndef gen_bid_markets(gen_markets, market, config):\n selection = random.sample(gen_markets, k=60)\n for i in range(1):\n for ma in selection:\n transactions = market.multiple_bids(\n ma['id'],\n random.randint(2, 2 ** 8),\n random.randint(2, 2 ** 63)\n )\n bulk_transactions = config[\"admin_account\"].bulk(transactions)\n submit_transaction(bulk_transactions, error_func=print_error)\n for ma in selection:\n ma['status'] = 'bidded'\n sleep(2)\n return selection\n\n\n@pytest.fixture(scope=\"session\", autouse=\"True\")\ndef gen_cleared_markets(config, market, gen_bid_markets):\n selection = random.sample(gen_bid_markets, k=40)\n cleared = []\n for ma in selection:\n transaction = market.auction_clear(ma['id'], ma['caller']['name'])\n try:\n end = datetime.now()\n logger.info(f\" who is the end {end.timestamp()} {ma['end']}\")\n submit_transaction(transaction, error_func=raise_error)\n ma['status'] = 
'cleared'\n cleared.append(ma)\n except Exception as e:\n continue\n sleep(2)\n logger.info(len(cleared))\n return cleared\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef gen_resolved_markets(config, market, gen_cleared_markets):\n selection = random.choices(gen_cleared_markets, k=20)\n resolved = []\n random_bit = random.getrandbits(1)\n random_boolean = bool(random_bit)\n for ma in market_pool:\n if ma in selection:\n transaction = market.close_market(ma['id'], ma['caller']['name'], random_boolean)\n try:\n submit_transaction(transaction, error_func=raise_error)\n ma['status'] = 'resolved'\n resolved.append(ma)\n except Exception as e:\n logger.info(e)\n continue\n logger.info(len(resolved))\n return resolved\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef log_contract_state(request):\n logger.info(f\"-----------------------------{request}-----------------------------\")\n yield\n logger.info(f\"-----------------------------END OF THE TEST-----------------------------\")\n\n\ndef get_random_market(status=['created'], exclude=[]):\n pool = [\n x for x in market_pool if\n any(status_el in x['status'] for status_el in status)\n ]\n r_pool = random.choice(pool)\n pool.remove(r_pool)\n logger.info(f\"selected market for test: {r_pool}\")\n return r_pool\n\n\ndef pytest_configure():\n \"\"\"\n Allows plugins and conftest files to perform initial configuration.\n This hook is called for every plugin and initial conftest\n file after command line options have been parsed.\n \"\"\"\n #launch_sandbox()\n sleep(20)\n\n\ndef pytest_sessionstart(session):\n \"\"\"\n Called after the Session object has been created and\n before performing collection and entering the run test loop.\n \"\"\"\n #launch_sandbox()\n\n\ndef pytest_sessionfinish(session, exitstatus):\n \"\"\"\n Called after whole test run finished, right before\n returning the exit status to the system.\n \"\"\"\n\n\ndef pytest_unconfigure(config):\n \"\"\"\n Called before test process is exited.\n \"\"\"\n #stop_sandbox()\n\n\n@pytest.hookimpl(hookwrapper=True)\ndef pytest_fixture_setup(fixturedef, request):\n start = time()\n yield\n total = time() - start\n logger.error(total)\n\n\ndef get_random_account(status=\"created\", exclude=\"\"):\n # Plain helper, not a fixture: fixtures such as stablecoin and get_accounts\n # are not available here, so only the selected account is logged.\n selection = [x for x in test_accounts if status not in x['status'] and status not in exclude]\n selected_account = random.choice(selection)\n logger.info(f\"account used for the call: {selected_account}\")\n return selected_account\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef liquidity_storage(client, config):\n return get_question_liquidity_provider_map(client, config['contract'])\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef ledger_storage(client, config):\n return get_tokens_ledgermap(client, config['contract'])\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef supply_storage(client, config):\n return get_tokens_supplymap(client, 
config['contract'])\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":19395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627027313","text":"import socket\r\nfrom threading import Thread\r\nimport pickle\r\nimport os\r\nimport file\r\nimport copy\r\nimport hashlib\r\n\r\n\r\n\r\nclass folder(file.f_obj):\r\n def __init__(self, path=None, parent=None, root=False):\r\n self.root = root if root else False\r\n if self.root and not path:\r\n raise AttributeError('root object must have atribute \"path\"')\r\n\r\n self.parent = parent\r\n self.name = os.path.basename(path)\r\n self.name_ext = self.name\r\n self.content = []\r\n self.type = 'folder'\r\n super(folder, self).__init__(real_path=path, name_ext=self.name_ext, parent=self.parent)\r\n self.generate_from_path()\r\n\r\n\r\n def generate_from_path(self):\r\n for el in os.listdir(self.calc_path()):\r\n obj_path = f\"{self.calc_path()}\\\\{el}\"\r\n if os.path.isdir(obj_path):\r\n self.content.append(folder(obj_path, parent=self))\r\n elif os.path.isfile(obj_path):\r\n self.content.append(file.ffile(path=obj_path, parent=self))\r\n\r\n\r\n def __iter__(self):\r\n for obj in os.listdir(self.real_path):\r\n # print(obj)\r\n obj_path = f\"{self.real_path}\\\\{obj}\"\r\n if os.path.isdir(obj_path):\r\n fold = folder(path=obj_path, parent=self)\r\n yield fold\r\n for f in fold:\r\n yield f\r\n elif os.path.isfile(obj_path):\r\n fo = file.ffile(path=obj_path, parent=self)\r\n yield fo\r\n\r\n\r\n def update_struct(self):\r\n try:\r\n os.mkdir(self.calc_path())\r\n except FileExistsError:\r\n pass\r\n for child in self:\r\n print('a', child)\r\n child.update_struct()\r\n os.rmdir(self.real_path)\r\n self.real_path = self.calc_path()\r\n\r\n\r\n # def __hash__(self, manag):\r\n # type, name_ext\r\n # hash([for el self.content])\r\n\r\ndef root(path):\r\n obj = folder(path=path, root=True)\r\n return obj\r\n\r\n\r\n# class root_folder(folder):\r\n # def __init__(self, path_to_root=None):\r\n # super(root_folder, self).__init__(name=, path=path_to_root, root=True)\r\n#\r\n\r\nclass f_manager:\r\n def __init__(self, root):\r\n self.root_ob = root\r\n\r\n def search_by_path(self, path):\r\n return self.root_ob.recursive_search(path)\r\n\r\n def listner(self, ip, port):\r\n pass\r\n\r\n def log(self):\r\n # hash folder: relative path, hash of ffile hashes\r\n # hash ffile: relativ e path, content hash\r\n pass\r\n\r\n def is_collisia(self):\r\n hash_list = dict()\r\n for obj in self.root_ob:\r\n if obj.hash in hash_list:\r\n hash_list[obj.hash] += 1\r\n # else:\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root_path = r\"C:\\Users\\leyla808\\Desktop\\code\\ftp\\filesystem_lib\\v2\\test_folder\"\r\n root = root(path=root_path)\r\n test_file = root.recursive_search('i')\r\n test_file.move(new_parent=root.recursive_search(path=\"other\\\\\"))\r\n test_file.update_struct()\r\n\r\n print(0)\r\n","sub_path":"v2/folder.py","file_name":"folder.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436816282","text":"# -*- coding: utf-8 -*-\n\n# Fall back to StringIO in environments where cStringIO is not available\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template.loader import render_to_string\nfrom django.template import RequestContext\nfrom django.conf import settings\nimport ho.pisa as pisa\nimport 
cStringIO as StringIO\nimport cgi\nimport os\n\nfrom website.apps.art.models import Artwork, ArtworkImage\nimport logging\n\n\nclass PisaNullHandler(logging.Handler):\n def emit(self, record):\n pass\nlogging.getLogger(\"ho.pisa\").addHandler(PisaNullHandler())\n\n\ndef common_report_method(valuelist):\n #if request.is_ajax():\n temp = valuelist.split(\",\")\n artworkimageslist = []\n \n for value in temp:\n artworkimageslist.append(int(value))\n \n artworkimages = ArtworkImage.objects.filter(artwork_image_id__in=artworkimageslist)\n return artworkimages\n\n\ndef render_to_pdf(request,html):\n \n result = StringIO.StringIO()\n response = HttpResponse(mimetype='application/pdf')\n response['Content-Disposition'] = 'filename=summary_report.pdf'\n pdf = pisa.CreatePDF(StringIO.StringIO(html.encode(\"UTF-8\")), dest=result,link_callback=fetch_resources)\n \n if not pdf.err:\n response.write(result.getvalue())\n \n else:\n response.write('We had some errors
    <pre>%s</pre>
    ' % cgi.escape(html))\n #json = simplejson.dumps(response,ensure_ascii=False)\n #return HttpResponse(json, mimetype=\"application/json\")\n\n return response\n\ndef fetch_resources(uri, rel):\n path = os.path.join(settings.MEDIA_ROOT, uri.replace(settings.MEDIA_URL, \"\"))\n #print path\n return path\n\n#@csrf_protect\ndef summary_report(request,template=\"inventory/summary_report.html\"):\n valuelist = request.GET.get('valuelist')\n #artworkimages = ArtworkImage.objects.all()\n artworkimages = common_report_method(valuelist)\n #print artworkimages\n html = render_to_response(template,\n {'pagesize':'A4','title':'Gallery Stock Inventory List','artworkimages':artworkimages},\n context_instance = RequestContext(request)\n )\n \n return render_to_pdf(request, html.content)\n\n\ndef checklist_report(request,template=\"inventory/checklist_report.html\"):\n valuelist = request.GET.get('valuelist')\n \n #artworkimages = ArtworkImage.objects.all()\n\n artworkimages = common_report_method(valuelist)\n\n data = render_to_response(template,\n {'pagesize':'A4','title':'Gallery Stock Inventory List','artworkimages':artworkimages},\n context_instance = RequestContext(request)\n )\n \n return render_to_pdf(request, data.content)\n\n\n\ndef details_report(request,template=\"inventory/details_report.html\"):\n valuelist = request.GET.get('valuelist',\"\")\n #artworkimages = ArtworkImage.objects.all()\n\n artworkimages = common_report_method(valuelist)\n \n data = render_to_response(template,\n {'pagesize':'A4','title':'Gallery Stock Inventory List','artworkimages':artworkimages},\n context_instance = RequestContext(request)\n )\n \n return render_to_pdf(request, data.content)\n","sub_path":"website/apps/inventory/report_views.py","file_name":"report_views.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155683464","text":"# -*- coding: utf-8 -*-\n# author: Tac\n# contact: gzzhanghuaxiong@corp.netease.com\n\n\nimport logging\nimport logging.handlers\nimport sys\nimport traceback\nimport platform\nimport types\nimport time\n\n\ndef compact_traceback():\n \"\"\"\n 获取exception信息\n Returns:\n tuple\n 0: tuple\n 0: str, exception所在文件名\n 1: str, exception所在函数名\n 2: str, exception所在行号\n 1: exception类型\n 2: exception参数\n 3: str, traceback信息\n \"\"\"\n exception, arg, tb = sys.exc_info()\n if tb is None:\n return\n tb_info = []\n while tb:\n tb_info.append((\n tb.tb_frame.f_code.co_filename,\n tb.tb_frame.f_code.co_name,\n str(tb.tb_lineno),\n ))\n tb = tb.tb_next\n del tb\n\n info = ' '.join(['[%s|%s|%s]' % t for t in tb_info])\n return tb_info[-1], exception, arg, info\n\n\ndef log_compact_traceback(logger, skip_frame=0, max_frame=30):\n \"\"\"\n 默认的log_last_except方法\n Args:\n logger: logger\n skip_frame: [int]跳过的PyFrameObject数量\n max_frame: [int]最大的PyFrameObject数量\n Returns:\n str, traceback信息\n \"\"\"\n stack = ['\\n>>>>>>>>>> TRACEBACK >>>>>>>>>>\\n', 'Traceback\\n']\n exception, arg, tb = sys.exc_info()\n if exception:\n stack.append('%s: %s\\n' % (exception.__name__, arg))\n if tb:\n stack.extend(traceback.format_tb(tb, max_frame))\n else:\n stack.extend(traceback.format_stack(None, max_frame))\n skip_frame += 1\n if skip_frame > 0:\n stack = stack[:-skip_frame]\n stack.append('<<<<<<<<<<<<< END <<<<<<<<<<<<<\\n')\n stack_str = ''.join(stack)\n logger.error(stack_str)\n return stack_str\n\n\n# log级别定义,从高到低\nCRITICAL = logging.CRITICAL\nERROR = logging.ERROR\nWARNING = logging.WARNING\nWARN = 
logging.WARN\nINFO = logging.INFO\nDEBUG = logging.DEBUG\n\n\n# 日志输出流\nSTREAM = 'stream'\nSYSLOG = 'syslog'\nFILE = 'file'\n\n\nclass LogManager(object):\n created_loggers = set() # 已经有的logger名\n level = DEBUG # log等级\n handler = STREAM # log输出Handler\n tag = '' # 日志的tag\n sa_tag = '' # SA日志的tag\n SA_LOGGER_NAME = 'SALogger'\n sys_logger = None\n\n @classmethod\n def get_logger(cls, logger_name, save_file=False, dirname=None):\n \"\"\"\n 获取logger对象\n Args:\n logger_name: [str]logger名字\n save_file: [bool]是否保存文件\n dirname: [str]文件保存的目录名\n Returns:\n logger对象\n \"\"\"\n if cls.handler == SYSLOG and platform.system() == 'Linux' and cls.sys_logger is not None:\n return logging.LoggerAdapter(cls.sys_logger, {'modulename': logger_name})\n\n if logger_name in cls.created_loggers:\n return logging.getLogger(logger_name)\n\n logger = logging.getLogger(logger_name)\n # 为logger实例绑定一个log_last_except的方法\n logger.log_last_except = types.MethodType(log_compact_traceback, logger)\n logger.setLevel(cls.level)\n logger.addHandler(cls._create_handler(logger, save_file, dirname))\n cls.created_loggers.add(logger_name)\n\n if cls.handler == SYSLOG and platform.system() == 'Linux' and cls.sys_logger is not None:\n # 做两次判断,因为可能中途在_CreateHandler的时候sys_logger创建出来了\n return logging.LoggerAdapter(cls.sys_logger, {'modulename': logger_name})\n\n return logger\n\n @classmethod\n def _create_handler(cls, logger, save_file=False, dirname=None):\n \"\"\"\n 创建handler\n Args:\n logger: logger对象\n save_file: [bool]是否保存文件\n dirname: [str]文件保存的目录名\n Returns:\n handler对象\n \"\"\"\n con = ''\n # 可以用的key参考logging源码Formatter类的注释\n format_str = '%(asctime)s '\\\n + con.join((cls.tag, '%(levelname)s'))\\\n + '-[%(filename)s:%(lineno)d][%(name)s][%(funcName)s]%(message)s'\n if cls.handler == SYSLOG:\n if platform.system() == 'Linux':\n handler = logging.handlers.SysLogHandler('/dev/log', facility=logging.handlers.SysLogHandler.LOG_LOCAL1)\n cls.sys_logger = logger\n else:\n handler = logging.FileHandler(cls.tag + '_' + time.strftime('%Y%m%d_%H%M%S') + '.log', encoding='utf-8')\n elif save_file:\n if dirname:\n filename = dirname + '/' + cls.tag + '_' + time.strftime('%Y%m%d_%H%M%S') + '.log'\n else:\n filename = cls.tag + '_' + time.strftime('%Y%m%d_%H%M%S') + '.log'\n handler = logging.FileHandler(filename, encoding='utf-8')\n else:\n # handler = logging.StreamHandler(sys.stdout)\n handler = OutputWindowHandler()\n\n handler.setLevel(cls.level)\n formatter = logging.Formatter(format_str)\n handler.setFormatter(formatter)\n return handler\n\n @classmethod\n def set_level(cls, level):\n \"\"\"\n 设置log等级\n Args:\n level: int\n Returns:\n None\n \"\"\"\n cls.level = level\n for logger_name in cls.created_loggers:\n logging.getLogger(logger_name).setLevel(level)\n\n @classmethod\n def set_handler(cls, handler):\n \"\"\"\n 设置Handler类型\n Args:\n handler: str\n Returns:\n None\n \"\"\"\n cls.handler = handler\n for logger_name in cls.created_loggers:\n logger = logging.getLogger(logger_name)\n logger.addHandler(cls._create_handler(logger))\n\n @classmethod\n def set_tag(cls, tag):\n \"\"\"\n 设置tag\n Args:\n tag: str\n Returns:\n None\n \"\"\"\n cls.tag = tag\n for logger_name in cls.created_loggers:\n logger = logging.getLogger(logger_name)\n logger.addHandler(cls._create_handler(logger))\n\n\nclass OutputWindowHandler(logging.Handler):\n \"\"\"\n 将log输出到Output窗口\n \"\"\"\n main_window_ready = False\n\n def emit(self, record: logging.LogRecord):\n \"\"\"\n 重载emit\n Args:\n record: logging.LogRecord\n Returns:\n None\n \"\"\"\n message = 
self.format(record)\n if self.main_window_ready:\n try:\n import gui.main_window\n if record.levelno >= logging.ERROR:\n gui.main_window.main_window.output_window.show_error_message(message)\n elif record.levelno == logging.WARNING:\n gui.main_window.main_window.output_window.show_warning_message(message)\n else:\n gui.main_window.main_window.output_window.show_normal_message(message)\n except Exception:\n import traceback\n message = 'OutputWindow Exception:\\n%s\\nOriginal message: %s\\n' % (traceback.format_exc(), message)\n sys.stderr.write(message)\n self.handleError(record)\n # 无论main window是否创建完成,都走StreamHandler的逻辑输出到stream\n try:\n if record.levelno >= logging.ERROR:\n sys.__stderr__.write(message + '\\n')\n else:\n sys.__stdout__.write(message + '\\n')\n self.flush()\n except RecursionError:\n raise\n except Exception:\n self.handleError(record)\n\n def flush(self):\n \"\"\"\n flushed the stream\n Returns:\n None\n \"\"\"\n self.acquire()\n try:\n sys.stderr.flush()\n finally:\n self.release()\n\n\nclass MockStdOut(object):\n \"\"\"\n Mock stdout和stderr\n \"\"\"\n\n def __init__(self, channel, origin):\n \"\"\"\n 构造器\n Args:\n channel: [str]'stdout'或者'stderr'\n origin: sys.stdout或sys.stderr\n \"\"\"\n self.channel = channel\n self.origin = origin\n self._buffer = ''\n\n def __getattr__(self, attr_name):\n \"\"\"\n 获取属性\n Args:\n attr_name: [str]属性名\n Returns:\n 对应的值\n \"\"\"\n return object.__getattribute__(self.origin, attr_name)\n\n def write(self, text):\n \"\"\"\n 输出回调\n Args:\n text: [str]输出内容\n is_show_output: [bool]是否显示到输出窗口\n Returns:\n None\n \"\"\"\n self.origin.write(text)\n\n self._buffer += text\n if self._buffer.endswith('\\n'):\n self._buffer = self._buffer.rstrip('\\n')\n import gui.main_window\n if gui.main_window.main_window:\n if self.channel == 'stdout':\n gui.main_window.main_window.output_window.show_normal_message(self._buffer)\n else:\n gui.main_window.main_window.output_window.show_error_message(self._buffer)\n self._buffer = ''\n\n\nsys.stdout = MockStdOut('stdout', sys.stdout)\nsys.stderr = MockStdOut('stderr', sys.stderr)\n","sub_path":"log_manager.py","file_name":"log_manager.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341821398","text":"from rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom data.models import ClientData\r\nfrom .serializers import ClientDataSerializer\r\n\r\n\r\nclass DataView(APIView):\r\n '''\r\n REST API view\r\n '''\r\n def get(self, request):\r\n data = ClientData.objects.all()\r\n serializer = ClientDataSerializer(data, many=True)\r\n \r\n responseData = [(r['timestamp'], r['value']) for r in serializer.data]\r\n\r\n return Response({\"data\": responseData})\r\n\r\n\r\n def post(self, request):\r\n # clientId = request.data.get('clientId')\r\n dataRows = request.data.get('data')\r\n for row in dataRows: \r\n serializer = ClientDataSerializer(data=row)\r\n if (serializer.is_valid(raise_exception=True)):\r\n serializer.save()\r\n return Response({\"success\": \"Data row saved\"})","sub_path":"pegasResearchTest/rest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277010410","text":"from lect_12_1 import *\nfrom openpyxl import load_workbook\nimport unittest\n\n\nclass FixturesTest(unittest.TestCase):\n def setUp(self):\n self.event_loop = 
asyncio.new_event_loop()\n self.url = URL\n asyncio.set_event_loop(self.event_loop)\n self.session = create_session()\n self.result_of_session = self.event_loop.run_until_complete(get_url(self.session, self.event_loop, self.url))\n\n self.table_head, self.table_body = search_of_data(self.result_of_session)\n self.headers = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4\",\n \"Host\": \"httpbin.org\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/57.0.2987.98 Safari/537.36 OPR/44.0.2510.857\"\n }\n\n def test_create_session(self):\n self.assertIsInstance(self.session, aiohttp.ClientSession, 'create_session should return an aiohttp client session '\n 'object, but returned {}'.format(type(self.session)))\n\n def test_run_loop(self):\n self.assertIsInstance(self.result_of_session, str, 'Result of loop should be a string, but '\n 'got {}'.format(type(self.result_of_session)))\n\n def test_search_of_data(self):\n self.assertIsInstance(self.table_head, tuple, 'Table head must be a tuple, '\n 'but got type: {}'.format(type(self.table_head)))\n self.assertIsInstance(self.table_body, list, 'Table body must be a list, '\n 'but got type: {}'.format(type(self.table_body)))\n self.assertGreater(len(self.table_head), 0, 'Table head list is empty')\n self.assertGreater(len(self.table_body), 0, 'Table body list is empty')\n\n def test_write_to_xls(self):\n wb = load_workbook('coin_market_cap.xlsx', read_only=True)\n ws = wb.active\n r = ws['A1':'J1']\n l_head = []\n l_first_row = []\n\n for row in r:\n for i in row:\n l_head.append(i.value)\n self.assertListEqual(l_head, list(self.table_head), 'Table head was written incorrectly to the xlsx file. '\n 'Head looks like: {}'.format(list(self.table_head)))\n # r = ws['A2':'J2']\n # for row in r:\n # for i in row:\n # l_first_row.append(i.value)\n # self.assertLessEqual(l_first_row, self.table_body[0], 'Table first row was written incorrectly to the xlsx file. 
'\n # 'First row looks like: {}'.format(self.table_body[0]))\n wb.close()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"lect_12/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"317975652","text":"from userbot.utils import *\n\n@bot.on(admin_cmd(pattern=\"ad ?(.*)\"))\nasync def hello(event):\n ab = event.pattern_match.group(1)\n abc = await event.get_reply_message()\n if not abc:\n await event.edit(\"Reply any file !!!\")\n return\n if not ab:\n await event.edit(\"Write anything..\")\n return\n abb = abc.text\n await bot.send_message(event.chat_id,f\"{abb} {ab}\")","sub_path":"ad.py","file_name":"ad.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"30836875","text":"# Should probably put the Apache2 license here\n\nimport lib.utils as utils\n\nimport logging\nimport sys\n\nlogger = logging.getLogger()\n\n\ndef _init_logger(debug=False, log_format=None):\n \"\"\"Initialize the logger\n\n :param debug: Enables debug mode\n :return: A logging instance\n \"\"\"\n logger.handlers = []\n log_level = logging.INFO\n if debug:\n log_level = logging.DEBUG\n\n if not log_format:\n log_format_string = '[%(module)s]\\t%(levelname)s\\t%(message)s'\n else:\n log_format_string = log_format\n\n logging.captureWarnings(True)\n logger.setLevel(log_level)\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(log_format_string))\n logger.addHandler(handler)\n logger.debug(\"Logging initialized.\")\n\n\ndef main():\n # main stuff here\n debug = ('-d' in sys.argv or '--debug' in sys.argv)\n _init_logger(debug)\n\n # get docker version\n # get list of running containers\n # can we profile flavor (/etc/release?)\n # start calling modules\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"oculus.py","file_name":"oculus.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"257623555","text":"#!/usr/bin/env python2\n#%%\nfrom __future__ import print_function\nimport sys\nimport math\nimport numpy as np\nimport tf\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport sensor_msgs.point_cloud2 as pc2\nfrom sensor_msgs.msg import PointCloud2\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.transform import Rotation as R\nfrom particle_filter import PF\n#ROS Imports\nimport rospy\nfrom sensor_msgs import point_cloud2\nfrom sensor_msgs.msg import Image, LaserScan\nfrom ackermann_msgs.msg import AckermannDriveStamped, AckermannDrive\nfrom nav_msgs.msg import Odometry\nimport ros_numpy\n# import imutils\nfrom tf.transformations import quaternion_matrix\n\nfrom std_msgs.msg import String\nimport os\nimport json\nfrom Helpers import *\nimport copy\nimport pickle as pkl\nclass DepthTracker:\n def __init__(self):\n self.points_sub = rospy.Subscriber(\"/camera/depth/points\",PointCloud2,self.pointCloudCallback,queue_size=1)\n self.bounding_box_sub = rospy.Subscriber(\"bounding_box\",String,self.boundingBoxCallback,queue_size=1)\n self.rgb_img_sub = rospy.Subscriber(\"/camera/color/image_raw\",Image,self.imageCallback,queue_size=1, buff_size=2**24)\n self.xyz_array = np.zeros((480,640,3))\n self.latestBbox = [0,0,0,0,0]\n self.latestImg = np.zeros((480,640,3))\n self.bridge = CvBridge()\n self.sift = 
cv2.xfeatures2d.SIFT_create()\n self.cameraK = np.matrix([[462.1379497504639, 0.0, 320.5],[0.0, 462.1379497504639, 240.5],[0.0, 0.0, 1.0]])\n self.objectModel = Keypoints3D()\n # self.tf = tf.TransformListener()\n self.odomFrame = \"odom\"\n self.cameraOpticalFrameRGB = \"camera_color_optical_frame\"\n self.particles3D = np.zeros((1,3))\n self.cameraR = np.array([[0,-1,0],[0,0,-1],[1,0,0]])\n\n def IOU(self, bbox, latestBbox):\n dx = min(bbox[2],latestBbox[2])-max(bbox[0],latestBbox[0])\n dy = min(bbox[3],latestBbox[3])-max(bbox[1],latestBbox[1])\n int_area = dx*dy\n area_1 = (bbox[2]-bbox[0])*(bbox[3]-bbox[1])\n area_2 = (latestBbox[2]-latestBbox[0])*(latestBbox[3]-latestBbox[1])\n union_area = area_1+area_2-int_area\n iou = int_area/union_area\n return iou\n\n\n def boundingBoxCallback(self,boundingBoxMsgString):\n boundingBoxString = boundingBoxMsgString.data\n boundingBoxData = json.loads(boundingBoxString)\n bboxes = boundingBoxData['bounding_boxes']\n # have a score function here for checking which bounding box tracks the best\n if len(bboxes)==1:\n self.latestBbox = bboxes[0]\n elif len(bboxes)>1:\n max_iou = -1\n max_idx = -1\n for i,bbox in enumerate(bboxes):\n iou = self.IOU(bbox, self.latestBbox)\n if iou > max_iou:\n max_iou = iou\n max_idx = i\n self.latestBbox = bboxes[i]\n\n def pointCloudCallback(self,cloud):\n self.xyz_array = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(cloud,remove_nans=False)\n # with open('xyz.pkl', 'wb') as pickle_file:\n # pkl.dump(self.xyz_array, pickle_file)\n\n def imageCallback(self,imageMsg):\n frame = self.bridge.imgmsg_to_cv2(imageMsg, desired_encoding='passthrough')\n # cv2.imwrite('myman1.png',frame)\n\n def getKeypoints2D(self, img, bbox):\n (startX, startY, endX, endY,_) = [int(i) for i in bbox]\n img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n img = img[startY:endY,startX:endX]\n keypoints, descriptors = self.sift.detectAndCompute(img,None)\n return keypoints, descriptors, img\n\n def scanObject(self, cvimg, bbox, xyz_array):\n # sift ignores colour, so convert to grayscale\n keypoints, descriptors, cvimg = self.getKeypoints2D(cvimg, bbox)\n origin = np.array([cvimg.shape[0]//2,cvimg.shape[1]//2])\n (startX, startY, endX, endY,_) = [int(i) for i in bbox] \n campoints3D = xyz_array[startY:endY,startX:endX,:]\n origin3D = campoints3D[cvimg.shape[0]//2,cvimg.shape[1]//2,:]\n keypoints3D = Keypoints3D()\n for kp,desc in zip(keypoints,descriptors):\n # find a way to interpolate depth map\n u,v = int(kp.pt[1]), int(kp.pt[0])\n point3D = np.matmul(self.cameraR.T,campoints3D[u,v,:]-origin3D)\n # print(point3D,campoints3D[u,v,:]-origin3D)\n if not np.isnan(point3D).any():\n keypoints3D.add(kp,desc,point3D)\n # print(campoints3D[u,v,:])\n keypoints3D.numpyify()\n self.objectModel = keypoints3D\n return keypoints3D,origin3D\n\n # pixels is a 3xN array\n def pixel2camera(self,pixels,z_estimate):\n # python2 doesnt support @ :(\n # make sure units of z_estd are correct\n cameraPoints3D = z_estimate*np.matmul(np.linalg.inv(self.cameraK),pixels)\n return cameraPoints3D\n\n def camera2pixel(self,cameraPoints3D):\n pixels = np.matmul(self.cameraK,(cameraPoints3D/cameraPoints3D[-1,:]))[:2]\n return pixels\n\n def getTfTransform(self, destination, source):\n if self.tf.frameExists(destination) and self.tf.frameExists(source):\n t = self.tf.getLatestCommonTime(destination, source)\n return self.tf.lookupTransform(destination, source, t)\n return None, None\n\n def shiftKeypoints(self, kps, dmatch, bbox):\n pixels = []\n for idx in 
dmatch:\n pixels.append([kps[idx.trainIdx].pt[0]+bbox[0],kps[idx.trainIdx].pt[1]+bbox[1]])\n # print(lkps[idx.trainIdx].pt[0]+lbbox[0],lkps[idx.trainIdx].pt[1]+lbbox[1])\n return np.array(pixels).T\n\n def correlation2D(self, particle, img, bbox):\n # we can weight it by number of matches too\n kps, desc, img = self.getKeypoints2D(img,bbox)\n match, dmatch = self.featureMatch(self.objectModel.descriptors,desc)\n pose, quaternion = self.getTfTransform(self.cameraOpticalFrameRGB,self.odomFrame)\n R = quaternion_matrix(quaternion)\n cameraPoints3D = self.globalKeypoint2camera(match,particle,pose,R)\n predictedKeypointPixels = self.camera2pixel(cameraPoints3D)\n shiftedKeypointPixels = self.shiftKeypoints(kps,dmatch,bbox)\n return np.linalg.norm(shiftedKeypointPixels-predictedKeypointPixels)\n \n def correlation3D(self, particle, img, bbox):\n u,v = int(bbox[1]+bbox[3])//2,int(bbox[0]+bbox[2])//2\n pose = self.xyz_array[u,v,:]\n return np.linalg.norm(pose-particle)\n\n def processFrame(self, img, bbox, particles3D):\n keypoints, descriptors, img = self.getKeypoints2D(img, bbox)\n # Need to draw only good matches, so create a mask\n # each index of match is the pt in 2nd frame\n match, dMatch = self.featureMatch(self.objectModel.descriptors,descriptors)\n for particle in particles3D:\n cameraPoints3D = self.globalKeypoint2camera(match,particle)\n pixels = self.camera2pixel(cameraPoints3D)\n # do some correlation thing here\n\n def featureMatch(self, descs1, descs2):\n matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(descs1,descs2, k=2)\n match = []\n dMatch = []\n for d1,d2 in matches:\n if d1.distance < 0.75*d2.distance:\n match.append(1)\n dMatch.append(d1)\n else:\n match.append(0)\n return np.array(match),dMatch\n\n # leave the keypoints as they are. 
Instead, divide the camera points by z_estd\n def globalKeypoint2camera(self, keypointMatches, particleGlobal3D, T, R):\n # translation, rotation = self.getTfTransform(self.cameraOpticalFrameRGB,self.odomFrame)\n # Hcw = self.tf.fromTranslationRotation(translation,rotation)\n Hcw = np.zeros((4,4))\n Hcw[:3,:3] = R\n Hcw[:3,3] = T\n Hcw[3,3] = 1\n points3D = np.zeros((4,np.sum(keypointMatches)))\n points3D[3,:] = np.ones((np.sum(keypointMatches)))\n p3Didx = 0\n for k,idx in enumerate(keypointMatches):\n if idx != 0:\n points3D[0:3,p3Didx] = self.objectModel.objectPoints[k]+particleGlobal3D\n p3Didx += 1\n # print(self.objectModel.objectPoints[k])\n # print(points3D)\n cameraPoints3D = np.matmul(Hcw,points3D)\n cameraPoints3D = (cameraPoints3D/cameraPoints3D[-1,:])[:3,:]\n return cameraPoints3D\n #convert this to camera frame\n#%%\ndef main(args):\n rospy.init_node(\"DepthTrackerNode\", anonymous=True)\n bbox = DepthTracker()\n listener = tf.TransformListener()\n \n rospy.spin()\n\nif __name__=='__main__':\n\tmain(sys.argv)","sub_path":"src/DepthTracker.py","file_name":"DepthTracker.py","file_ext":"py","file_size_in_byte":8765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593086","text":"def DFS(v):\n if v == N+1:\n for i in range(1,N+1):\n if used[i]:\n print(i, end=\" \")\n print()\n else:\n used[v] = 1\n DFS(v+1)\n used[v] = 0\n DFS(v+1)\n\nN = int(input())\nused = [0]*(N+1)\nDFS(1)","sub_path":"inflearn/section5/again_section5/부분집합구하기(DFS).py","file_name":"부분집합구하기(DFS).py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"305955525","text":"#!/usr/bin/env python3\n\n#for the random numbers..\nimport random\nimport math\n\ndef bettersort(toSort):\n\n #this is for the max number of people per house\n maxPer = [0,0,0,0] #max:[Hufflepuffs,Ravenclaws,Griffindors,Slytherins]\n counts = [0,0,0,0] #counts:[Hufflepuffs,Ravenclaws,Griffindors,Slytherins]\n\n #initialize the maxs, notice that the order of the values takes care of the preferences for which house gets extra people\n for i in range(toSort):\n maxPer[i%4]+=1\n\n #implement the pickAHouse function\n def pickAHouse(maxPer, counts, nameToSort, houses):\n #random int is between 0 and 3 because of list indexes\n possibleHouse = random.randint(0,3)\n #down to one if since we use the array index to take care of sorting!\n if counts[possibleHouse] < maxPer[possibleHouse]:\n counts[possibleHouse]+=1\n houses[possibleHouse].append(nameToSort)\n else:\n #still recurse if it is full\n results = pickAHouse(maxPer, counts, nameToSort, houses)\n return (counts,houses)\n\n #sort each person\n houses = [[],[],[],[]]#list of people in:[Hufflepuff,Ravenclaw,Griffindor,Slytherin]\n for i in range(toSort):\n currentName = input('Who is next? 
')\n results = pickAHouse(maxPer, counts, currentName, houses)\n\n #print out the results to check\n print ('Max: ' + str(maxPer))\n print ('Counts: ' + str(results[0]))\n print ('Hufflepuffs: ' + str(results[1][0]))\n print ('Ravenclaws: ' + str(results[1][1]))\n print ('Griffindors: ' + str(results[1][2]))\n print ('Slytherins: ' + str(results[1][3]))\n \n#goal of this one is to sort with a guaranteed even split into the houses\ndef sort(toSort):\n \n print ('to sort: ' + str(toSort))\n #figure out how many remainders there are\n remainder = toSort%4\n #figure out max per house\n maxPerHouse = math.floor(toSort/4)\n\n #if there's only 3 extra, give one to each except Slytherin (cause fuck 'em, right??)\n if remainder == 3:\n numHuff = numRave = numGrif = maxPerHouse+1\n numSlyt = maxPerHouse\n\n #if there are 2 extra, give them to Hufflepuff and Ravenclaw\n elif remainder == 2:\n numHuff = numRave = maxPerHouse+1\n numSlyt = numGrif = maxPerHouse\n\n #if there's 1 extra, give to Hufflepuff.. they could use it\n elif remainder == 1:\n numHuff = maxPerHouse+1\n numSlyt = numGrif = numRave = maxPerHouse\n\n else:\n numHuff = numRave = numGrif = numSlyt = maxPerHouse\n\n #now we just need to pick a random house, making sure we don't overdraw it!\n \n maxs = {'Hufflepuff':numHuff, 'Ravenclaw':numRave, 'Slytherin':numSlyt, 'Griffindor':numGrif}\n print ('Max House Count: ' + str(maxs))\n\n counts = {'Hufflepuffs':0, 'Ravenclaws':0, 'Slytherins':0, 'Griffindors':0}\n print ('Current House Counts: ' + str(counts))\n\n def pickAHouse(maxs, counts):\n possibleHouse = random.randint(1,4)\n print (possibleHouse)\n print ('Current House Counts: ' + str(counts))\n if (possibleHouse == 1 and counts['Griffindors'] < maxs['Griffindor']):\n print (\"Sorted into Griffindor!\")\n counts['Griffindors']+=1\n elif (possibleHouse == 2 and counts['Slytherins'] < maxs['Slytherin']):\n print (\"Sorted into Slytherin!\")\n counts['Slytherins']+=1\n elif (possibleHouse == 3 and counts['Ravenclaws'] < maxs['Ravenclaw']):\n print (\"Sorted into Ravenclaw!\")\n counts['Ravenclaws']+=1\n elif (possibleHouse == 4 and counts['Hufflepuffs'] < maxs['Hufflepuff']):\n print (\"sorted into Hufflepuff!\")\n counts['Hufflepuffs']+=1\n else:\n counts = pickAHouse(maxs, counts)\n return counts\n\n for i in range(toSort):\n counts = pickAHouse(maxs,counts)\n print ('Current House Counts: ' + str(counts))\n\n#the goal of this function is to just sort into houses using iid random-ish numbers\ndef randsort():\n house = random.randint(1,4)\n if house == 1:\n print (\"Gryffindor!\")\n elif house == 2:\n print (\"Slytherin!?\")\n elif house == 3:\n print (\"Ravenclaw.\")\n else:\n print (\"Hufflepuff...\")\n return house\n \n","sub_path":"sorter.py","file_name":"sorter.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436098511","text":"\"\"\"Because all 9-digit and 8-digit pandigital numbers have a digital root\nthat is divisible by 3, these are also multiples of 3 and therefore not primes.\nThis way we can reduce the search span 7-digit or lower pandigital numbers.\"\"\"\n\nimport string\nfrom itertools import permutations\n\ndef is_prime(n):\n if n == 2: return True\n if n < 2: return False\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0: return False\n else:\n return True\n\ndef solve():\n\n # generate all pandigital numbers\n pans = []\n for n in range(1, 8):\n perms = [\"\".join(p) for p in 
permutations(string.digits[1:n+1])]\n pans.extend(list(set(map(int, perms))))\n\n for num in reversed(sorted(pans)):\n if is_prime(num):\n return num\n\nif __name__ == \"__main__\":\n print(solve())\n","sub_path":"pb041.py","file_name":"pb041.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403087229","text":"\"\"\"\n返回redirect响应对象\n\n- 格式: response = redirect('地址')\n- 参数:\n - location: 方法中的第一个参数,可以是服务器内部地址,也可以外链地址\n - code: 默认值302(重定向的标识)\n\n\n\"\"\"\"\"\nfrom flask import Flask,redirect\n\napp = Flask(__name__)\n\n#班长地址\n@app.route('/address')\ndef banzhang():\n\n #1,调用redirect方法,生成响应体\n response = redirect(\"/inner\")\n #response = redirect(\"http://www.taobao.com\")\n\n return response\n\n#班主任地址\n@app.route('/inner')\ndef inner():\n return \"跳转到了服务器内部!!!\"\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"Flask01/demo08_视图响应重定向.py","file_name":"demo08_视图响应重定向.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"2822026","text":"def cicloHamiltoniano(grafo):\n def genera(ciclo, nodiCiclo):\n ultimoNodo = ciclo[-1] # Ultimo nodo aggiunto\n if len(ciclo) == len(grafo): # nodo foglia\n if 0 in grafo[ultimoNodo]: # Se si chiude il ciclo\n return True\n else: # nodo interno\n for adiacente in grafo[ultimoNodo]:\n if adiacente not in nodiCiclo:\n nodiCiclo.add(adiacente)\n ciclo.append(adiacente)\n\n if genera(ciclo, nodiCiclo):\n return True\n\n ciclo.pop()\n nodiCiclo.remove(adiacente)\n\n\n ciclo = [0]\n nodiCiclo = {0}\n genera(ciclo, nodiCiclo)\n return ciclo if len(ciclo) == len(grafo) else []\n","sub_path":"Progettazione di Algoritmi/canale2/2018_2019/esercizi/backtracking/cicloHamiltoniano.py","file_name":"cicloHamiltoniano.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315317529","text":"from abc import ABC, abstractmethod\nfrom copy import copy\n\nfrom collections import deque\n\nimport pandas as pd\nimport logbook\n\nfrom zipline.finance import cancel_policy\nfrom zipline.finance import trading\nfrom zipline.data import data_portal as dp\nfrom zipline.pipeline.data import equity_pricing\nfrom zipline.protocol import BarData\nfrom zipline.finance import asset_restrictions\nfrom zipline.finance.order import ORDER_STATUS\nfrom zipline.pipeline.loaders import USEquityPricingLoader\nfrom zipline.utils import api_support\n\nfrom pluto.coms.utils import conversions\nfrom pluto.control.controllable import synchronization_states as ss\nfrom pluto.finance.metrics import tracker\nfrom pluto.sources import benchmark_source as bs\nfrom pluto.data.universes import universes\nfrom pluto.pipeline import domain\n\nfrom protos import controllable_pb2\n\nlog = logbook.Logger('Controllable')\n\n\nclass _State(ABC):\n @abstractmethod\n def handle_data(self, algo, current_data, dt):\n raise NotImplementedError\n\n @abstractmethod\n def before_trading_starts(self, current_data, dt):\n raise NotImplementedError\n\n\nclass _Recovering(_State):\n def handle_data(self, algo, current_data, dt):\n # does nothing\n pass\n\n def before_trading_starts(self, algo, current_data):\n pass\n\n\nclass _Ready(_State):\n def handle_data(self, algo, current_data, dt):\n algo.event_manager.handle_data(algo, current_data, dt)\n\n def before_trading_starts(self, algo, current_dt):\n algo.before_trading_start(algo, 
current_dt)\n\n\nclass Controllable(ABC):\n def __init__(self):\n self._metrics_tracker = None\n\n self._data_portal = None\n self._blotter = None\n self._asset_finder = None\n self._algo = None\n\n self._account = None\n self._current_data = None\n\n self._last_sync_time = None\n self._calculate_minute_capital_changes = None\n self._emission_rate = 'daily'\n self._data_frequency = 'daily'\n\n self._capital_change_deltas = {}\n self._capital_changes = {}\n\n # script namespace\n self._namespace = {}\n\n self._calendar = None\n self._universe = None\n self._calendars = None\n self._benchmark = None\n self._benchmark_source = None\n\n self._sessions = pd.Series()\n self._sessions_array = deque()\n\n self._session_id = None\n self._start_dt = None\n self._end_dt = None\n self._look_back = None\n self._current_dt = None\n self._params = None\n\n self._ready = _Ready()\n self._recovering = recovering = _Recovering()\n self._run_state = recovering\n\n @property\n def state(self):\n return self._sync_state_tracker\n\n @property\n def current_dt(self):\n return self._current_dt\n\n @property\n def run_state(self):\n return self._run_state\n\n @run_state.setter\n def run_state(self, value):\n self._run_state = value\n\n @property\n def recovering(self):\n return self._recovering\n\n @property\n def ready(self):\n return self._ready\n\n @property\n def data_portal(self):\n return self._data_portal\n\n @property\n def trading_calendar(self):\n return self._calendar\n\n @property\n def asset_finder(self):\n return self._asset_finder\n\n @property\n def run_params(self):\n return self._params\n\n @property\n def domain(self):\n return self._domain\n\n @property\n def current_data(self):\n return self._current_data\n\n def initialize(self,\n session_id,\n start_dt,\n end_dt,\n universe,\n strategy,\n capital,\n max_leverage,\n data_frequency,\n arena,\n look_back,\n cancel_policy):\n '''\n\n Parameters\n ----------\n session_id: str\n start_dt: pandas.Timestamp\n end_dt: pandas.Timestamp\n universe: str\n strategy: bytes\n capital: float\n max_leverage: float\n data_frequency: str\n arena: str\n look_back: int\n cancel_policy: str\n '''\n\n uni = universes.get_universe(universe)\n\n self._sync_state_tracker = sst = ss.Tracker(uni.calendars)\n calendar = uni.get_calendar(\n start_dt - pd.Timedelta(days=look_back),\n end_dt)\n\n self._session_id = session_id\n self._start_dt = start_dt\n end_dt = calendar.last_session\n self._end_dt = end_dt\n self._last_session_close = calendar.session_close(end_dt)\n self._calendar = calendar\n self._universe = uni\n self._look_back = look_back\n self._data_frequency = data_frequency\n\n if data_frequency == 'minute':\n # always set the emission_rate to a minute if the data_frequency is a minute\n self._emission_rate = 'minute'\n\n def calculate_minute_capital_changes(dt, metrics_tracker, emission_rate):\n self._calculate_capital_changes(\n dt,\n metrics_tracker,\n emission_rate,\n is_interday=False)\n else:\n def calculate_minute_capital_changes(dt, metrics_tracker, emission_rate):\n return []\n\n self._calculate_minute_capital_changes = calculate_minute_capital_changes\n\n self._params = params = trading.SimulationParameters(\n start_dt,\n end_dt,\n calendar,\n capital,\n emission_rate=data_frequency,\n data_frequency=data_frequency,\n arena=arena)\n\n self._sessions = sessions = params.sessions\n\n self._sessions_array = deque(sessions)\n\n # we assume that the data has already been ingested => the controller must first\n # send data. 
An error is raised if there is no data\n self._bundle = bundle = uni.load_bundle()\n self._asset_finder = asset_finder = bundle.asset_finder\n # todo: first trading day should be the start_dt?\n first_trading_day = calendar.first_session\n\n last_session = calendar.last_session\n\n self._data_portal = data_portal = dp.DataPortal(\n asset_finder=asset_finder,\n trading_calendar=calendar,\n first_trading_day=first_trading_day,\n equity_minute_reader=bundle.equity_minute_bar_reader,\n equity_daily_reader=bundle.equity_daily_bar_reader,\n adjustment_reader=bundle.adjustment_reader,\n last_available_session=last_session,\n last_available_minute=calendar.minutes_for_session(last_session)[-1])\n\n # todo: we need to load benchmark returns from a file using an environment\n # todo: create benchmark source instance based on the run mode.\n # (simulation benchmark, live benchmark)\n self._benchmark_source = benchmark_source = bs.SimulationBenchmarkSource(\n self,\n sessions,\n uni.benchmark,\n self._look_back,\n self._emission_rate)\n\n self._metrics_tracker = metrics_tracker = tracker.MetricsTracker(\n benchmark_source,\n capital,\n data_frequency,\n start_dt,\n look_back)\n\n self._blotter = blotter = self._create_blotter(\n session_id,\n uni,\n self._get_cancel_policy(\n cancel_policy))\n\n # todo: asset restrictions are also loaded per session\n self._restrictions = restrictions = asset_restrictions.NoRestrictions()\n\n self._current_data = self._create_bar_data(\n data_portal,\n calendar,\n restrictions,\n data_frequency)\n\n # todo: we need more domains\n\n self._domain = dom = domain.Domain(self)\n\n loader = USEquityPricingLoader.without_fx(\n bundle.equity_daily_bar_reader,\n bundle.adjustment_reader\n )\n\n eq = equity_pricing.EquityPricing.specialize(dom)\n\n # for a single pipeline, we can't have multiple domains...\n\n def choose_loader(column):\n # todo: data_sets should can have \"overlapping\" columns\n # todo: we need more pipeline loaders (fundamentals etc.)\n # as-well as associated data-sets and domains\n if column in eq.columns:\n return loader\n raise ValueError(\n \"No PipelineLoader registered for column %s.\" % column)\n\n def noop(*args, **kwargs):\n pass\n\n namespace = self._namespace\n\n code = compile(strategy, '', 'exec')\n exec(code, namespace)\n\n # the algorithm object is just for exposing methods (api) that are needed by the user\n # (we don't run the algorithm through the algorithm object)\n\n self._algo = algo = self._get_algorithm_class(\n controllable=self,\n params=params,\n blotter=blotter,\n metrics_tracker=metrics_tracker,\n pipeline_loader=choose_loader,\n initialize=namespace.get('initialize', noop),\n handle_data=namespace.get('handle_data', noop),\n before_trading_start=namespace.get('before_trading_start', noop),\n analyze=noop)\n\n sst.state = sst.out_session\n self._run_state = self._ready\n\n api_support.set_algo_instance(algo)\n algo.on_dt_changed(start_dt)\n # initialize the algo (called only once per lifetime)\n algo.initialize(**{}) # todo: kwargs?\n algo.initialized = True\n\n @abstractmethod\n def _get_algorithm_class(self,\n controllable,\n params,\n blotter,\n metrics_tracker,\n pipeline_loader,\n initialize,\n before_trading_start,\n handle_data,\n analyze):\n '''\n Returns\n -------\n pluto.algorithm.TradingAlgorithm\n '''\n raise NotImplementedError(self._get_algorithm_class.__name__)\n\n def _get_cancel_policy(self, name):\n if name == 'never_cancel':\n return cancel_policy.NeverCancel()\n elif name == 'end_of_day':\n return 
cancel_policy.EODCancel()\n\n def minute_end(self, dt):\n return self._get_minute_message(\n dt,\n self._algo,\n self._metrics_tracker,\n self._data_portal,\n self._calendar,\n self._sessions), dt == self._end_dt\n\n def session_start(self, dt):\n end_dt = self._end_dt\n\n if dt > end_dt:\n # reload a new calendar\n start_dt = dt - pd.Timedelta(days=self._look_back)\n self._end_dt = dt\n\n # todo: reload equity minute bar reader etc (reload bundle)\n\n self._calendar = calendar = self._universe.get_calendar(\n start_dt,\n dt,\n cache=True)\n\n self._last_session_close = calendar.session_close(dt)\n\n # fixme: this might not be correct\n self._start_dt = fs = calendar.first_session\n look_back = calendar.last_session - fs\n\n sessions = self._sessions_array\n # reload calendar if it is not the full look-back period\n if look_back < self._look_back:\n self._end_dt = end = fs + pd.Timedelta(days=150)\n self._calendar = calendar = self._universe.get_calendar(fs, end)\n dt = end\n\n sessions.popleft()\n sessions.append(dt)\n\n # updates the sessions (live)\n params = self._params\n self._params = trading.SimulationParameters(\n fs,\n dt,\n calendar,\n params.capital_base,\n params.emission_rate,\n params.data_frequency,\n params.arena\n )\n\n # reload bundle so that it updates the calendar instance\n bundle = self._universe.load_bundle()\n\n self._data_portal = data_portal = dp.DataPortal(\n asset_finder=self._asset_finder,\n trading_calendar=calendar,\n first_trading_day=fs,\n equity_minute_reader=bundle.equity_minute_bar_reader,\n equity_daily_reader=bundle.equity_daily_bar_reader,\n adjustment_reader=bundle.adjustment_reader\n )\n\n self._current_data = self._create_bar_data(\n data_portal,\n calendar,\n self._restrictions,\n params.data_frequency)\n\n metrics_tracker = self._metrics_tracker\n\n capital_changes = self._calculate_capital_changes(\n dt,\n metrics_tracker,\n self._emission_rate,\n is_interday=False)\n\n algo = self._algo\n\n data_portal = self._data_portal\n self._current_dt = dt\n\n algo.on_dt_changed(dt)\n metrics_tracker.handle_market_open(\n dt,\n data_portal,\n self._calendar,\n self._sessions)\n\n # handle any splits that impact any positions or any open orders.\n assets_we_care_about = (\n metrics_tracker.positions.keys() |\n algo.blotter.open_orders.keys()\n )\n\n if assets_we_care_about:\n splits = data_portal.get_splits(assets_we_care_about, dt)\n if splits:\n algo.blotter.process_splits(splits)\n metrics_tracker.handle_splits(splits)\n return capital_changes\n\n def before_trading_starts(self, dt):\n algo = self._algo\n self._current_dt = dt\n algo.on_dt_changed(dt)\n\n api_support.set_algo_instance(algo)\n self._run_state.before_trading_starts(algo, self._current_data)\n\n def bar(self, dt):\n metrics_tracker = self._metrics_tracker\n self._current_dt = dt\n\n algo = self._algo\n algo.on_dt_changed(dt)\n blotter = self._blotter\n\n capital_changes = self._calculate_minute_capital_changes(\n dt,\n metrics_tracker,\n self._emission_rate)\n\n # todo: assets must be restricted to the provided exchanges\n # self._restrictions.set_exchanges(exchanges/calendars)\n current_data = self._current_data\n\n # todo: this is where we update everything (ledger etc.)\n new_transactions, new_commissions, closed_orders = \\\n blotter.get_transactions(current_data)\n\n for transaction in new_transactions:\n metrics_tracker.process_transaction(transaction)\n\n order = blotter.orders[transaction.order_id]\n metrics_tracker.process_order(order)\n\n for commission in new_commissions:\n 
metrics_tracker.process_commission(commission)\n\n blotter.prune_orders(closed_orders)\n\n # handle_data is not called while in recovery\n self._run_state.handle_data(algo, current_data, dt)\n self._sync_last_sale_prices(metrics_tracker, dt)\n\n # grab any new orders from the blotter, then clear the list.\n # this includes cancelled orders.\n\n # if we have any new orders, record them so that we know\n # in what perf period they were placed.\n for new_order in blotter.new_orders:\n metrics_tracker.process_order(new_order)\n\n blotter.new_orders = []\n\n return capital_changes\n\n def session_end(self, dt):\n metrics_tracker = self._metrics_tracker\n\n positions = metrics_tracker.positions\n position_assets = self._asset_finder.retrieve_all(positions)\n blotter = self._blotter\n data_portal = self._data_portal\n algo = self._algo\n\n self._cleanup_expired_assets(\n dt,\n data_portal,\n blotter,\n metrics_tracker,\n position_assets)\n\n # todo execute cancellation policy on trade end events not session end\n # blotter.execute_cancel_policy(sim_engine.SESSION_END)\n algo.validate_account_controls()\n return self._get_daily_message(\n dt,\n algo,\n metrics_tracker,\n data_portal,\n self._calendar,\n self._sessions\n ), dt == self._last_session_close\n\n def get_state(self, dt):\n metrics_tracker = self._metrics_tracker\n return controllable_pb2.ControllableState(\n session_id=self._session_id,\n session_state=self._sync_state_tracker.state.name,\n capital=metrics_tracker.portfolio.cash,\n max_leverage=metrics_tracker.account.leverage,\n universe=self._universe.name,\n look_back=self._look_back,\n data_frequency=self._data_frequency,\n start=conversions.to_proto_timestamp(self._start_dt),\n end=conversions.to_proto_timestamp(self._end_dt),\n checkpoint=conversions.to_proto_timestamp(dt),\n metrics_tracker_state=metrics_tracker.get_state(dt)\n ).SerializeToString()\n\n def restore_state(self, state, strategy):\n self.initialize(\n state.session_id,\n conversions.to_datetime(state.start_dt),\n conversions.to_datetime(state.end_dt),\n state.universe,\n strategy,\n state.capital,\n state.max_leverage,\n state.data_frequency,\n state.mode,\n state.look_back)\n\n ss.set_state(state.session_state, self._sync_state_tracker)\n self._current_dt = conversions.to_datetime(state.checkpoint)\n\n def stop(self, dt):\n # todo: liquidate all positions\n pass\n\n def update_blotter(self, broker_data):\n self._update_blotter(self._blotter, broker_data)\n\n @abstractmethod\n def _update_blotter(self, blotter, broker_data):\n raise NotImplementedError\n\n def update_account(self, main_account):\n self._update_account(self._blotter, main_account)\n\n @abstractmethod\n def _update_account(self, blotter, main_account):\n raise NotImplementedError\n\n def update_capital(self, dt, capital):\n self._capital_changes = {dt: {'type': 'target', 'value': capital}}\n\n @abstractmethod\n def _create_blotter(self, session_id, universe, cancel_policy):\n raise NotImplementedError(self._create_blotter.__name__)\n\n def _get_daily_message(self,\n dt,\n algo,\n metrics_tracker,\n data_portal,\n trading_calendar,\n sessions):\n '''\n\n Parameters\n ----------\n dt\n algo: pluto.algorithm.TradingAlgorithm\n metrics_tracker: pluto.finance.metrics.tracker.MetricsTracker\n data_portal: zipline.data.data_portal.DataPortal\n trading_calendar: trading_calendars.TradingCalendar\n\n Returns\n -------\n\n '''\n \"\"\"\n Get a perf message for the given datetime.\n \"\"\"\n\n self._sync_last_sale_prices(metrics_tracker, dt)\n perf_message = 
metrics_tracker.handle_market_close(\n dt,\n data_portal,\n trading_calendar,\n sessions\n )\n perf_message['daily_perf']['recorded_vars'] = algo.recorded_vars\n return perf_message\n\n def _get_minute_message(self,\n dt,\n algo,\n metrics_tracker,\n data_portal,\n trading_calendar,\n sessions):\n '''\n\n Parameters\n ----------\n dt\n algo\n metrics_tracker: pluto.finance.metrics.tracker.MetricsTracker\n data_portal\n trading_calendar: trading_calendars.TradingCalendar\n\n Returns\n -------\n\n '''\n \"\"\"\n Get a perf message for the given datetime.\n \"\"\"\n\n self._sync_last_sale_prices(metrics_tracker, dt)\n rvars = algo.recorded_vars\n\n minute_message = metrics_tracker.handle_minute_close(\n dt,\n data_portal,\n trading_calendar,\n sessions\n )\n\n minute_message['minute_perf']['recorded_vars'] = rvars\n return minute_message\n\n def get_current_dt(self):\n return self._current_dt\n\n def _create_bar_data(self, data_portal, calendar, restrictions, data_frequency):\n return BarData(\n data_portal=data_portal,\n simulation_dt_func=self.get_current_dt,\n data_frequency=data_frequency,\n trading_calendar=calendar,\n restrictions=restrictions\n )\n\n def _cleanup_expired_assets(self,\n dt,\n data_portal,\n blotter,\n metrics_tracker,\n position_assets):\n '''\n\n Parameters\n ----------\n dt\n data_portal\n blotter\n metrics_tracker: pluto.finance.metrics.tracker.MetricsTracker\n position_assets\n\n Returns\n -------\n\n '''\n \"\"\"\n Clear out any assets that have expired before starting a new sim day.\n\n Performs two functions:\n\n 1. Finds all assets for which we have open orders and clears any\n orders whose assets are on or after their auto_close_date.\n\n 2. Finds all assets for which we have positions and generates\n close_position events for any assets that have reached their\n auto_close_date.\n \"\"\"\n\n def past_auto_close_date(asset):\n acd = asset.auto_close_date\n return acd is not None and acd <= dt\n\n # Remove positions in any sids that have reached their auto_close date.\n assets_to_clear = [\n asset for asset in position_assets\n if past_auto_close_date(asset)]\n\n for asset in assets_to_clear:\n metrics_tracker.process_close_position(asset, dt, data_portal)\n\n # Remove open orders for any sids that have reached their auto close\n # date. These orders get processed immediately because otherwise they\n # would not be processed until the first bar of the next day.\n\n assets_to_cancel = [\n asset for asset in blotter._open_orders\n if past_auto_close_date(asset)\n ]\n for asset in assets_to_cancel:\n blotter.cancel_all_orders_for_asset(asset)\n\n # Make a copy here so that we are not modifying the list that is being\n # iterated over.\n for order in copy(blotter.new_orders):\n if order.status == ORDER_STATUS.CANCELLED:\n metrics_tracker.process_order(order)\n blotter.new_orders.remove(order)\n\n def sync_last_sale_prices(self):\n self._sync_last_sale_prices(self._metrics_tracker, self._current_dt)\n\n def _sync_last_sale_prices(self, metrics_tracker, dt):\n \"\"\"Sync the last sale prices on the metrics tracker to a given\n datetime.\n\n Parameters\n ----------\n dt : datetime\n The time to sync the prices to.\n\n Notes\n -----\n This call is cached by the datetime. 
Repeated calls in the same bar\n are cheap.\n \"\"\"\n if dt != self._last_sync_time:\n metrics_tracker.sync_last_sale_prices(\n dt,\n self._data_portal,\n )\n\n self._last_sync_time = dt\n\n def _calculate_capital_changes(self,\n dt,\n metrics_tracker,\n emission_rate,\n is_interday,\n portfolio_value_adjustment=0.0):\n \"\"\"\n If there is a capital change for a given dt, this means that the change\n occurs before `handle_data` on the given dt. In the case of the\n change being a target value, the change will be computed on the\n portfolio value according to prices at the given dt\n\n `portfolio_value_adjustment`, if specified, will be removed from the\n portfolio_value of the cumulative performance when calculating deltas\n from target capital changes.\n \"\"\"\n try:\n capital_change = self._capital_changes[dt]\n except KeyError:\n return\n\n if capital_change['type'] == 'target':\n target = capital_change['value']\n capital_change_amount = (\n target -\n (metrics_tracker.portfolio.portfolio_value -\n portfolio_value_adjustment))\n\n log.info('Processing capital change to target %s at %s. Capital '\n 'change delta is %s' % (target, dt,\n capital_change_amount))\n elif capital_change['type'] == 'delta':\n target = None\n capital_change_amount = capital_change['value']\n log.info('Processing capital change of delta %s at %s'\n % (capital_change_amount, dt))\n else:\n log.error(\"Capital change %s does not indicate a valid type \"\n \"('target' or 'delta')\" % capital_change)\n return\n\n self._capital_change_deltas.update({dt: capital_change_amount})\n metrics_tracker.capital_change(capital_change_amount)\n\n return {\n 'capital_change':\n {'date': dt,\n 'type': 'cash',\n 'target': target,\n 'delta': capital_change_amount}\n }\n","sub_path":"pluto/control/controllable/controllable.py","file_name":"controllable.py","file_ext":"py","file_size_in_byte":26006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60335630","text":"\"\"\" Tilengine python platformer demo \"\"\"\n# pylint: disable=C0103\n# pylint: disable=W0614\n# pylint: disable=W0312\nimport xml.etree.ElementTree as ET\nfrom math import sin, radians\nfrom tilengine import *\nfrom sound import Sound\n\n# constants\nWIDTH = 640\nHEIGHT = 360\nASSETS_PATH = \"assets\"\nSKY_COLORS = (Color.fromstring(\"#78D7F2\"), Color.fromstring(\"#E2ECF2\"))\n\ndef load_objects(file_name, layer_name, first_gid):\n\t\"\"\" loads tiles in object layer from a tmx file.\n\tReturns list of Item objects \"\"\"\n\ttree = ET.parse(file_name)\n\troot = tree.getroot()\n\tfor child in root.findall(\"objectgroup\"):\n\t\tname = child.get(\"name\")\n\t\tif name == layer_name:\n\t\t\titem_list = list()\n\t\t\tfor item in child.findall(\"object\"):\n\t\t\t\tgid = item.get(\"gid\")\n\t\t\t\tif gid is not None:\n\t\t\t\t\tx = item.get(\"x\")\n\t\t\t\t\ty = item.get(\"y\")\n\t\t\t\t\titem_list.append(Item(int(gid) - first_gid, int(x), int(y)))\n\t\t\treturn item_list\n\treturn None\n\n# Game management definitions *************************************************\n\nclass State:\n\t\"\"\" player states \"\"\"\n\tUndefined, Idle, Run, Jump, Hit = range(5)\n\nclass Direction:\n\t\"\"\" player orientations \"\"\"\n\tRight, Left = range(2)\n\nclass Tiles:\n\t\"\"\" types of tiles for sprite-terrain collision detection \"\"\"\n\tEmpty, Floor, Gem, Wall, SlopeUp, SlopeDown, InnerSlopeUp, InnerSlopeDown = range(8)\n\nclass Medium:\n\t\"\"\" types of environments \"\"\"\n\tFloor, Air, Ladder, Water = range(4)\n\nclass 
Rectangle(object):\n\t\"\"\" aux rectangle \"\"\"\n\tdef __init__(self, x, y, w, h):\n\t\tself.width = w\n\t\tself.height = h\n\t\tself.update_position(x, y)\n\n\tdef update_position(self, x, y):\n\t\tself.x1 = x\n\t\tself.y1 = y\n\t\tself.x2 = x + self.width\n\t\tself.y2 = y + self.height\n\n\tdef check_point(self, x, y):\n\t\t\"\"\" returns if point is contained in rectangle \"\"\"\n\t\treturn self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2\n\nclass Item(object):\n\t\"\"\" Generic item declared in tilemap object layer awaiting to spawn \"\"\"\n\tOpossum, Eagle, Frog = range(3)\n\tdef __init__(self, item_type, x, y):\n\t\tself.type = item_type\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.alive = False\n\n\tdef try_spawn(self, x):\n\t\t\"\"\" Tries to spawn an active game object depending on screen position and item type \"\"\"\n\t\tif self.alive is False and x < self.x < x + WIDTH:\n\t\t\tself.alive = True\n\t\t\tif self.type is Item.Eagle:\n\t\t\t\tEagle(self, self.x, self.y - Eagle.size[1])\n\t\t\telif self.type is Item.Opossum:\n\t\t\t\tOpossum(self, self.x, self.y - Opossum.size[1])\n\nclass Actor(object):\n\t\"\"\" Generic active game entity base class \"\"\"\n\tspriteset = None\n\tdef __init__(self, item_ref, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.sprite = engine.sprites[engine.get_available_sprite()]\n\t\tself.animation = engine.animations[engine.get_available_animation()]\n\t\tself.sprite.setup(self.spriteset)\n\t\tself.item = item_ref\n\t\tactors.append(self)\n\n\tdef __del__(self):\n\t\tself.animation.disable()\n\t\tself.sprite.disable()\n\t\tif self.item is not None:\n\t\t\tself.item.alive = False\n\n\tdef kill(self):\n\t\t\"\"\" definitive kill of active game entity, removing from spawn-able item list too \"\"\"\n\t\tworld.objects.remove(self.item)\n\t\tself.item = None\n\t\tactors.remove(self)\n\nclass Player(Actor):\n\t\"\"\" main player entity \"\"\"\n\tsize = (24, 36)\n\txspeed_delta = 12\n\txspeed_limit = 200\n\tyspeed_delta = 10\n\tyspeed_limit = 350\n\tjspeed_delta = 5\n\n\tdef __init__(self):\n\t\tif type(self).spriteset is None:\n\t\t\ttype(self).spriteset = Spriteset.fromfile(\"hero\")\n\n\t\tActor.__init__(self, None, 60, 188)\n\t\tself.state = State.Undefined\n\t\tself.direction = Direction.Right\n\t\tself.xspeed = 0\n\t\tself.yspeed = 0\n\t\tself.set_idle()\n\t\tself.sprite.set_position(self.x, self.y)\n\t\tself.width = self.size[0]\n\t\tself.height = self.size[1]\n\t\tself.medium = Medium.Floor\n\t\tself.jump = False\n\t\tself.immunity = 0\n\t\tself.rectangle = Rectangle(0, 0, self.width, self.height)\n\t\tself.palettes = (self.spriteset.palette, Palette.fromfile(\"hero_alt.act\"))\n\n\tdef set_idle(self):\n\t\t\"\"\" sets idle state, idempotent \"\"\"\n\t\tif self.state is not State.Idle:\n\t\t\tself.animation.set_sprite_animation(self.sprite.index, seq_pack.sequences[\"seq_idle\"], 0)\n\t\t\tself.state = State.Idle\n\t\t\tself.xspeed = 0\n\n\tdef set_running(self):\n\t\t\"\"\" sets running state, idempotent \"\"\"\n\t\tif self.state is not State.Run:\n\t\t\tself.animation.set_sprite_animation(self.sprite.index, seq_pack.sequences[\"seq_run\"], 0)\n\t\t\tself.state = State.Run\n\n\tdef set_jump(self):\n\t\t\"\"\" sets jump state, idempotent \"\"\"\n\t\tif self.state is not State.Jump:\n\t\t\tself.yspeed = -280\n\t\t\tself.animation.set_sprite_animation(self.sprite.index, seq_pack.sequences[\"seq_jump\"], 0)\n\t\t\tself.state = State.Jump\n\t\t\tself.medium = Medium.Air\n\t\t\tsounds.play(\"jump\", 0)\n\n\tdef set_bounce(self):\n\t\t\"\"\" bounces on 
top of an enemy \"\"\"\n\t\tself.yspeed = -150\n\t\tself.state = State.Jump\n\t\tself.medium = Medium.Air\n\n\tdef set_hit(self, enemy_direction):\n\t\t\"\"\" sets hit animation by an enemy \"\"\"\n\t\tself.direction = enemy_direction\n\t\tif self.direction is Direction.Left:\n\t\t\tself.xspeed = -self.xspeed_limit\n\t\t\tself.sprite.set_flags(0)\n\t\telse:\n\t\t\tself.xspeed = self.xspeed_limit\n\t\t\tself.sprite.set_flags(Flags.FLIPX)\n\t\tself.yspeed = -150\n\t\tself.state = State.Hit\n\t\tself.medium = Medium.Air\n\t\tself.animation.disable()\n\t\tself.sprite.set_picture(12)\n\t\tself.immunity = 90\n\t\tsounds.play(\"hurt\", 0)\n\n\tdef update_direction(self):\n\t\t\"\"\" updates sprite facing depending on direction \"\"\"\n\t\tif window.get_input(Input.RIGHT):\n\t\t\tdirection = Direction.Right\n\t\telif window.get_input(Input.LEFT):\n\t\t\tdirection = Direction.Left\n\t\telse:\n\t\t\tdirection = self.direction\n\t\tif self.direction is not direction:\n\t\t\tself.direction = direction\n\t\t\tif self.direction is Direction.Right:\n\t\t\t\tself.sprite.set_flags(0)\n\t\t\telse:\n\t\t\t\tself.sprite.set_flags(Flags.FLIPX)\n\n\tdef update_floor(self):\n\t\t\"\"\" process input when player is in floor medium \"\"\"\n\t\tif window.get_input(Input.RIGHT) and self.xspeed < Player.xspeed_limit:\n\t\t\tself.xspeed += self.xspeed_delta\n\t\t\tself.set_running()\n\t\telif window.get_input(Input.LEFT) and self.xspeed > -Player.xspeed_limit:\n\t\t\tself.xspeed -= Player.xspeed_delta\n\t\t\tself.set_running()\n\t\telif abs(self.xspeed) < Player.xspeed_delta:\n\t\t\tself.xspeed = 0\n\t\telif self.xspeed > 0:\n\t\t\tself.xspeed -= Player.xspeed_delta\n\t\telif self.xspeed < 0:\n\t\t\tself.xspeed += Player.xspeed_delta\n\t\tif self.xspeed == 0:\n\t\t\tself.set_idle()\n\t\tif window.get_input(Input.A):\n\t\t\tif self.jump is not True:\n\t\t\t\tplayer.set_jump()\n\t\t\t\tself.jump = True\n\t\telse:\n\t\t\tself.jump = False\n\n\tdef update_air(self):\n\t\t\"\"\" process input when player is in air medium \"\"\"\n\t\tif window.get_input(Input.RIGHT) and self.xspeed < Player.xspeed_limit:\n\t\t\tself.xspeed += self.jspeed_delta\n\t\telif window.get_input(Input.LEFT) and self.xspeed > -Player.xspeed_limit:\n\t\t\tself.xspeed -= self.jspeed_delta\n\n\tdef check_left(self, x, y):\n\t\t\"\"\" checks/adjusts environment collision when player is moving to the left \"\"\"\n\t\tworld.foreground.get_tile(x, y + 4, tiles_info[0])\n\t\tworld.foreground.get_tile(x, y + 18, tiles_info[1])\n\t\tworld.foreground.get_tile(x, y + 34, tiles_info[2])\n\t\tif Tiles.Wall in (tiles_info[0].type, tiles_info[1].type, tiles_info[2].type):\n\t\t\tself.x = (tiles_info[0].col + 1) * 16\n\t\t\tself.xspeed = 0\n\t\tworld.pick_gem(tiles_info)\n\n\tdef check_right(self, x, y):\n\t\t\"\"\" checks/adjusts environment collision when player is moving to the right \"\"\"\n\t\tworld.foreground.get_tile(x + self.width, y + 4, tiles_info[0])\n\t\tworld.foreground.get_tile(x + self.width, y + 18, tiles_info[1])\n\t\tworld.foreground.get_tile(x + self.width, y + 34, tiles_info[2])\n\t\tif Tiles.Wall in (tiles_info[0].type, tiles_info[1].type, tiles_info[2].type):\n\t\t\tself.x = (tiles_info[0].col * 16) - self.width\n\t\t\tself.xspeed = 0\n\t\tworld.pick_gem(tiles_info)\n\n\tdef check_top(self, x, y):\n\t\t\"\"\" checks/adjusts environment collision when player is jumping \"\"\"\n\t\tworld.foreground.get_tile(x + 0, y, tiles_info[0])\n\t\tworld.foreground.get_tile(x + 12, y, tiles_info[1])\n\t\tworld.foreground.get_tile(x + 24, y, 
tiles_info[2])\n\t\tif Tiles.Wall in (tiles_info[0].type, tiles_info[1].type, tiles_info[2].type):\n\t\t\tself.y = (tiles_info[0].row + 1) * 16\n\t\t\tself.yspeed = 0\n\t\tworld.pick_gem(tiles_info)\n\n\tdef check_bottom(self, x, y):\n\t\t\"\"\" checks/adjusts environment collision when player is falling or running \"\"\"\n\t\tground = False\n\n\t\tworld.foreground.get_tile(x + 0, y + self.height, tiles_info[0])\n\t\tworld.foreground.get_tile(x + 12, y + self.height, tiles_info[1])\n\t\tworld.foreground.get_tile(x + 24, y + self.height, tiles_info[2])\n\t\tworld.foreground.get_tile(x + 12, y + self.height - 1, tiles_info[3])\n\n\t\t# check up slope\n\t\tif tiles_info[3].type is Tiles.SlopeUp:\n\t\t\tslope_height = 16 - tiles_info[3].xoffset\n\t\t\tif self.yspeed >= 0 and tiles_info[3].yoffset > slope_height:\n\t\t\t\tself.y -= (tiles_info[3].yoffset - slope_height)\n\t\t\t\tground = True\n\n\t\t# check down slope\n\t\telif tiles_info[3].type is Tiles.SlopeDown:\n\t\t\tslope_height = tiles_info[3].xoffset + 1\n\t\t\tif self.yspeed >= 0 and tiles_info[3].yoffset > slope_height:\n\t\t\t\tself.y -= (tiles_info[3].yoffset - slope_height)\n\t\t\t\tground = True\n\n\t\t# check inner slope (avoid falling between staircased slopes)\n\t\telif tiles_info[1].type is Tiles.InnerSlopeUp:\n\t\t\tif self.xspeed > 0:\n\t\t\t\tself.y = (tiles_info[1].row * 16) - self.height - 1\n\t\t\telse:\n\t\t\t\tself.x -= 1\n\t\t\tground = True\n\n\t\telif tiles_info[1].type is Tiles.InnerSlopeDown:\n\t\t\tif self.xspeed > 0:\n\t\t\t\tself.x += 1\n\t\t\telse:\n\t\t\t\tself.y = (tiles_info[1].row * 16) - self.height - 1\n\t\t\tground = True\n\n\t\t# check regular floor\n\t\telif Tiles.Floor in (tiles_info[0].type, tiles_info[1].type, tiles_info[2].type):\n\t\t\tself.y = (tiles_info[0].row * 16) - self.height\n\t\t\tground = True\n\n\t\t# adjust to ground\n\t\tif ground is True:\n\t\t\tself.yspeed = 0\n\t\t\tif self.medium is Medium.Air:\n\t\t\t\tself.medium = Medium.Floor\n\t\t\t\tif self.xspeed == 0:\n\t\t\t\t\tself.set_idle()\n\t\t\t\telse:\n\t\t\t\t\tself.set_running()\n\t\telse:\n\t\t\tself.medium = Medium.Air\n\t\tworld.pick_gem(tiles_info)\n\n\tdef check_jump_on_enemies(self, x, y):\n\t\t\"\"\" checks jumping above an enemy. 
If so, kills it, bounces and spawns a death animation \"\"\"\n\t\tpx, py = x+self.width/2, y+self.height\n\t\tfor actor in actors:\n\t\t\tactor_type = type(actor)\n\t\t\tif actor_type in (Eagle, Opossum):\n\t\t\t\tex, ey = actor.x + actor.size[0]/2, actor.y\n\t\t\t\tif abs(px - ex) < 25 and 5 < py - ey < 20:\n\t\t\t\t\tactor.kill()\n\t\t\t\t\tself.set_bounce()\n\t\t\t\t\tEffect(actor.x, actor.y - 10, spriteset_death, seq_pack.sequences[\"seq_death\"])\n\t\t\t\t\tsounds.play(\"crush\", 2)\n\t\treturn\n\n\tdef check_hit(self, x, y, direction):\n\t\t\"\"\" registers a hit when the player is hurt by an enemy at the given position and direction \"\"\"\n\t\tif self.immunity == 0 and self.rectangle.check_point(x, y):\n\t\t\tself.set_hit(direction)\n\n\tdef update(self):\n\t\t\"\"\" process input and updates state once per frame \"\"\"\n\t\toldx = self.x\n\t\toldy = self.y\n\n\t\t# update immunity (use ==/!= here: 'is' identity checks on ints are unreliable)\n\t\tif self.immunity != 0:\n\t\t\tpal_index0 = (self.immunity >> 2) & 1\n\t\t\tself.immunity -= 1\n\t\t\tpal_index1 = (self.immunity >> 2) & 1\n\t\t\tif self.immunity == 0:\n\t\t\t\tpal_index1 = 0\n\t\t\tif pal_index0 != pal_index1:\n\t\t\t\tself.sprite.set_palette(self.palettes[pal_index1])\n\n\t\t# update sprite facing\n\t\tself.update_direction()\n\n\t\t# user input: move character depending on medium\n\t\tif self.medium is Medium.Floor:\n\t\t\tself.update_floor()\n\t\telif self.medium is Medium.Air:\n\t\t\tif self.state is not State.Hit:\n\t\t\t\tself.update_air()\n\t\t\tif self.yspeed < Player.yspeed_limit:\n\t\t\t\tself.yspeed += Player.yspeed_delta\n\n\t\tself.x += (self.xspeed / 100.0)\n\t\tself.y += (self.yspeed / 100.0)\n\n\t\t# clip to world limits\n\t\tif self.x < 0.0:\n\t\t\tself.x = 0.0\n\t\telif self.x > world.foreground.width - self.width:\n\t\t\tself.x = world.foreground.width\t- self.width\n\n\t\t# check and fix 4-way collisions depending on motion direction\n\t\tintx = int(self.x)\n\t\tinty = int(self.y)\n\t\tif self.yspeed < 0:\n\t\t\tself.check_top(intx, inty)\n\t\telif self.yspeed >= 0:\n\t\t\tself.check_bottom(intx, inty)\n\t\tif self.xspeed < 0:\n\t\t\tself.check_left(intx, inty)\n\t\telif self.xspeed > 0:\n\t\t\tself.check_right(intx, inty)\n\t\tif self.yspeed > 0:\n\t\t\tself.check_jump_on_enemies(intx, inty)\n\n\t\tif self.x != oldx or self.y != oldy:\n\t\t\tself.rectangle.update_position(int(self.x), int(self.y))\n\t\t\tself.sprite.set_position(int(self.x) - world.x, int(self.y))\n\t\treturn True\n\n\nclass Eagle(Actor):\n\t\"\"\" Flying enemy \"\"\"\n\tsize = (40, 40)\n\n\tdef __init__(self, item_ref, x, y):\n\t\tif type(self).spriteset is None:\n\t\t\ttype(self).spriteset = Spriteset.fromfile(\"enemy_eagle\")\n\n\t\tActor.__init__(self, item_ref, x, y)\n\t\tself.frame = 0\n\t\tself.base_y = y\n\t\tself.xspeed = -3\n\t\tself.direction = Direction.Left\n\t\tself.animation.set_sprite_animation(self.sprite.index, seq_pack.sequences[\"seq_eagle\"], 0)\n\t\tself.collision_points = (4, 20, 36)\n\n\tdef update(self):\n\t\t\"\"\" Update once per frame \"\"\"\n\t\tself.x += self.xspeed\n\t\tself.y = self.base_y + int(sin(radians(self.frame*4))*15)\n\t\tself.frame += 1\n\t\tif self.frame == 10:\n\t\t\tsounds.play(\"eagle\", 3)\n\t\tscreen_x = self.x - world.x\n\n\t\tif self.direction is Direction.Left:\n\t\t\tif screen_x < 10:\n\t\t\t\tself.direction = Direction.Right\n\t\t\t\tself.xspeed = -self.xspeed\n\t\t\t\tself.sprite.set_flags(Flags.FLIPX)\n\t\t\t\tsounds.play(\"eagle\", 3)\n\t\t\telse:\n\t\t\t\tfor point in self.collision_points:\n\t\t\t\t\tplayer.check_hit(self.x, self.y + point, 
self.direction)\n\t\telse:\n\t\t\tif screen_x > 590:\n\t\t\t\tself.direction = Direction.Left\n\t\t\t\tself.xspeed = -self.xspeed\n\t\t\t\tself.sprite.set_flags(0)\n\t\t\t\tsounds.play(\"eagle\", 3)\n\t\t\telse:\n\t\t\t\tfor point in self.collision_points:\n\t\t\t\t\tplayer.check_hit(self.x + self.size[0], self.y + point, self.direction)\n\t\tself.sprite.set_position(screen_x, self.y)\n\t\treturn True\n\nclass Opossum(Actor):\n\t\"\"\" Floor enemy. Chases the player in an 80 pixel radius \"\"\"\n\tsize = (36, 24)\n\n\tdef __init__(self, item_ref, x, y):\n\t\tif type(self).spriteset is None:\n\t\t\ttype(self).spriteset = Spriteset.fromfile(\"enemy_opossum\")\n\n\t\tActor.__init__(self, item_ref, x, y)\n\t\tself.xspeed = -2\n\t\tself.direction = Direction.Left\n\t\tself.animation.set_sprite_animation(self.sprite.index, seq_pack.sequences[\"seq_opossum\"], 0)\n\n\tdef update(self):\n\t\t\"\"\" Update once per frame \"\"\"\n\t\tself.x += self.xspeed\n\t\tif self.direction is Direction.Left:\n\t\t\tif self.x - player.x < -80:\n\t\t\t\tself.direction = Direction.Right\n\t\t\t\tself.xspeed = -self.xspeed\n\t\t\t\tself.sprite.set_flags(Flags.FLIPX)\n\t\t\telse:\n\t\t\t\tplayer.check_hit(self.x, self.y + self.size[1]//2, self.direction)\n\t\telse:\n\t\t\tif self.x - player.x > 80 and self.direction is Direction.Right:\n\t\t\t\tself.direction = Direction.Left\n\t\t\t\tself.xspeed = -self.xspeed\n\t\t\t\tself.sprite.set_flags(0)\n\t\t\telse:\n\t\t\t\tplayer.check_hit(self.x + self.size[0], self.y + self.size[1]//2, self.direction)\n\n\t\tself.sprite.set_position(self.x - world.x, self.y)\n\t\treturn True\n\nclass Effect(Actor):\n\t\"\"\" placeholder for simple sprite effects \"\"\"\n\tdef __init__(self, x, y, spriteset, sequence):\n\t\tself.spriteset = spriteset\n\t\tActor.__init__(self, None, x, y)\n\t\tself.animation.set_sprite_animation(self.sprite.index, sequence, 1)\n\n\tdef update(self):\n\t\t\"\"\" updates effect state once per frame \"\"\"\n\t\tself.sprite.set_position(self.x - world.x, self.y)\n\t\tif self.animation.get_state() is False:\n\t\t\treturn False\n\t\treturn True\n\nclass World(object):\n\t\"\"\" world/play field entity \"\"\"\n\tdef __init__(self):\n\t\tself.foreground = engine.layers[0]\n\t\tself.background = engine.layers[1]\n\t\tself.clouds = 0.0\n\t\tself.foreground.setup(Tilemap.fromfile(\"layer_foreground.tmx\"))\n\t\tself.background.setup(Tilemap.fromfile(\"layer_background.tmx\"))\n\t\tself.x = 0\n\t\tself.x_max = self.foreground.width - WIDTH\n\t\tself.objects = load_objects(\"assets/layer_foreground.tmx\", \"Capa de Objetos 1\", 973)\n\t\tengine.set_background_color(self.background.tilemap)\n\t\tactors.append(self)\n\n\tdef pick_gem(self, tiles_list):\n\t\t\"\"\" updates tilemap when player picks a gem \"\"\"\n\t\ttile = Tile()\n\t\ttile.index = 0\n\t\tfor tile_info in tiles_list:\n\t\t\tif tile_info.type is Tiles.Gem:\n\t\t\t\tself.foreground.tilemap.set_tile(tile_info.row, tile_info.col, tile)\n\t\t\t\tEffect(tile_info.col*16, tile_info.row*16, spriteset_vanish, seq_pack.sequences[\"seq_vanish\"])\n\t\t\t\tsounds.play(\"pickup\", 1)\n\t\t\t\tbreak\n\t\tdel tile\n\n\tdef update(self):\n\t\t\"\"\" updates world state once per frame \"\"\"\n\t\toldx = self.x\n\n\t\tif player.x < 240:\n\t\t\tself.x = 0\n\t\telse:\n\t\t\tself.x = int(player.x - 240)\n\t\tif self.x > self.x_max:\n\t\t\tself.x = self.x_max\n\t\tself.clouds += 0.1\n\n\t\tif self.x != oldx:\t# '!=' instead of 'is not': identity comparison of ints is unreliable\n\t\t\tself.foreground.set_position(self.x, 0)\n\t\t\tself.background.set_position(self.x//8, 0)\n\n\t\t# spawn new 
entities from object list\n\t\tfor item in self.objects:\n\t\t\titem.try_spawn(self.x)\n\n\t\treturn True\n\n# Raster effect related functions *********************************************\n\ndef lerp(pos_x, x0, x1, fx0, fx1):\n\t\"\"\" integer linear interpolation \"\"\"\n\treturn fx0 + (fx1 - fx0) * (pos_x - x0) // (x1 - x0)\n\ndef interpolate_color(x, x1, x2, color1, color2):\n\t\"\"\" linear interpolation between two Color objects \"\"\"\n\tr = lerp(x, x1, x2, color1.r, color2.r)\n\tg = lerp(x, x1, x2, color1.g, color2.g)\n\tb = lerp(x, x1, x2, color1.b, color2.b)\n\treturn Color(r, g, b)\n\ndef raster_effect(line):\n\t\"\"\" raster effect callback, called every rendered scanline \"\"\"\n\tif 0 <= line <= 128:\n\t\tcolor = interpolate_color(line, 0, 128, SKY_COLORS[0], SKY_COLORS[1])\n\t\tengine.set_background_color(color)\n\n\tif line == 0:\n\t\tworld.background.set_position(int(world.clouds), 0)\n\n\telif 160 <= line <= 208:\n\t\tpos1 = world.x//10\n\t\tpos2 = world.x//3\n\t\txpos = lerp(line, 160, 208, pos1, pos2)\n\t\tworld.background.set_position(xpos, 0)\n\n\telif line == 256:\n\t\tworld.background.set_position(world.x//2, 0)\n\n# init engine\nengine = Engine.create(WIDTH, HEIGHT, 2, 32, 32)\nengine.set_load_path(\"assets\")\n\n# load spritesets for animation effects\nspriteset_vanish = Spriteset.fromfile(\"effect_vanish\")\nspriteset_death = Spriteset.fromfile(\"effect_death\")\n\n# load sequences\nseq_pack = SequencePack.fromfile(\"sequences.sqx\")\ntiles_info = (TileInfo(), TileInfo(), TileInfo(), TileInfo())\n\n# set raster callback\nengine.set_raster_callback(raster_effect)\n\nactors = list()\t\t# list that contains every active game entity\nworld = World()\t\t# world/level entity\nplayer = Player() # player entity\n\n# Sound effects\nsounds = Sound(4, \"assets\")\nsounds.load(\"jump\", \"jump.wav\")\nsounds.load(\"crush\", \"crunch.wav\")\nsounds.load(\"pickup\", \"pickup.wav\")\nsounds.load(\"hurt\", \"hurt.wav\")\nsounds.load(\"eagle\", \"vulture.wav\")\n\n# window creation & main loop\nwindow = Window.create()\nwhile window.process():\n\n\t# update active entities list, iterating over a copy so removing dead actors does not skip entries\n\tfor actor in actors[:]:\n\t\tif not actor.update():\n\t\t\tactors.remove(actor)\n","sub_path":"platformer.py","file_name":"platformer.py","file_ext":"py","file_size_in_byte":18157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"643018145","text":"list1 =[]\nlistall =[]\ndict1 = {}\nd ={}\nwith open(\"test2.txt\",\"r\") as f:\n    for i in range(0,4):\n        header = f.readline()  # readline() takes no (start, stop) arguments; read one line per iteration\n        keys = []\n        list1 = header.split(\" \")\n        for token in list1:  # renamed from 'i' to avoid shadowing the outer loop variable\n            if token:\n                keys.append(token)\n        listall.append(keys) \nvalues = []\nfor i in range(1,4):\n    values.append(listall[i])\nd = dict(zip(listall[0],map(list,zip(*values))))\nprint (d)\nlist2 = []\nfor i in range(1,len(listall)):\n    dict1 = dict(zip(listall[0],listall[i]))\n    list2.append(dict1)\nprint(list2) \n    \n","sub_path":"manjula_dictionary.py","file_name":"manjula_dictionary.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"652350450","text":"\"\"\"\nDjango settings for project.\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom djimix.settings.local import DBSERVERNAME\nfrom djimix.settings.local import INFORMIX_ODBC\nfrom djimix.settings.local import INFORMIX_ODBC_TRAIN\nfrom djimix.settings.local import INFORMIXDIR\nfrom djimix.settings.local import INFORMIXSERVER\nfrom 
djimix.settings.local import INFORMIXSQLHOSTS\nfrom djimix.settings.local import LD_LIBRARY_PATH\nfrom djimix.settings.local import LD_RUN_PATH\nfrom djimix.settings.local import MSSQL_EARL\nfrom djimix.settings.local import ODBCINI\nfrom djimix.settings.local import ONCONFIG\n\nfrom djzbar.settings import INFORMIX_EARL_TEST as INFORMIX_EARL\n\n# Debug\nDEBUG = False\nINFORMIX_DEBUG = 'debug'\nADMINS = (\n ('', ''),\n)\nMANAGERS = ADMINS\n\nSECRET_KEY = ''\nENCRYPTION_KEY = None\n\nALLOWED_HOSTS = []\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/Chicago'\nSITE_ID = 1\nUSE_I18N = False\nUSE_L10N = False\nUSE_TZ = True\nDEFAULT_CHARSET = 'utf8'\nFILE_CHARSET = 'utf8'\nSERVER_URL = ''\nAPI_URL = '{}/{}'.format(SERVER_URL, 'api')\nLIVEWHALE_API_URL = 'https://{}'.format(SERVER_URL)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nROOT_DIR = os.path.dirname(__file__)\nADMIN_MEDIA_PREFIX = '/static/admin/'\nSTATIC_URL = '/static/djtwilio/'\nROOT_URL = '/apps/twilio'\nMEDIA_ROOT = '{}/assets/'.format(BASE_DIR)\nSTATIC_ROOT = '{}/static/'.format(BASE_DIR)\nMEDIA_URL = '{}assets/'.format(STATIC_URL)\nUPLOADS_DIR = '{}files/'.format(MEDIA_ROOT)\nUPLOADS_URL = '{}files/'.format(MEDIA_URL)\nROOT_URLCONF = 'djtwilio.core.urls'\nWSGI_APPLICATION = 'djtwilio.wsgi.application'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\nDATABASES = {\n 'default': {\n 'HOST': '127.0.0.1',\n 'PORT': '3306',\n 'NAME': 'django_djtwilio',\n 'ENGINE': 'django.db.backends.mysql',\n 'USER': '',\n 'PASSWORD': ''\n },\n}\nINSTALLED_APPS = [\n #'bootstrap_admin',\n 'bootstrap4',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'djtwilio.core',\n 'djtwilio.apps.sms',\n # needed for template tags\n 'djtools',\n 'loginas',\n]\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n #'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n #'django.middleware.cache.FetchFromCacheMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # the following should be uncommented unless you are\n # embedding your apps in iframes\n #'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n# template stuff\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n '/data2/django_templates/djbootmin/',\n '/data2/django_templates/django-djskins/',\n '/data2/django_templates/djcher/',\n '/data2/livewhale/includes/',\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'debug':DEBUG,\n 'context_processors': [\n 'djtools.context_processors.sitevars',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n ],\n #'loaders': [\n # # insert your TEMPLATE_LOADERS here\n #]\n },\n },\n]\n# caching\n'''\nCACHES = {\n 
'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n #'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n #'LOCATION': '127.0.0.1:11211',\n #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n #'LOCATION': '/var/tmp/django_djtwilio_cache',\n #'TIMEOUT': 60*20,\n #'KEY_PREFIX': 'DJTWILIO_',\n #'OPTIONS': {\n # 'MAX_ENTRIES': 80000,\n #}\n }\n}\n'''\nCACHE_MIDDLEWARE_ANONYMOUS_ONLY = True\n# LDAP Constants\nLDAP_SERVER = ''\nLDAP_SERVER_PWM = ''\nLDAP_PORT = ''\nLDAP_PORT_PWM = ''\nLDAP_PROTOCOL = ''\nLDAP_PROTOCOL_PWM = ''\nLDAP_BASE = ''\nLDAP_USER = ''\nLDAP_PASS = ''\nLDAP_EMAIL_DOMAIN = ''\nLDAP_OBJECT_CLASS = ''\nLDAP_OBJECT_CLASS_LIST = []\nLDAP_GROUPS = {}\nLDAP_RETURN = []\nLDAP_RETURN_PWM = []\nLDAP_ID_ATTR = ''\nLDAP_CHALLENGE_ATTR = ''\nLDAP_AUTH_USER_PK = False\n# auth backends\nAUTHENTICATION_BACKENDS = (\n 'djauth.ldapBackend.LDAPBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nLOGIN_URL = '{}/accounts/login/'.format(ROOT_URL)\nLOGOUT_URL = '{}/accounts/logout/'.format(ROOT_URL)\nLOGIN_REDIRECT_URL = ROOT_URL\nUSE_X_FORWARDED_HOST = True\n#SESSION_ENGINE = 'django.contrib.sessions.backends.cache'\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\nSESSION_COOKIE_DOMAIN='.carthage.edu'\nSESSION_COOKIE_NAME ='django_djtwilio_cookie'\nSESSION_COOKIE_AGE = 1209600 # default, two weeks\n# SMTP settings\nEMAIL_HOST = ''\nEMAIL_HOST_USER = ''\nEMAIL_HOST_PASSWORD = ''\nEMAIL_USE_TLS = True\nEMAIL_PORT = 587\nEMAIL_FAIL_SILENTLY = False\nDEFAULT_FROM_EMAIL = ''\nSERVER_EMAIL = ''\nSERVER_MAIL=''\n# security\nCSRF_COOKIE_SECURE = True\nSESSION_COOKIE_SECURE = True\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_SSL_REDIRECT = True\n# bootstrap admin\nBOOTSTRAP_ADMIN_SIDEBAR_MENU = True\n# bootstrap forms\nBOOTSTRAP4 = {\n 'required_css_class': 'required',\n}\n# Twilio\nTWILIO_API_URL = 'https://api.twilio.com/2010-04-01/'\nTWILIO_ACCOUNT_SID = ''\nTWILIO_AUTH_TOKEN = ''\nTWILIO_FORGERY_PROTECTION = True\nTWILIO_DEFAULT_FORWARD_PHONE=''\n# redpanda constants\nREDPANDA_SENDER_ID = 54\nREDPANDA_TEST_CIDS = ()\nREDPANDA_SERVER_URL = ''\nREDPANDA_ROOT_URL = ''\nREDPANDA_SHORT_URL_API = ''\nREDPANDA_TEXT_MESSAGE = ''\nREDPANDA_SMTP_ACCOUNTS = ()\nREDPANDA_SMTP_ROTATE_COUNT = 95\n# tests\nTWILIO_TEST_BULK_NAME=''\nTWILIO_TEST_BULK_DESCRIPTION=''\nTWILIO_TEST_BULK_SENDER_ID=0\nTWILIO_TEST_COLLEGE_ID=0\nTWILIO_TEST_PHONE_TO = ''\nTWILIO_TEST_PHONE_OPT_OUT = ''\nTWILIO_TEST_MESSAGE = 'Who does your taxes?'\nTWILIO_TEST_MESSAGE_SID = ''\nTWILIO_TEST_DEPARTMENT = 'Admissions'\nTWILIO_TEST_STATUS_DICT = {}\nTWILIO_TEST_SENDER_ID=0\nTWILIO_TEST_MESSAGING_SERVICE_SID_INVALID = ''\nTWILIO_GROUP = 'Twilio Manager'\nTWILIO_ADDMISSIONS_GROUP = 'Admissions SMS'\nTWILIO_ADMISSIONS_REPS = {}\n# auth tests\nTEST_USERNAME = ''\nTEST_PASSWORD = ''\nTEST_EMAIL = ''\nTEST_USER_ID = ''\n# logging\nLOG_FILEPATH = os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs/'\n)\nLOG_FILENAME = '{0}{1}'.format(LOG_FILEPATH, 'debug.log')\nDEBUG_LOG_FILENAME = '{0}{1}'.format(LOG_FILEPATH, 'debug.log')\nINFO_LOG_FILENAME = 
'{0}{1}'.format(LOG_FILEPATH, 'info.log')\nERROR_LOG_FILENAME = '{0}{1}'.format(LOG_FILEPATH, 'error.log')\nCUSTOM_LOG_FILENAME = '{0}{1}'.format(LOG_FILEPATH, 'custom.log')\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format' : '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',\n 'datefmt' : '%Y/%b/%d %H:%M:%S'\n },\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',\n 'datefmt' : '%Y/%b/%d %H:%M:%S'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'logfile': {\n 'level':'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': LOG_FILENAME,\n 'formatter': 'standard',\n },\n 'console':{\n 'level':'INFO',\n 'class':'logging.StreamHandler',\n 'formatter': 'standard'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'include_html': True,\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'custom_logfile': {\n 'level':'ERROR',\n 'filters': ['require_debug_true'], # do not run error logger in production\n 'class': 'logging.FileHandler',\n 'filename': CUSTOM_LOG_FILENAME,\n 'formatter': 'custom',\n },\n 'info_logfile': {\n 'level':'INFO',\n 'class':'logging.handlers.RotatingFileHandler',\n 'backupCount': 10,\n 'maxBytes': 50000,\n 'filters': ['require_debug_false'], # run logger in production\n 'filename': INFO_LOG_FILENAME,\n 'formatter': 'simple',\n },\n 'debug_logfile': {\n 'level': 'DEBUG',\n 'handlers': ['logfile'],\n 'class': 'logging.FileHandler',\n 'filename': DEBUG_LOG_FILENAME,\n 'formatter': 'verbose',\n },\n 'error_logfile': {\n 'level': 'ERROR',\n 'filters': ['require_debug_true'], # do not run error logger in production\n 'class': 'logging.FileHandler',\n 'filename': ERROR_LOG_FILENAME,\n 'formatter': 'verbose'\n },\n 'djtwilio': {\n 'handlers':['logfile'],\n 'propagate': True,\n 'level':'DEBUG',\n },\n 'djtwilio.apps.sms': {\n 'handlers':['logfile'],\n 'propagate': True,\n 'level':'DEBUG',\n },\n 'djtwilio.core': {\n 'handlers':['logfile'],\n 'propagate': True,\n 'level':'DEBUG',\n },\n 'django': {\n 'handlers':['console'],\n 'propagate': True,\n 'level':'WARN',\n },\n 'django.db.backends': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","sub_path":"djtwilio/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":11189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507228984","text":"import os\r\nimport click\r\nimport requests\r\nfrom pprint import pprint\r\n\r\n# Local imports \r\nfrom util import *\r\nfrom query_order import get_order_status\r\n\r\n\r\nBASE_METHOD = \"order\"\r\nBATCH_METHOD = f\"{BASE_METHOD}/batch\"\r\n\r\n\r\ndef place_order(order, apikey, secret, base_url, method):\r\n url = \"{}/{}\".format(base_url, method)\r\n ts = utc_timestamp()\r\n headers = make_auth_headers(ts, method, apikey, secret)\r\n return requests.post(url, headers=headers, json=order)\r\n\r\n\r\ndef place_batch_order(orders, api_key, secret, base_url, method=BATCH_METHOD):\r\n \"\"\"\r\n e.g.\r\n batch_order = [order1, order2]\r\n print(\"Place batch order {}\".format(batch_order))\r\n res = 
place_batch_order(batch_order, api_key=api_key, secret=secret, base_url=base_url)\r\n    pprint(parse_response(res))\r\n\r\n    :param orders:\r\n    :param api_key:\r\n    :param secret:\r\n    :param base_url:\r\n    :param method:\r\n    :return:\r\n    \"\"\"\r\n    url = \"{}/{}\".format(base_url, method)\r\n    ts = utc_timestamp()\r\n    batch_order = {\"orders\": orders}\r\n    headers = make_auth_headers(ts, method, api_key, secret)\r\n\r\n    return requests.post(url, headers=headers, json=batch_order)\r\n\r\n\r\n@click.command()\r\n@click.option(\"--account\", type=click.Choice(['cash', 'margin']), default=\"cash\")\r\n@click.option(\"--symbol\", type=str, default='BTC/USDT')\r\n@click.option(\"--price\", type=str, default='7000.0')\r\n@click.option(\"--qty\", type=str, default='0.00082')\r\n@click.option(\"--order-type\", type=str, default=\"limit\")\r\n@click.option(\"--side\", type=click.Choice(['buy', 'sell']), default='buy')\r\n@click.option(\"--resp-inst\", type=click.Choice(['ACK', 'ACCEPT', 'DONE']), default=\"ACCEPT\")\r\n@click.option(\"--time-in-force\", type=click.Choice(['GTC', 'IOC', 'IOO', 'FOK']), default=\"GTC\")\r\n@click.option(\"--config\", type=str, default=\"config.json\", help=\"path to the config file\")\r\ndef run(account, symbol, price, qty, order_type, side, resp_inst, time_in_force, config):\r\n\r\n    btmx_cfg = load_config(get_config_or_default(config))['bitmax']\r\n\r\n    host = btmx_cfg['https']\r\n    group = btmx_cfg['group']\r\n    apikey = btmx_cfg['apikey']\r\n    secret = btmx_cfg['secret']\r\n\r\n    if \"user_uid\" in btmx_cfg:\r\n        user_uid = btmx_cfg[\"user_uid\"]\r\n    else:\r\n        user_uid = None\r\n\r\n    base_url = f\"{host}/{group}/{ROUTE_PREFIX}/{account}\"\r\n\r\n    ts = utc_timestamp()\r\n    order = dict(\r\n        id=uuid32(),\r\n        time=ts,\r\n        symbol=symbol.replace(\"-\", \"/\"),\r\n        orderPrice=str(price),\r\n        orderQty=str(qty),\r\n        orderType=order_type,\r\n        side=side.lower(),\r\n        timeInForce=time_in_force,\r\n        respInst=resp_inst,\r\n    )\r\n\r\n    print(\"Place order {} through {}\".format(order, base_url))\r\n    res = place_order(order, apikey=apikey, secret=secret, base_url=base_url, method=\"order\")\r\n    pprint(parse_response(res))\r\n\r\n    # query order status\r\n    if user_uid is not None:\r\n        time.sleep(1)\r\n        cl_id = order[\"id\"]\r\n        server_coid = gen_server_order_id(user_uid, cl_order_id=cl_id, ts=order[\"time\"], order_src='a')\r\n        print(f\"server_coid = {server_coid}\")\r\n        res = get_order_status(base_url, apikey, secret, server_coid)\r\n        pprint(parse_response(res))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    run()\r\n","sub_path":"python/order_new.py","file_name":"order_new.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"401974700","text":"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nimport numpy as np\nfrom scipy.spatial import distance as dist\nfrom cv_bridge import CvBridge, CvBridgeError  # CvBridgeError is needed by the except clause in findandread\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import String\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras import backend\nfrom tensorflow.python.keras.backend import set_session\nfrom tensorflow.python.keras.models import load_model\n\nclass LicenseReader():\n\n    def __init__(self):\n        tf.keras.backend.clear_session()\n        # define the parking and plate dictionaries\n        self.ordered_data_1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n        self.int_to_char = dict((i,c) 
for i,c in enumerate(self.ordered_data_1))\n self.ordered_data_2 = '123456'\n self.int_to_park = dict((i,c) for i,c in enumerate(self.ordered_data_2))\n\n # load the parking CNN\n self.sess = tf.keras.backend.get_session()\n self.graph = tf.compat.v1.get_default_graph()\n\n self.parkModel = models.load_model(\"/home/fizzer/ros_ws/src/my_parking_reader.h5\")\n self.parkModel._make_predict_function()\n\n self.plateModel = models.load_model(\"/home/fizzer/ros_ws/src/my_model.h5\")\n self.plateModel._make_predict_function()\n\n self.bridge = CvBridge()\n self.imageSubscriber = rospy.Subscriber(\"/R1/pi_camera/image_raw\", Image, self.findandread)\n self.ReadPublisher = rospy.Publisher('/license_plate', String, queue_size = 10)\n self.ReadRate = rospy.Rate(10)\n self.prevError = 0\n\n\n\n # This is the Main read code\n def findandread(self, data):\n try:\n cameraImage = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n lic_plate = self.findPlate(cameraImage)\n\n if (lic_plate is not None):\n pos, plate = self.readPlate(lic_plate)\n\n if (plate != \"\" and pos != \"\"):\n message = \"Team7,chuck,\" + pos +\",\" + plate\n print(\"in P{}, plate = {}\".format(pos, plate))\n self.ReadPublisher.publish(message)\n\n #Uses homography to find the plate\n def findPlate(self, cImage):\n\n # The following code is from https://medium.com/programming-fever/license-plate-recognition-using-opencv-python-7611f85cdd6c\n # with very slight modifications\n h, w, c = cImage.shape\n cameraImage = cImage[h/2-50:h+50, 0:w/2]\n gray = cv2.cvtColor(cameraImage, cv2.COLOR_BGR2GRAY) \n hsv_im = cv2.cvtColor(cameraImage, cv2.COLOR_BGR2HSV) \n mask = cv2.inRange(hsv_im, np.array([0,0,97],np.uint8), np.array([0,0,204],np.uint8))\n\n # cv2.imshow(\"gray\", mask)\n # cv2.waitKey(3)\n \n x, contours, y = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n screenCnt = None\n\n for c in contours:\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.010 * peri, True)\n area = cv2.contourArea(c)\n \n if len(approx) == 4 and area > 10000:\n max_h = 0\n min_h = 10000\n min_w = 10000\n max_w = 0\n for pts in approx: #approx gives (w,h)\n h = pts[0,1]\n w = pts[0,0]\n point = [w,h]\n if h > max_h:\n max_h = h\n if h < min_h:\n min_h = h\n if w > max_w:\n max_w = w\n if w < min_w:\n min_w = w\n w = max_w - min_w\n h = max_h - min_h\n\n if abs(w-h) < 35:\n for pts in approx:\n h = pts[0,1]\n if max_h - h < 20:\n pts[0,1] = h + 32\n\n screenCnt = approx\n break\n\n if screenCnt is None:\n detected = 0\n # print (\"No contour detected\")\n else:\n detected = 1\n\n if detected == 1:\n # cv2.drawContours(cameraImage, [screenCnt], -1, (0, 0, 255), 3)\n\n mask = np.zeros(gray.shape,np.uint8)\n new_image = cv2.drawContours(mask,[screenCnt],0,255,-1,)\n new_image = cv2.bitwise_and(cameraImage, cameraImage,mask=mask)\n\n # cv2.imshow(\"new image\", new_image)\n # cv2.waitKey(3)\n \n max_h = 0\n min_h = 10000\n min_w = 10000\n max_w = 0\n\n pts1 = []\n for pts in screenCnt:\n h = pts[0,1]\n w = pts[0,0]\n point = [w,h]\n if h > max_h:\n max_h = h\n if h < min_h:\n min_h = h\n if w > max_w:\n max_w = w\n if w < min_w:\n min_w = w\n pts1.append(point)\n\n pts1 = np.array(pts1)\n pts1 = self.order_points(pts1)\n pts2 = np.float32([[min_w, min_h], [min_w, max_h], [max_w, max_h], [max_w, min_h]])\n matrix = cv2.getPerspectiveTransform(pts1, pts2) \n\n h,w,ch = new_image.shape\n final_crop = cv2.warpPerspective(new_image, matrix, (w, h))\n final_crop = 
final_crop[min_h:max_h,min_w:max_w]\n\n cv2.imshow(\"cropped\", final_crop)\n cv2.waitKey(3)\n return final_crop\n\n return None\n\n #goes through our CNN to read the parking spot and read plate\n def readPlate(self, img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n plate = \"\"\n pos = \"\"\n\n ############ predicting the parking position #################\n # resize the plate\n hi,wi,chi = img.shape\n scale_h = 400/hi\n scale_w = 350/wi\n img = cv2.resize(img,None,fx=scale_h, fy=scale_w, interpolation = cv2.INTER_CUBIC)\n\n hi,wi,chi = img.shape\n parking_pic = img[int(0.75*hi)-100:int(0.75*hi),wi-150:wi-10]\n parking_pic = parking_pic/255. \n park_aug = np.expand_dims(parking_pic, axis=0)\n\n with self.graph.as_default():\n set_session(self.sess)\n pos_pred = self.parkModel.predict(park_aug)[0]\n if np.amax(pos_pred) > 0.5:\n pos = self.int_to_park[np.argmax(pos_pred)]\n \n ############## predicting the license plate ####################\n x = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n x = cv2.inRange(x, np.array([119,100,75],np.uint8), np.array([121,245,215],np.uint8))\n\n img = cv2.merge((x,x,x))\n # img = img [int(0.8*hi):hi, 0:wi] \n # hi,wi,chi = img.shape\n # cv2.imshow(\"img\", img)\n\n lics_plate = img[hi-70:hi-5,0:int(wi/2)]\n h,w,ch = lics_plate.shape\n \n # cv2.imshow(\"pos\", lics_plate)\n letter_one = lics_plate[0:65,int(w/2)-65:int(w/2)]\n letter_two = lics_plate[0:65,int(w/2):int(w/2)+65]\n\n lics_plate = img[hi-70:hi-5,int(wi/2):wi]\n h,w,ch = lics_plate.shape\n num_one = lics_plate[0:65,int(w/2)-65:int(w/2)]\n num_two = lics_plate[0:65,int(w/2):int(w/2)+65]\n\n letter_one = letter_one/255.\n letter_two = letter_two/255.\n num_one = num_one/255.\n num_two = num_two/255.\n\n l1_aug = np.expand_dims(letter_one, axis=0)\n l2_aug = np.expand_dims(letter_two, axis=0)\n n1_aug = np.expand_dims(num_one, axis=0)\n n2_aug = np.expand_dims(num_two, axis=0)\n\n \n # cv2.imshow(\"1\", letter_one)\n # cv2.imshow(\"2\", letter_two)\n # cv2.imshow(\"3\", num_one)\n # cv2.imshow(\"4\", num_two)\n cv2.waitKey(3)\n\n with self.graph.as_default():\n try:\n set_session(self.sess)\n l1_pred = self.plateModel.predict(l1_aug)[0]\n l2_pred = self.plateModel.predict(l2_aug)[0]\n n1_pred = self.plateModel.predict(n1_aug)[0]\n n2_pred = self.plateModel.predict(n2_aug)[0]\n\n plate = plate + self.int_to_char[np.argmax(l1_pred)] + self.int_to_char[np.argmax(l2_pred)] #adds the letters\n plate = plate + self.int_to_char[np.argmax(n1_pred)] + self.int_to_char[np.argmax(n2_pred)] #adds the numbers\n except Exception as e:\n print(\"plate not found\", e)\n\n\n # print(letter_one)\n # print(\"aug\", l1_aug)\n print(pos,plate)\n\n # check if the plate is in the form [char, char, int, int]\n # for i in range(4):\n # if i < 2:\n # if (not plate[i].isalpha()):\n # return pos, None\n # else:\n # if (not plate[i].isdigit()):\n # return pos, None\n\n return pos, plate\n\n\n ############helper method\n\n #taken from: https://www.pyimagesearch.com/2016/03/21/ordering-coordinates-clockwise-with-python-and-opencv/\n def order_points(self, pts):\n # sort the points based on their x-coordinates\n xSorted = pts[np.argsort(pts[:, 0]), :]\n # grab the left-most and right-most points from the sorted\n # x-roodinate points\n leftMost = xSorted[:2, :]\n rightMost = xSorted[2:, :]\n # now, sort the left-most coordinates according to their\n # y-coordinates so we can grab the top-left and bottom-left\n # points, respectively\n leftMost = leftMost[np.argsort(leftMost[:, 1]), :]\n (tl, bl) = leftMost\n # now that we have the 
top-left coordinate, use it as an\n        # anchor to calculate the Euclidean distance between the\n        # top-left and right-most points; by the Pythagorean\n        # theorem, the point with the largest distance will be\n        # our bottom-right point\n        D = dist.cdist(tl[np.newaxis], rightMost, \"euclidean\")[0]\n        (br, tr) = rightMost[np.argsort(D)[::-1], :]\n        # return the coordinates in top-left, bottom-left,\n        # bottom-right, and top-right order\n        return np.array([tl, bl, br, tr], dtype=\"float32\")\n\ndef main():\n    rospy.init_node(\"license_read\")\n    licenseReader = LicenseReader()\n    rate = rospy.Rate(10)\n    \n    while not rospy.is_shutdown():\n        rate.sleep()\n    \nif __name__ == '__main__':\n    main()\n    ","sub_path":"src/read_using_contours.py","file_name":"read_using_contours.py","file_ext":"py","file_size_in_byte":10466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"424259564","text":"'''\nCreated on 2017. 9. 12.\n\n@author: callor\n'''\nfrom sys import *\nfrom PyQt5.QtWidgets import *\n\n\nclass QtExam(QWidget):\n    \n    def __init__(self):\n        super().__init__() # inherit and use QWidget's functions and variables\n        \n        \n    def initUi(self):\n        \n        self.resize(250,250)\n        self.center()\n        \n        self.setWindowTitle(\"QT Practice\")\n        self.show()\n        \n    # user function that places the window at the center of the current monitor\n    def center(self):\n        r = self.frameGeometry()\n        c = QDesktopWidget().availableGeometry().center()\n        r.moveCenter(c)\n        self.move(r.topLeft())\n    \n    \nif __name__ == \"__main__\" :\n    app = QApplication(argv)\n    qex = QtExam() # declare an object of the class\n    qex.initUi() # run the initUi method of the object\n    exit(app.exec_())","sub_path":"PyQT/Hello_QT/QT_02.py","file_name":"QT_02.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"14322028","text":"from __future__ import absolute_import\nimport dj_database_url\nfrom .connectors import MASTER, SLAVES\nfrom settings.base.common import *\n\n\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nINSTALLED_APPS = [\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.sites',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'apps.core',\n    'apps.accounts',\n    'apps.locations',\n    'apps.tw_pagination',\n    'allauth',\n    'allauth.account',\n    'allauth.socialaccount',\n    'allauth.socialaccount.providers.vk',\n]\n\nMIDDLEWARE = [\n    'django.middleware.security.SecurityMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\n\ndef set_database_list():\n    db_list = {'slave_%d' % i: dj_database_url.config(\n        env='DATABASE_URL', default=j, conn_max_age=500) for i, j in enumerate(SLAVES, start=1)}\n    db_list.update(\n        {'default': {}, 'master': dj_database_url.config(env='DATABASE_URL', default=MASTER, conn_max_age=500)})\n    return db_list\n\n\nDATABASES = set_database_list()\nDATABASE_ROUTERS = ['apps.core.models.MasterSlaveRouter']\n","sub_path":"settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"11577601","text":"from collections import defaultdict \n\nclass Graph: \n    def __init__(self): \n        self.graph = defaultdict(list)\n\n    
def addEdge(self,u,v):\n self.graph[u].append(v)\n\n def show(self) :\n for i in self.graph :\n print(line.index(i), ':', [line.index(j) for j in self.graph[i]])\n\n def replace(self, source, target) :\n for start in self.graph :\n for index in range(len(self.graph[start])) :\n if self.graph[start][index] == source :\n self.graph[start][index] = target\n\n def BFS(self, source, target): \n visited = {source}\n queue = [(source, 0)]\n while queue:\n s = queue.pop(0)\n if s[0] == target :\n return s[1]\n for i in self.graph[s[0]] :\n if i in visited : continue\n queue.append((i,s[1]+1))\n visited.add(i)\n\nN, M = [int(i) for i in input().split()]\n\nline = []\nfor i in reversed(range(N)) :\n line += zip([i for m in range(M)], range(M)) if not ((N-i+1)%2)\\\n else zip([i for m in range(M)], reversed(range(M)))\n\n\ng = Graph()\n\nfor index in range(len(line)) :\n for step in range(min(6, len(line)-index-1)) :\n g.addEdge(line[index], line[index+1+step])\n\n# transes = {}\n# while True :\n# i, j, target = [int(i) for i in input().split()]\n# if i == j == target == -1 :\n# break\n# transes[(i, j)] = line[target]\n\n# for source in transes :\n# target = transes[source]\n# while target in transes :\n# target = transes[target]\n# g.replace(source, target)\n\nwhile True :\n i, j, target = [int(i) for i in input().split()]\n if i == j == target == -1 :\n break\n g.replace((i, j), line[target])\n\n# g.show()\n# print(\"target :\", line[-1])\n\nprint(g.BFS(line[0], line[-1]))","sub_path":"ComputerAssignment5/code/Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"484584924","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport subprocess\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nsys.path.append('lib/')\nimport analysis_utils as au\nfrom scipy.optimize import curve_fit\nimport glob\nimport pickle as pk\n#### define macros #########\nI=complex(0.,1.)\nsqrt=np.sqrt\nlog=np.log\nmatr=np.matrix\nfrom math import pi as PI\n\nGamma4_plus=0.25*(au.one + au.g4_uk)\nGamma4_minus=0.25*(au.one - au.g4_uk)\n###########################\n\nlistSinks=[8,10,12,14]\nlistSymbols=['o','^','*','s']\nlistColors=['red','green','blue','purple']\n\ndef assertions(args):\n parameters={}\n parameters['listConfs'] = args['listConfs']\n parameters['twopPath'] = args['twopPath']\n parameters['loopPath'] = args['loopPath']\n parameters['L'] = int(args['L'])\n parameters['T'] = int(args['T'])\n parameters['binsize'] = int(args['binsize'])\n parameters['latticeSpacing'] = float(args['latticeSpacing'])\n parameters['kappa'] = float(args['kappa'])\n parameters['flavor'] = args['flavor']\n parameters['a_inverse_GeV'] = 0.197/parameters['latticeSpacing']\n parameters['NsrcTwop'] = int(args['NsrcTwop'])\n parameters['NnoiseVec'] = int(args['NnoiseVec'])\n parameters['maxTsink'] = int(args['maxTsink'])\n parameters['picklePath'] = args['picklePath']\n parameters['fitMassLow'] = int(args['fitMassLow'])\n parameters['fitMassHgh'] = int(args['fitMassHgh'])\n parameters['Z_DV'] = float(args['Z_DV'])\n\n assert parameters['L']>0\n assert parameters['T']>0\n assert parameters['binsize']>0\n assert parameters['latticeSpacing'] > 0\n assert parameters['kappa'] > 0\n assert parameters['flavor'] in ['light','strange','charm']\n assert parameters['NsrcTwop'] > 0\n assert parameters['NnoiseVec'] > 0\n assert parameters['maxTsink'] > 0 and parameters['maxTsink'] < 
int(parameters['T']/2)\n    assert parameters['fitMassLow'] >0 and parameters['fitMassLow'] < parameters['T']/2\n    assert parameters['fitMassHgh'] >0 and parameters['fitMassHgh'] < parameters['T']/2 and parameters['fitMassHgh'] > parameters['fitMassLow']\n    return parameters\n\ndef main(argv=None):\n    if argv is None:\n        argv = sys.argv\n\n    # include the available arguments\n    parser = argparse.ArgumentParser(prog=' disconnected', description='Calculates the disconnected contributions to <x>')\n    parser.add_argument('--listConfs', help='Takes the list of configurations that we want to analyze', default = 'list_confs.txt')\n    parser.add_argument('--twopPath', help='Path where to find the two point functions', default = 'twop/')\n    parser.add_argument('--NsrcTwop', help='The number of two point function source positions expected', default = 100)\n    parser.add_argument('--loopPath', help='Path where to find the disconnected quark loops',default = 'loops/')\n    parser.add_argument('--NnoiseVec', help='Number of noise vectors (the loops are divided by this number; if that was already done, set it to one)', default = 2250)\n    parser.add_argument('--L', help='Spatial Lattice Extent', default = 48)\n    parser.add_argument('--T', help='Temporal Lattice Extent', default = 96)\n    parser.add_argument('--binsize', help='The binsize needed for the binning procedure', default = 1)\n    parser.add_argument('--latticeSpacing', help='Lattice spacing for the ensemble we analyze', default = 0.093)\n    parser.add_argument('--kappa', help='The value of the kappa parameter', default = 0.13729)\n    parser.add_argument('--flavor', help='The flavor of the quark (light,strange,charm)', default = 'light') ## if it is light we calculate (u+d), strange (s^+ + s^-)/2, charm (c^+ + c^-)/2\n    parser.add_argument('--maxTsink', help='This is the maximum source sink separation that you want to study', default = 14)\n    parser.add_argument('--picklePath', help='Path where to read or write pickle data', default = '')\n    parser.add_argument('--fitMassLow', help='Lower fit range for effective mass', default = 9)\n    parser.add_argument('--fitMassHgh', help='Higher fit range for effective mass', default = 15)\n    parser.add_argument('--Z_DV', help='Renormalization factor for the one derivative vector current when mu == nu', default = 1.1251)\n    args = vars(parser.parse_args())\n\n    # check parameters\n    parameters=assertions(args)\n\n    # read configurations list\n    lenConf=au.file_len(parameters['listConfs'])\n    with open(parameters['listConfs'],'r') as fp:\n        listConfs = list(map(lambda x: x.strip(),fp.readlines()))\n    if len(set(listConfs)) != lenConf:\n        sys.stderr.write('Error: there are duplicate confs in the list\\n')\n        sys.exit(-1)\n\n    if lenConf % parameters['binsize'] != 0:\n        sys.stderr.write('Error: number of configurations is not divisible by the binsize; manually discard confs to make it so\\n')\n        sys.exit(-1) \n\n    \n    pathTwopPickle=parameters['picklePath'] + 'twopPickle.pckl'\n    pathTwopSrcPickle=parameters['picklePath'] + 'twopSrcPickle.pckl'\n    pathLoopPickle=parameters['picklePath'] + 'loopPickle.pckl'\n\n    if parameters['picklePath'] and os.path.isfile(pathTwopPickle) and os.path.isfile(pathTwopSrcPickle):\n        print('Pickle files for twop found')\n        ltwopRaw=pk.load(open(pathTwopPickle,'rb'))\n        lsrcPos=pk.load(open(pathTwopSrcPickle,'rb'))\n        if len(ltwopRaw) != lenConf*parameters['NsrcTwop']:\n            sys.stderr.write('Warning: pickle for twop does not have the same confs as the list of confs provided, forcing re-read\\n')\n            os.remove(pathTwopPickle)\n            os.remove(pathTwopSrcPickle)\n    
if (not parameters['picklePath']) or ( (not os.path.isfile(pathTwopPickle)) or (not os.path.isfile(pathTwopSrcPickle)) ):\n        # read two point function\n        print('Reading two point functions')\n        ltwopRaw=[]\n        lsrcPos=[]\n        for iconf in listConfs:\n            lsrcPerConf=glob.glob(parameters['twopPath'] + iconf + '/' + 'twop*nucleon*zeromom*dat')\n            if len(lsrcPerConf) != parameters['NsrcTwop']:\n                sys.stderr.write('Error: wrong number of source positions found for conf %s\\n' % (iconf))\n                sys.exit(-1)\n            for srcPath in lsrcPerConf:\n                srcBase=os.path.basename(srcPath)\n                ###### !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! this part depends on the convention in which the data has been stored; we assume twop.${CONF}.nucleon_zeromom.SS.${XX}.${YY}.${ZZ}.${TT}.dat\n                XX=srcBase.split('.')[4]\n                YY=srcBase.split('.')[5]\n                ZZ=srcBase.split('.')[6]\n                TT=srcBase.split('.')[7]\n                lsrcPos.append([XX,YY,ZZ,TT])\n                ##### !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! the reading depends on the format the data was written in; if you change the format, change this part also\n                arr=np.loadtxt(srcPath, usecols = (7,8,9,10) ) ## proton(re,im) neutron(re,im)\n                explen = parameters['T'] * 16\n                if len(arr) != explen:\n                    sys.stderr.write('Error: wrong number of lines in %s\\n' % (srcPath))\n                    sys.exit(-1)\n                # average over proton neutron\n                arrNucl=((arr[:,0] + arr[:,2])/2 + I*(arr[:,1]+arr[:,3])/2).reshape(parameters['T'],4,4)\n                ltwopRaw.append(arrNucl)\n            print('read twop %s' % iconf)\n            sys.stdout.flush()\n        # save data to pickle if it is enabled\n        if parameters['picklePath']:\n            print('writing two point functions to pickle')\n            pk.dump(ltwopRaw, open(pathTwopPickle,'wb'))\n            pk.dump(lsrcPos, open(pathTwopSrcPickle,'wb')) \n    \n    # rearrange the list with the source positions\n    lsrcPos=(np.array(lsrcPos).reshape(lenConf,parameters['NsrcTwop'],4)).tolist()\n\n    # put data in numpy array\n    twopRaw=np.array(ltwopRaw).reshape(lenConf,parameters['NsrcTwop'],parameters['T'],4,4)\n    del ltwopRaw\n    \n    # average over the source positions\n    twopAver=np.average(twopRaw,axis=1)\n\n    # project two point functions\n    twopGamma4_plus=np.array([])\n    twopGamma4_minus=np.array([])\n    \n    for i in range(lenConf):\n        twopGamma4_plus=np.append(twopGamma4_plus, np.array(list(map(lambda x: (matr(Gamma4_plus)*matr(x.reshape(4,4))).trace() , twopAver[i]))).reshape(parameters['T']) )\n        twopGamma4_minus=np.append(twopGamma4_minus, np.array(list(map(lambda x: (matr(Gamma4_minus)*matr(x.reshape(4,4))).trace() , twopAver[i]))).reshape(parameters['T']) )\n\n    twopGamma4_plus=twopGamma4_plus.reshape(lenConf,parameters['T'])\n    twopGamma4_minus=twopGamma4_minus.reshape(lenConf,parameters['T'])\n    twopGamma4=np.zeros((lenConf,parameters['T']), dtype=np.complex128)\n\n    for i in range(lenConf):\n        twopGamma4[i,0]=twopGamma4_plus[i,0]\n        for it in range(1,parameters['T']):\n            twopGamma4[i,it] = (twopGamma4_plus[i,it] - twopGamma4_minus[i,-it])/2.\n\n    twopGamma4_Bin=au.binning(twopGamma4,parameters['binsize'])\n    Nbins=len(twopGamma4_Bin)\n    massEffBinning=au.effMass(np.array(twopGamma4_Bin).real)\n    meanEffMass = np.average(np.array(massEffBinning),axis=0)\n    errEffMass = sqrt(Nbins-1) * np.std(np.array(massEffBinning),axis=0)\n    massBin=[]\n    for Arr in massEffBinning:\n        Lw=parameters['fitMassLow']\n        Hg=parameters['fitMassHgh']\n        popt , pconv = curve_fit(lambda x,a: a, np.arange(Lw, Hg+1), Arr[Lw:Hg+1] , sigma=errEffMass[Lw:Hg+1])\n        massBin.append(popt)\n\n    # factor that we need to multiply the loops by\n    # here we add the sign introduced when we anticommute the fermion fields\n    # factor -4*k or -2*k\n    if parameters['flavor'] == 'light':\n        
mul_factor = -4*parameters['kappa'] # this calculates (u+d)\n    else:\n        mul_factor = -2*parameters['kappa'] * 0.25 # this calculates (s^+ + s^-)/2 or (c^+ + c^-)/2 , 1/4 because Alejandro does not include this factor in the code\n\n\n    # read loops \n    if parameters['picklePath'] and os.path.isfile(pathLoopPickle):\n        print('Pickle files for loops found')\n        lloopsRaw=pk.load(open(pathLoopPickle,'rb'))\n        if len(lloopsRaw) != lenConf:\n            sys.stderr.write('Warning: pickle for loop does not have the same confs as the list of confs provided, forcing re-read\\n')\n            os.remove(pathLoopPickle)\n\n    if (not parameters['picklePath']) or (not os.path.isfile(pathLoopPickle)):\n        lloopsRaw=[]\n        for iconf in listConfs:\n            ######### !!!!!!!!!!!!!!!!!!!!!!!!!!!!! if the loops are stored in a different order you should change this; for instance, for the strange it might be that we have one more column\n            if parameters['flavor'] == 'light':\n                loopPath=parameters['loopPath'] + 'LpsDw.' + iconf + '.' + str(parameters['NnoiseVec']).zfill(4)\n                arr=np.loadtxt(loopPath, usecols = (6,7))\n            # for strange and charm the situation is a bit different. For this case set NnoiseVec = 1 because it has already been done\n            elif parameters['flavor'] == 'strange':\n                loopPath=parameters['loopPath'] + 'LpsDw.loop.H030.L0960.Processed.' + iconf\n                arr=np.loadtxt(loopPath, usecols = (7,8))\n            elif parameters['flavor'] == 'charm':\n                loopPath=parameters['loopPath'] + 'LpsDw.loop.H030.L0960.Processed.' + iconf\n                arr=np.loadtxt(loopPath, usecols = (7,8))\n            else:\n                sys.stderr.write('Error: this flavor is not implemented %s\\n' % (parameters['flavor']))\n                sys.exit(-1)\n            \n            explen = parameters['T'] * 4 * 16\n            if len(arr) != explen:\n                sys.stderr.write('Error: wrong number of lines in %s\\n' % (loopPath))\n                sys.exit(-1)\n            arr=arr*(mul_factor/parameters['NnoiseVec'])\n            arr=arr[:,0]+I*arr[:,1]\n            lloopsRaw.append(arr)\n            print('read loop %s' % iconf)\n            sys.stdout.flush()\n        if parameters['picklePath']:\n            print('writing loops to pickle')\n            pk.dump(lloopsRaw, open(pathLoopPickle,'wb'))\n    \n    \n    loopsRaw=np.array(lloopsRaw).reshape(lenConf,parameters['T'],4,4,4)\n    del lloopsRaw\n\n    # calculate the loop we need for one derivative\n    g_nu=[au.g1_uk,au.g2_uk,au.g3_uk,au.g4_uk]\n    lloops_vD=[]\n    for i in range(lenConf):\n        for it in range(parameters['T']):\n            for mu in range(4):\n                for nu in range(4):\n                    loop=((matr(g_nu[nu])*matr((loopsRaw[i,it,mu]).T)).trace()).item()\n                    lloops_vD.append(loop)\n\n    loops_vD=np.array(lloops_vD).reshape(lenConf,parameters['T'],4,4)\n\n    # symmetrize, make traceless, and multiply by I because this is how derivatives are defined\n    loops_vD_sym_tr=np.zeros(loops_vD.shape, dtype=np.complex128)\n    for i in range(lenConf):\n        for it in range(parameters['T']):\n            for mu in range(4):\n                for nu in range(4):\n                    if mu == nu:\n                        loops_vD_sym_tr[i,it,mu,nu] = I*(loops_vD[i,it,mu,nu] - 0.25*(sum([loops_vD[i,it,ii,ii] for ii in range(4)])))\n                    else:\n                        loops_vD_sym_tr[i,it,mu,nu] = I*(loops_vD[i,it,mu,nu] + loops_vD[i,it,nu,mu])/2.\n\n    # calculate vacuum \n    vacuum_tt=np.average(loops_vD_sym_tr,axis=1)[:,3,3]\n    vacuum_tt_Binning=au.binning(vacuum_tt, parameters['binsize'])\n    Nbins=len(vacuum_tt_Binning)\n#    for iconf in range(lenConf):\n#        print(vacuum_tt[iconf].imag)\n    vacuum_tt_bmean=np.average(np.array(vacuum_tt_Binning).imag)\n    err_vacuum_tt_bmean=sqrt(Nbins-1) * np.std(np.array(vacuum_tt_Binning).imag)\n    print('vev = %+e %+e' % (vacuum_tt_bmean, err_vacuum_tt_bmean) )\n\n    # create three point function\n    lthreep=[]\n    for i in range(lenConf):\n        arr=np.zeros( 
(parameters['NsrcTwop'],parameters['maxTsink']+1,parameters['maxTsink']+1) , dtype=np.complex128)\n for isrc in range(parameters['NsrcTwop']):\n for it2 in range(parameters['maxTsink']+1):\n twopComp_fwd=((matr(Gamma4_plus)*matr(twopRaw[i,isrc,it2])).trace()).item()\n twopComp_bwd=((matr(Gamma4_minus)*matr(twopRaw[i,isrc,-it2])).trace()).item()\n for it1 in range(it2+1):\n it1_fwd=(int(lsrcPos[i][isrc][3]) + it1)%parameters['T']\n it1_bwd=(int(lsrcPos[i][isrc][3]) - it1)\n loop_fwd=loops_vD_sym_tr[i,it1_fwd,3,3]\n loop_bwd=loops_vD_sym_tr[i,it1_bwd,3,3]\n threep_re=(twopComp_fwd.real*loop_fwd.real - twopComp_bwd.real*loop_bwd.real)/2.\n threep_im=(twopComp_fwd.real*loop_fwd.imag - twopComp_bwd.real*loop_bwd.imag)/2.\n arr[isrc,it2,it1] = threep_re + 1j*threep_im\n lthreep.append(np.average(arr,axis=0)) # average over the source position and save it to the list\n\n\n twopGamma4_Binning=au.binning(twopGamma4, parameters['binsize'])\n threep_Binning=au.binning(lthreep, parameters['binsize'])\n\n # create the ratio for each bin\n ratio_Binning=[]\n for ibin in range(Nbins):\n arr=np.zeros(threep_Binning[ibin].shape, dtype=np.complex128)\n for it2 in range(parameters['maxTsink']+1):\n for it1 in range(it2+1):\n arr[it2,it1] = parameters['Z_DV']*(-4./(3.*massBin[ibin].item())) * ((threep_Binning[ibin][it2,it1].real / twopGamma4_Binning[ibin][it2].real - vacuum_tt_Binning[ibin].real) + 1j*(threep_Binning[ibin][it2,it1].imag / twopGamma4_Binning[ibin][it2].real - vacuum_tt_Binning[ibin].imag))\n ratio_Binning.append(np.array(arr))\n\n ratio_bmean = np.average(np.array(ratio_Binning),axis=0)\n errRatio = sqrt(Nbins-1) * ( np.std(np.array(ratio_Binning).real,axis=0) + 1j*np.std(np.array(ratio_Binning).imag,axis=0) )\n\n\n #ts = 8\n lfit8=[]\n for arr in ratio_Binning:\n Lw=2\n Hg=6\n popt, pconv = curve_fit(lambda x,a: a, np.arange(Lw,Hg+1), arr[8,Lw:Hg+1].imag, sigma=errRatio[8,Lw:Hg+1].imag )\n lfit8.append(popt[0])\n \n lfit8_bmean=np.average(lfit8)\n fit8_error = sqrt(Nbins-1) * np.std(lfit8)\n\n print('ts=8')\n for i in range(8+1):\n print('%d %+e %+e \\t %+e %+e' % (i,ratio_bmean[8,i].real,errRatio[8,i].real, ratio_bmean[8,i].imag,errRatio[8,i].imag))\n\n print(lfit8_bmean,fit8_error)\n print('\\n')\n\n #ts = 10\n lfit10=[]\n for arr in ratio_Binning:\n Lw=2\n Hg=8\n popt, pconv = curve_fit(lambda x,a: a, np.arange(Lw,Hg+1), arr[10,Lw:Hg+1].imag, sigma=errRatio[10,Lw:Hg+1].imag )\n lfit10.append(popt[0])\n \n lfit10_bmean=np.average(lfit10)\n fit10_error = sqrt(Nbins-1) * np.std(lfit10)\n\n print('ts=10')\n for i in range(10+1):\n print('%d %+e %+e \\t %+e %+e' % (i,ratio_bmean[10,i].real,errRatio[10,i].real, ratio_bmean[10,i].imag,errRatio[10,i].imag))\n\n print(lfit10_bmean,fit10_error)\n\n print('\\n')\n #ts = 12\n lfit12=[]\n for arr in ratio_Binning:\n Lw=2\n Hg=10\n popt, pconv = curve_fit(lambda x,a: a, np.arange(Lw,Hg+1), arr[12,Lw:Hg+1].imag, sigma=errRatio[12,Lw:Hg+1].imag )\n lfit12.append(popt[0])\n \n lfit12_bmean=np.average(lfit12)\n fit12_error = sqrt(Nbins-1) * np.std(lfit12)\n\n print('ts=12')\n for i in range(12+1):\n print('%d %+e %+e \\t %+e %+e' % (i,ratio_bmean[12,i].real,errRatio[12,i].real, ratio_bmean[12,i].imag,errRatio[12,i].imag))\n\n print(lfit12_bmean,fit12_error)\n\n# # summed ratio\n# Summed_ratio_Binning=[]\n# for Arr in ratio_Binning:\n# Summed_ratio_Binning.append(np.array( [np.sum(x[1:ts]) for ts,x in enumerate(Arr)] ))\n\n\n# summed_ratio_bmean = np.average(np.array(Summed_ratio_Binning),axis=0)\n# errSumRatio = sqrt(Nbins-1) * ( 
np.std(np.array(Summed_ratio_Binning).real,axis=0) + 1j*np.std(np.array(Summed_ratio_Binning).imag,axis=0) )\n\n# for ts in range(parameters['maxTsink']):\n# print('%d %+e %+e \\t %+e %+e' % (ts,summed_ratio_bmean[ts].real, errSumRatio[ts].real, summed_ratio_bmean[ts].imag, errSumRatio[ts].imag))\n\n # create figures\n plt.figure()\n if au.which('latex'):\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n\n if parameters['flavor'] == 'light':\n plt.ylabel(r\"$^{u+d}$\")\n elif parameters['flavor'] == 'strange':\n plt.ylabel(r\"$^{s}$\")\n else:\n plt.ylabel(r\"$^{c}$\")\n\n lw_ts8=2\n hg_ts8=6\n \n lw_ts10=2\n hg_ts10=8\n\n lw_ts12=2\n hg_ts12=10\n\n plt.plot(np.arange(-8,8+1), [0]*len(np.arange(-8,8+1)), color='black', linestyle='--', linewidth=1)\n\n plt.fill_between(np.arange(lw_ts10,hg_ts10+1)-10/2+0.1, lfit10_bmean-fit10_error, lfit10_bmean+fit10_error, facecolor='blue', alpha=0.15, edgecolor='white')\n plt.plot(np.arange(lw_ts10,hg_ts10+1)-10/2+0.1, [lfit10_bmean]*len(np.arange(lw_ts10,hg_ts10+1)), color='blue', linestyle='--',linewidth=2)\n\n plt.fill_between(np.arange(lw_ts8,hg_ts8+1)-8/2, lfit8_bmean-fit8_error, lfit8_bmean+fit8_error, facecolor='red', alpha=0.15, edgecolor='white')\n plt.plot(np.arange(lw_ts8,hg_ts8+1)-8/2, [lfit8_bmean]*len(np.arange(lw_ts8,hg_ts8+1)), color='red', linestyle='--',linewidth=2)\n\n plt.errorbar(np.arange(8+1) - 8/2, ratio_bmean[8,:8+1].imag, yerr=errRatio[8,:8+1].imag, fmt='o',color='red',label='$t_s=8$')\n plt.errorbar(np.arange(10+1) - 10/2 + 0.1, ratio_bmean[10,:10+1].imag, yerr=errRatio[10,:10+1].imag, fmt='^',color='blue',label='$t_s=10$')\n plt.errorbar(np.arange(12+1) - 12/2 + 0.2, ratio_bmean[12,:12+1].imag, yerr=errRatio[12,:12+1].imag, fmt='*',color='green',label='$t_s=12$')\n\n plt.legend(numpoints=1, loc=1)\n plt.savefig('averX_'+'Nc'+str(lenConf)+parameters['flavor']+'.pdf', format='pdf')\n############################ \nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"averX_disc.py","file_name":"averX_disc.py","file_ext":"py","file_size_in_byte":19747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"514292186","text":"from collections import defaultdict\nimport operator\nimport re\nimport fileinput\nfrom Canvas import Line\nfrom _collections import defaultdict\nfrom pandas.core.frame import DataFrame\n\nclass POSTag(object):\n \n # Construction initialization\n def __init__(self, trainingSetFile):\n self.trainingSetFile = trainingSetFile\n self.trainingSet = trainingSetFile.readlines()\n self.wordTagTable = {}\n self.wordProbTable = {}\n self.condnProbTable = {}\n self.newTagTable = {}\n self.errorTable = {}\n self.errorCountTable = {}\n self.topError = {}\n self.topErrorCount = {}\n self.topErrorDataFrame = DataFrame(columns=('Word','Tag','Error Probability'))\n \n #For conditional Probability Computation, given word and tag count Table \n def computeCondnProbTable(self):\n for line in self.trainingSet:\n inputWithoutSpaces = line.split()\n for input in inputWithoutSpaces:\n inputValue = input.split(\"_\")\n if inputValue[0] not in self.wordTagTable.keys() :\n self.wordTagTable[inputValue[0]] = {}\n self.wordTagTable[inputValue[0]][inputValue[1]] = 1\n elif inputValue[1] not in self.wordTagTable[inputValue[0]].keys() :\n self.wordTagTable[inputValue[0]][inputValue[1]] = 1\n else:\n self.wordTagTable[inputValue[0]][inputValue[1]] = self.wordTagTable[inputValue[0]][inputValue[1]] + 1 \n \n # Word count computation\n def computeWordProb(self, keyOne):\n 
self.wordProbTable[keyOne] = 0\n for keyTwo in self.wordTagTable[keyOne].keys():\n self.wordProbTable[keyOne] = self.wordProbTable[keyOne] + self.wordTagTable[keyOne][keyTwo]\n \n # Given a word , conditional probability of the tag computation\n def computeCondnProb(self):\n for keyOne in self.wordTagTable.keys():\n if keyOne not in self.condnProbTable.keys() :\n self.condnProbTable[keyOne] = {}\n self.computeWordProb(keyOne)\n for keyTwo in self.wordTagTable[keyOne].keys():\n self.condnProbTable[keyOne][keyTwo] = (float) (self.wordTagTable[keyOne][keyTwo])/self.wordProbTable[keyOne]\n \n #Most probable tag computation \n def computeNewTags(self):\n for keyOne in self.wordTagTable.keys():\n self.newTagTable[keyOne] = max(self.condnProbTable[keyOne].iteritems(), key= operator.itemgetter(1))[0]\n \n #Writing a file with words and most probable tags \n def newTagFileWrite(self):\n newTagFile = open(\"newTagFile.txt\", \"a\")\n for line in self.trainingSet:\n inputWithoutSpaces = line.split()\n for input in inputWithoutSpaces:\n inputValue = input.split(\"_\")\n inputValue[1] = self.newTagTable[inputValue[0]]\n newTagFile.write(inputValue[0]+\"_\"+inputValue[1]+\" \")\n newTagFile.write(\"\\n\")\n \n # Error analysis after the generation of words with most probable tags \n def errorAnalysis(self):\n for keyOne in self.wordTagTable.keys():\n self.errorTable[keyOne] = {}\n countWord = 0\n for keyTwo in self.wordTagTable[keyOne].keys():\n countWord = countWord + self.wordTagTable[keyOne][keyTwo]\n mostProbableTag = self.newTagTable[keyOne]\n countWordTag = self.wordTagTable[keyOne][mostProbableTag]\n self.errorTable[keyOne] = (float)(countWord - countWordTag)/ countWord\n self.errorCountTable [keyOne] = countWord - countWordTag\n \n #Printing the top five erroneous word with most probable tags \n def printTopErrors(self):\n self.topError = dict(sorted(self.errorTable.iteritems(), key = operator.itemgetter(1), reverse = True)[:5])\n self.topErrorCount = dict(sorted(self.errorCountTable.iteritems(), key = operator.itemgetter(1), reverse =True)[:5])\n count = 0\n print (\"Error Analysis after assigning most Probable Tags\")\n for value in sorted(self.topError.iteritems(), key = operator.itemgetter(1), reverse = True):\n #print(value)\n #keyOne = self.topError.keys()[self.topError.values().index(value)]\n self.topErrorDataFrame.loc[count] = [value[0], self.newTagTable[value[0]], value[1]]\n count = count + 1\n #for keyOne in self.topError.keys():\n # topErrorDataFrame.loc[count] = [keyOne, self.newTagTable[keyOne], self.topError[keyOne]]\n # count = count + 1\n #print (keyOne + \" \" + self.newTagTable[keyOne] + \" \" + str(self.topError[keyOne]))\n #print (\"With Respect to Error Counts\")\n #for keyOne in self.topErrorCount.keys():\n # print (keyOne + \" \" + self.newTagTable[keyOne] + \" \" + str(self.topErrorCount[keyOne]))\n print (self.topErrorDataFrame)\n \n #Applying rules to the file\n def newRuleFileWrite(self):\n rule = {}\n replace = {}\n rule[1] = r'north_([A-Za-z]{2,4}) (?P[A-Za-z]*)_NNP'\n replace[1] = r'north_JJ \\g_NNP'\n rule[2] = r'DT north_[A-Za-z]{2,4}'\n replace[2] = r'DT north_NN'\n rule[3] = r'_NN (?P[A-Za-z]*)_IN mid-January_[A-Za-z]{2,4} (?P[A-Za-z]*)_IN'\n replace[3] = r'_NN \\g_IN mid-January_JJ \\g_IN'\n rule[4] = r'(?P[A-Za-z_ ]*)_NN cut_[A-Za-z]{2,4} '\n replace[4] = r'\\g_NN cut_VBD '\n rule[5] = r'cut_VB (?P[A-Za-z _]*)VBD'\n replace[5] = r'cut_VBD \\gVBD'\n rule[6] = r'retired_[A-Za-z]{2,4} (?P[A-Za-z]*)_NN(?P.*)'\n replace[6] = r'retired_JJ \\g_NN\\g'\n rule[7] = 
r'(?P[A-Za-z17]*)_-RRB- (?P[A-Za-z17:]*)_CD (?P[A-Za-z0-9:]*)_CD -0-_[A-Za-z]{2,4}'\n replace[7] = r'\\g_-RRB- \\g_CD \\g_CD -0-_.'\n \n newRuleFile = open(\"newTagFile.txt\", \"r\")\n newFile = open(\"newFinalFile.txt\", \"w\")\n \n for line in newRuleFile.readlines():\n for count in xrange(1, 8):\n if(re.search(rule[count], line)):\n line = re.sub(rule[count], replace[count], line)\n newFile.write(line)\n \n #Error Analysis after applying rules \n def newError(self):\n myErrorTable = defaultdict(lambda: 0)\n finalFile = open(\"newFinalFile.txt\", \"r\")\n lineCount = 0\n for lineTwo in finalFile.readlines():\n line = self.trainingSet[lineCount]\n lineCount = lineCount + 1\n tokenOne = line.split()\n tokenTwo = lineTwo.split()\n tokenCount = 0\n for inputTwo in tokenTwo:\n wordOne = tokenOne[tokenCount].split('_')\n tokenCount = tokenCount + 1\n wordTwo = inputTwo.split('_')\n if wordOne[0] == wordTwo[0] and wordOne[1] != wordTwo[1]:\n myErrorTable[wordOne[0]] = myErrorTable[wordOne[0]] + 1\n myNewErrorDF = DataFrame(columns=['Word', 'New Error Probability']) \n myErrorPercentage = {}\n count = 0 \n for keyOne in self.topErrorDataFrame.iloc[:,0]:\n countWord = 0\n for keyTwo in self.wordTagTable[keyOne].keys():\n countWord = countWord + self.wordTagTable[keyOne][keyTwo]\n myErrorPercentage[keyOne] = (float) (myErrorTable[keyOne])/countWord\n myNewErrorDF.loc[count]=[keyOne, myErrorPercentage[keyOne]]\n count = count + 1\n #print(myErrorPercentage)\n print(\"Error Analysis after applying rules\")\n print(myNewErrorDF)\n \nPOSTagObject = POSTag(open(\"HW2_F16_NLP6320_POSTaggedTrainingSet-Unix.txt\", \"r\"))\nPOSTagObject.computeCondnProbTable()\nPOSTagObject.computeCondnProb()\nPOSTagObject.computeNewTags()\nPOSTagObject.newTagFileWrite()\nPOSTagObject.errorAnalysis()\nPOSTagObject.printTopErrors()\nPOSTagObject.newRuleFileWrite()\nPOSTagObject.newError()\n\n\n","sub_path":"RuleBasedPOSTagging/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599655000","text":"import csv\n\n# returns list of gene names from uniprot\n# http://www.uniprot.org/uniprot/?query=keyword:KW-0656\ndef get_uniprot_oncogenes():\n with open(\"data/uniprot.tsv\") as f:\n rows = list(csv.reader(f, delimiter=\"\\t\"))\n column_names = { name: i for i,name in enumerate(rows[0]) }\n rows = rows[1:]\n at = lambda row, column_name : row[column_names[column_name]]\n gene_names = []\n for row in rows:\n gene_names += at(row,\"Gene names\").split(\" \")\n return gene_names\n\n\n# returns gene_names from Network of cancer genes database\n# http://ncg.kcl.ac.uk/download.php\ndef get_ncg_oncogenes():\n with open(\"data/ncg.tsv\") as f:\n rows = list(csv.reader(f, delimiter=\"\\t\"))\n column_names = { name: i for i,name in enumerate(rows[0]) }\n rows = rows[1:]\n at = lambda row, column_name : row[column_names[column_name]]\n gene_names = []\n print(column_names)\n for row in rows:\n if (\n at(row, \"potential_false_positive\") == \"FALSE\" and\n at(row,\"cancer\") == \"TRUE\"\n ):\n gene_names += [ at(row,\"symbol\") ]\n return gene_names\n\nif __name__ == \"__main__\":\n import pickle\n\n l1 = get_uniprot_oncogenes()\n l2 = get_ncg_oncogenes()\n with open(\"oncogen_genes.pickle\",\"wb\") as f:\n pickle.dump(list(set(l1+l2)), 
f)\n\n\n","sub_path":"make_gene_list.py","file_name":"make_gene_list.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530502026","text":"from urllib import *\nfrom bs4 import BeautifulSoup\nfrom dbMethods import insertDataDICT\nimport datetime\n\ndef getEMCUSEP():\n html = urlopen(\"https://www.emcsg.com/marketdata/priceinformation\")\n soup = BeautifulSoup(html, \"lxml\")\n emc = soup.find(\"tr\", {\"class\":\"previous\"})\n emc.next_sibling\n USEP = emc.find_all('td')\n date = str(USEP[0].text)\n insertDate = str(datetime.date.today())\n USEPDict = {'Date': insertDate,\n 'Period':str(USEP[1].text),\n 'Demand': str(USEP[2].text),\n 'USEP':str(USEP[4].text)}\n insertDataDICT('USEP_data',USEPDict)\n\ngetEMCUSEP()\n","sub_path":"Scheduled_EMC_USEP.py","file_name":"Scheduled_EMC_USEP.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"219533147","text":"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis script serves three goals:\n (1) Demonstrate how to use NeMo Models outside of PytorchLightning\n (2) Shows example of batch ASR inference\n (3) Serves as CI test for pre-trained checkpoint\n\"\"\"\n\nimport copy\nimport json\nimport math\nimport os\nfrom argparse import ArgumentParser\n\nimport torch\nfrom omegaconf import OmegaConf\n\nimport nemo.collections.asr as nemo_asr\nfrom nemo.collections.asr.metrics.wer import word_error_rate\nfrom nemo.collections.asr.parts.utils.streaming_utils import FrameBatchASR\nfrom nemo.utils import logging\n\ncan_gpu = torch.cuda.is_available()\n\n\ndef get_wer_feat(mfst, asr, frame_len, tokens_per_chunk, delay, preprocessor_cfg, model_stride_in_secs, device):\n # Create a preprocessor to convert audio samples into raw features,\n # Normalization will be done per buffer in frame_bufferer\n # Do not normalize whatever the model's preprocessor setting is\n preprocessor_cfg.normalize = \"None\"\n preprocessor = nemo_asr.models.EncDecCTCModelBPE.from_config_dict(preprocessor_cfg)\n preprocessor.to(device)\n hyps = []\n refs = []\n\n with open(mfst, \"r\") as mfst_f:\n for l in mfst_f:\n asr.reset()\n row = json.loads(l.strip())\n asr.read_audio_file(row['audio_filepath'], delay, model_stride_in_secs)\n hyp = asr.transcribe(tokens_per_chunk, delay)\n hyps.append(hyp)\n refs.append(row['text'])\n\n wer = word_error_rate(hypotheses=hyps, references=refs)\n return hyps, refs, wer\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\n \"--asr_model\", type=str, required=True, help=\"Path to asr model .nemo file\",\n )\n parser.add_argument(\"--test_manifest\", type=str, required=True, help=\"path to evaluation data\")\n parser.add_argument(\"--batch_size\", type=int, default=32)\n parser.add_argument(\n \"--total_buffer_in_secs\",\n type=float,\n default=4.0,\n help=\"Length of 
buffer (chunk + left and right padding) in seconds \",\n )\n parser.add_argument(\"--chunk_len_in_ms\", type=int, default=1600, help=\"Chunk length in milliseconds\")\n parser.add_argument(\"--output_path\", type=str, help=\"path to output file\", default=None)\n parser.add_argument(\n \"--model_stride\",\n type=int,\n default=8,\n help=\"Model downsampling factor, 8 for Citrinet models and 4 for Conformer models\",\n )\n\n args = parser.parse_args()\n torch.set_grad_enabled(False)\n if args.asr_model.endswith('.nemo'):\n logging.info(f\"Using local ASR model from {args.asr_model}\")\n asr_model = nemo_asr.models.EncDecCTCModelBPE.restore_from(restore_path=args.asr_model)\n else:\n logging.info(f\"Using NGC cloud ASR model {args.asr_model}\")\n asr_model = nemo_asr.models.EncDecCTCModelBPE.from_pretrained(model_name=args.asr_model)\n\n cfg = copy.deepcopy(asr_model._cfg)\n OmegaConf.set_struct(cfg.preprocessor, False)\n\n # some changes for streaming scenario\n cfg.preprocessor.dither = 0.0\n cfg.preprocessor.pad_to = 0\n\n if cfg.preprocessor.normalize != \"per_feature\":\n logging.error(\"Only EncDecCTCModelBPE models trained with per_feature normalization are supported currently\")\n\n # Disable config overwriting\n OmegaConf.set_struct(cfg.preprocessor, True)\n asr_model.eval()\n asr_model = asr_model.to(asr_model.device)\n\n feature_stride = cfg.preprocessor['window_stride']\n model_stride_in_secs = feature_stride * args.model_stride\n total_buffer = args.total_buffer_in_secs\n\n chunk_len = args.chunk_len_in_ms / 1000\n\n tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs)\n mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs)\n print(tokens_per_chunk, mid_delay)\n\n frame_asr = FrameBatchASR(\n asr_model=asr_model, frame_len=chunk_len, total_buffer=args.total_buffer_in_secs, batch_size=args.batch_size,\n )\n\n hyps, refs, wer = get_wer_feat(\n args.test_manifest,\n frame_asr,\n chunk_len,\n tokens_per_chunk,\n mid_delay,\n cfg.preprocessor,\n model_stride_in_secs,\n asr_model.device,\n )\n logging.info(f\"WER is {round(wer, 2)} when decoded with a delay of {round(mid_delay*model_stride_in_secs, 2)}s\")\n\n if args.output_path is not None:\n\n fname = (\n os.path.splitext(os.path.basename(args.asr_model))[0]\n + \"_\"\n + os.path.splitext(os.path.basename(args.test_manifest))[0]\n + \"_\"\n + str(args.chunk_len_in_ms)\n + \"_\"\n + str(int(total_buffer * 1000))\n + \".json\"\n )\n hyp_json = os.path.join(args.output_path, fname)\n os.makedirs(args.output_path, exist_ok=True)\n with open(hyp_json, \"w\") as out_f:\n for i, hyp in enumerate(hyps):\n record = {\n \"pred_text\": hyp,\n \"text\": refs[i],\n \"wer\": round(word_error_rate(hypotheses=[hyp], references=[refs[i]]) * 100, 2),\n }\n out_f.write(json.dumps(record) + '\\n')\n\n\nif __name__ == '__main__':\n main() # noqa pylint: disable=no-value-for-parameter\n","sub_path":"examples/asr/speech_to_text_buffered_infer.py","file_name":"speech_to_text_buffered_infer.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533771934","text":"import sqlite3\nimport config\nimport datetime\n\nweekdays = {\n 0: 'Monday',\n 1: 'Tuesday',\n 2: 'Wednesday',\n 3: 'Thursday',\n 4: 'Friday',\n 5: 'Saturday',\n 6: 'Sunday'\n}\n\ndef todayIs():\n today = weekdays.get(datetime.datetime.today().weekday(), -1)\n return today\n\ndef dayAt(day_num):\n day = weekdays.get(day_num, -1)\n return day\n\ndef 
tomorrowIs():\n td = datetime.datetime.today() + datetime.timedelta(days=1)\n today = weekdays.get((td.weekday() + 1), -1)\n star = (td.isocalendar()[1] + 1) % 2\n if star == 0:\n star = 2\n return today, star\n\ndef starAtThisWeekIs():\n star = (datetime.datetime.today().isocalendar()[1] + 1) % 2\n if star == 0:\n star = 2\n return star\n\ndef parseClasses(sqlOutput):\n ret = []\n a = []\n for c in sqlOutput:\n a.clear()\n a.append(c[3])\n a.append(c[2])\n ret.append(a.copy())\n return ret\n\ndef prepareToSelect():\n today = todayIs()\n star = starAtThisWeekIs()\n return today, star\n\nclass Shedule:\n def __init__(self):\n self.connection = sqlite3.connect(config.cfg.db_name(), check_same_thread=False)\n self.cursor = self.connection.cursor()\n\n def getAllClasses(self):\n with self.connection:\n return self.cursor.execute(\"select * from classes\").fetchall()\n\n def classesToday(self):\n today, star = prepareToSelect()\n with self.connection:\n classes = self.cursor.execute(\n \"select * from classes where day=? and (star=? or star=?) order by number asc\",\n (today, 0, star)\n ).fetchall()\n return parseClasses(classes)\n\n def classesTomorrow(self):\n day, star = tomorrowIs()\n with self.connection:\n classes = self.connection.execute(\n \"select * from classes where day=? and (star=? or star=?) order by number asc\",\n (day, 0, star)\n ).fetchall()\n return parseClasses(classes)\n\n def classesMonday(self):\n return self.classesAtDay(0)\n\n def classesTuesday(self):\n return self.classesAtDay(1)\n\n def classesWednesday(self):\n return self.classesAtDay(2)\n\n def classesThursday(self):\n return self.classesAtDay(3)\n\n def classesFriday(self):\n return self.classesAtDay(4)\n\n def classesAtDay(self, day_of_week):\n day = dayAt(day_of_week)\n star = starAtThisWeekIs()\n with self.connection:\n classes = self.cursor.execute(\n \"select * from classes where day=? and (star=? or star=?) 
order by number asc\",\n (day, 0, star)\n ).fetchall()\n return parseClasses(classes)","sub_path":"shedule.py","file_name":"shedule.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105532953","text":"# ----------------\n# Google\n# ----------------\n\n# https://developers.google.com/analytics/devguides/collection/protocol/v1/devguide\n# https://ga-dev-tools.appspot.com/hit-builder/?v=1&tid=UA-XXXXX-Y&cid=555&t=pageview&uip=1.2.3.4&ua=Opera/9.80\n# https://developers.google.com/analytics/devguides/reporting/data/v1/quotas\n#\n\n\n''' For Google Analytics tracking in web '''\n\nimport hashlib\nimport re\nimport uuid\nfrom ipaddress import IPv4Address, IPv6Address, ip_address\nfrom operator import itemgetter\nfrom random import randint\nfrom typing import Union\nfrom urllib.parse import quote_plus as _q\n\nfrom tornado.escape import url_escape\nfrom tornado.httpclient import AsyncHTTPClient, HTTPRequest\n\nRE_LOCALE = re.compile(r'(^|\\s*,\\s*)([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})*)\\s*(;\\s*q\\s*=\\s*(1(\\.0{0,3})?|0(\\.[0-9]{0,3})))?', re.I)\n\ndef get_user_language(lang):\n user_locals = []\n matched_locales = RE_LOCALE.findall(str(lang))\n if matched_locales:\n lang_lst = map((lambda x: x.replace('-', '_')), (i[1] for i in matched_locales))\n quality_lst = map((lambda x: x and x or 1), (float(i[4] and i[4] or '0') for i in matched_locales))\n lang_quality_map = map((lambda x, y: (x, y)), lang_lst, quality_lst)\n user_locals = [x[0] for x in sorted(lang_quality_map, key=itemgetter(1), reverse=True)]\n\n if user_locals:\n return user_locals[0]\n else:\n return ''\n\ndef generate_hash(user_agent, screen_resolution, screen_color_depth):\n tmpstr = \"%s%s%s\" % (user_agent, screen_resolution, screen_color_depth)\n hash_val = 1\n\n if tmpstr:\n hash_val = 0\n for ordinal in map(ord, tmpstr[::-1]):\n hash_val = ((hash_val << 6) & 0xfffffff) + ordinal + (ordinal << 14)\n left_most_7 = hash_val & 0xfe00000\n if left_most_7 != 0:\n hash_val ^= left_most_7 >> 21\n\n return hash_val\n\ndef generate_unique_id(user_agent='', screen_resolution='', screen_color_depth=''):\n '''Generates a unique user ID from the current user-specific properties.'''\n return ((randint(0, 0x7fffffff) ^ generate_hash(user_agent, screen_resolution, screen_color_depth))\n & 0x7fffffff)\n\n\ndef generate_unique_id_v2(remote_ip: str, user_agent: str):\n \"\"\"\n Generates a unique user ID\n\n Using the remote IP and client user agent, to produce a somewhat\n unique identifier for users. A UUID version 4 conforming to RFC 4122\n is produced which is generally acceptable. It is not entirely random,\n but looks random enough to the outside. Using a hash func. 
the user\n info is completely anonymized.\n\n Args:\n remote_ip: Client IP address as a string, IPv4 or IPv6\n user_agent: User agent string of the client\n \"\"\"\n\n try:\n rip: Union[IPv4Address, IPv6Address] = ip_address(remote_ip)\n ip_packed = rip.packed\n except ValueError: # in the weird case I don't get an IP\n ip_packed = randint(0, 0xffffffff).to_bytes(4, 'big') # nosec\n\n h = hashlib.blake2b(digest_size=16, salt=b'biothings')\n h.update(ip_packed)\n h.update(user_agent.encode('utf-8', errors='replace'))\n\n d = bytearray(h.digest())\n # truncating hash is not that bad, fixing some bits should be okay, too\n d[6] = 0x40 | (d[6] & 0x0f) # set version\n d[8] = 0x80 | (d[8] & 0x3f) # set variant\n u = str(uuid.UUID(bytes=bytes(d)))\n return u\n\n\n# This is a mixin for biothing handlers, and references class variables from that class, cannot be used\n# without mixing in\nclass GAMixIn:\n def ga_track(self, event={}):\n # to control UID generation behavior to use the new algorithm\n # explicitly set GA_UID_GENERATOR_VERSION = 2 in config\n no_tracking = self.get_argument('no_tracking', None)\n is_prod = not self.settings.get('debug', False)\n if not no_tracking and is_prod and self.biothings.config.GA_ACCOUNT:\n _req = self.request\n path = _req.path\n ln = _req.headers.get('Accept-Language', '')\n remote_ip = _req.headers.get(\"X-Real-Ip\", _req.headers.get(\"X-Forwarded-For\", _req.remote_ip))\n user_agent = _req.headers.get(\"User-Agent\", \"\")\n host = _req.headers.get(\"Host\", \"N/A\")\n this_user = generate_unique_id(user_agent=user_agent)\n if getattr(self.web_settings, 'GA_UID_GENERATOR_VERSION', 1) == 2:\n this_user = generate_unique_id_v2(remote_ip, user_agent)\n user_agent = _q(user_agent)\n langua = get_user_language(ln)\n referrer = _req.headers.get(\"Referer\", None) # headers is c.i.\n # FIXME: in the case that the encoded value is actually more than\n # 2048 bytes (GA Limit), this truncate may break some things.\n # Typically we don't have to worry about it because most browsers\n # only send the host part now, not the full URL.\n # Legitimate requests from modern browsers is unlikely to be over\n # the limit, as domain names are limited to 255 chars. 
An attacker\n # might try to put really big headers here but we don't need to\n # worry about it.\n # Use 2047 here in case GA counts a \\0 internally\n if referrer:\n referrer = url_escape(referrer)[:2047]\n # compile measurement protocol string for google\n # first do the pageview hit type\n request_body = 'v=1&t=pageview&tid={}&ds=web&cid={}&uip={}&ua={}&an={}&av={}&dh={}&dp={}'.format(\n self.biothings.config.GA_ACCOUNT, this_user, remote_ip, user_agent,\n self.biothings.config.GA_TRACKER_URL, self.biothings.config.API_VERSION, host, path)\n # add referrer\n if referrer:\n request_body += f'&dr={referrer}'\n # add the event, if applicable\n if event:\n request_body += '\\nv=1&t=event&tid={}&ds=web&cid={}&uip={}&ua={}&an={}&av={}&dh={}&dp={}'.format(\n self.biothings.config.GA_ACCOUNT, this_user, remote_ip, user_agent,\n self.biothings.config.GA_TRACKER_URL, self.biothings.config.API_VERSION, host, path)\n # add event information also\n request_body += '&ec={}&ea={}'.format(event['category'], event['action'])\n if event.get('label', False) and event.get('value', False):\n request_body += '&el={}&ev={}'.format(event['label'], event['value'])\n if referrer:\n request_body += f'&dr={referrer}'\n\n req = HTTPRequest('http://www.google-analytics.com/batch', method='POST', body=request_body)\n\n #now send actual async requests\n http_client = AsyncHTTPClient()\n http_client.fetch(req)\n\n\n# ----------------\n# AWS\n# ----------------\n\n''' For Standalone biothing tracking '''\nimport sys\nimport os\nimport base64\nimport datetime\nimport hashlib\nimport hmac\nimport json\nimport logging\nfrom tornado.httpclient import HTTPRequest, AsyncHTTPClient\n\n# Key derivation functions. See:\n# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python\ndef sign(key, msg):\n return hmac.new(key, msg.encode(\"utf-8\"), hashlib.sha256).digest()\n\ndef getSignatureKey(key, date_stamp, regionName, serviceName):\n kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)\n kRegion = sign(kDate, regionName)\n kService = sign(kRegion, serviceName)\n kSigning = sign(kService, 'aws4_request')\n return kSigning\n\ndef tracking_callback(response):\n logging.debug(\"GA response: {}\".format(str(response)))\n logging.debug(\"Body: {}\".format(response.buffer.getvalue()))\n logging.debug(\"Response Headers: {}\".format(str(response.headers)))\n logging.debug(\"Request Headers: {}\".format(str(response.request.headers)))\n logging.debug(\"Request Body: {}\".format(str(response.request.body)))\n return\n\n# This is a mixin for biothing handlers, and references class variables from that class, cannot be used\n# without mixing in\nclass StandaloneTrackingMixin:\n def self_track(self, data={}):\n no_tracking = self.get_argument('no_tracking', None)\n access_key = self.biothings.config.STANDALONE_AWS_CREDENTIALS.get('AWS_ACCESS_KEY_ID', False)\n secret_key = self.biothings.config.STANDALONE_AWS_CREDENTIALS.get('AWS_SECRET_ACCESS_KEY', False)\n if not no_tracking and self.biothings.config.STANDALONE_TRACKING_URL and access_key and secret_key:\n self.biothings.config.tracking_payload.append(json.dumps({\n \"action\": data.get('action', 'NA'),\n \"biothing\": self.biothings.config.ES_DOC_TYPE,\n \"category\": data.get('category', 'NA')\n }))\n logging.debug(\"tracking_payload size: {}\".format(len(self.biothings.config.tracking_payload)))\n if (len(self.biothings.config.tracking_payload) == self.biothings.config.STANDALONE_TRACKING_BATCH_SIZE):\n\n # ************* REQUEST VALUES 
*************\n request_body = '\\n'.join(self.biothings.config.tracking_payload)\n # logging.debug(\"Standalone Request Body: {}\".format(request_body))\n # reset payload\n self.biothings.config.tracking_payload = []\n method = 'POST'\n service = 'execute-api'\n endpoint = self.biothings.config.STANDALONE_TRACKING_URL\n host = endpoint.split('://')[1].split('/')[0]\n canonical_uri = endpoint.split(host)[1]\n region = 'us-west-1'\n\n # POST requests use a content type header.\n content_type = 'application/x-amz-json-1.0'\n content_length = len(request_body)\n\n # Create a date for headers and the credential string\n t = datetime.datetime.utcnow()\n amz_date = t.strftime('%Y%m%dT%H%M%SZ')\n date_stamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope\n\n # ************* TASK 1: CREATE A CANONICAL REQUEST *************\n # http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html\n\n # Step 1 is to define the verb (GET, POST, etc.)--already done.\n\n # Step 2: Create canonical URI--the part of the URI from domain to query\n # string (use '/' if no path) -- already done.\n\n ## Step 3: Create the canonical query string. In this example, request\n # parameters are passed in the body of the request and the query string\n # is blank.\n canonical_querystring = ''\n\n # Step 4: Create the canonical headers. Header names must be trimmed\n # and lowercase, and sorted in code point order from low to high.\n # Note that there is a trailing \\n.\n canonical_headers = 'content-length:' + '{}'.format(content_length) + '\\n' + 'content-type:' + content_type + '\\n' + 'host:' + host + '\\n' + 'x-amz-date:' + amz_date + '\\n'\n\n # Step 5: Create the list of signed headers. This lists the headers\n # in the canonical_headers list, delimited with \";\" and in alpha order.\n # Note: The request can include any headers; canonical_headers and\n # signed_headers include those that you want to be included in the\n # hash of the request. 
\"Host\" and \"x-amz-date\" are always required.\n signed_headers = 'content-length;content-type;host;x-amz-date'\n\n # Step 6: Create payload hash.\n payload_hash = hashlib.sha256(request_body.encode('utf-8')).hexdigest()\n\n # Step 7: Combine elements to create create canonical request\n canonical_request = method + '\\n' + canonical_uri + '\\n' + canonical_querystring + '\\n' + canonical_headers + '\\n' + signed_headers + '\\n' + payload_hash\n\n # ************* TASK 2: CREATE THE STRING TO SIGN*************\n # Match the algorithm to the hashing algorithm you use, either SHA-1 or\n # SHA-256 (recommended)\n algorithm = 'AWS4-HMAC-SHA256'\n credential_scope = date_stamp + '/' + region + '/' + service + '/' + 'aws4_request'\n string_to_sign = algorithm + '\\n' + amz_date + '\\n' + credential_scope + '\\n' + hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()\n\n # ************* TASK 3: CALCULATE THE SIGNATURE *************\n # Create the signing key using the function defined above.\n signing_key = getSignatureKey(secret_key, date_stamp, region, service)\n\n # Sign the string_to_sign using the signing_key\n signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()\n\n # ************* TASK 4: ADD SIGNING INFORMATION TO THE REQUEST *************\n # Put the signature information in a header named Authorization.\n authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature\n\n req = HTTPRequest(url=self.biothings.config.STANDALONE_TRACKING_URL, method=method, body=request_body,\n headers={\n \"Content-Type\": content_type,\n \"Content-Length\": content_length,\n \"X-Amz-Date\": amz_date,\n \"Authorization\": authorization_header,\n \"Host\": host\n }\n )\n\n #now send actual async requests\n http_client = AsyncHTTPClient()\n http_client.fetch(req) # , callback=tracking_callback)\n","sub_path":"biothings/web/analytics/channels.py","file_name":"channels.py","file_ext":"py","file_size_in_byte":14013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462342960","text":"# Remove Duplicate characters from string using Hash tables\n\ndef remove_duplicates(s):\n # we should create hash dict for all 26 lower case letters by filling with zeros\n hash_arr = [0 for _ in range(26)]\n ns = \"\" # new result string\n for i in s:\n # if we meet a letter, reassign hash value by index to 1\n # append to result string\n if (hash_arr[ord(i) - 96] == 0):\n hash_arr[ord(i) - 96] = 1\n ns += i\n return ns\n\nif __name__ == '__main__':\n \n s = input()\n\n # s = 'aabbbbccdef'\n\n result = remove_duplicates(s)\n \n print(result)\n","sub_path":"python/RemoveDuplicatesHash.py","file_name":"RemoveDuplicatesHash.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"278875296","text":"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\nfrom app.models.Models import MenuAuth\nimport json\nfrom flask import request\nfrom app import db\n\nclass Auth(object):\n def __init__(self, user = None):\n self.user = user\n self.pids = []\n\n def auth_menus(self):\n path = request.path[1:]\n path = path +'index' if path == 'admin/' else path\n menus = db.session.query(\n MenuAuth\n ).order_by(MenuAuth.sort.asc()).filter(MenuAuth.type == 0).all()\n current_menus = db.session.query(MenuAuth).filter(MenuAuth.method == path).scalar()\n if 
current_menus:\n # Get all parent menus\n self.getPid(current_menus.id,menus)\n\n if str(self.user.group_id) == '1':\n return self.tree(menus)\n rules_str = self.user.group.rules\n rules = []\n if rules_str :\n rules = json.loads(rules_str)\n auth_menus = []\n for menu in menus:\n for rule_id in rules:\n if menu.id == rule_id:\n auth_menus.append(menu)\n auth_menus = self.tree(auth_menus)\n return auth_menus\n\n def tree(self,data,pid = 0):\n tree_list = []\n for da in data:\n d = da.to_json()\n if d['is_show'] == 0:\n continue\n for p in self.pids:\n if d['id'] == p:\n d['active'] = True\n if str(d.get('parent_id')) == str(pid):\n tmp = self.tree(data,d.get('id'))\n if tmp :\n d['child'] = tmp\n tree_list.append(d)\n return tree_list\n\n def tree_list(self,data,pid = 0):\n tree_list = []\n for da in data:\n d = da.to_json()\n if str(d.get('parent_id')) == str(pid):\n tmp = self.tree_list(data,d.get('id'))\n tree_list.append(d)\n\n if tmp :\n tree_list += tmp\n return tree_list\n\n def getPid(self,id,data):\n for da in data:\n if da.id == int(id):\n self.pids.append(da.id)\n self.getPid(da.parent_id,data)\n return self.pids\n\n","sub_path":"app/utils/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"200986442","text":"#! /usr/bin/env python\n\nimport glob\nimport sys\nimport os, errno\nimport argparse\nimport fnmatch\nfrom Bio import SeqIO\nfrom Bio.SeqFeature import FeatureLocation\n\n\nparser = argparse.ArgumentParser(description='')\nparser.add_argument(\"-p\",\"--path\", help=\"searching path of sequences files\",\n type=str)\nparser.add_argument(\"-i\",\"--infile\", help=\"input file list of sequence files\",\n type=str)\nparser.add_argument(\"-pfind\",\"--pathfind\", help=\"[mode] search all sequences files in given path (-p) otherwise parse a given list (-i)\",\n action=\"store_true\")\nif len(sys.argv)==1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\nargs = parser.parse_args()\n\n\n\nif args.pathfind:\n 'Search all fasta files'\n path = args.path\n in_files = []\n for r, d, f in os.walk(path):\n for file in f:\n if '.fa' in file:\n in_files.append(os.path.join(r, file))\nelse:\n 'We parse a list of files'\n input_list = args.infile\n with open(input_list) as f:\n in_files = f.readlines()\n\nprint (\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (\"gene_name\",\"#taxa\",\"maxlen\",\"#1.0\",\"#0.75\",\"#0.5\",\"#0.25\"))\n\nfor file in in_files:\n cf = file.rstrip()\n cfile_path = cf\n cfile_name = os.path.basename(cfile_path)\n cindex_of_dot = cfile_name.index('.')\n gene_name = cfile_name[:cindex_of_dot]\n\n cur_genome = SeqIO.parse(file, \"fasta\")\n\n taxa = 0\n 'keep the seq object into new list'\n seqs = []\n for record in cur_genome:\n #info = record.description.split(\" \")[1:len(record.description.split(\" \"))]\n #dicinfo = dict(item.split(\"=\") for item in info)\n #dicinfo['seq']=str(record.seq)\n taxa = taxa+1\n seqs.append(str(record.seq))\n\n reflen=len(max(seqs))\n\n mseqs=[]\n for elem in seqs:\n match=elem.replace(\"N\",\"\")\n mseqs.append(float(len(match))/float(reflen))\n\n full=sum(i >= 1.0 for i in mseqs)\n perct25=sum(i >= 0.25 for i in mseqs)\n perct50=sum(i >= 0.50 for i in mseqs)\n perct75= sum(i >= 0.75 for i in mseqs)\n\n print (\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % 
(str(gene_name),int(taxa),int(reflen),int(full),int(perct75),int(perct50),int(perct25)))\n","sub_path":"src/ExoStat.py","file_name":"ExoStat.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146122812","text":"import tweepy\nimport time\nimport random\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token =''\naccess_token_secret = ''\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n\n#takes in text file and numerates the text file lines\n#returns line with text at random\ndef random_line(afile):\n line = next(afile)\n for num, aline in enumerate(afile):\n if random.randrange(num + 2): continue\n line = aline\n return line\n\nfile = open('quotes.txt', 'r')\nwhile (True):\n tweet = random_line(file)\n api.update_status(tweet) #comment this line out if you do not want the tweets updating to the bot account\n tweet_flag = 1\n #print (tweet) #uncomment to print tweet on command line\n if(tweet_flag==1):\n time.sleep(86400) #sleep for the next 86400 seconds (24 hours)\n tweet_flag = 0\n file.seek(0)","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16908582","text":"\"\"\"\nTrains behavioral cloning model with expert data by berkely cs234 \nExample usage:\npython train_bc.py Humanoid-v2\n\"\"\"\n\nimport pickle\n\nimport numpy as np\n\nnp.set_printoptions(suppress=True)\n\nfrom keras.layers import Dense, Dropout\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam, SGD\nfrom keras.initializers import glorot_normal\nfrom sklearn.model_selection import train_test_split\nimport argparse\n\n\ndef generate_batches(observations, actions, batch_size):\n \"\"\"\n Generate batches \n \"\"\"\n\n num = len(observations)\n while True:\n idxs = np.random.choice(num, batch_size)\n batch_obs, batch_actions = observations[idxs], actions[idxs].astype(float)\n batch_actions = [act.flatten() for act in batch_actions]\n\n yield np.asarray(batch_obs), np.asarray(batch_actions)\n\n\ndef make_model():\n\n model = Sequential()\n model.add(\n Dense(\n 256,\n input_shape=(input_shape,),\n activation=\"relu\",\n kernel_initializer=\"glorot_normal\",\n )\n )\n model.add(Dropout(0.1))\n model.add(Dense(256, activation=\"relu\", kernel_initializer=\"glorot_normal\"))\n model.add(Dense(out_shape))\n model.compile(\n loss=\"mean_squared_error\", optimizer=Adam(lr=0.001), metrics=[\"accuracy\"]\n )\n model.summary()\n\n return model\n\n\ndef run_and_save(model, epochs=30):\n\n batch_size = 32\n model.fit_generator(\n generate_batches(X_train, y_train, batch_size),\n validation_data=generate_batches(X_valid, y_valid, batch_size),\n epochs=epochs,\n steps_per_epoch=len(X_train) / batch_size,\n validation_steps=len(X_valid),\n )\n\n model.save(f\"./models/hw1/{agent_name}.h5\")\n print(f\"Model saved to ./models/hw1/{agent_name}.h5\")\n obs = observations[0]\n acs = model.predict(np.reshape(np.array(obs), (1, len(obs))))\n\n print(f\"obesrvation {obs} \\n \\naction - {acs}\")\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"env_name\", type=str)\n args = parser.parse_args()\n env_name = args.env_name\n print(env_name)\n\n agent_name = env_name\n\n file = open(f\"./expert_data/{agent_name}.pkl\", \"rb\")\n data = 
pickle.load(file)\n print(data)\n\n observations, actions = data[\"observations\"], data[\"actions\"]\n X_train, X_valid, y_train, y_valid = train_test_split(\n observations, actions, test_size=0.1, random_state=0\n )\n\n print(\n f\" Xtrain={X_train.shape}, ytrain = {y_train.shape},Xvalid = {X_valid.shape}\"\n f\", yvalid = {y_valid.shape}, lenght of X_train = {len(X_train)}, len of xvalid = {len(X_valid)}\"\n )\n\n input_shape = X_train.shape[1]\n out_shape = y_train.shape[-1]\n\n print(f\"input shape is {input_shape} and output shape is {out_shape}\")\n\n model = make_model()\n run_and_save(model, epochs=30)\n\n","sub_path":"behaviour_cloning_train.py","file_name":"behaviour_cloning_train.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620373188","text":"import torch\nfrom torch.autograd import Variable\nimport json\nimport sys\nfrom ModelClass import LinearModel\nimport random\n\n\nargs = sys.argv\nif not len(args)>2:\n print(\"Usage: \\n \\t python {} [PATH_TO_DATASET_JSON] [PATH_TO_INDEX]\".format(args[0]))\n exit()\nDATASET = args[1]\nINDEX =args[2]\n\ndevice = torch.device(\"cuda\")\n\ndataset = json.load(open(DATASET,'r'))\nlabels = json.load(open(INDEX,'r'))\n\n\nINPUT = [dataset[dat]['input'] for dat in dataset]\nOUTPUT = [dataset[dat]['output'] for dat in dataset]\nprint(\"Total Sample Size:{}\\n\".format(len(INPUT)))\nprint(\"Partitioning Samples...\") \n\n#partition\nVAL_INPUT = []\nVAL_OUTPUT = []\nVAL_SIZE = int(0.1*len(INPUT))\nwhile len(VAL_INPUT) < VAL_SIZE:\n item_idx = random.randrange(0,len(INPUT))\n VAL_INPUT.append(INPUT.pop(item_idx))\n VAL_OUTPUT.append(OUTPUT.pop(item_idx))\n\nBATCH_INPUT = []\nBATCH_OUTPUT = []\nBATCH_SIZE = 100\n\nwhile len(INPUT)>=BATCH_SIZE:\n BATCH_INPUT.append(INPUT[0:BATCH_SIZE])\n BATCH_OUTPUT.append(OUTPUT[0:BATCH_SIZE])\n INPUT = INPUT[BATCH_SIZE:]\n OUTPUT = OUTPUT[BATCH_SIZE:]\nif len(INPUT)>=1:\n BATCH_INPUT.append(INPUT)\n BATCH_OUTPUT.append(OUTPUT)\n\n\n\nprint(\"Validation Sample Size:{}\\n\".format(len(VAL_INPUT)))\n\n\nBATCH_INPUT = [Variable(torch.tensor(i)) for i in BATCH_INPUT]\nBATCH_OUTPUT =[Variable(torch.tensor(i)) for i in BATCH_OUTPUT]\nVAL_INPUT = Variable(torch.tensor(VAL_INPUT))\nVAL_OUTPUT = Variable(torch.tensor(VAL_OUTPUT))\n\nmodel = LinearModel(len(INPUT[0]),len(OUTPUT[0]))\ncriterion = torch.nn.MSELoss(reduction='mean')\noptimizer = torch.optim.SGD(model.parameters(),lr=0.05)\n\nfor epoch in range(10000):\n Tloss = 0\n for i in range(len(BATCH_INPUT)):\n pred_y = model(BATCH_INPUT[i].float())\n loss = criterion(pred_y,BATCH_OUTPUT[i].float())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n Tloss+=loss.data\n print(\"Epoch {}, Loss {}\".format(epoch,loss.data),end='\\r')\n sys.stdout.flush()\t\n Tloss/=len(BATCH_INPUT)\n val_pred = model(VAL_INPUT.float())\n val_loss = criterion(val_pred,VAL_OUTPUT.float())\n print(\"Epoch {}:[VLoss:{:.8f}][TLoss:{:.8f}]\".format(epoch,val_loss,Tloss))\n\n\nsaved_model = \"models/classifier_net_\"+DATASET.split('.')[0]+\"_\"+str(len(INPUT[0]))+\"x\"+str(len(OUTPUT[0]))+\"_h1xh2_\"+str(model.hidden_1.in_features)+\"x\"+str(model.hidden_2.out_features)+\"_size_\"+str(len(BATCH_INPUT))+\".pth\"\ntorch.save(model.state_dict(),saved_model)\n\ntry:\n while True:\n x = int(input(\"\\nEnter index of element:\"))\n new_x = VAL_INPUT[x]\n pred = model(new_x.float())\n pred_label = labels[pred.data.tolist().index(max(pred.data.tolist()))]\n actual_label = [labels[i] for i,p 
in enumerate(VAL_OUTPUT[x]) if p]\n print(\"Prediction: {}\\n Actual: {}\\n Scores:{}\".format(pred_label,actual_label,pred.data.tolist()))\nexcept KeyboardInterrupt:\n print(\"\\nModel Written to: {}\".format(saved_model))\n","sub_path":"tasks/human_pose/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548642026","text":"from flask import Flask, render_template, request, jsonify\nimport sqlite3\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n@app.route('/movie')\ndef movie():\n return render_template('movie.html')\n\n@app.route('/addmovie', methods = {'POST'})\ndef addmovie():\n connection=sqlite3.connect('database.db')\n cursor = connection.cursor()\n print ('connection opened')\n\n try:\n movie_name = request.form['movie_name']\n movie_year = request.form['movie_year']\n movie_description = request.form['movie_description']\n print('values added')\n cursor.execute('INSERT INTO movies (movie_name, movie_year, movie_description) VALUES (?, ?, ?)', (movie_name, movie_year, movie_description))\n print('values inserted into database')\n connection.commit()\n print('values finalized')\n message = 'Record Successfully Added'\n\n except:\n connection.rollback()\n message = 'Error in Insert Operation'\n\n finally:\n return render_template('result.html', message = message)\n connection.close()\n\n@app.route('/movielist')\ndef movielist():\n connection = sqlite3.connect('database.db')\n cursor = connection.cursor()\n cursor.execute('SELECT * FROM movies')\n movieList = cursor.fetchall()\n connection.close()\n return jsonify(movieList)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"629045329","text":"''''\n\nThis file is to wrap fibonacci code in a class\n\nReference: https://www.w3schools.com/python/python_classes.asp\n\nIf we have to use a method from Class, outside calling file\n-we have to import the class in calling file\n-create an object and call function\n-if it is a static function, call ClassName.function_name()\n\n'''\n\n\nclass MyFibonacciClass:\n\n @staticmethod\n def get_fibonacci_number(order=1):\n # Initialize\n f1 = 0\n f2 = 1\n fn = 0\n\n # Generate series\n for n in range(order):\n if n == 0:\n print(f1)\n else:\n fn = f1 + f2\n print(fn)\n f1 = f2 # For next iteration of the loop, f2 is our f1\n f2 = fn # For next iteration of the loop, fn is our f2\n\n print(\"Fibonacci number for order \" + str(order) + \" is \" + str(fn))\n return fn\n","sub_path":"learning/basics11_fibonacci_class.py","file_name":"basics11_fibonacci_class.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541149985","text":"import torch\nfrom torchvision import transforms, datasets\n\n\ndef inf_train_gen(batch_size):\n transf = transforms.Compose([\n transforms.Pad(2),\n transforms.ToTensor()\n \n ])\n loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n '../data/mnist', train=True, download=True,\n transform=transf\n ), batch_size, drop_last=True, shuffle = True\n )\n while True:\n for img, labels in loader:\n yield 
img","sub_path":"data/mnist_32.py","file_name":"mnist_32.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"592907972","text":"from Presentation.input_verifiers import Input_Verifiers\nfrom Logic.Enums import EnumManager\nfrom random import choice\ndef vehicle_editor(form):\n # make sure no vehicle is defective and available\n if form['vehicle_state'] == 'DEFECTIVE': form['vehicle_status'] = 'Unavailable'\n \n return form\n\ndef contract_editor(form): \n #Editors for HR university contract and Icelandic space agency\n if form['contract_start'] == '2020-11-01' and form['contract_end'] == '2021-03-31':\n if form['vehicle_type'].lower() == 'light water' or form['vehicle_type'].lower() == 'light road':\n form['late_fee'] = '0'\n if form['vehicle_type'].lower() == 'medium road':\n form['late_fee'] = '200'\n return form\n\ndef employee_editor(form):\n #editor so that if employee title is edited to office location will be reykjavik and country iceland\n if form['title'] == 'office':\n form['airport'] = 'reykjavik'\n form['country'] = 'Iceland'\n\n else:\n #if employee is edited to a country that does not match airport location this fixes it \n if form['airport'] == 'reykjavik': form['country'] = 'Iceland'\n if form['airport'] == 'kulusuk' or form['airport'] == 'nuuk': form['country'] = 'Greenland'\n if form['airport'] == 'tingwall': form['country'] = 'Shetland'\n if form['airport'] == 'longyearbyen': form['country'] = 'Svalbard'\n if form['airport'] == 'torshavn': form['country'] = 'Farao Islands'\n return form\n\n ","sub_path":"src/Logic/form_editors.py","file_name":"form_editors.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450858534","text":"from pathlib import Path\nfrom sh import rsync\nfrom peewee import *\n\npinput = Path('input') # Set default paths\npoutput = Path('output')\n\nkey_list = ['README', 'Mr.Robot', '91 Days'] #,'.py']\n\ndef dbinit():\n db = SqliteDatabase('tsort.sqlite')\n class keys():\n id = IntegerField()\n key = TextField()\n hitcount = IntegerField()\n class Meta:\n database = db\n\ndef fileexists(path, name):\n for x in path.iterdir():\n if x == name:\n return(True)\n return(False)\n\ndef ifitemfound(string):\n if string != None: # Check if item was found\n print(string)\n else: # If not found, print unmatched message\n print('Unmatched Item:',string)\n\ndef findkey(i, string, key, c):\n keyseq = 0 # Declare variables that keep track of where we are in the key/string\n keylen = 0 # ^\n if c == key[keyseq]:\n keyseq = keyseq+1\n i = i+1\n while True: # Loop until an entire key is found\n keyseq = keyseq+1 # Move one char place in key to check next set of chars\n i = i+1\n keylen = keylen+1\n if keyseq == len(key):\n return(True)\n else:\n if i >= len(string):\n break\n elif string[i] == key[keyseq]: # Check for matching chars\n continue\n\ndef chkmatchstr(string):\n for i, c in enumerate(string): # Get char from string and number position\n for keyint, key in enumerate(key_list): # Loop through key_list for keys and number position. 
Also allows to search for chars inbetween strings.\n if findkey(i, string, key, c) == True:\n return(key) \n\ntsortconf = Path('.') / 'tsort.conf'\nif tsortconf.exists() == True:\n print('Found Config')\nelse:\n f = open('tsort.conf', 'w+')\n f.write('# tSort Configuration file\\n')\n print('Created Config')\n\nfor x in Path('.').iterdir(): # Find input and output sort folders.\n if x.is_dir:\n print(x)\n if x == 'input':\n pinput = x\n if x == 'output':\n poutput = x\nprint(pinput, poutput)\n\nfor x in Path('.').iterdir():\n if x.is_file():\n print('search:',x)\n sort = chkmatchstr(str(x))\n if sort != None:\n print('string:',x,'key:',sort)\n else:\n print('Unmatched Item:',x)\n\n#ifitemfound(chkmatchstr('[lol] README.md'))\n#ifitemfound(chkmatchstr('read'))\n#ifitemfound(chkmatchstr('Mr.Robot.S02E01.720p.WEB.h264-LowGear[eztv].mkv'))\n#ifitemfound(chkmatchstr('[HorribleSubs] 91 Days- 02 [720p].mkv'))\n","sub_path":"tsort.py","file_name":"tsort.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644146544","text":"import json\n\n\ndef load_json(path):\n try:\n with open(path, encoding='utf-8') as f:\n cfg = json.load(f)\n return cfg\n except FileNotFoundError:\n print(\"%s not found\" % path)\n exit(-1)\n except Exception as e:\n print(e)\n exit(-1)\n","sub_path":"sell_assistant/utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"58671749","text":"\"\"\"\nClient for simple Ouch Server\n\"\"\"\n\nimport sys\nimport asyncio\nimport asyncio.streams\nimport configargparse\nimport logging as log\n# import binascii\nfrom random import randrange\nimport itertools\n\nfrom OuchServer.ouch_messages import OuchClientMessages, OuchServerMessages\n\np = configargparse.ArgParser()\np.add('--port', default=12345)\np.add('--host', default='127.0.0.1', help=\"Address of server\")\noptions, args = p.parse_known_args()\n\ndef main():\n log.basicConfig(level=log.DEBUG)\n log.debug(options)\n\n async def client():\n reader, writer = await asyncio.streams.open_connection(\n options.host, \n options.port, \n loop=loop)\n\n async def send(request):\n writer.write(bytes(request))\n await writer.drain()\n\n async def recv():\n try:\n header = (await reader.readexactly(1))\n except asyncio.IncompleteReadError:\n log.error('connection terminated without response')\n return None\n log.debug('Received Ouch header as binary: %r', header)\n log.debug('bytes: %r', list(header))\n message_type = OuchServerMessages.lookup_by_header_bytes(header)\n try:\n payload = (await reader.readexactly(message_type.payload_size))\n except asyncio.IncompleteReadError as err:\n log.error('Connection terminated mid-packet!')\n return None\n log.debug('Received Ouch payload as binary: %r', payload)\n log.debug('bytes: %r', list(payload))\n\n response_msg = message_type.from_bytes(payload, header=False)\n return response_msg\n\n # send a line\n while True:\n message_type = OuchClientMessages.EnterOrder\n \n for index in itertools.count():\n request = message_type(\n order_token='{:014d}'.format(index).encode('ascii'),\n buy_sell_indicator=b'B',\n shares=randrange(1,10**6-1),\n stock=b'AMAZGOOG',\n price=randrange(1,10**9-100),\n time_in_force=randrange(0,99999),\n firm=b'OUCH',\n display=b'N',\n capacity=b'O',\n intermarket_sweep_eligibility=b'N',\n minimum_quantity=1,\n cross_type=b'N',\n customer_type=b' ')\n 
print('send message: ', request)\n log.info(\"Sending Ouch message: %s\", request)\n await send(request)\n response = await recv()\n print('recv message: ', response)\n log.info(\"Received response Ouch message: %s\", response) \n await asyncio.sleep(4.0)\n \n writer.close()\n await asyncio.sleep(0.5)\n\n\n loop = asyncio.get_event_loop()\n\n # creates a client and connects to our server\n try:\n loop.run_until_complete(client())\n finally:\n loop.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"exchange_server/OuchServer/ouch_client.py","file_name":"ouch_client.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"372667779","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Jung Bong-Hwa\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport json\nimport re\nimport codecs\nfrom os import path\n\nhelp = \"\"\"\n* [[Sample]]: Button. ''\n* '[ ]' or [Sample]: Input Box. ''\n* { }: Check Box. ''\n* {V}: Checked Check Box. ''\n* ( ): Radio Button. ''\n* (V): Checked Radio Button. ''\n* @ @V or @Sample@V: Drop-down List. ''\n* !Sample!: Table Header Cell. 'Sample'\n* |Sample|: Table Cell. 'Sample'\n* End with two spaces: Line break. '
    '\n* Blank line: Paragraph break. '

    '\n\"\"\"\n\n# Defines arguments.\nparser = argparse.ArgumentParser(description='Text UI Converter\\n' + help, \n formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\"-f\", \"--file\", help=\"Output HTML file name.\")\nparser.add_argument(\"-c\", \"--css\", help=\"Use the css file.\")\nparser.add_argument(\"-p\", \"--print_output\", action='store_true', help=\"Print output without file\")\nparser.add_argument('text_files', nargs='+', help=\"Text files\")\n\ndef convert(text):\n if text == '':\n return '

    '\n text = re.sub(r' $', r'
    ', text)\n text = text.strip()\n\n text = re.sub(r'\\[\\[([^\\]]+)\\]\\]', r'', text)\n text = re.sub(r'\\[ \\]', r'', text)\n text = re.sub(r'\\[([^\\]]+)\\]', r'', text)\n text = re.sub(r'\\{ \\}', r'', text)\n text = re.sub(r'\\{V\\}', r'', text)\n text = re.sub(r'\\( \\)', r'', text)\n text = re.sub(r'\\(V\\)', r'', text)\n text = re.sub(r'@ @V', r'', text)\n text = re.sub(r'@([^@]+)@V', r'', text)\n \n # Titles\n if text.startswith('!') and text.endswith('!'):\n text = re.sub(r'^!', r'', text)\n text = re.sub(r'!$', r'', text)\n text = re.sub(r'!', r'', text)\n\n if text.startswith('|') and text.endswith('|'):\n text = re.sub(r'^\\|', r'', text)\n text = re.sub(r'\\|$', r'', text)\n text = re.sub(r'\\|', r'', text)\n \n return text\n\ndef convert_text(text_file):\n html = ''\n f = open(text_file, 'r', encoding='utf-8', errors='ignore')\n while True:\n line = f.readline()\n if not line: break\n html += convert(line) + '\\n'\n f.close()\n\n return html\n\ndef convert_text_to_html_file(text_file):\n html = convert_text(text_file)\n\n if args.css: \n html = '\\n' + html\n elif path.exists('default.css'):\n html = '\\n' + html\n\n html = '\\n' + html + '\\n\\n'\n return html\n\ndef write_file(path, html):\n fo = codecs.open(path, encoding='utf-8', mode='w')\n fo.write(html)\n fo.close()\n\n\n# Main\nargs = parser.parse_args()\n\n\nif args.file:\n html = convert_text_to_html_file(args.text_files[0])\n write_file(args.file, html)\nelif args.print_output:\n html = convert_text(args.text_files[0])\n print(html)\nelse:\n for text_file in args.text_files:\n html = convert_text_to_html_file(text_file)\n filename, file_extension = path.splitext(text_file)\n write_file(filename + \".html\", html)\n","sub_path":"src/tui.py","file_name":"tui.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504413025","text":"import logging\nfrom collections import OrderedDict\nfrom importlib import import_module\nfrom typing import List\n\nimport graphene\nimport inflection\nimport sqlalchemy\nfrom graphene import Connection, Int, Node\nfrom graphene.types.objecttype import ObjectType, ObjectTypeOptions\nfrom graphene_sqlalchemy.types import (\n SQLAlchemyObjectType,\n sort_argument_for_object_type,\n)\nfrom graphene_sqlalchemy_filter import FilterableConnectionField, FilterSet\nfrom graphene_sqlalchemy_filter.connection_field import FilterableFieldFactory\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\n\n\nclass CustomConnectionField(FilterableConnectionField):\n def __init__(self, connection, *args, **kwargs):\n \"\"\"\n add default query\n limit\n offset\n \"\"\"\n model = connection.Edge.node._type._meta.model\n if \"limit\" not in kwargs:\n kwargs.setdefault(\"limit\", sort_argument_for_object_type(model))\n elif \"limit\" in kwargs and kwargs[\"limit\"] is None:\n del kwargs[\"limit\"]\n if \"offset\" not in kwargs:\n kwargs.setdefault(\"offset\", sort_argument_for_object_type(model))\n elif \"offset\" in kwargs and kwargs[\"offset\"] is None:\n del kwargs[\"offset\"]\n super(CustomConnectionField, self).__init__(connection, *args, **kwargs)\n\n @classmethod\n def get_query(cls, model, info, **args):\n query = super(CustomConnectionField, cls).get_query(model, info, **args)\n if \"limit\" in args:\n query = query.limit(args[\"limit\"])\n if \"offset\" in args:\n query = query.offset(args[\"offset\"])\n return query\n\n\nclass CustomConnection(Connection):\n class Meta:\n abstract = True\n\n 
total_count = graphene.Int()\n\n @staticmethod\n def resolve_total_count(root, info):\n return root.iterable.limit(None).offset(None).count()\n\n\ndef init_custom_connection_field(\n custom_connection_field: FilterableConnectionField,\n declarative_base: DeclarativeMeta,\n custom_filters_path,\n exclude_models=None,\n base_filter_class=FilterSet,\n):\n if exclude_models is None:\n exclude_models = []\n models = [\n m_cls\n for m_cls in declarative_base._decl_class_registry.values()\n if isinstance(m_cls, type) and issubclass(m_cls, declarative_base)\n if m_cls.__name__ not in exclude_models\n ] # all models except exclude_models\n generated_filters = {\n sqla_model: filter_factory(base_filter_class, sqla_model, custom_filters_path)()\n for sqla_model in models\n }\n filters = {**custom_connection_field.filters, **generated_filters}\n custom_connection_field.filters = filters\n custom_connection_field.factory = FilterableFieldFactory(filters)\n\n\ndef filter_factory(\n base_filter_class: FilterSet,\n sqla_model: DeclarativeMeta,\n custom_filters_path: str = None,\n) -> FilterSet:\n filter_class_name = sqla_model.__name__ + \"Filter\"\n try:\n # import our filters if exists\n filter_class = getattr(import_module(custom_filters_path), filter_class_name)\n except AttributeError:\n logging.debug(\n \"Can't get {} from {} - auto generate\".format(\n filter_class_name, custom_filters_path\n )\n )\n generated_fields = {\n column.key: [...] for column in sqlalchemy.inspect(sqla_model).attrs\n }\n filter_class = base_filter_class.create_type(\n filter_class_name, model=sqla_model, fields=generated_fields\n )\n return filter_class\n\n\ndef node_factory(\n custom_connection_field, model: DeclarativeMeta, custom_schemas_path: str = None\n) -> SQLAlchemyObjectType:\n node_name = model.__name__ + \"Node\"\n model_description = _get_table_args_key(model, \"comment\")\n\n if hasattr(model, \"id\"):\n model.db_id = model.id\n\n try:\n # import our nodes if exists\n model_node_class = getattr(import_module(custom_schemas_path), node_name)\n except AttributeError:\n logging.debug(\n \"Can't get {} from {} - auto generate\".format(\n node_name, custom_schemas_path\n )\n )\n meta = type(\n \"Meta\",\n (object,),\n {\n \"model\": model,\n \"interfaces\": (Node,),\n \"connection_field_factory\": custom_connection_field.factory,\n \"description\": model_description,\n },\n )\n model_node_class = type(\n node_name,\n (SQLAlchemyObjectType,),\n {\"db_id\": Int(description=\"Real ID from DB\"), \"Meta\": meta},\n )\n\n return model_node_class\n\n\ndef connections_factory(node: SQLAlchemyObjectType, custom_connection) -> Connection:\n connection_name = node.__name__.replace(\"Node\", \"Connection\")\n return custom_connection.create_type(connection_name, node=node)\n\n\ndef _get_table_args_key(sqla_model: DeclarativeMeta, key: str, default=\"\"):\n \"\"\"\n Get key's value from __table_args__\n \"\"\"\n value = default\n if isinstance(sqla_model.__table_args__, dict):\n value = sqla_model.__table_args__.get(key, default)\n elif isinstance(sqla_model.__table_args__, dict):\n value = next(\n (o.get(key) for o in sqla_model.__table_args__ if isinstance(o, dict)),\n default,\n )\n return value\n\n\nclass QueryObjectType(ObjectType):\n @classmethod\n def __init_subclass_with_meta__(\n cls,\n declarative_base: DeclarativeMeta,\n exclude_models: List[str],\n base_filter_class=FilterSet,\n custom_connection=CustomConnection,\n custom_connection_field=CustomConnectionField,\n custom_schemas_path: str = None,\n 
custom_filters_path: str = None,\n _meta=None,\n **options\n ):\n logging.info(\"Generate auto query...\")\n if not _meta:\n _meta = ObjectTypeOptions(cls)\n init_custom_connection_field(\n custom_connection_field,\n declarative_base,\n custom_filters_path,\n exclude_models,\n base_filter_class,\n )\n fields = OrderedDict()\n fields[\"node\"] = graphene.relay.Node.Field()\n for model in custom_connection_field.filters:\n logging.debug(\"Generate fields for {}\".format(model.__name__))\n node = node_factory(custom_connection_field, model, custom_schemas_path)\n connection = connections_factory(node, custom_connection)\n query_name = \"%s_list\" % inflection.underscore(model.__name__)\n fields.update(\n {\n inflection.underscore(model.__name__): graphene.relay.Node.Field(\n node\n ),\n query_name: custom_connection_field(\n connection,\n limit=graphene.types.Int(),\n offset=graphene.types.Int(),\n ),\n }\n )\n\n if _meta.fields:\n _meta.fields.update(fields)\n else:\n _meta.fields = fields\n logging.info(\"Generate auto query done\")\n return super(QueryObjectType, cls).__init_subclass_with_meta__(\n _meta=_meta, **options\n )\n","sub_path":"graphene_sqlalchemy_auto_filter/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"421189086","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 06 10:12:02 2019\r\n\r\n@author: huckg\r\n\"\"\"\r\n#from __future__ import division\r\nimport numpy\r\nimport copy\r\nimport itertools\r\n\r\n''' function that takes a list of dicts and turns it into Dict of lists'''\r\ndef LDtoDL(LD):\r\n nd={}\r\n for d in LD:\r\n for k,v in d.items():\r\n try:\r\n nd[k].append(v)\r\n except KeyError:\r\n nd[k]=[v] \r\n return nd\r\n\r\ndef DLtoLD(DL):\r\n return [dict(zip(DL,t)) for t in zip(*DL.values())]\r\n \r\ndef unpack(parameters,scores,observables,criteria = [],binary = []):\r\n if type(parameters) == list:\r\n parameters = {i:parameters[i] for i in range(len(parameters))}\r\n p_unpack = {i:[] for i in list(parameters.values())[-1]}\r\n for i in range(len(parameters)):\r\n for parameter,value in parameters[i].items():\r\n p_unpack[parameter].append(value)\r\n \r\n s_unpack = {i:{} for i in observables}\r\n for k,v in s_unpack.items():\r\n s_unpack[k] = {i:[] for i in criteria}\r\n s_bin = copy.deepcopy(s_unpack)\r\n pdict = {i:[] for i in list(p_unpack.keys())}\r\n p_bin = {i:copy.deepcopy(pdict) for i in observables}\r\n for i in range(len(scores)):\r\n for state in observables:\r\n for criterion,value in scores[i][state].items():\r\n s_unpack[state][criterion].append(value)\r\n if binary:\r\n for state in observables:\r\n for bit in binary:\r\n for i in range(len(s_unpack[state][bit])):\r\n if s_unpack[state][bit][i]:\r\n for criterion in criteria:\r\n s_bin[state][criterion].append(s_unpack[state][criterion][i])\r\n for parameter,values in p_unpack.items():\r\n p_bin[state][parameter].append(values[i])\r\n if not s_unpack[state][bit][i]:\r\n for criterion in criteria:\r\n s_bin[state][criterion].append(s_unpack[state][criterion][i])\r\n for parameter,values in p_unpack.items():\r\n p_bin[state][parameter].append(values[i]) \r\n return p_unpack,s_unpack,p_bin,s_bin\r\n \r\n\r\ndef quantiles(scrs,prmt,observables,criteria,binary,fraction = 0.1):\r\n '''get the quantiles which seperate the scores in their extremes to find\r\n parameter sets which are grouped together'''\r\n lower,upper = ({},{})\r\n for state in observables:\r\n 
lower[state] = {}\r\n upper[state] = {}\r\n for cr in criteria:\r\n '''rank the elements in place of the list by creating a list that ranks\r\n the elements from low to high''' \r\n ranked = sorted(range(len(scrs[state][cr])),key=scrs[state][cr].__getitem__)\r\n '''list indices of the elements that were ranked in place and sort them \r\n according to an upper and lower, its total number limited by\r\n the fraction ''' \r\n ilow = [i for i in range(len(ranked)) if ranked[i] < int(len(ranked)*fraction)]\r\n ihigh = [i for i in range(len(ranked)) if ranked[i] > int(len(ranked)*(1-fraction))] \r\n\r\n '''the dictionaries with the parameter values needed for ''' \r\n lower[state][cr] = {p:[] for p in prmt.keys()}\r\n upper[state][cr] = {p:[] for p in prmt.keys()}\r\n for k in prmt.keys():\r\n for i in ilow:\r\n lower[state][cr][k].append(prmt[k][i]) \r\n for i in ihigh:\r\n upper[state][cr][k].append(prmt[k][i]) \r\n return [lower,upper]\r\n\r\n\r\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\r\n #taken from http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html, not by @bobvansluijs\r\n \"\"\"Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\r\n The Savitzky-Golay filter removes high frequency noise from data. It has the advantage of preserving the original shape and\r\n features of the signal better than other types of filtering approaches, such as moving averages techniques.\r\n Parameters\r\n ----------\r\n y : array_like, shape (N,)\r\n the values of the time history of the signal.\r\n window_size : int\r\n the length of the window. Must be an odd integer number.\r\n order : int\r\n the order of the polynomial used in the filtering.\r\n Must be less then `window_size` - 1.\r\n deriv: int\r\n the order of the derivative to compute (default = 0 means only smoothing)\r\n Returns\r\n -------\r\n ys : ndarray, shape (N)\r\n the smoothed signal (or it's n-th derivative).\r\n Notes\r\n \"\"\"\r\n import numpy as np\r\n from math import factorial\r\n try:\r\n window_size = np.abs(np.int(window_size))\r\n order = np.abs(np.int(order))\r\n except ValueError:\r\n raise ValueError(\"window_size and order have to be of type int\")\r\n if window_size % 2 != 1 or window_size < 1:\r\n raise TypeError(\"window_size size must be a positive odd number\")\r\n if window_size < order + 2:\r\n raise TypeError(\"window_size is too small for the polynomials order\")\r\n order_range = range(order+1)\r\n half_window = (window_size -1) // 2\r\n \r\n # precompute coefficients\r\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\r\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\r\n \r\n # pad the signal at the extremes with values taken from the signal itself\r\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\r\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\r\n y = np.concatenate((firstvals, y, lastvals))\r\n return np.convolve( m[::-1], y, mode='valid')\r\n\r\ndef interpolate(t_vector,s_time,ts):\r\n inrtpl_data = numpy.interp(t_vector, s_time,ts)\r\n sid = savitzky_golay(inrtpl_data,101,3)\r\n return sid\r\n\r\n \r\ndef interpolate_dataframe(tv,dv,tu,desired = \"min\"):\r\n units = [\"sec\",\"min\",\"hour\",\"day\"];factors = [1,60,3600,86400] \r\n permutations = [i for i in itertools.product(units,repeat = 2)]\r\n \r\n for i in range(len(permutations)):\r\n start,end = permutations[i]\r\n istart = units.index(start)\r\n iend = units.index(end)\r\n if istart == iend:\r\n permutations[i] 
+= (1,)\r\n else:\r\n fct = factors[istart]/factors[iend]\r\n permutations[i] += (fct,)\r\n for i in permutations: \r\n initial,des,factor = i\r\n if (initial,des) == (tu,desired):\r\n conversion = factor\r\n else:\r\n conversion = 1\r\n c = 0 \r\n for i in dv:\r\n if i > 10**100:\r\n dv[c] = dv[c-1]\r\n c += 1\r\n \r\n intp = numpy.interp(numpy.linspace(0,int(conversion*tv[-1]),int(conversion*tv[-1])),tv*conversion,dv)\r\n profile = savitzky_golay(intp,11,3)\r\n time = range(0,int(conversion*int(tv[-1])),1)\r\n return profile,time,conversion\r\n\r\ndef powerspectrum(data,dt = 1):\r\n #these functions are meant to transform datasets to something more manageable e.g. a fourier transform\r\n data = numpy.array(data)\r\n normalized = data - numpy.mean(data)\r\n transform = numpy.abs(numpy.fft.fft(normalized))**2\r\n freqs = numpy.fft.fftfreq(int(data.size),dt)\r\n arg = numpy.argsort(freqs)\r\n data = transform[arg]\r\n data = data[int(len(data)/2):-1]\r\n return data\r\n\r\n","sub_path":"OED/Dependencies/DataTransform.py","file_name":"DataTransform.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"251811986","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Sven Mayer\n\"\"\"\nimport unittest\nfrom asteroids import GameBoard\nfrom asteroids import GamePiece\n\n\nclass TestGameBoardInit(unittest.TestCase):\n def test_init_arguments(self):\n gameboard = GameBoard.GameBoard(size=(200., 150.),\n no_asteroids=12)\n self.assertSequenceEqual(gameboard.size, (200., 150.))\n self.assertEqual(gameboard.no_asteroids, 12)\n\n def test_init(self):\n gameboard = GameBoard.GameBoard(size=(200., 150.),\n no_asteroids=12)\n self.assertTrue(hasattr(gameboard, '_asteroids'))\n self.assertTrue(hasattr(gameboard, 'moving_objects'))\n self.assertTrue(hasattr(gameboard, '_ship'))\n self.assertTrue(hasattr(gameboard, '_projectiles'))\n self.assertTrue(hasattr(gameboard, 'gameover'))\n\n def test_init_wrong_size_argument(self):\n with self.assertRaises(AttributeError):\n gameboard = GameBoard.GameBoard(size=(200., 150., 300.),\n no_asteroids=10)\n\n def test_init_wrong_no_asteroids_arguemnts(self):\n with self.assertRaises(AttributeError):\n gameboard = GameBoard.GameBoard(size=(100., 120.),\n no_asteroids=10.2)\n\n def test_init_wrong_size_argument2(self):\n with self.assertRaises(AttributeError):\n gameboard = GameBoard.GameBoard(size=\"Sven\", no_asteroids=10)\n\n\nclass TestGameBoardHiddenMethods(unittest.TestCase):\n def setUp(self):\n self.gameboard = GameBoard.GameBoard(size=(100., 100.), no_asteroids=2)\n\n def test_add_asteroid_wrong_type(self):\n with self.assertRaises(AttributeError):\n self.gameboard._add_asteroid(10)\n\n def test_add_asteroid(self):\n asteroid = GamePiece.Asteroid1(1., (0., 0., 0.), (1., 1.), 1.)\n self.gameboard._add_asteroid(asteroid)\n self.assertEqual(self.gameboard._asteroids[-1], asteroid)\n self.assertEqual(self.gameboard.moving_objects[-1], asteroid)\n\n def test_add_ship_wrong_type(self):\n with self.assertRaises(AttributeError):\n self.gameboard._add_ship(10)\n\n def test_add_ship(self):\n ship = GamePiece.Ship(1.)\n self.gameboard._add_ship(ship)\n self.assertEqual(self.gameboard._ship, ship)\n self.assertEqual(self.gameboard.moving_objects[-1], ship)\n\n def test_add_multiple_ships(self):\n ship1 = GamePiece.Ship(1.)\n ship2 = GamePiece.Ship(1.)\n self.gameboard._add_ship(ship1)\n with self.assertRaises(RuntimeError):\n 
self.gameboard._add_ship(ship2)\n\n def test_add_projectile_wrong_type(self):\n with self.assertRaises(AttributeError):\n self.gameboard._add_projectile(10)\n\n def test_add_projectile(self):\n proj = GamePiece.Projectile(1., (0., 0., 0.), (1., 1.))\n self.gameboard._add_projectile(proj)\n self.assertEqual(self.gameboard._projectiles[-1], proj)\n self.assertEqual(self.gameboard.moving_objects[-1], proj)\n\n def test_asteroid_out_of_bounds(self):\n asteroid = GamePiece.Asteroid1(1., (120., 40., 2.), (1., 1.), 1.)\n self.gameboard._add_asteroid(asteroid)\n self.gameboard._asteroids_out_of_bounds()\n self.assertSequenceEqual(asteroid.position, (20., 40., 2.))\n\n def test_projectile_out_of_bounds(self):\n projectile = GamePiece.Projectile(size=4., position=(30., -10, 2.),\n velocity=(0., 0.))\n self.gameboard._add_projectile(projectile)\n self.gameboard._projectiles_out_of_bounds()\n self.assertNotIn(projectile, self.gameboard._projectiles)\n self.assertNotIn(projectile, self.gameboard.moving_objects)\n\n def test_ship_out_of_bounds(self):\n ship = GamePiece.Ship(size=10., position=(-30., 20., 1.))\n self.gameboard._add_ship(ship)\n self.gameboard._ship_out_of_bounds()\n self.assertSequenceEqual(ship.position, (70., 20., 1.))\n\n def test_collision_asteroid_collides_ship(self):\n ship = GamePiece.Ship(size=1.,position=(0., 0., 2.))\n asteroid = GamePiece.Asteroid1(size=1., position=(0., 0., 4.),\n start_velocity=(1., 1.),\n angular_velocity=-10.)\n self.gameboard._add_ship(ship)\n self.gameboard._add_asteroid(asteroid)\n self.gameboard._resolve_collision()\n self.assertTrue(self.gameboard.gameover)\n\n def test_collision_asteroid_collides_projectile(self):\n projectile = GamePiece.Projectile(1.0, (0., 0., 0.), (1., 0.))\n asteroid = GamePiece.Asteroid1(size=1., position=(0., 0., 4.),\n start_velocity=(1., 1.),\n angular_velocity=-10.)\n self.gameboard._add_asteroid(asteroid)\n self.gameboard._add_projectile(projectile)\n self.gameboard._resolve_collision()\n self.assertNotIn(asteroid, self.gameboard._asteroids)\n self.assertNotIn(projectile, self.gameboard._projectiles)\n\n\nclass TestGameBoardUserMethods(unittest.TestCase):\n def setUp(self):\n self.gameboard = GameBoard.GameBoard(size=(100., 100.), no_asteroids=2)\n self.ship = GamePiece.Ship(1., (10., 10., 0.))\n self.Asteroid = GamePiece.Asteroid1(1., (20., 10., 0), (0., 0.), 1.)\n\n def test_turn_ship(self):\n self.gameboard._add_ship(self.ship)\n self.gameboard.ship_turn(1)\n self.assertEqual(self.ship.turn, 1)\n\n def test_accelerate_ship(self):\n self.gameboard._add_ship(self.ship)\n self.gameboard.ship_accelerate(True)\n self.assertEqual(self.ship.thrust, 1)\n self.gameboard.ship_accelerate(False)\n self.assertEqual(self.ship.thrust, 0)\n\n def test_fire_ship(self):\n self.gameboard._add_ship(self.ship)\n self.gameboard.ship_fire()\n self.assertSequenceEqual(self.gameboard._projectiles[-1].position,\n self.ship.gunposition)\n self.assertSequenceEqual(self.gameboard.moving_objects[-1].position,\n self.ship.gunposition)\n\nif __name__ == u\"__main__\":\n unittest.main()","sub_path":"test_asteroids/test_gameboard.py","file_name":"test_gameboard.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341956848","text":"\"\"\" Compiled: 2020-09-18 10:38:54 \"\"\"\n\n#__src_file__ = \"extensions/swift/etc/FSwiftSettlementWrapper.py\"\n\"\"\"----------------------------------------------------------------------------\nMODULE\n FSwiftSettlement - 
Module that provides a wrapper class for the ACM entity\n Confirmation\n\n\n (c) Copyright 2013 SunGard FRONT ARENA. All rights reserved.\n\n----------------------------------------------------------------------------\"\"\"\n\nclass FSwiftSettlement(object):\n\n def __init__(self, boEntity):\n self.boEntity = boEntity\n\n def __getattr__(self, attr):\n return getattr(self.boEntity, attr)\n\n def GetAccountBic(self):\n '''Returns SWIFT bic code of the FAccount,\n if empty fallback value is BIC from the general tab.'''\n bic = ''\n cpRef = self.boEntity.CounterpartyAccountRef()\n if cpRef and cpRef.NetworkAlias():\n bic = cpRef.NetworkAlias().Alias()\n if not bic and self.boEntity.Counterparty():\n bic = self.boEntity.Counterparty().Swift()\n return bic\n\n def GetCounterpartyType(self):\n counterPartyType = ''\n counterParty = self.boEntity.Counterparty()\n if (counterParty and len(counterParty.Accounts()) != 0) :\n counterPartyType = counterParty.Type()\n return counterPartyType\n\n def GetNotifyReceipt(self):\n notifyReceipt = ''\n acqRef = self.boEntity.AcquirerAccountRef()\n if acqRef:\n correspondentBank = acqRef.CorrespondentBank()\n if correspondentBank:\n notifyReceipt = correspondentBank.NotifyReceipt()\n return notifyReceipt\n\n def IsTargetTwo(self):\n \"\"\"This method is used to determine if settlement is for\n Target2 or not. Target2 is a Sub Network choice list on\n the counterparty account.\n \"\"\"\n\n import FSwiftParameters as Global\n\n SWIFT_SUB_NETWORKS = Global.SWIFT_SUB_NETWORKS\n if not SWIFT_SUB_NETWORKS:\n return False\n\n if self.boEntity:\n if self.Amount() >= 0:\n return False\n if self.Currency().Name() != 'EUR':\n return False\n\n subNetwork = self.CounterpartyAccountSubNetworkName()\n if subNetwork:\n return (subNetwork.upper() == \"TARGET2\"\n and subNetwork in SWIFT_SUB_NETWORKS)\n\n return False\n\n\n\n def IsEba(self):\n \"\"\"This method is used to determine if settlement is for\n EBA or not. 
EBA is a Sub Network choice list on\n the counterparty account.\n \"\"\"\n\n import FSwiftParameters as Global\n\n SWIFT_SUB_NETWORKS = Global.SWIFT_SUB_NETWORKS\n if not SWIFT_SUB_NETWORKS:\n return False\n\n if self.boEntity:\n if self.Amount() >= 0:\n return False\n if self.Currency().Name() != 'EUR':\n return False\n\n subNetwork = self.CounterpartyAccountSubNetworkName()\n if subNetwork:\n return (subNetwork.upper() == \"EBA\"\n and subNetwork in SWIFT_SUB_NETWORKS)\n\n return False\n\n\n\n","sub_path":"Extensions/Default/FPythonCode/FSwiftSettlementWrapper.py","file_name":"FSwiftSettlementWrapper.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602595314","text":"from binance.client import Client\n\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom math import floor\n\n# This file is to update data-frames with current marker quotes(to do buy sell calculations)\n# And to make trades using buy sell info\n# Automate data collection, analysis and placing buy and sell orders\n\nSYMBOL_TYPE_SPOT = 'SPOT'\n\nORDER_STATUS_NEW = 'NEW'\nORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'\nORDER_STATUS_FILLED = 'FILLED'\nORDER_STATUS_CANCELED = 'CANCELED'\nORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'\nORDER_STATUS_REJECTED = 'REJECTED'\nORDER_STATUS_EXPIRED = 'EXPIRED'\n\nKLINE_INTERVAL_1MINUTE = '1m'\nKLINE_INTERVAL_3MINUTE = '3m'\nKLINE_INTERVAL_5MINUTE = '5m'\nKLINE_INTERVAL_15MINUTE = '15m'\nKLINE_INTERVAL_30MINUTE = '30m'\nKLINE_INTERVAL_1HOUR = '1h'\nKLINE_INTERVAL_2HOUR = '2h'\nKLINE_INTERVAL_4HOUR = '4h'\nKLINE_INTERVAL_6HOUR = '6h'\nKLINE_INTERVAL_8HOUR = '8h'\nKLINE_INTERVAL_12HOUR = '12h'\nKLINE_INTERVAL_1DAY = '1d'\nKLINE_INTERVAL_3DAY = '3d'\nKLINE_INTERVAL_1WEEK = '1w'\nKLINE_INTERVAL_1MONTH = '1M'\n\nSIDE_BUY = 'BUY'\nSIDE_SELL = 'SELL'\n\nORDER_TYPE_LIMIT = 'LIMIT'\nORDER_TYPE_MARKET = 'MARKET'\nORDER_TYPE_STOP_LOSS = 'STOP_LOSS'\nORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'\nORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'\nORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'\nORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'\n\nTIME_IN_FORCE_GTC = 'GTC'\nTIME_IN_FORCE_IOC = 'IOC'\nTIME_IN_FORCE_FOK = 'FOK'\n\nORDER_RESP_TYPE_ACK = 'ACK'\nORDER_RESP_TYPE_RESULT = 'RESULT'\nORDER_RESP_TYPE_FULL = 'FULL'\n\n# For accessing the data returned by Client.aggregate_trades().\nAGG_ID = 'a'\nAGG_PRICE = 'p'\nAGG_QUANTITY = 'q'\nAGG_FIRST_TRADE_ID = 'f'\nAGG_LAST_TRADE_ID = 'l'\nAGG_TIME = 'T'\nAGG_BUYER_MAKES = 'm'\nAGG_BEST_MATCH = 'M'\n\napi_key = ''\napi_secret = ''\n\nclient = Client(api_key, api_secret)\n\n\ndef get_1min_ohlc_df_binance(ticker_str, number_of_days):\n\n number_of_days_ago = datetime.now() - timedelta(days=number_of_days)\n date = datetime.strftime(number_of_days_ago, \"%d %b, %Y %H:00:00\")\n\n klines = client.get_historical_klines(ticker_str, Client.KLINE_INTERVAL_1MINUTE, date, \"today UTC\")\n\n frame = pd.DataFrame(klines, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore' ])\n frame = frame.drop(['volume', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'], axis=1)\n frame['timestamp'] = pd.to_datetime(frame['timestamp'], unit='ms')\n frame.set_index('timestamp', inplace=True)\n frame.to_csv(ticker_str+'_1min_ohlc_data.csv')\n\n return frame\n\n\ndef update_data_binance(df_ohlc, str_name_binance):\n\n # get input df, check last data\n # get values from a 
specific date to the current date (from binance with the api)\n # IMPLEMENT: is it possible to include the hour to improve efficiency? YES, moving window of data points\n # manipulate into ohlc format\n # update the existing data with the new data\n # It returns a dataframe does not save the updated data as a file, it returns 1 min data\n\n date = datetime.strftime(datetime.strptime(df_ohlc.last_valid_index(),'%Y-%m-%d %H:%M:%S'), \"%d %b, %Y %H:%M:00\")\n klines = client.get_historical_klines(str_name_binance, Client.KLINE_INTERVAL_1MINUTE, date, \"today UTC\")\n\n new_values = pd.DataFrame(klines,\n columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av',\n 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])\n new_values = new_values.drop(['volume', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'], axis=1)\n new_values = new_values.iloc[1:]\n new_values['timestamp'] = pd.to_datetime(new_values['timestamp'], unit='ms')\n new_values.set_index('timestamp', inplace=True)\n\n # IMPLEMENT: for the last part keep last 700 entries (arbitrarily as indicators need a max of 600 for\n # rolling window),\n # to boost preformance\n\n df_ohlc_updated = df_ohlc.append(new_values)\n '''df_shortened = pd.DataFrame\n # keep only last 700 elements in df (faster processing)\n for index in range(-650, -1):\n df_shortened = df_shortened.append(df_ohlc_updated.iloc[index])'''\n\n return df_ohlc_updated\n\n\ndef get_top_5_binance():\n prices = client.get_all_tickers()\n\n for index in prices:\n if index[\"symbol\"] == \"BTCUSDT\":\n print(\"BTCUSDT price is: \" + index[\"price\"])\n\n if index[\"symbol\"] == \"ETHBTC\":\n print(\"ETHBTC price is: \" + index[\"price\"])\n\n if index[\"symbol\"] == \"XRPBTC\":\n print(\"XRPBTC price is: \" + index[\"price\"])\n\n if index[\"symbol\"] == \"EOSBTC\":\n print(\"EOSBTC price is: \" + index[\"price\"])\n\n if index[\"symbol\"] == \"LTCBTC\":\n print(\"LTCBTC price is: \" + index[\"price\"])\n\n return\n\n\ndef all_fees():\n fees = client.get_trade_fee()\n return fees\n\n\ndef market_price_order(symbol_binance, quantity, type):\n\n if type == 'BUY':\n market_order = client.order_market_buy(\n symbol=symbol_binance,\n quantity=quantity)\n elif type == 'SELL':\n market_order = client.order_market_sell(\n symbol=symbol_binance,\n quantity=quantity)\n else:\n print('\\nError: type of order not specified correctly')\n\n return market_order\n\n\ndef current_position(str_name_binance, str_name_binance_pair):\n\n # this function returns the current position of the trading pair (long/ short),\n # and returns the quantity of asset that can be traded\n\n # IMPLEMENT THIS: get symbol info, read precision required and\n # implement the rounding needed (line 238/9 does it work?)\n\n # this records change in balances of the trading pair\n\n balance = client.get_asset_balance(asset=str_name_binance)\n balance_pair = client.get_asset_balance(asset=str_name_binance_pair)\n\n print(f\"{balance['free']} {str_name_binance} is available\")\n print(f\"{balance_pair['free']} {str_name_binance_pair} is available\")\n\n available_coin = balance['free']\n available_coin_pair = balance_pair['free']\n\n date = str(datetime.strftime(datetime.now(), '%m/%d/%Y %H:%M:%S')) # error\n date_and_balance = {'Date': date, f'{str_name_binance}': available_coin,\n f'{str_name_binance_pair}': available_coin_pair}\n df_date_and_balance = pd.DataFrame(date_and_balance, index=[0])\n df_date_and_balance.set_index('Date')\n\n try:\n df = 
pd.read_csv(f'{str_name_binance}_portfolio_price_over_time.csv')\n df_available_coin = df.append(date_and_balance, ignore_index=True)\n df_available_coin.set_index('Date', inplace=True)\n\n except:\n df_available_coin = pd.DataFrame(date_and_balance, index=[0])\n df_available_coin.set_index('Date', inplace=True)\n\n df_available_coin.to_csv(f'{str_name_binance}_portfolio_price_over_time.csv')\n print('Balance logged')\n\n balances = {'balance': available_coin, 'balance_pair': available_coin_pair}\n\n prices = client.get_all_tickers()\n\n # loop for the current asset\n # price (through list of all assets and prices from Binance)\n for index in prices:\n if index[\"symbol\"] == \"BTCUSDT\":\n current_BTC_price = float(index['price'])\n current_USDT_price_ito_BTC = float(1/current_BTC_price)\n\n BTC_portfolio_current = float(balances['balance'])\n USDT_portfolio_current = float(balances['balance_pair'])\n\n # manipulate amounts to a valid precision (get precision from info) for the API\n\n info = client.get_symbol_info('BTCUSDT')\n min_qty = info['filters'][2]['minQty']\n min_qty = float(min_qty)\n\n USDT_ito_BTC_balance = USDT_portfolio_current * current_USDT_price_ito_BTC\n\n sell_amount_BTC = floor(BTC_portfolio_current*(1/min_qty))/(1/min_qty)\n buy_amount_BTC = floor(USDT_ito_BTC_balance*(1/min_qty))/(1/min_qty)\n\n # display balances to user in terminal\n\n print('The balances in terms of BTC is:')\n print(f\" The current BTC balance (sellable) is {sell_amount_BTC}BTC\")\n print(f\" The current USDT balance (buyable) in BTC is {buy_amount_BTC}BTC\")\n\n if sell_amount_BTC > buy_amount_BTC:\n trading_position = 'LONG'\n else:\n trading_position = 'SHORT'\n\n trading_position = {'position': trading_position, 'btc_balance': sell_amount_BTC,\n 'usdt_ito_btc': buy_amount_BTC}\n\n return trading_position\n\n\n\n\n\n\n","sub_path":"binance_API.py","file_name":"binance_API.py","file_ext":"py","file_size_in_byte":8630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457033641","text":"import os\n\n\ndef read_message(input_pipe):\n fd = os.open(input_pipe, os.O_RDONLY)\n message = (\n \"I pid {} received message => {}\".format(os.getpid(), os.read(fd, 22).decode(encoding='UTF-8'))\n )\n os.close(fd)\n return message\n\nprint(read_message(\"my_pipe\"))","sub_path":"ppalgo/parallel_algorithms/read_from_named_pipe.py","file_name":"read_from_named_pipe.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"437223109","text":"import numpy, pandas, glob, sys, astropy, emcee, scipy\nsys.path.append(\"/Users/pwangel/Gene_Analysis\")\nfrom ga_utils import clean_data\n\n# Run mcmc to various models\n\ndef find_prob_distribution(gene_counts, sample_counts, p_dist, del_p):\n\n dist = numpy.zeros(shape=len(p_dist))\n\n dist = numpy.array([scipy.stats.binom.pmf(gene_counts, sample_counts, p) for p in numpy.power(2.0, p_dist)/1.e6])\n #print(gene_counts, dist[0], dist[-1])\n dist = dist/(dist*del_p).sum()*del_p\n\n '''Do checks\n\n mean = (p_dist*sample_counts*dist).sum()\n std = numpy.sqrt(((p_dist*sample_counts-mean)**2*dist).sum())\n\n '''\n\n return dist\n\ndef TwoD_t_dist(parameters, x_val, y_val):\n\n mu_a, mu_b, sigma_a, sigma_b, nu, rho = parameters\n\n delta_a = x_val-mu_a\n delta_b = y_val-mu_b\n\n sigma_matrix = numpy.mat([[sigma_a**2, rho*sigma_a*sigma_b], [rho*sigma_a*sigma_b, sigma_b**2]])\n inv_sigma = numpy.linalg.inv(sigma_matrix)\n numerator_one 
= numpy.sqrt(numpy.linalg.det(inv_sigma))/(2.0*numpy.pi)\n\n    numerator_two = (1.0+1.0/nu*(delta_a**2*inv_sigma[0,0] + 2.0*delta_a*delta_b*inv_sigma[0,1] + delta_b**2*inv_sigma[1,1]))**(-(nu+2.0)/2.0)\n\n    return numerator_one*numerator_two \n\ndef ln_pearson_prior(parameters):\n\n    mu_a, mu_b, sigma_a, sigma_b, nu, rho = parameters\n\n    if (sigma_a > 0.0) and (sigma_b > 0.0) and (nu > 1.0) and (-1.0 < rho < 1.0):\n        return 0.0\n\n    return -numpy.inf\n\ndef discrete_lnlike(parameters, x, y, x_weights, y_weights): \n\n    mu_a, mu_b, sigma_a, sigma_b, nu, rho = parameters\n    delta_a_sqr = (x-mu_a)**2\n    delta_b_sqr = (y-mu_b)**2\n    delta_ab = (x-mu_a)*(y-mu_b)\n\n    sigma_matrix = numpy.mat([[sigma_a**2, rho*sigma_a*sigma_b], [rho*sigma_a*sigma_b, sigma_b**2]])\n    inv_sigma = numpy.linalg.inv(sigma_matrix)\n    numerator_one = numpy.sqrt(numpy.linalg.det(inv_sigma))/(2.0*numpy.pi)\n\n    numerator_two = (1.0+1.0/nu*(delta_a_sqr*inv_sigma[0,0] + 2.0*delta_ab*inv_sigma[0,1] + delta_b_sqr*inv_sigma[1,1]))**(-(nu+2.0)/2.0)\n\n    return numpy.log(numerator_one) + numpy.log(numerator_two)\n\ndef discrete_lnprob(parameters, x, y, x_weights, y_weights):\n\n    if not numpy.isfinite(ln_pearson_prior(parameters)):\n        return -numpy.inf\n    return ln_pearson_prior(parameters)+discrete_lnlike(parameters, x, y, x_weights, y_weights)\n\ndef run_discrete_mcmc(x_data, y_data, x_weights, y_weights):\n\n    ndim, nwalkers = 6, 100\n    init = [numpy.array([5.0, 5.0, 2.0, 2.0, 2.0, 0.0]) + numpy.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])*numpy.random.random(ndim) for i in range(nwalkers)] \n\n    sampler = emcee.EnsembleSampler(nwalkers, ndim, discrete_lnprob, args=(x_data, y_data, x_weights, y_weights))\n    sampler.run_mcmc(init, 1000)\n\n    return sampler.chain\n\ndef smooth_lnlike(parameters, prob_array, bins): \n\n    mu_a, mu_b, sigma_a, sigma_b, nu, rho = parameters\n\n    sigma_matrix = numpy.mat([[sigma_a**2, rho*sigma_a*sigma_b], [rho*sigma_a*sigma_b, sigma_b**2]])\n    inv_sigma = numpy.linalg.inv(sigma_matrix)\n    numerator_one = numpy.sqrt(numpy.linalg.det(inv_sigma))/(2.0*numpy.pi)\n\n    delta_b = bins-mu_b\n    delta_a = bins-mu_a\n\n    delta_a_sqr = numpy.repeat((delta_a**2), len(delta_b))\n    delta_b_sqr = numpy.tile(delta_b**2, len(delta_a))\n    delta_ab = numpy.outer(delta_a, delta_b).reshape(-1)\n\n    diff_squared = numpy.sum((prob_array.reshape(-1) - numerator_one*(1.0+1.0/nu*(delta_a_sqr*inv_sigma[0,0] + 2.0*delta_ab*inv_sigma[0,1] + delta_b_sqr*inv_sigma[1,1]))**(-(nu+2.0)/2.0))**2)\n\n    #print(diff_squared)\n\n    return -1.0*(diff_squared)\n\ndef smooth_lnprob(parameters, prob_array, bins):\n\n    if not numpy.isfinite(ln_pearson_prior(parameters)):\n        return -numpy.inf\n    return ln_pearson_prior(parameters)+smooth_lnlike(parameters, prob_array, bins) \n\n\ndef run_smooth_mcmc(prob_array, bins):\n\n    ndim, nwalkers = 6, 50\n    init = [numpy.array([5.0, 5.0, 2.0, 2.0, 2.0, 0.0]) + numpy.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])*numpy.random.random(ndim) for i in range(nwalkers)] \n\n    sampler = emcee.EnsembleSampler(nwalkers, ndim, smooth_lnprob, args=(prob_array, bins))\n    sampler.run_mcmc(init, 1000) \n\n    return sampler.chain\n","sub_path":"ga_utils/mcmc.py","file_name":"mcmc.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489568561","text":"class LLRB: \r\n    class Node: \r\n        def __init__(self, key, val): # Constructor\r\n            self.key, self.val = key, val\r\n            self.left = self.right = None\r\n            self.count = 1 # Number of nodes itself and below \r\n            self.red = True # Color of 
parent link\r\n\r\n def __init__(self): # Constructor\r\n self.root = None\r\n\r\n @staticmethod\r\n def getOnNode(h, key):\r\n while h != None:\r\n if key < h.key: h = h.left\r\n elif key > h.key: h = h.right\r\n else: return h.val # key == x.key\r\n return None # The key was NOT found\r\n\r\n def get(self, key):\r\n return LLRB.getOnNode(self.root, key)\r\n\r\n def contains(self, key):\r\n return self.get(key) != None\r\n\r\n @staticmethod \r\n def isRed(x):\r\n if x == None: return False\r\n return x.red\r\n\r\n @staticmethod \r\n def fixUp(h): # Fix the tree such that it conforms to the LLRB representation\r\n if h == None: return None\r\n if LLRB.isRed(h.right) and not LLRB.isRed(h.left): h = LLRB.rotateLeft(h) # Lean right -> lean left\r\n if LLRB.isRed(h.left) and LLRB.isRed(h.left.left): h = LLRB.rotateRight(h) # 4-node all leaning left -> 4-node leaning left and right\r\n if LLRB.isRed(h.left) and LLRB.isRed(h.right): LLRB.flipColors(h) # Split a 4-node into two 2-nodes\r\n return h\r\n \r\n @staticmethod \r\n def rotateLeft(h):\r\n assert(LLRB.isRed(h.right))\r\n x = h.right\r\n h.right = x.left\r\n x.left = h\r\n x.red = h.red\r\n h.red = True\r\n return x\r\n\r\n @staticmethod\r\n def rotateRight(h):\r\n assert(LLRB.isRed(h.left))\r\n x = h.left\r\n h.left = x.right\r\n x.right = h\r\n x.red = h.red\r\n h.red = True\r\n return x\r\n\r\n @staticmethod\r\n def moveRedLeft(h):\r\n LLRB.flipColors(h)\r\n if LLRB.isRed(h.right.left):\r\n h.right = LLRB.rotateRight(h.right)\r\n h = LLRB.rotateLeft(h)\r\n LLRB.flipColors(h)\r\n return h\r\n \r\n @staticmethod\r\n def moveRedRight(h):\r\n LLRB.flipColors(h)\r\n if LLRB.isRed(h.left.left):\r\n h = LLRB.rotateRight(h)\r\n LLRB.flipColors(h)\r\n return h\r\n\r\n @staticmethod\r\n def flipColors(h):\r\n #assert((not LLRB.isRed(h) and LLRB.isRed(h.left) and LLRB.isRed(h.right)) or\\\r\n # (LLRB.isRed(h) and not LLRB.isRed(h.left) and not LLRB.isRed(h.right))) \r\n h.red = not h.red\r\n h.left.red = not h.left.red\r\n h.right.red = not h.right.red\r\n\r\n @staticmethod\r\n def deleteMin(h):\r\n if h.left == None: return None\r\n if not LLRB.isRed(h.left) and not LLRB.isRed(h.left.left):\r\n h = LLRB.moveRedLeft(h)\r\n h.left = LLRB.deleteMin(h.left)\r\n h = LLRB.fixUp(h)\r\n h.count = LLRB.sizeOnNode(h.left) + 1 + LLRB.sizeOnNode(h.right)\r\n return h\r\n\r\n def delete(self, key):\r\n def deleteOnNode(h, key):\r\n if h == None: return None\r\n if key < h.key:\r\n if h.left != None and not LLRB.isRed(h.left) and not LLRB.isRed(h.left.left):\r\n h = LLRB.moveRedLeft(h)\r\n h.left = deleteOnNode(h.left, key)\r\n else:\r\n if LLRB.isRed(h.left): h = LLRB.rotateRight(h)\r\n if key == h.key and h.right == None: return None\r\n if h.right != None and not LLRB.isRed(h.right) and not LLRB.isRed(h.right.left):\r\n h = LLRB.moveRedRight(h)\r\n if key == h.key: # Hibbard deletion: place the min in the right subtree on the deleted spot \r\n h.key = LLRB.minOnNode(h.right)\r\n h.value = LLRB.getOnNode(h.right, h.key)\r\n h.right = LLRB.deleteMin(h.right)\r\n else:\r\n h.right = deleteOnNode(h.right, key)\r\n h = LLRB.fixUp(h)\r\n h.count = LLRB.sizeOnNode(h.left) + 1 + LLRB.sizeOnNode(h.right) \r\n return h\r\n self.root = deleteOnNode(self.root, key)\r\n if self.root != None:\r\n self.root.red = False # To not violate the assertion in flipColors(h), where the root splits\r\n\r\n def put(self, key, val):\r\n def putOnNode(x, key, val):\r\n if x == None: return self.Node(key, val)\r\n if key < x.key: x.left = putOnNode(x.left, key, val)\r\n elif key > 
x.key: x.right = putOnNode(x.right, key, val)\r\n else: x.val = val # key == x.key\r\n x = LLRB.fixUp(x)\r\n x.count = LLRB.sizeOnNode(x.left) + 1 + LLRB.sizeOnNode(x.right)\r\n return x \r\n self.root = putOnNode(self.root, key, val)\r\n self.root.red = False # To not violate the assertion in flipColors(h), where the root splits\r\n\r\n @staticmethod\r\n def minOnNode(h):\r\n if h == None: return None\r\n else:\r\n while h.left != None:\r\n h = h.left\r\n return h.key\r\n\r\n def min(self):\r\n return LLRB.minOnNode(self.root)\r\n \r\n def max(self):\r\n if self.root == None: return None\r\n else: \r\n x = self.root\r\n while x.right != None:\r\n x = x.right\r\n return x.key\r\n\r\n def floor(self, key):\r\n def floorOnNode(x, key):\r\n if x == None: return None\r\n if key == x.key: return x\r\n elif key < x.key: return floorOnNode(x.left, key)\r\n\r\n t = floorOnNode(x.right, key)\r\n if t != None: return t\r\n else: return x\r\n x = floorOnNode(self.root, key)\r\n if x == None: return None\r\n else: return x.key\r\n\r\n def ceiling(self, key):\r\n def ceilingOnNode(x, key):\r\n if x == None: return None\r\n if key == x.key: return x\r\n elif x.key < key: return ceilingOnNode(x.right, key)\r\n\r\n t = ceilingOnNode(x.left, key)\r\n if t != None: return t\r\n else: return x\r\n x = ceilingOnNode(self.root, key)\r\n if x == None: return None\r\n else: return x.key\r\n\r\n @staticmethod\r\n def sizeOnNode(x):\r\n if x == None: return 0\r\n else: return x.count\r\n\r\n def size(self): \r\n return LLRB.sizeOnNode(self.root) \r\n\r\n def rank(self, key): # How many keys < key?\r\n def rankOnNode(x, key): # rank(key) on the subtree rooted at x\r\n if x == None: return 0\r\n if key < x.key: return rankOnNode(x.left, key)\r\n elif key > x.key: return LLRB.sizeOnNode(x.left) + 1 + rankOnNode(x.right, key)\r\n else: return LLRB.sizeOnNode(x.left) # key == x.key\r\n return rankOnNode(self.root, key)\r\n\r\n def select(self, idx):\r\n def selectOnNode(x, idx): # idx-th element on the subtree rooted at x\r\n if x == None: return None # idx-th element does not exist\r\n if idx < LLRB.sizeOnNode(x.left): return selectOnNode(x.left, idx)\r\n elif idx > LLRB.sizeOnNode(x.left): return selectOnNode(x.right, idx-LLRB.sizeOnNode(x.left)-1)\r\n else: return x.key # idx == LLRB.sizeOnNode(x.left)\r\n return selectOnNode(self.root, idx) \r\n\r\n def inorder(self): \r\n def inorderOnNode(x, q):\r\n if x == None: return\r\n inorderOnNode(x.left, q)\r\n q.append(x.key)\r\n inorderOnNode(x.right, q)\r\n q = []\r\n inorderOnNode(self.root, q)\r\n return q\r\n\r\n def levelorder(self):\r\n qNode, qKey, idx = [], [], 0\r\n if self.root == None: return qNode\r\n else: qNode.append(self.root) \r\n while idx < len(qNode):\r\n x = qNode[idx]\r\n if x.left != None: qNode.append(x.left)\r\n if x.right != None: qNode.append(x.right)\r\n qKey.append(x.key)\r\n idx += 1\r\n return qKey\r\n\r\n def rangeCount(self, lo, hi): # Number of keys between lo and hi, both inclusive\r\n if self.contains(hi): return self.rank(hi) - self.rank(lo) + 1\r\n else: return self.rank(hi) - self.rank(lo)\r\n\r\n def rangeSearch(self, lo, hi): # Return all keys between lo and hi, both inclusive\r\n def rangeSearchOnNode(x, lo, hi, q):\r\n if x == None: return\r\n if lo < x.key: rangeSearchOnNode(x.left, lo, hi, q)\r\n if lo <= x.key and x.key <= hi: q.append(x.key)\r\n if x.key < hi: rangeSearchOnNode(x.right, lo, hi, q)\r\n q = []\r\n rangeSearchOnNode(self.root, lo, hi, q)\r\n return q\r\n\r\nif __name__ == \"__main__\": \r\n '''\r\n bst = 
LLRB() \r\n print(bst.size())\r\n print(\"min\", bst.min())\r\n print(\"max\", bst.max())\r\n \r\n bst.put(\"a\",1)\r\n bst.put(\"c\",2)\r\n bst.put(\"e\",3)\r\n bst.put(\"b\",4)\r\n bst.put(\"c\",5)\r\n print(\"level order\", bst.levelorder())\r\n print(\"size\", bst.size())\r\n\r\n print(bst.get(\"a\"))\r\n print(bst.get(\"b\"))\r\n print(bst.get(\"c\"))\r\n print(bst.get(\"d\"))\r\n print(bst.get(\"e\"))\r\n print(bst.floor(\"a\"))\r\n print(bst.floor(\"b\")) \r\n\r\n print(\"ceiling\") \r\n print(bst.ceiling(\"a\"))\r\n print(bst.ceiling(\"b\"))\r\n print(bst.ceiling(\"c\"))\r\n print(bst.ceiling(\"d\"))\r\n print(bst.ceiling(\"e\"))\r\n print(bst.ceiling(\"f\"))\r\n\r\n print(\"min\", bst.min())\r\n print(\"max\", bst.max())\r\n\r\n print(\"rank\")\r\n print(bst.rank(\"a\"))\r\n print(bst.rank(\"b\"))\r\n print(bst.rank(\"c\"))\r\n print(bst.rank(\"d\"))\r\n print(bst.rank(\"e\"))\r\n print(bst.rank(\"f\"))\r\n\r\n print(\"select\")\r\n print(bst.select(-1))\r\n print(bst.select(0))\r\n print(bst.select(1))\r\n print(bst.select(2))\r\n print(bst.select(3))\r\n print(bst.select(4))\r\n print(bst.select(5))\r\n print(bst.select(6))\r\n\r\n '''\r\n print(\"inorder traversal\")\r\n bst2 = LLRB()\r\n bst2.put(\"S\",1)\r\n bst2.put(\"E\",2)\r\n bst2.put(\"Y\",3)\r\n bst2.put(\"A\",4)\r\n bst2.put(\"R\",5)\r\n bst2.put(\"C\",6)\r\n bst2.put(\"H\",7)\r\n bst2.put(\"M\",8)\r\n bst2.put(\"L\",9)\r\n bst2.put(\"P\",10)\r\n print(bst2.rank(\"H\"))\r\n print(bst2.select(4))\r\n print(\"level order\", bst2.levelorder())\r\n print(\"inorder\",bst2.inorder()) \r\n print(\"range count\", bst2.rangeCount(\"F\", \"T\"))\r\n print(\"range search\", bst2.rangeSearch(\"F\", \"T\"))\r\n print(\"range count\", bst2.rangeCount(\"B\", \"I\"))\r\n print(\"range search\", bst2.rangeSearch(\"B\", \"I\"))\r\n print(\"range count\", bst2.rangeCount(\"C\", \"H\"))\r\n print(\"range search\", bst2.rangeSearch(\"C\", \"H\"))\r\n print(\"range count\", bst2.rangeCount(\"J\", \"R\"))\r\n print(\"range search\", bst2.rangeSearch(\"J\", \"R\"))\r\n\r\n '''\r\n print(\"delete Z\")\r\n bst2.delete(\"Z\")\r\n print(\"level order\", bst2.levelorder())\r\n print(\"inorder\",bst2.inorder())\r\n\r\n print(\"delete M\")\r\n bst2.delete(\"M\")\r\n print(\"level order\", bst2.levelorder())\r\n print(\"inorder\",bst2.inorder())\r\n\r\n print(\"delete A\")\r\n bst2.delete(\"A\")\r\n print(\"level order\", bst2.levelorder())\r\n print(\"inorder\",bst2.inorder())\r\n \r\n print(\"delete L\")\r\n bst2.delete(\"L\")\r\n print(\"level order\", bst2.levelorder())\r\n print(\"inorder\",bst2.inorder())\r\n\r\n print(\"delete all\")\r\n bst2.delete(\"C\")\r\n bst2.delete(\"E\")\r\n bst2.delete(\"H\")\r\n bst2.delete(\"P\")\r\n bst2.delete(\"R\")\r\n bst2.delete(\"S\")\r\n bst2.delete(\"Y\")\r\n bst2.delete(\"X\") # This element does not exist in the LLRB, so it will not delete any element\r\n print(\"level order\", bst2.levelorder())\r\n print(\"inorder\",bst2.inorder())\r\n '''","sub_path":"2022-2/알고리즘2/코드 자료/[6] [Symbol Table] 코드/RedBlackBST.py","file_name":"RedBlackBST.py","file_ext":"py","file_size_in_byte":11525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155216942","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Product\nfrom .form import AddProduct\n# Create your views here.\n\n\ndef home(request):\n products = Product.objects.all()\n template_name = 'store/home.html'\n context = {\n 'products': products\n }\n\n return 
render(request, template_name, context)\n\n\ndef details(request, slug):\n\n    product = get_object_or_404(Product, slug=slug)\n    context = {\n        'product': product\n    }\n    template_name = 'store/details.html'\n    return render(request, template_name, context)\n\n\ndef productlist(request):\n    products = Product.objects.all()\n    template_name = 'store/productslist.html'\n    context = {\n        'products': products\n    }\n    return render(request, template_name, context)\n\ndef addproduct(request):\n\n    template_name = 'store/addproduct.html'\n\n    if request.method == \"POST\":\n        form = AddProduct(request.POST)\n        if form.is_valid():\n            form = form.save(commit=False)\n            form.user = request.user\n            form.save()\n            return redirect('store:productlist')\n    else:\n        form = AddProduct()\n    return render(request, template_name, {'form': form})","sub_path":"inflr3/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"451830460","text":"from app.model import Prediction\nfrom _plotly_future_ import v4_subplots\nimport plotly.graph_objs as go\nfrom plotly.subplots import make_subplots\n\nclass Visualization(Prediction):\n    \n    def __init__(self, exchange, interval, asset, indication, action_model, price_model, market = None):\n        super().__init__(exchange, interval, asset, action_model, price_model, market) \n        super(Visualization, self).get_prediction()\n        super(Visualization, self).prediction_postprocessing(indication)\n\n    def prediction_graph(self):\n        if self.score_price < 50.:\n            future = False\n        else:\n            future = True\n\n        self.fig_action = make_subplots(specs = [[{\"secondary_y\": True}]])\n        self.fig_action.add_trace(go.Scatter(x = self.df_visulization.index, y = self.df_visulization['Adj Close'], name = \"Close Price\", connectgaps = False, \n                        marker = dict(color = '#000000')), secondary_y = False)\n\n        if future:\n            self.fig_action.add_trace(go.Scatter(x = self.df_future_price.index, y = self.df_future_price['Future Price'], name = \"Future Price\", \n                        connectgaps = False, marker = dict(color = '#A9A9A9', size = 6)), secondary_y = False)\n\n        self.fig_action.add_trace(go.Scatter(x = self.df_visulization.index, y = self.df_visulization['Price_Buy'], mode = 'markers', name = \"Buy\", \n                        marker = dict(color = '#32AB60', opacity = 0.8, size = 7.5)), secondary_y = False)\n        self.fig_action.add_trace(go.Scatter(x = self.df_visulization.index, y = self.df_visulization['Price_Sell'], mode = 'markers', name = \"Sell\", \n                        marker = dict(color = '#DB4052', opacity = 0.8, size = 7.5)), secondary_y = False)\n        self.fig_action.add_trace(go.Bar(x = self.df_visulization.index, y = self.df_visulization['Volume'], name = \"Volume\", \n                        marker = dict(color = '#5DADE2', opacity = 0.45)), secondary_y = True)\n\n        self.fig_action.update_layout(autosize = False, height = 750, dragmode = False, hovermode = 'x', plot_bgcolor = '#ECF0F1', \n                        title = dict(text = f\"{self.asset} to {self.market}.\", y = 0.95, x = 0.5, xanchor = 'center', yanchor = 'top', font = dict(size = 20)))\n        self.fig_action.update_xaxes(title_text = \"Date\", showline = True, linewidth = 2, linecolor = '#000000', rangeslider_visible = True, \n                        range = [self.df_visulization.index.min(), self.df_visulization.index.max()])\n        self.fig_action.update_yaxes(title_text = \"Close Price & Action\", secondary_y = False, showline = True, linewidth = 2, linecolor = '#000000')\n        self.fig_action.update_yaxes(title_text = \"Volume\", secondary_y = True, showline = True, linewidth = 2, 
linecolor = '#000000')\n\n return self.fig_action\n\n def technical_analysis_graph(self):\n self.df_visulization_technical = self.df.iloc[-366:]\n \n self.fig_analysis = make_subplots(rows = 3, cols = 1)\n self.fig_analysis.append_trace(go.Scatter(x = self.df_visulization_technical.index, y = self.df_visulization_technical['MACD'], name = \"MACD\", \n marker = dict(color = '#2ECC71')), row = 1, col = 1)\n self.fig_analysis.append_trace(go.Scatter(x = self.df_visulization_technical.index, y = self.df_visulization_technical['MACDS'], name = \"MACDS\", \n marker = dict(color = '#E74C3C')), row = 1, col = 1)\n self.fig_analysis.append_trace(go.Bar(x = self.df_visulization_technical.index, y = self.df_visulization_technical['MACDH'], name = \"MACDH\", \n marker = dict(color = '#000000')), row = 1, col = 1)\n self.fig_analysis.add_shape(type = 'line', x0 = self.df_visulization_technical.index.min(), x1 = self.df_visulization_technical.index.max(), \n y0 = 0, y1 = 0, line = dict(color = '#000000', width = 0.5), row = 1, col = 1)\n\n self.fig_analysis.append_trace(go.Scatter(x = self.df_visulization_technical.index, y = self.df_visulization_technical['RSI'], name = \"RSI\", \n marker = dict(color = '#A569BD')), row = 2, col = 1)\n self.fig_analysis.add_shape(type = 'line', x0 = self.df_visulization_technical.index.min(), x1 = self.df_visulization_technical.index.max(), \n y0 = 30, y1 = 30, line = dict(color = '#008000', width = 1), row = 2, col = 1)\n self.fig_analysis.add_shape(type = 'line', x0 = self.df_visulization_technical.index.min(), x1 = self.df_visulization_technical.index.max(), \n y0 = 70, y1 = 70, line = dict(color = '#FF0000', width = 1), row = 2, col = 1)\n\n self.fig_analysis.append_trace(go.Scatter(x = self.df_visulization_technical.index, y = self.df_visulization_technical['SR_K'], name = \"Stochastic K\", \n marker = dict(color = '#F39C12')), row = 3, col = 1)\n self.fig_analysis.append_trace(go.Scatter(x = self.df_visulization_technical.index, y = self.df_visulization_technical['SR_D'], name = \"Stochastic D\", \n marker = dict(color = '#3780BF')), row = 3, col = 1)\n self.fig_analysis.add_shape(type = 'line', x0 = self.df_visulization_technical.index.min(), x1 = self.df_visulization_technical.index.max(), y0 = 20, y1 = 20, line = dict(color = '#008000', width = 1), row = 3, col = 1)\n self.fig_analysis.add_shape(type = 'line', x0 = self.df_visulization_technical.index.min(), x1 = self.df_visulization_technical.index.max(), y0 = 80, y1 = 80, line = dict(color = '#FF0000', width = 1), row = 3, col = 1)\n\n self.fig_analysis.update_layout(autosize = False, height = 750, dragmode = False, hovermode = 'x', plot_bgcolor = '#ECF0F1', \n title = dict(text = \"Technical Analysis.\", y = 0.95, x = 0.5, xanchor = 'center', yanchor = 'top', font = dict(size = 20)))\n self.fig_analysis.update_shapes(dict(opacity = 0.7))\n self.fig_analysis.update_xaxes(showgrid = True, zeroline = True, showline = True, linewidth = 2, linecolor = '#000000')\n self.fig_analysis.update_xaxes(title_text = \"Date\", row = 3, col = 1)\n self.fig_analysis.update_yaxes(zeroline = True, showline = True, linewidth = 2, linecolor = '#000000')\n self.fig_analysis.update_yaxes(title_text = \"MACD\", row = 1, col = 1)\n self.fig_analysis.update_yaxes(title_text = \"RSI\", range = [0, 100], tickvals = [0, 30, 70, 100], row = 2, col = 1)\n self.fig_analysis.update_yaxes(title_text = \"%K & %D\", range = [-1, 101], tickvals = [0, 20, 80, 100], row = 3, col = 1)\n\n return 
self.fig_analysis\n","sub_path":"app/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"406832815","text":"class Heap:\n    def __init__(self, keys: list = None, min=True, compare=None):\n        self.compare = compare\n        # denote whether it's a min heap\n        self.min = min\n        if keys:\n            self.pq = [0] + keys\n            self.qp = dict(zip(keys, range(1, len(self.pq))))\n            k = len(keys) // 2\n            while k >= 1:\n                self._sink(k)\n                k -= 1\n        else:\n            self.pq = [0]\n            self.qp = {}\n\n    def insert(self, key):\n        self.pq.append(key)\n        self.qp[key] = self.size()\n        self._swim(self.size())\n\n    def pop(self):\n        if self.empty():\n            raise Exception('Priority queue underflow')\n        m = self.pq[1]\n        self._exch(1, self.size())\n        self.pq.pop()\n        self._sink(1)\n        del self.qp[m]\n        return m\n\n    def top(self):\n        if self.empty():\n            raise Exception('Priority queue underflow')\n        return self.pq[1]\n\n    def empty(self):\n        return self.size() == 0\n\n    def size(self):\n        return len(self.pq) - 1\n\n    def delete(self, key):\n        self.validate(key)\n        i = self.qp[key]\n        if i == self.size():\n            self.pq.pop()\n            del self.qp[key]\n            return\n        self._exch(i, self.size())\n        self.pq.pop()\n        self._swim(i)\n        self._sink(i)\n        del self.qp[key]\n\n    def validate(self, key):\n        if key not in self:\n            raise ValueError(\"index is not in the priority queue\")\n\n    def __contains__(self, item):\n        return item in self.qp\n\n    # single leading underscore (not double) so subclasses such as IndexHeap can\n    # override _compare; double underscores would be name-mangled to Heap's own\n    # versions and the overrides below would never be used\n    def _compare(self, i, j):\n        if self.compare:\n            comp = self.compare(self.pq[i], self.pq[j]) > 0\n        else:\n            comp = self.pq[i] > self.pq[j]\n        return comp if self.min else not comp\n\n    def _exch(self, i, j):\n        swap = self.pq[i]\n        self.pq[i] = self.pq[j]\n        self.pq[j] = swap\n        self.qp[self.pq[i]] = i\n        self.qp[self.pq[j]] = j\n\n    def _swim(self, k):\n        while k > 1 and self._compare(k // 2, k):\n            self._exch(k, k // 2)\n            k = k // 2\n\n    def _sink(self, k):\n        while 2 * k <= self.size():\n            j = 2 * k\n            if j < self.size() and self._compare(j, j + 1):\n                j += 1\n            if not self._compare(k, j):\n                break\n            self._exch(k, j)\n            k = j\n\n\n# order (key,value) pairs by value\nclass IndexHeap(Heap):\n    def __init__(self, keys: list = None, values: list = None, min=True, compare=None):\n        if values or keys:\n            if not (values and keys):\n                raise ValueError('illegal arguments')\n            self.keys = dict(zip(keys, values))\n        else:\n            self.keys = {}\n        super().__init__(keys, min, compare)\n\n    def pop(self):\n        m = super().pop()\n        value = self.keys[m]\n        del self.keys[m]\n        return m, value\n\n    def top(self):\n        m = super().top()\n        return m, self.keys[m]\n\n    def insert(self, key, value):\n        if key in self:\n            raise ValueError(\"index is already in the priority queue\")\n        # store the value before sifting, since _compare looks it up\n        self.keys[key] = value\n        super().insert(key)\n\n    def changeKey(self, key, value):\n        self.validate(key)\n        self.keys[key] = value\n        self._swim(self.qp[key])\n        self._sink(self.qp[key])\n\n    def valueOf(self, key):\n        self.validate(key)\n        return self.keys[key]\n\n    def delete(self, key):\n        super().delete(key)\n        del self.keys[key]\n\n    def _compare(self, i, j):\n        if self.compare:\n            comp = self.compare(self.keys[self.pq[i]], self.keys[self.pq[j]]) > 0\n        else:\n            comp = self.keys[self.pq[i]] > self.keys[self.pq[j]]\n        return comp if self.min else not comp\n","sub_path":"libheap/libheap.py","file_name":"libheap.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"176135721","text":"# -*- coding: utf-8 -*-\n# flake8: noqa\n\nfrom qiniu 
import Auth, put_file, etag, put_data\nimport qiniu.config\n\nfrom flask import current_app\n\ndef upload(data):\n    # Fill in your Access Key and Secret Key\n    access_key = current_app.config.get(\"QINIU_ACCESS_KEY\")\n    secret_key = current_app.config['QINIU_SECRET_KEY']\n\n    # Build the authentication object\n    q = Auth(access_key, secret_key)\n\n    # The bucket to upload into\n    bucket_name = current_app.config.get(\"QINIU_BUCKET_NAME\")\n\n    # File name to save as after uploading\n    key = None\n\n    # Generate the upload token; an expiry time etc. can be specified\n    token = q.upload_token(bucket_name, key, 360000)\n    # data is the binary stream of the file to upload\n    ret, info = put_data(token, key, data)\n    print('info={}'.format(info))\n    print('ret={}'.format(ret))\n\n    return ret['key']\n\n    # Local path of the file to upload\n    # localfile = './sync/bbb.jpg'\n\n    # ret, info = put_file(token, key, localfile)\n    # print(info)\n    # assert is a Python keyword for assertions, checking that a result matches expectations;\n    # assert ret['key'] == key\n    # assert ret['hash'] == etag(localfile)\n\n\n","sub_path":"flask_prj/tbd_42/common/utils/qiniu_storage.py","file_name":"qiniu_storage.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"607079939","text":"'''Tools used to work with AES encryption.'''\n#ignore invalid constant names\n# pylint: disable=C0103\n\nfrom Crypto.Cipher import AES\nimport binascii\nimport random\n\n# -----------------------------------------------------------------------------\n# TOOLS -----------------------------------------------------------------------\n\ndef bit_error_vector(target):\n    '''Returns everything you need for a cbc bitflip attack:\ndummy: stands in for the character you want to turn into target\npreflip: sits blocklength characters before your target and\nflipped: that byte but after the desired single bit error.'''\n    #assumes the target oracle strips all non alphanumerics\n    alnum = b'1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'\n    #'q' and 'r' are the only two characters with seven alphanum bit neighbours\n    qflips = {0: b'p', 1: b's', 2: b'u', 3: b'y', 4: b'a', 5: b'Q', 6: b'1'}\n    #no alphanumerics contain 1xxx xxxx\n    for exp, dummy in enumerate(bit_neighbours(target)[:7]):\n        if dummy not in alnum:\n            continue\n        return dummy, b'q', qflips[exp]\n    return None, None, None\n\ndef bit_neighbours(character):\n    '''Returns all 1 bit neighbours of character in eight ordered bytes.'''\n    if isinstance(character, int):\n        assert character < 256 and character >= 0, 'Value out of range [0-255]'\n        character = bytes([character])\n    elif isinstance(character, bytes):\n        assert len(character) == 1, 'Too many characters.'\n    else:\n        #a bare string assert always passes; fail explicitly instead\n        assert False, 'Bit neighbours only accepts 1 byte or an 8 bit integer.'\n    neighbours = []\n    for exp in range(8):\n        bitflip = bytes([pow(2,exp)])\n        neighbours.append(xor(character, bitflip))\n    return neighbours\n\ndef byte_dictionary(oracle, block_size, known_text):\n    '''Creates a rainbow table to attack a specific byte in an ecb block.'''\n    index = len(known_text)\n    block_id, block_index = divmod(index, block_size)\n\n    padding = b''\n    if block_id == 0:\n        padding += b'\\x00' * (block_size - 1 - block_index)\n        padding += known_text[-block_index:]\n    else:\n        padding = known_text[-(block_size - 1):] #the last block_size-1 recovered bytes\n    return {oracle(padding + bytes([i]))[:block_size]: bytes([i])\n            for i in range(256)}\n\ndef count_block_repeats(cipher, block_size):\n    '''Chunks cipher by block size, returns a duplicate count.'''\n    assert len(cipher) % block_size == 0, \\\n        'AES ECB cipher length must be an even multiple of block size.'\n    cipher_blocks = list(dice(cipher, block_size))\n    cipher_blocks.sort()\n    duplicate_count = 0\n    for 
index in range(len(cipher_blocks) - 1):\n if cipher_blocks[index] == cipher_blocks[index+1]:\n duplicate_count += 1\n return duplicate_count\n\n#gets flummoxed by random padding; could be fixed, don't need to now.\ndef detect_block_size(oracle):\n '''Returns the block size used by an encryption oracle, 0 IFF > 32.'''\n base_text = random_bytes(64)\n base_len = len(oracle(base_text))\n for offset in range(1,33):\n test_len = len(oracle(base_text + b'\\x00' * offset))\n if test_len != base_len:\n return test_len - base_len\n return 0\n\ndef detect_ecb(oracle):\n '''Returns True if the oracle uses ECB, False otherwise.'''\n block_size = detect_block_size(oracle)\n if count_block_repeats(oracle(b'\\x00' * (block_size*3 - 1)), block_size):\n return True\n return False\n\ndef dice(source, width):\n '''Dices source into width sized chunks.'''\n for chunk_start in range(0, len(source), width):\n yield source[chunk_start : chunk_start + width]\n\ndef pad(text, block_size, padding=b''):\n '''Pads text into an even multiple of block_size, returns bytes.'''\n assert isinstance(text, bytes), 'pad only accepts text bytes'\n assert isinstance(padding, bytes), 'pad only accepts padding bytes'\n text_remainder = len(text) % block_size\n if text_remainder == 0:\n return text\n else:\n chrs_needed = block_size - text_remainder\n #by default enforce PKCS#7\n if padding == b'':\n return text + bytes([chrs_needed]) * chrs_needed\n else:\n div, mod = divmod(chrs_needed, len(padding))\n return text + padding * div + padding[0:mod]\n\ndef parse_url(url, delin=b'&'):\n '''Parses a url into a dictionary. Doesn't sanitize or sanity check.'''\n entries = url.split(delin)\n #double list comprehension to handle list output of split()\n return {k:v for k,v in [kv.split(b'=') for kv in entries]}\n\ndef percent_encode(text):\n '''Uses percent encoding to make text url-safe.'''\n encode_dict = {b' ': b'%20', b'!': b'%21', b'#': b'%23', b'$': b'%24',\n b'&': b'%26', b\"'\": b'%27', b'(': b'%28', b')': b'%29',\n b'*': b'%2A', b'+': b'%2B', b',': b'%2C', b'/': b'%2F',\n b':': b'%3A', b';': b'%3B', b'=': b'%3D', b'?': b'%3F',\n b'@': b'%40', b'[': b'%5B', b']': b'%5D'}\n encode_text = b''\n for char in text:\n if bytes([char]) in encode_dict:\n encode_char = encode_dict[bytes([char])]\n else:\n encode_char = bytes([char])\n encode_text += encode_char\n return encode_text\n\ndef percent_decode(text):\n '''Takes the url-safe text and turns it into a human readable string.'''\n decode_dict = {b'%20': b' ', b'%21': b'!', b'%23': b'#', b'%24': b'$',\n b'%26': b'&', b'%27': b\"'\", b'%28': b'(', b'%29': b')',\n b'%2A': b'*', b'%2B': b'+', b'%2C': b',', b'%2F': b'/',\n b'%3A': b':', b'%3B': b';', b'%3D': b'=', b'%3F': b'?',\n b'%40': b'@', b'%5B': b'[', b'%5D': b']'}\n decode_text = b''\n last_code_index = len(text) - 2\n text_enum = enumerate(text)\n for index, char in text_enum:\n #percent sign or percent code\n if char == ord('%') and index < last_code_index:\n percent_code = text[index:index+3]\n #percent code\n if percent_code in decode_dict:\n decode_char = decode_dict[percent_code]\n next(text_enum)\n next(text_enum)\n #percent sign\n else:\n decode_char = bytes([char])\n #everything else\n else:\n decode_char = bytes([char])\n decode_text += decode_char\n return decode_text\n\ndef random_bytes(n):\n '''Returns n random bytes.'''\n return b''.join([bytes([random.getrandbits(8)]) for _ in range(n)])\n\ndef strip_pkcs7(text):\n '''Strips PKCS7 padding from bytes. 
Throws an exception on bad padding.'''\n    pad_len = ord(text[-1:])\n    for padding in text[-pad_len:]:\n        if padding != pad_len:\n            raise ValueError\n    return text[:-pad_len]\n\ndef xor(text1, text2):\n    '''xors two texts, repeating either as necessary.'''\n    assert isinstance(text1, bytes), 'text1 is not bytes'\n    assert isinstance(text2, bytes), 'text2 is not bytes'\n    l1 = len(text1)\n    l2 = len(text2)\n    message = bytearray(max(l1,l2))\n    for index in range(max(l1, l2)):\n        message[index] = text1[index % l1] ^ text2[index % l2]\n    return bytes(message)\n\n# -----------------------------------------------------------------------------\n# ENCRYPT / DECRYPT -------------------------------------------------------------\n\ndef encrypt_aes_cbc(plaintext, key, init_vector):\n    '''Encrypts plaintext using AES in CBC mode.'''\n    assert len(key) in [16,24,32], 'key length must be 16, 24, or 32 bytes'\n    block_size = 16 #the AES block is always 16 bytes, regardless of key length\n    assert len(init_vector) == block_size, \\\n        'initialization vector length must equal the block length (16 bytes)'\n    cipher_blocks = []\n    plaintext = pad(plaintext, block_size)\n    plain_blocks = list(dice(plaintext, block_size))\n\n    cipher_blocks.append(cbc_encrypt_round(plain_blocks[0], init_vector, key))\n    for r in range(1, len(plain_blocks)):\n        cipher_blocks.append(\n            cbc_encrypt_round(plain_blocks[r], cipher_blocks[r-1], key))\n\n    return b''.join(cipher_blocks)\n\ndef decrypt_aes_cbc(cipher_text, key, init_vector):\n    '''Decrypts cipher text using AES in CBC mode.'''\n    assert len(key) in [16,24,32], 'key length must be 16, 24, or 32 bytes'\n    block_size = 16 #the AES block is always 16 bytes, regardless of key length\n    plain_blocks = []\n    assert len(cipher_text) % block_size == 0,\\\n        'irregularly sized cipher text for AES_CBC decryption.'\n    cipher_blocks = list(dice(cipher_text, block_size))\n\n    plain_blocks.append(cbc_decrypt_round(cipher_blocks[0], init_vector, key))\n    for r in range(1, len(cipher_blocks)):\n        plain_blocks.append(\n            cbc_decrypt_round(cipher_blocks[r], cipher_blocks[r-1], key))\n\n    return b''.join(plain_blocks)\n\ndef cbc_encrypt_round(text, cipher, key):\n    '''XOR's the current plaintext block with the previous cipher block, then\nencrypts both using key. 
Block 0 will require some initialization vector.'''\n    return encrypt_aes_ecb(xor(text, cipher), key)\n\ndef cbc_decrypt_round(this_cipher_block, prev_cipher_block, key):\n    '''Decrypt the current block with key, XOR with previous block.'''\n    return xor(decrypt_aes_ecb(this_cipher_block, key), prev_cipher_block)\n\ndef encrypt_aes_ecb(plaintext, key):\n    '''Encrypts plaintext using key in ECB mode.'''\n    assert len(key) in [16, 24, 32], 'aes_ecb key length must be 16, 24, or 32'\n    plaintext = pad(plaintext, 16) #pad to the AES block size, not the key length\n    return AES.new(key, AES.MODE_ECB).encrypt(plaintext)\n\ndef decrypt_aes_ecb(cipher, key):\n    '''Decrypts cipher using key in ECB mode.'''\n    assert len(key) in [16, 24, 32], 'aes_ecb key length must be 16, 24, or 32'\n    return AES.new(key, AES.MODE_ECB).decrypt(cipher)\n\n# -----------------------------------------------------------------------------\n# ATTACKS ---------------------------------------------------------------------\n\ndef escale_admin_cbc():\n    '''Uses a bitflip attack to create a cyphertext that gives admin rights.'''\n    #ord('=') == 61 == 0011 1101\n    #ord(';') == 59 == 0011 1011\n    #ord('9') == 57 == 0011 1001\n    #the prepend's 32 characters\n    block1 = b'X'*16\n    block2 = b'X'*5 + b'9admin9True'\n    block_error = b'\\x00'*5 + b'\\x02' + b'\\x00'*5 + b'\\x04' + b'\\x00'*4\n\n    cipher = cbc_oracle(block1 + block2)\n    corrupt_cipher = cipher[:32] + xor(cipher[32:48],block_error) + cipher[48:]\n\n    print(is_admin_cbc_oracle(corrupt_cipher))\n\ndef escale_admin_ecb():\n    '''Forges an encrypted (signed) user account with admin access.'''\n    #i cheated, and used my knowledge of the structure of accounts\n    #done fair, this'd check for ECB, blocksize, and decrypt default data\n    admin_block = make_user(b'X' * 10 + b'admin' + b'\\x11' * 11)[16:32]\n    user_name = b'gman'\n    #find length of padding\n    for offset in range(16):\n        if len(make_user(b'\\x00' * offset)) < \\\n           len(make_user(b'\\x00' * (offset + 1))):\n            user_name += b'X' * offset\n    #create a profile whose penultimate block ends with \"role=\"\n    user_blocks = make_user(user_name)[:-16]\n    #and append an encrypted \"admin{padding}\" block\n    return user_blocks + admin_block\n\n#chokes on oracles with variable end padding?\ndef break_ecb(oracle):\n    '''Returns the secret plaintext that the oracle appends to its input.'''\n    assert detect_ecb(oracle), 'encryption oracle is not using ecb mode'\n    block_size = detect_block_size(oracle)\n\n    #sets up a wrapper to avoid text prepended by the oracle\n    prepend = detect_prepend(oracle, block_size)\n    if prepend > 0:\n        setup_slice_oracle(oracle, prepend, block_size)\n        oracle = slice_oracle\n\n    known_text = b''\n    crypt_len = len(oracle(b''))\n    for index in range(crypt_len):\n        block_id, block_index = divmod(index, block_size)\n        #position that byte at the end of an encryption block\n        offset = b'\\x00' * (block_size - 1 - block_index)\n        #create a dictionary for the next byte to decrypt it\n        temp_dict = byte_dictionary(oracle, block_size, known_text)\n        newest_char = temp_dict[oracle(offset)[block_id * block_size\n                                               :(block_id + 1) * block_size]]\n        #the rest is just padding on the final block, skip it\n        if newest_char == b'\\x01' and (crypt_len - index) < block_size:\n            break\n        #update the decrypted text\n        known_text += newest_char\n    return known_text\n\ndef detect_prepend(oracle, block_size):\n    '''Returns the length of text prepended by an oracle.'''\n    prepad = b''\n    #figure out how much padding is needed to square the prepend.\n    for _ in range(block_size):\n        null_repeats = count_block_repeats(oracle(prepad), block_size)\n        prepad_cipher = 
oracle(prepad + b'\\x00' * (block_size*2))\n        if count_block_repeats(prepad_cipher, block_size) > null_repeats:\n            break\n        prepad += b'X'\n    #figure out where the prepend + padding terminates\n    cipher_blocks = list(dice(prepad_cipher, block_size))\n    for index in range(len(cipher_blocks) - 1):\n        if cipher_blocks[index] == cipher_blocks[index+1]:\n            return index * block_size - len(prepad)\n    #no prepend found\n    return 0\n\ndef slice_oracle(attack):\n    '''Reduces aes_ecb(prepend|attack|secret) to aes_ecb(attack|secret).'''\n    assert slice_oracle.oracle is not None, 'slice_oracle used before set up.'\n    return slice_oracle.oracle(slice_oracle.padding + attack)[slice_oracle.start_index:]\n\n#python doesn't have static functions? this feels kludgy...\nslice_oracle.oracle = None\nslice_oracle.padding = b''\nslice_oracle.start_index = 0\n\ndef setup_slice_oracle(oracle, prepend, block_size):\n    '''Sets up slice_oracle to be used.'''\n    slice_oracle.oracle = oracle\n    slice_oracle.padding = b'X' * (block_size - prepend % block_size)\n    slice_oracle.start_index = prepend + len(slice_oracle.padding)\n\n# -----------------------------------------------------------------------------\n# ORACLES ---------------------------------------------------------------------\n\ndef odd_encryption_oracle(plaintext, return_mode=False):\n    '''Encrypts plaintext with a random padding, key, mode, and init vector.'''\n    pad_front = random_bytes(random.randint(5,10))\n    pad_back = random_bytes(random.randint(5,10))\n    plaintext = pad_front + plaintext + pad_back\n\n    key = random_bytes(16)\n    ecb = random.getrandbits(1) #1 = ECB, 0 = CBC\n\n    #50/50 chance of ECB/CBC mode\n    if ecb:\n        cipher = encrypt_aes_ecb(plaintext, key)\n    else:\n        iv = random_bytes(16)\n        cipher = encrypt_aes_cbc(plaintext, key, iv)\n\n    if return_mode:\n        return cipher, ecb\n    else:\n        return cipher\n\ndef make_user(email):\n    '''Returns a new user's encrypted profile.'''\n    #sanitize data\n    email = email.replace(b'&', b'_')\n    email = email.replace(b'=', b'_')\n    return encrypt_user(b'email=' + email + b'&uid=10&role=user')\n\ndef encrypt_user(plaintext):\n    '''Privileged access function to encrypt (sign?) 
a user account.'''\n    random.seed(1)\n    key = random_bytes(16)\n    plaintext = pad(plaintext, 16)\n    return encrypt_aes_ecb(plaintext, key)\n\ndef decrypt_user(cipher):\n    '''Privileged access function to decrypt a user account.'''\n    random.seed(1)\n    key = random_bytes(16)\n    return strip_pkcs7(decrypt_aes_ecb(cipher, key))\n\ndef encryption_oracle(plaintext):\n    '''Encrypts plaintext using an unknown but consistent key.'''\n    random.seed(1)\n    key = random_bytes(16)\n    enigma = binascii.a2b_base64( \\\n        b'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg' + \\\n        b'aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq' + \\\n        b'dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg' + \\\n        b'YnlK')\n    return encrypt_aes_ecb(plaintext + enigma, key)\n\ndef prepend_oracle(plaintext):\n    '''Returns aes128ecb(jibberish || plaintext || secret).'''\n    random.seed(2)\n    jibberish = random_bytes(random.randint(16,64))\n    return encryption_oracle(jibberish + plaintext)\n\ndef cbc_oracle(plaintext):\n    '''Returns CBC128(prepend || %code(plaintext) || append).'''\n    random.seed(3)\n    key = random_bytes(16)\n    init_vector = random_bytes(16)\n    prepend = b\"comment1=cooking%20MCs;userdata=\"\n    append = b\";comment2=%20like%20a%20pound%20of%20bacon\"\n    safetext = percent_encode(plaintext)\n    return encrypt_aes_cbc(prepend + safetext + append, key, init_vector)\n\ndef is_admin_cbc_oracle(cipher):\n    '''Checks cbc_oracle for admin status.'''\n    random.seed(3)\n    key = random_bytes(16)\n    init_vector = random_bytes(16)\n\n    plaintext = strip_pkcs7(decrypt_aes_cbc(cipher, key, init_vector))\n\n    user_profile = parse_url(plaintext, b';')\n    if b'admin' in user_profile:\n        return user_profile[b'admin']\n    return False\n\n","sub_path":"tools/aes.py","file_name":"aes.py","file_ext":"py","file_size_in_byte":16619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"248138567","text":" # -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 18 11:42:50 2019\r\n\r\n@author: lhuismans\r\n\"\"\"\r\nimport time\r\nfrom stage import LudlStage\r\n#import visa\r\n#First initialize the stage, the correct COM-port has to be specified. I think you can find the COM# under device manager.\r\n\r\nludlStage = LudlStage(\"COM6\")\r\n\r\n#ludlStage.send_end='True'\r\n#ludlStage.delay = 0.2\r\n#ludlStage.baud_rate=9600\r\n#ludlStage.read_termination = '\\r'\r\n#ludlStage.write_termination='\\r'\r\n\r\n\r\n#Now that the stage is initialized, functions can be passed to it. 
In the stage.py file each function is explained and it is specified what parameters it takes.\r\n#ludlStage.Joystick(True)\r\n#ludlStage.timeout = 0.1\r\n#ludlStage.delay = 0.1\r\n\r\ni = 0\r\nj = 0\r\nludlStage.moveAbs(i,j)\r\n#ludlStage.moveRel(i,j)\r\n#time.sleep(1)\r\nii, jj = ludlStage.getPos() #j increase = fov in labview shifts down\r\n\r\nrow_start = 0\r\nrow_end = 3200\r\ncolumn_start = 0\r\ncolumn_end = 3200\r\n\r\nstep = 1500\r\nposition_index=[]\r\ngood_position_index = []\r\n\r\nfor i in range(row_start, row_end, step):\r\n    position_index.append(i)\r\n    for j in range(column_start, column_end, step):\r\n        position_index.append(j)\r\n        print ('-----------------------------------')\r\n        print (position_index)\r\n        \r\n        #stage movement\r\n        ludlStage.moveAbs(i,j)\r\n        time.sleep(1)\r\n        \r\n        k=[i,j]\r\n        x = input('Keep this position? (y/n) ʅ(´◔౪◔)ʃ : ')\r\n        if x == 'y':\r\n            good_position_index.append(k)\r\n        \r\n        #input(\"Press Enter to continue...\")\r\n        \r\n        time.sleep(1)\r\n        \r\n        ludlStage.getPos()\r\n        \r\n        \r\n        del position_index[-1]\r\n    print ('---------------^^^^---------------')\r\n    position_index=[]\r\n    \r\nfor i in range(len(good_position_index)):\r\n    ludlStage.moveAbs(good_position_index[i][0],good_position_index[i][1])\r\n    input(\"Press Enter to continue...\")\r\n\"\"\"\r\nadress = \"COM7\"\r\nrm = visa.ResourceManager()\r\ntime.sleep(5)\r\nludlStageConnection = rm.open_resource(adress)\r\ncommand = 'Move X = %d Y = %d' % (0, 0)\r\nprint(ludlStageConnection.query(command))\r\ntime.sleep(5)\r\ncommand = 'Move X = %d Y = %d' % (-10000, -10000)\r\nprint(ludlStageConnection.query(command))\r\nludlStageConnection.clear()\r\n\"\"\"\r\n#If more functions are needed, the programming manual from Ludl has an extensive list of functions that can be provided.\r\n","sub_path":"SampleStageControl/testStage.py","file_name":"testStage.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"55197220","text":"\"\"\"Script for pulling projections and comparing FAs to my roster.\n\nmore details to come\n\"\"\"\n\nimport argparse\nimport json\nimport pprint\nimport os\nfrom halo import Halo\nfrom sleeper_wrapper import Players, League\nfrom tqdm import tqdm, trange\nfrom utils.utils import combine_projections\n\n# add help descriptions\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--pull_players\",\n    action=argparse.BooleanOptionalAction,\n    required=True,\n    help=\"True if you want to re-pull players from Sleeper\",\n)\nparser.add_argument(\n    \"-p\",\n    \"--positions\",\n    required=False,\n    nargs=\"+\",\n    help=\"List of positions to pull. 
Space separated and no quotes.\",\n)\n\n\ndef main():\n with Halo(\"Setting up script details.\", spinner=\"dots\") as spinner:\n league_id = os.environ.get(\"SLEEPER_LEAGUE_ID\", None)\n user_id = os.environ.get(\"SLEEPER_USER_ID\", None)\n\n args = parser.parse_args()\n command_args = dict(vars(args))\n pull_players = command_args.pop(\"pull_players\", None)\n keep_positions = tuple(command_args.pop(\"positions\", None).pop().split(\",\"))\n if keep_positions == (\"all\",):\n keep_positions = [\"QB\", \"RB\", \"WR\", \"TE\"]\n elif keep_positions == (\"flex\",):\n keep_positions = [ \"RB\", \"WR\", \"TE\"]\n spinner.succeed()\n\n Halo(\n f\"Included positions are {', '.join(keep_positions)}\", spinner=\"dots\"\n ).succeed()\n league = League(league_id)\n players = Players()\n\n league_rosters = league.get_rosters()\n # shouldn't this be if not is_dry_run?\n if pull_players:\n all_players = players.get_all_players()\n with open(\"./data/sleeper_players_current.json\", \"w\") as outfile:\n json.dump(all_players, outfile)\n else:\n with open(\"./data/sleeper_players_current.json\", \"r\") as infile:\n all_players = json.load(infile)\n\n own_team = [team for team in league_rosters if team[\"owner_id\"] == user_id].pop()\n own_players = own_team[\"players\"]\n keep_players = {\n p_id: p_data\n for p_id, p_data in all_players.items()\n if p_data[\"position\"] in keep_positions\n }\n # save keep_players for testing\n with open(\"./data/sleeper_players_keep.json\", \"w\") as outfile:\n json.dump(keep_players, outfile)\n # ID free agents by comparing keep_players to rosters\n rostered_player_ids = [\n player for team in league_rosters for player in team[\"players\"]\n ]\n with Halo(\"Separating players into rostered and FAs.\", spinner=\"dots\") as spinner:\n free_agents = {\n p_id: p_data\n for p_id, p_data in keep_players.items()\n if p_id not in rostered_player_ids and p_data[\"team\"] is not None\n }\n rostered_players = {\n p_id: p_data\n for p_id, p_data in keep_players.items()\n if p_id in rostered_player_ids\n }\n spinner.succeed()\n\n with Halo(\"Pulling projections\", spinner=\"dots\") as spinner:\n combined_proj = combine_projections(keep_positions)\n spinner.succeed()\n\n cleaned_names = combined_proj.keys()\n # add projections in to rosters\n for p_id, p_data in free_agents.items():\n # workaround for players who aren't in all 3\n # if p_data[\"search_full_name\"] in cleaned_names:\n try:\n p_data[\"projections\"] = combined_proj[p_data[\"search_full_name\"]][\n \"avg_proj_pts\"\n ]\n\n # else:\n except KeyError:\n p_data[\"projections\"] = 0\n\n for p_id, p_data in rostered_players.items():\n # if p_data[\"search_full_name\"] in cleaned_names:\n try:\n p_data[\"projections\"] = combined_proj[p_data[\"search_full_name\"]][\n \"avg_proj_pts\"\n ]\n # else:\n except KeyError:\n p_data[\"projections\"] = 0\n Halo(\"Added projections to FAs and rostered players.\", spinner=\"dots\").succeed()\n\n # comparison\n own_roster = {\n p_id: p_data for p_id, p_data in rostered_players.items() if p_id in own_players\n }\n waiver_players = dict()\n for p_id, p_data in own_roster.items():\n if p_data[\"status\"] == \"Injured Reserve\":\n continue\n waiver_dict = {\n \"drop_proj\": p_data[\"projections\"],\n \"players_to_add\": list(),\n }\n # don't look at FA if projection is 0\n if p_data[\"projections\"] == 0:\n continue\n for fa_id, fa_data in free_agents.items():\n if (fa_data[\"projections\"] > 0.95 * p_data[\"projections\"]) and (\n fa_data[\"position\"] == p_data[\"position\"]\n ):\n fa_dict 
= {\n                    \"waiver_player\": fa_data[\"search_full_name\"],\n                    \"waiver_proj\": fa_data[\"projections\"],\n                }\n                waiver_dict[\"players_to_add\"].append(fa_dict)\n        waiver_players[p_data[\"search_full_name\"]] = waiver_dict\n    Halo(\n        \"Compared FA projections to your roster. Returning players with better projections.\",\n        spinner=\"dots\",\n    ).succeed()\n\n    pp = pprint.PrettyPrinter()\n    pp.pprint(waiver_players)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/sleeper_waivers.py","file_name":"sleeper_waivers.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"322846547","text":"from torch import nn\nimport torch\nfrom torchvision import models,transforms,datasets\nimport torch.nn.functional as F\nclass multimodels(nn.Module):\n    def __init__(self, num_class):\n        super(multimodels,self).__init__()\n        # resnet50 = models.resnet50(pretrained=True)\n        resnet152 = models.resnet152(pretrained=True)\n        densenet161 = models.densenet161(pretrained=True)\n        # self.base_model50 = nn.Sequential(*list(resnet50.children())[:-2])\n        self.base_model152 = nn.Sequential(*list(resnet152.children())[:-2])\n        self.base_model161 = nn.Sequential(*list(densenet161.children())[:-1])\n        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)\n        # self.classifier50 = nn.Linear(resnet50.fc.in_features, num_class)\n        self.classifier152 = nn.Linear(resnet152.fc.in_features, num_class)\n        self.classifier161 = nn.Linear(densenet161.classifier.in_features, num_class)\n\n    def forward(self, x):\n        # input_size = x.size()[2]\n        # self.interp = nn.UpsamplingBilinear2d(size = (int(input_size*0.85)+1, int(input_size*0.85)+1))\n\n        # x2 = self.interp(x)\n        x2 = self.base_model152(x)\n        x3 = self.base_model161(x)\n\n        # x2 = self.interp(x)\n        # x2 = self.base_model152(x)\n\n        # x = self.base_model50(x)\n        # x = self.avgpool(x)\n        # x = x.view(x.size(0), -1)\n        # x = self.classifier50(x)\n\n        x2 = self.avgpool(x2)\n        x2 = x2.view(x2.size(0), -1)\n        x2 = self.classifier152(x2)\n\n        x3 = F.relu(x3, inplace=True)\n        x3 = F.avg_pool2d(x3, kernel_size=7, stride=1).view(x3.size(0), -1)\n        x3 = self.classifier161(x3)\n        \n        out = torch.add(x2, x3)\n        # out = x\n        return out\n","sub_path":"utils/multimodels.py","file_name":"multimodels.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"453282495","text":"\n## Python script for updating the NCBI database \n## which parts get updated could later be selected via a command line argument (see the TO DO below)\n##makes sense to rerun the script from time to time, so the data stays up to date.\nimport os\nimport pickle\nimport pandas as pd\n\n#download and unzip the file\nbashCommand=\"wget ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene_info.gz\"\nos.system(bashCommand)\nbashCommand=\"gunzip gene_info.gz\"\nos.system(bashCommand)\n\n##reading the file, making the series, saving it as a pickle\ngene_info=pd.read_csv(\"gene_info\", sep=\"\\t\", low_memory=False)\nprint(\"data read\")\n\ngene_info.index=gene_info.GeneID\ngene_id_to_symbol=gene_info['Symbol']\npickle.dump(gene_id_to_symbol, open(\"geneID_to_symbol.series.pkl\", \"wb\"))\n\n\n#deleting the initial file to save space:\nbashCommand=\"rm gene_info*\"\nos.system(bashCommand)\n \n\n#TO DO: \n#add a command line argument for parts of the database\n#add 
gene2pubmed\n","sub_path":"ncbi.py","file_name":"ncbi.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583193265","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Fri Apr 10 11:33:23 2020\n\n@author: hankui\n\n\"\"\"\n\n\n#%% Print a single integer denoting the number of characters you must delete \n# to make the two strings anagrams of each other.\nimport collections \n\ndef makeAnagram(a, b):\n\n # count the lengths of both strings\n len_a, len_b = len(a), len(b)\n \n # count the frequencies of each unique entry in each string\n ctr_a = collections.Counter(a)\n ctr_b = collections.Counter(b)\n \n # count the number of strings that can be matched\n intersect = set(a).intersection(set(b))\n count = 0\n for val in intersect:\n num = min(ctr_a[val], ctr_b[val])\n count += num\n \n # calculate the number of deletions needed\n num_del = len_a + len_b - count*2 \n \n return num_del \n\n\n#%% testing \na = 'aabcdde'\nb = 'acdefg'\nmakeAnagram(a, b)","sub_path":"General/SolvedFirstTime/makeAnagram.py","file_name":"makeAnagram.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"471180046","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Decorators for the public API and for internal purpose.\"\"\"\n\nimport functools\nfrom string import ascii_letters, digits, punctuation\nfrom typing import Callable\n\nfrom mimesis import data\nfrom mimesis.exceptions import UnsupportedLocale\n\n__all__ = ['romanize']\n\n\ndef romanize(locale: str = '') -> Callable:\n \"\"\"Romanize the cyrillic text.\n\n Transliterate the cyrillic script into the latin alphabet.\n\n .. 
note:: At this moment it works only for `ru`, `uk`, `kk`.\n\n    :param locale: Locale code.\n    :return: Romanized text.\n    \"\"\"\n\n    def romanize_deco(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            try:\n                # Cyrillic string can contain ascii\n                # symbols, digits and punctuation.\n                alphabet = {s: s for s in\n                            ascii_letters + digits + punctuation}\n                alphabet.update({\n                    **data.ROMANIZATION_DICT[locale],\n                    **data.COMMON_LETTERS,\n                })\n            except KeyError:\n                raise UnsupportedLocale(locale)\n\n            result = func(*args, **kwargs)\n            txt = ''.join([alphabet[i] for i in result if i in alphabet])\n            return txt\n\n        return wrapper\n\n    return romanize_deco\n\n\n# For backward compatibility\nromanized = romanize\n","sub_path":"venv/lib/python3.9/site-packages/mimesis/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"492308936","text":"\n\n#python3.5 mac osx\n#a simple implementation of slope one\n#2016.2.1\n#manggobada@163.com\n'''\ncollaborative filtering based on the slope one algorithm\n\n\n'''\n\nimport numpy as np\n\nclass Slope_One():\n\n    def __init__(self):\n        self.diffs={}\n        self.freqs={}\n\n    def update(self, data):\n        for user,prefs in data.items():\n            for item,rating in prefs.items():\n                self.freqs.setdefault(item, {})\n                self.diffs.setdefault(item, {})\n                for item2, rating2 in prefs.items():\n                    self.freqs[item].setdefault(item2,0)\n                    self.diffs[item].setdefault(item2, 0.0)\n                    self.freqs[item][item2] += 1\n                    self.diffs[item][item2] += (rating - rating2)\n        for item, ratings in self.diffs.items():\n            for item2,rating in ratings.items():\n                ratings[item2] /= self.freqs[item][item2]\n\n    def predict(self, userprefs):\n        preds={}\n        freqs={}\n        for item,rating in userprefs.items():\n            for diffitem,diffratings in self.diffs.items():\n                try:\n                    freq = self.freqs[diffitem][item]\n                except KeyError:\n                    continue\n                preds.setdefault(diffitem, 0.0)\n                freqs.setdefault(diffitem, 0)\n                preds[diffitem] += freq * (diffratings[item] + rating)\n                freqs[diffitem] += freq\n        result=[]\n        for item,value in preds.items():\n            if item not in userprefs and freqs[item]>0:\n                result.append((item,value/freqs[item]))\n        #result=[(item,value/freqs[item]) for item,value in preds.items() if item not in userprefs and freqs[item]>0]\n        result=dict(result)\n        return result\n","sub_path":"models/slopeOne.py","file_name":"slopeOne.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"496651630","text":"import cv2\nimport numpy as np\n\n#Should we show the current inspected bounding box and for how long\nVISUALISE = True\nVISUALISE_PAUSE = 5\n\n#How much does the sliding window shrink by each iteration\nSLIDE_SHRINK_PERCENT = 0.75\n\n#What is our original sliding window target pixels\nTARGET_X_PIXELS = 60\nTARGET_Y_PIXELS = 60\n\n#Do we save the 'found' ROIs?\nSAVE_FOUND_ROI = True\nFOUND_ROI_SAVE_PATH = \"D:/Programming/PythonTests/Wally2/ROI_Found/\"\n\n#Helper function to draw a rectangle on an image and COPY it\ndef draw_rect_img_copy(image, point1, point2, thickness=3, col=(0, 255, 0)):\n    img = image.copy()\n    rtn_img = cv2.rectangle(img, point1, point2, col, thickness)\n    return rtn_img\n\n#Helper function to draw a region of interest on a cloned image. 
\n#Just transforms arguments into those required for draw_rect_img_copy\ndef draw_region_of_interest(image, window_dimensions, coord_top_left):\n    pt1 = (coord_top_left[0], coord_top_left[1])\n    pt2 = (pt1[0] + window_dimensions[0], (pt1[1] + window_dimensions[1]))\n    return draw_rect_img_copy(image, pt1, pt2)\n\n#Iterates an ROI defined by window_dimensions over the image with a stride\n#At each step, the sub image is passed to the process_roi_func\n#If that function returns TRUE, the bounding box representing that sub image is added\n#to the list of locations to be returned\ndef iterate_roi(image, window_dimensions, stride, process_roi_func):\n    locations = []\n    x_size = image.shape[0]\n    y_size = image.shape[1]\n    x_steps = int((x_size - window_dimensions[0]) / stride)\n    y_steps = int((y_size - window_dimensions[1]) / stride)\n    for y in range(0, y_steps+1):\n        for x in range(0, x_steps+1):\n            base_img = image.copy()\n            sub_img = base_img[y*stride:y*stride+window_dimensions[1], x*stride:x*stride+window_dimensions[0]]\n            sub_img = resize_sub_img(sub_img, TARGET_X_PIXELS, TARGET_Y_PIXELS)\n            if VISUALISE:\n                IMG_A = draw_region_of_interest(base_img, window_dimensions, (x*stride, y*stride))\n                cv2.imshow('Test image', IMG_A)\n                key = cv2.waitKey(VISUALISE_PAUSE) #pauses for VISUALISE_PAUSE milliseconds before fetching the next image\n            found = process_roi(np.array(sub_img), process_roi_func=process_roi_func)\n            if found:\n                point1 = (x*stride, y*stride)\n                point2 = (x*stride+window_dimensions[0], y*stride+window_dimensions[1])\n                points = (point1, point2)\n                locations.append(points)\n                if SAVE_FOUND_ROI:\n                    file_name = FOUND_ROI_SAVE_PATH + \"ROI-\" + str(x) + \"-\" + str(y) + \".bmp\"\n                    cv2.imwrite(file_name, sub_img)\n                    print(\"Saved \" + file_name)\n    cv2.destroyAllWindows()\n    return locations\n\n#Sub function to process the ROI. This just passes off the image to the function provided\n#RETURNS TRUE if the object is found\n#Possibly can be removed.\ndef process_roi(sub_img, process_roi_func):\n    if VISUALISE:\n        cv2.imshow('Sub image', sub_img)\n\n    # Found logic\n    return process_roi_func(sub_img)\n\n#Placeholder function to do analysis of sub images.\n#In reality a function that will analyse the sub image will be passed instead of this one\n#This is ONLY for testing \n#RETURNS TRUE IF WE WANT TO KEEP THIS IMAGE AS A POSITIVE LOCATION\ndef analyse_roi(sub_img):\n    if VISUALISE:\n        cv2.imshow('Sub image', sub_img)\n    return False\n\n#Helper function to resize images\ndef resize_sub_img(sub_img, target_x_pixels, target_y_pixels):\n    resized_sub_img = cv2.resize(sub_img,(int(target_x_pixels),int(target_y_pixels)))\n    return resized_sub_img\n\n#--------- MAIN FUNCTION ----------\n#Search all ROI, starting with the full image size, using progressively smaller windows \n#(shrinkage defined by SLIDE_SHRINK_PERCENT) until the window searched is the same size\n#or smaller than the original window dimensions passed to this function\n#---TODO:----\n#* Update TARGET_X_PIXELS and TARGET_Y_PIXELS with the window dimensions passed to this function\n#* Change the approach to maintain the aspect ratio of the window_dimensions passed instead of original image\n#Currently it will scale the full original image and maintain that aspect ratio. A better way is to identify the\n#largest size window of the correct window_dimensions aspect ratio that will fit in the image and use that to start\n#iterating through the ROI\n#* Currently the edge pixels outside a window multiple are ignored. 
Consider adding an additional ROI at the far edge\n#of the image inset as required (and an additional row at the bottom offset)\ndef do_all_roi(image, window_dimensions, stride, process_roi_func):\n    TARGET_X_PIXELS = window_dimensions[0]\n    TARGET_Y_PIXELS = window_dimensions[1]\n    locations = []\n    stride_as_window_percent = stride/window_dimensions[0]\n    #print(\"stride_as_window_percent=\"+str(stride_as_window_percent) )\n    x_size = image.shape[0]\n    y_size = image.shape[1]\n    x_steps = int((x_size - window_dimensions[0]) / stride)\n    y_steps = int((y_size - window_dimensions[1]) / stride)\n\n    cur_x_size = x_size\n    cur_y_size = y_size\n    while cur_x_size >= window_dimensions[0]:\n        cur_x_size = int(cur_x_size * SLIDE_SHRINK_PERCENT)\n        cur_y_size = int(cur_y_size * SLIDE_SHRINK_PERCENT)\n        print(\"Sliding window \" + str(cur_x_size) + \"x\" + str(cur_y_size))\n        #slide window over image at this size\n        #append results to list\n        #shrink window\n        this_window_dimensions = (cur_x_size, cur_y_size)\n        this_stride = int(this_window_dimensions[0] * stride_as_window_percent)\n        new_locations = iterate_roi(image, this_window_dimensions, this_stride, process_roi_func)\n        if len(new_locations) > 0:\n            locations = locations + new_locations\n            print(str(len(new_locations)) + \" locations found at this level\")\n        else:\n            print(\"No new locations found at this level\")\n        #print(\"cur_x_size=\"+str(cur_x_size) )\n    print(\"Finished sliding all windows\")\n    return locations\n\n\nif __name__ == \"__main__\": \n    #TESTING ONLY\n    IMG = cv2.imread('1.bmp')\n    do_all_roi(IMG, (60,60), 20, analyse_roi)\n    print(\"DONE!\")\n","sub_path":"Deliverables/6. Machine learning familiarisation activities/Wally/sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"38739152","text":"\n\"\"\"\nCreated on FEB 19\n\nNinja Robot Thesis\n\n@author: Mohamad Sayegh\n\nMPC \n\nusing path parameters\n\n\n \n\"\"\"\n\n\nimport numpy as np\nfrom numpy import pi, cos, sin\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nfrom pylab import *\nfrom casadi import Function, linspace, vertcat, horzcat, DM, interpolant, sum1, MX, hcat, sumsqr\nfrom rockit import *\nfrom rockit import Ocp , FreeTime, MultipleShooting\nfrom MPC_Bubble_tunnel_generation_v2 import generate_bubbles_mpc_v2, generate_bubbles_mpc_v3, plotting, get_bubbles_mpc_loop\nfrom MPC_Grid_generation import create_obstacles_mpc, create_global_path_mpc\nfrom Bubble_tunnel_generation_v2 import create_tunnel, plotting_v2\n\n\n\n\nobstacles_option = 1\npath_option = 1\n\n\nglobal_end_goal_x = 9     #position of initial and end point\nglobal_end_goal_y = 9\ninitial_pos_x = 0\ninitial_pos_y = 0\nxlim_min = -2    #xlim and ylim of plots\nxlim_max = 12\nylim_min = -2\nylim_max = 12\n\n \n\nobs_horizon = 50\npath_horizon = 2 \n \nN = 5\ndt = 2 \nNsim = 30\n\n#------------- Initialize OCP\n\nocp = Ocp(T = N*dt) \n\n\n#---------------- Initialize grid, occupied positions and bubbles\n\noccupied_positions_x , occupied_positions_y = create_obstacles_mpc(obstacles_option,initial_pos_x,initial_pos_y,obs_horizon)\n\nglobal_path_x, global_path_y, Bspline_obj = create_global_path_mpc(path_option,initial_pos_x,initial_pos_y,path_horizon)\n\n# midpoints_x, midpoints_y, radii_x, radii_y = generate_bubbles_mpc_v2(global_path_x, global_path_y,occupied_positions_x,occupied_positions_y)\n    \nmidpoints_x, midpoints_y, radii_x = generate_bubbles_mpc_v2(global_path_x, 
global_path_y,occupied_positions_x,occupied_positions_y)\n    \nradii_y = radii_x\n\n\nwhile len(midpoints_x) < N:\n    midpoints_x.append(global_path_x[-1])\n    midpoints_y.append(global_path_y[-1])\n    radii_x.append(radii_x[-1])\n    radii_y.append(radii_y[-1])\n\n\n\nglobal_path_x = global_path_x[0:N]\nglobal_path_y = global_path_y[0:N]\nmidpoints_x = midpoints_x[0:N]\nmidpoints_y = midpoints_y[0:N]\nradii_x = radii_x[0:N]\nradii_y = radii_y[0:N]\n\n\n\n# ----------- draw ellipse\nnpoints = 500 #number of points of every circle\nts = np.linspace(0, 2*np.pi, npoints) #for creating circle points\n\n\n\n#------------------------- System model\n\nx = ocp.state()\ny = ocp.state()\ntheta = ocp.state()\nv = ocp.control()\nw = ocp.control()\n\n#--------------------------path parameters \n\ns_path = ocp.state()\nsdot_path = ocp.control()\n\n#-----------------------------ODEs\n\nocp.set_der(x , v*cos(theta))\nocp.set_der(y , v*sin(theta))\nocp.set_der(theta , w)\nocp.set_der(s_path , sdot_path)\n\n\n#-------------------------------------------------------------------------------#\n#                          Solve the first iteration                             #\n#-------------------------------------------------------------------------------#\n\n\n#------------------------- Constraints on initial and end point\n\n# ---- initial \n\nX_0 = ocp.parameter(4)\nX = vertcat(x, y, theta, s_path)\n\nocp.subject_to(ocp.at_t0(X) == X_0)\n\ncurrent_X = vertcat(initial_pos_x,initial_pos_y,0.0,0.0) \n\nocp.set_value(X_0, current_X)\n\n\n#------- end \n\nglobal_goal = vertcat(global_end_goal_x,global_end_goal_y) #final end point of general problem\n\n\nend_goal_x = ocp.parameter(1)\nend_goal_y = ocp.parameter(1)\n\nocp.set_value( end_goal_x, midpoints_x[-1])\nocp.set_value( end_goal_y, midpoints_y[-1])\n\n\n\n#----------------------------- constraints on controls \n\nocp.subject_to(  0  <= ( v <= 1  ))\nocp.subject_to( -pi <= ( w <= pi ))\n\nocp.subject_to( sdot_path >= 0) \n\n\n#--------------------------\n\nslack_tf_x = ocp.variable()\nslack_tf_y = ocp.variable()\n\nocp.subject_to(slack_tf_x >= 0)\nocp.subject_to(slack_tf_y >= 0)\n\nocp.subject_to(-slack_tf_x <= ((ocp.at_tf(x) - end_goal_x) <= slack_tf_x))\nocp.subject_to(-slack_tf_y <= ((ocp.at_tf(y) - end_goal_y) <= slack_tf_y))\n\nocp.add_objective(100*(slack_tf_x + slack_tf_y))\n\n\n#---------------------- Obstacles avoidance \n\n\nbubbles_x = ocp.parameter(1, grid = 'control')\nbubbles_y = ocp.parameter(1, grid = 'control')\nbubbles_radii_x = ocp.parameter(1, grid = 'control')\nbubbles_radii_y = ocp.parameter(1, grid = 'control')\n\n\nocp.set_value(bubbles_x, midpoints_x)\nocp.set_value(bubbles_y, midpoints_y)\nocp.set_value(bubbles_radii_x, radii_x)\nocp.set_value(bubbles_radii_y, radii_y)\n\n\ntlength1 = len(midpoints_x)\ntunnel_s1 = np.linspace(0,1,tlength1) \n\nocp.subject_to(ocp.at_tf(s_path) == 1)\n\nspline_x = interpolant('x' ,'bspline',[tunnel_s1], 1 , {\"algorithm\": \"smooth_linear\",\"smooth_linear_frac\":0.49})\nspline_y = interpolant('y' ,'bspline',[tunnel_s1], 1 , {\"algorithm\": \"smooth_linear\",\"smooth_linear_frac\":0.49})\n# spline_rx = interpolant('rx','bspline',[tunnel_s1], 1 , {\"algorithm\": \"smooth_linear\",\"smooth_linear_frac\":0.49})\n# spline_ry = interpolant('ry','bspline',[tunnel_s1], 1 , {\"algorithm\": \"smooth_linear\",\"smooth_linear_frac\":0.49})\n\n\n\n# ------------------------ obstacle avoidance -------------------------------\n\n\n# ocp.subject_to( ( (x-spline_x(s_path, bubbles_x))**2/(spline_rx(s_path, bubbles_radii_x)**2 ) ) + ( (y-spline_y(s_path, 
bubbles_y))**2/(spline_ry(s_path, bubbles_radii_y)**2 ) ) <= 1 )\n# \n# ocp.subject_to( ( ( ( x - spline_x(s_path,bubbles_x) )**2 + ( y-spline_y(s_path,bubbles_y) )**2 ) <= (spline_rx(s_path,radii_x)**2 ) ) )\n\n\n\n# -------------------------------- Initial guess \n\n\n#path parameters\ns_guess = np.linspace(0,1,N)\n\nocp.set_initial(s_path, s_guess)\n\nsdot_guess = (s_guess[1]-s_guess[0])/dt\n\nocp.set_initial(sdot_path, sdot_guess)\n\nv_guess = np.ones(N)\nw_guess = np.ones(N)\n\nocp.set_initial(v , v_guess)\nocp.set_initial(w , w_guess)\n\nocp.set_initial(x, midpoints_x) \nocp.set_initial(y, midpoints_y) \n\n\n\n\n# -------------------------------------- Objective function \n\n#path following\n\nocp.add_objective( 1*ocp.integral((x - spline_x(s_path, bubbles_x))**2 + (y-spline_y(s_path,bubbles_y))**2)) \n\n# ocp.add_objective( - 100*ocp.at_tf(s_path) )\n \n\n# ----------------- Solver\n\noptions = {\"ipopt\": {\"print_level\": 0}}\noptions[\"expand\"] = False\noptions[\"print_time\"] = True\nocp.solver('ipopt', options)\n\n\n# Multiple shooting\nocp.method(MultipleShooting(N=N,M=2,intg='rk'))\n\n\n#-------------------------------- OCP Solution and Results \n\n\ntry:\n sol = ocp.solve()\nexcept:\n #failed_to_converge = True\n ocp.show_infeasibilities(1e-6)\n sol = ocp.non_converged_solution\n\n\n\n\n\n#-------------------------------------------------------------------------------#\n# MPC #\n#-------------------------------------------------------------------------------#\n\n\n# Get discretised dynamics as CasADi function to simulate the system\nSim_system_dyn = ocp._method.discrete_system(ocp)\n\n# Log data for post-processing \nt_sol, x_sol = sol.sample(x, grid='control')\nt_sol, y_sol = sol.sample(y, grid='control')\nt_sol, theta_sol = sol.sample(theta, grid='control')\nt_sol, s_path_sol = sol.sample(s_path, grid='control')\nt_sol, v_sol = sol.sample(v, grid='control')\nt_sol, w_sol = sol.sample(w, grid='control')\nt_sol, sdot_path_sol = sol.sample(sdot_path, grid='control')\n\n\nt_sol_ref, x_sol_ref = sol.sample(x, grid='integrator', refine = 10)\nt_sol_ref, y_sol_ref = sol.sample(y, grid='integrator', refine = 10)\n\n\n\n\n\n\n\n#--------------------- MPC \n\n#---------------- Initialize Logging variables\n\ntime_hist = np.zeros((Nsim+1, N+1))\nx_hist = np.zeros((Nsim+1, N+1))\ny_hist = np.zeros((Nsim+1, N+1))\ntheta_hist = np.zeros((Nsim+1, N+1))\ns_path_hist = np.zeros((Nsim+1, N+1))\ns_obs_hist = np.zeros((Nsim+1, N+1))\nv_hist = np.zeros((Nsim+1, N+1))\nw_hist = np.zeros((Nsim+1, N+1))\nsdot_path_hist = np.zeros((Nsim+1, N+1))\nsdot_obs_hist = np.zeros((Nsim+1, N+1))\n\n\n# for post processing\ntime_hist[0,:] = t_sol\nx_hist[0,:] = x_sol\ny_hist[0,:] = y_sol\ntheta_hist[0,:] = theta_sol\ns_path_hist[0,:] = s_path_sol\nv_hist[0,:] = v_sol\nw_hist[0,:] = w_sol\nsdot_path_hist[0,:] = sdot_path_sol\n\n\nclearance = 0.2\n\n \ni = 0\n \ntime = 0 \n \nfor i in range(Nsim):\n \n \n print(\"timestep\", i+1, \"of\", Nsim)\n \n \n #------------------- Update initial position ------------------------------\n \n # Combine control inputs\n current_U = vertcat(v_sol[0], w_sol[0] , sdot_path_sol[0])\n\n # Simulate dynamics (applying the first control input) and update the current state\n current_X = Sim_system_dyn(x0=current_X, u=current_U, T=t_sol[1]-t_sol[0])[\"xf\"]\n \n print( f' x: {current_X[0]}' )\n print( f' y: {current_X[1]}' )\n # print( f' theta: {current_X[2]}' )\n \n initial_pos_x = double(current_X[0])\n initial_pos_y = double(current_X[1])\n \n #------------ Update time spent to 
reach goal \n \n time = time + (t_sol[1]-t_sol[0])\n \n #------------------------- Generate grid and path -------------------------\n\n global_path_x, global_path_y, Bspline_obj = create_global_path_mpc(path_option,initial_pos_x,initial_pos_y,path_horizon)\n \n\n #----------------- get obstacles ------------------------------------------\n \n occupied_positions_x , occupied_positions_y = create_obstacles_mpc(obstacles_option,initial_pos_x,initial_pos_y,obs_horizon)\n \n #---------------- Creating the Bubbles-------------------------------------\n\n\n # midpoints_x, midpoints_y, radii_x, radii_y = generate_bubbles_mpc_v2(global_path_x, global_path_y,occupied_positions_x,occupied_positions_y)\n midpoints_x, midpoints_y, radii_x = generate_bubbles_mpc_v2(global_path_x, global_path_y,occupied_positions_x,occupied_positions_y)\n \n radii_y = radii_x\n \n #-------------------- \n \n \n \n while len(midpoints_x) < N:\n midpoints_x.append(global_path_x[-1])\n midpoints_y.append(global_path_y[-1])\n radii_x.append(radii_x[-1])\n radii_y.append(radii_y[-1])\n\n\n midpoints_x = midpoints_x[0:N]\n midpoints_y = midpoints_y[0:N]\n radii_x = radii_x[0:N]\n radii_y = radii_y[0:N]\n\n\n\n #------------------- Updating Tunnels ------------------------------------\n \n ocp.set_value(bubbles_x, midpoints_x)\n ocp.set_value(bubbles_y, midpoints_y)\n ocp.set_value(bubbles_radii_x, radii_x)\n ocp.set_value(bubbles_radii_y, radii_y)\n\n \n ocp.set_value( end_goal_x, midpoints_x[-1])\n ocp.set_value( end_goal_y, midpoints_y [-1])\n\n #initial guess\n ocp.set_initial(x, midpoints_x) \n ocp.set_initial(y, midpoints_y) \n \n ocp.set_initial(s_path, s_guess)\n ocp.set_initial(sdot_path, sdot_path_sol)\n \n ocp.set_initial(v , v_sol)\n ocp.set_initial(w , w_sol)\n\n \n #---------------- Simulate dynamic system --------------------------------\n \n\n \n error = sumsqr(current_X[0:2] - global_goal)\n if error < clearance: \n break #solution reached the global end goal \n \n # Set the parameter X0 to the new current_X\n ocp.set_value(X_0, current_X)\n \n\n\n #------------------------ Plot results every iteration\n\n ellipse_x = []\n ellipse_y = []\n for it in range(0, len(midpoints_x)): \n ellipse_x.append(midpoints_x[it] + radii_x[it]*cos(ts) )\n ellipse_y.append(midpoints_y[it] + radii_y[it]*sin(ts) ) \n \n\n plt.figure(dpi=300)\n plt.title('MPC') \n plt.plot(x_sol_ref, y_sol_ref, 'b-')\n plt.plot(ellipse_x,ellipse_y,'r.', markersize = 1)\n plt.plot(occupied_positions_x,occupied_positions_y,'co',markersize = 1.5)\n plt.plot(global_path_x, global_path_y, 'g--')\n # plt.plot(global_path_x[-1], global_path_y[-1], 'go')\n plt.plot(x_hist[0:i,0],y_hist[0:i,0], 'bo', markersize = 5)\n plt.plot(x_sol[0], y_sol[0], 'bo', markersize = 5)\n plt.xlim([xlim_min,xlim_max])\n plt.ylim([ylim_min,ylim_max])\n plt.pause(0.01)\n \n \n print(global_path_x)\n\n \n #------------------------- Solve the optimization problem\n\n try:\n sol = ocp.solve()\n except:\n #failed_to_converge = True\n ocp.show_infeasibilities(1e-6)\n sol = ocp.non_converged_solution\n\n #-------------------------- Log data for next iteration \n \n t_sol, x_sol = sol.sample(x, grid='control')\n t_sol, y_sol = sol.sample(y, grid='control')\n t_sol, theta_sol = sol.sample(theta, grid='control')\n t_sol, s_path_sol = sol.sample(s_path, grid='control')\n t_sol, v_sol = sol.sample(v, grid='control')\n t_sol, w_sol = sol.sample(w, grid='control')\n t_sol, sdot_path_sol = sol.sample(sdot_path, grid='control')\n \n t_sol_ref, x_sol_ref = sol.sample(x, grid='integrator', 
refine = 20)\n t_sol_ref, y_sol_ref = sol.sample(y, grid='integrator', refine = 20)\n\n # for post processing\n time_hist[i+1,:] = t_sol\n x_hist[i+1,:] = x_sol\n y_hist[i+1,:] = y_sol\n theta_hist[i+1,:] = theta_sol\n s_path_hist[i+1,:] = s_path_sol\n v_hist[i+1,:] = v_sol\n w_hist[i+1,:] = w_sol\n sdot_path_hist[i+1,:] = sdot_path_sol\n \n\n\n \n\n# -------------------------------------------\n# Plot the results\n# -------------------------------------------\n\n# #global path from initial to end point\n# global_path_x, global_path_y, Bspline_obj = create_global_path_mpc(path_option,0,0,1000,50)\n# occupied_positions_x , occupied_positions_y = create_obstacles_mpc(obstacles_option,initial_pos_x,initial_pos_y,100)\n# shifted_midpoints_x, shifted_midpoints_y, shifted_radii = generate_bubbles_mpc_v2(global_path_x, global_path_y,occupied_positions_x,occupied_positions_y) \n# tunnel_x, tunnel_y = create_tunnel(shifted_midpoints_x,shifted_midpoints_y,shifted_radii)\n \n\n# fig = plt.figure(dpi=300)\n# ax2 = plt.subplot(1, 1, 1)\n# ax2.plot(global_path[0], global_path[1], '--')\n# plt.plot(occupied_positions_x,occupied_positions_y,'ko',markersize = 2)\n# ax2.plot(x_hist[0,0], y_hist[0,0], 'b-')\n# ax2.set_xlabel('x pos [m]')\n# ax2.set_ylabel('y pos [m]')\n# ax2.set_title('Interations of OCP solutions')\n# ax2.plot(x_hist[0:i,0], y_hist[0:i,0], 'ro') \n# for k in range(i):\n# # ax2.plot(x_hist[k,:], y_hist[k,:], 'b-')\n# ax2.plot(x_hist[k,:], y_hist[k,:], 'g.') \n# plt.savefig('MPC solution with all ocp iterations', dpi=300)\n\n\n\n# plt.figure(dpi=300)\n# plt.plot(x_hist[0:i,0],y_hist[0:i,0], 'bo', markersize = 5)\n# plt.plot(x_hist[0:i,0],y_hist[0:i,0], 'b-', markersize = 5)\n# plt.plot(8.8,9,'bo', markersize = 10)\n# plt.plot(global_path_x, global_path_y, 'g--')\n# plt.plot(occupied_positions_x,occupied_positions_y,'ko',markersize = 2)\n# plt.plot(tunnel_x, tunnel_y, 'ro', markersize = 1)\n# plt.legend(['MPC solution','solution trajectory','end goal',' global path ', 'Obstacles', 'Feasible Bubbles'], loc = \"best\")\n# plt.title('MPC Solution')\n# plt.xlabel('x [m]')\n# plt.ylabel('y [m]')\n# plt.xlim([-0.5,12])\n# plt.ylim([-0.2,10.2])\n# plt.savefig('MPC solution', dpi=300)\n\n\n# plt.figure(dpi=300)\n# plt.plot(x_hist[0:i,0],y_hist[0:i,0], 'bo', markersize = 5)\n# plt.plot(x_hist[0:i,0],y_hist[0:i,0], 'b-', markersize = 5)\n# plt.plot(8.8,9,'bo', markersize = 10)\n# plt.plot(global_path_x, global_path_y, 'g--')\n# plt.plot(occupied_positions_x,occupied_positions_y,'ko',markersize = 2)\n# plt.plot(tunnel_x, tunnel_y, 'ro', markersize = 1)\n# plt.legend(['MPC solution','solution trajectory','end goal',' global path ', 'Obstacles', 'Feasible Bubbles'], loc = \"best\")\n# plt.title('MPC Solution')\n# plt.xlabel('x [m]')\n# plt.ylabel('y [m]')\n# plt.xlim([-0.5,12])\n# plt.ylim([-0.2,10.2])\n# plt.savefig('MPC solution controls', dpi=300)\n\n\n\n# print(\"MPC solution time: \", time)\n\n\n\n\n\n\n","sub_path":"March27/dev_ref_formulation_dyn/other/mpstest2.py","file_name":"mpstest2.py","file_ext":"py","file_size_in_byte":16380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"212613407","text":"from random import randint\n\ndef qs(tab, left, right):\n pivot = tab[(left + right) // 2]\n\n l = left\n r = right\n\n while l <= r:\n while tab[l] < pivot:\n l += 1\n\n while tab[r] > pivot:\n r -= 1\n\n if l <= r:\n tab[l], tab[r] = tab[r], tab[l]\n l += 1\n r -= 1\n\n if left < r:\n qs(tab, left, r)\n\n if right > l:\n qs(tab, l, right)\n\n\nt1 = 
[randint(-100, 100) for _ in range(1000)]\nt2 = t1.copy()\nqs(t2, 0, len(t2) - 1)\nprint(sorted(t1) == t2)\n","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"264868281","text":"# Cracking the Coding Interview: 1.3\n# Written by Josh Humphrey\n\nunique = \"abcdefgh\"\nrepeated = \"abcabc\"\n\ndef remove_duplicates(word):\n char_dict = dict()\n word_list = list(word)\n final_list = []\n\n for i in range(len(word_list)):\n key = word_list[i]\n if key not in char_dict:\n char_dict[key] = key\n final_list.append(key)\n\n result = ''.join(final_list)\n return result\n\n\n\n\n\nresult1 = remove_duplicates(unique)\nprint(\"Result 1: \" + str(result1))\n\nresult2 = remove_duplicates(repeated)\nprint(\"Result 2: \" + str(result2))\n","sub_path":"ch1/1_3.py","file_name":"1_3.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108181619","text":" \ndef arithmagic():\n \n step_1 = input(\"Enter a 3-digit number where the first and last \"\n \"digits differ by 2 or more: \")\n if int(step_1) < 100 or int(step_1) > 999:\n raise ValueError(\"not a 3-digit number\")\n \n if abs(int(step_1[0]) - int(step_1[-1])) < 2:\n raise ValueError(\"first and last digits must differ by 2 or more\")\n step_2 = input(\"Enter the reverse of the first number, obtained \"\n \"by reading it backwards: \")\n \n reverse = ''\n for i in step_1:\n reverse = ''.join((i, reverse))\n print(reverse)\n if step_2 != reverse:\n raise ValueError(\"not the reverse of the first number\")\n \n step_3 = input(\"Enter the positive difference of these numbers: \")\n \n diff = abs(int(step_1) - int(step_2))\n if int(step_3) != diff:\n raise ValueError(\"not the positive difference\")\n \n step_4 = input(\"Enter the reverse of the previous result: \")\n \n reverse_diff = ''\n for i in str(diff):\n reverse_diff = ''.join((i, reverse_diff))\n if step_4 != reverse_diff:\n raise ValueError(\"not the reverse of the previous number\")\n \n print(str(step_3), \"+\", str(step_4), \"= 1089 (ta-da!)\")\n \narithmagic()","sub_path":"Probsets/Comp/Probset1/arithmagic.py","file_name":"arithmagic.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626147410","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 22 20:02:40 2019\r\n\r\n@author: hp\r\n\"\"\"\r\nimport numpy as np #\r\nimport matplotlib.pyplot as plt # Plotting\r\nimport matplotlib.colors as colors # Coloring\r\nimport seaborn as sns # Statistical visualization\r\nimport pandas as pd\r\ndf=pd.read_excel('volcano_data.xlsx')\r\ntest=[]\r\nfor i in df.index.values:\r\n row_data=df.ix[i,['log2FoldChange','padj']].to_dict()\r\n test.append(row_data)\r\n#print(\"最终获取到的数据是:{0}\".format(test))\r\ndata1 = pd.DataFrame(test)\r\n# In[*]\r\nresult = data1\r\nresult.columns = ['fold','pvalue']\r\nresult['log(pvalue)'] = -np.log2(result['pvalue'])\r\n# In[*]\r\nresult['sig'] = 'normal'\r\nresult['size'] =np.abs(result['fold'])/10\r\nresult.loc[(result.fold> 1 )&(result.pvalue < 0.05),'sig'] = 'up'\r\nresult.loc[(result.fold< -1 )&(result.pvalue < 0.05),'sig'] = 'down'\r\n# In[*]\r\nax = sns.scatterplot(x=\"fold\", y=\"log(pvalue)\",\r\n hue='sig',\r\n hue_order = ('down','normal','up'),\r\n palette=(\"#377EB8\",\"green\",\"#E41A1C\"),\r\n 
data=result)\r\nax.set_ylabel('-log2(pvalue)',fontweight='bold')\r\nax.set_xlabel('FoldChange',fontweight='bold')\r\n","sub_path":"volcano/volcano_1.py","file_name":"volcano_1.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155404880","text":"import random\nimport string\nfrom django.shortcuts import render\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Item, OrderItem,Order,Address,Coupon\nfrom taggit.models import Tag\nfrom django.views.generic import ListView,DetailView,View,CreateView\nfrom django.shortcuts import get_object_or_404,redirect\nfrom django.db.models import Count\nfrom django.contrib.postgres.search import SearchVector,SearchQuery,SearchRank\nfrom django.contrib.postgres.aggregates import StringAgg\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.utils import timezone\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .forms import checkoutform,couponform\nfrom cities_light.models import Country,City\ndef create_ref_code():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\nclass products(ListView):\n model = Item\n template_name = 'ecommercewebapp/products.html'\ndef productDetail(request,slug):\n item=get_object_or_404(Item,slug=slug)\n item_tags_ids=item.tags.values_list('id',flat=True)\n similar_items=Item.objects.filter(tags__in=item_tags_ids).exclude(id=item.id)\n similar_items=similar_items.annotate(same_tag=Count('tags'))\n context={\n 'item':item,\n 'similar_items':similar_items,\n }\n return render(request,'ecommercewebapp/product.html',context)\ndef search_by_catagory(request,catagory):\n items=Item.objects.filter(catagory=catagory)\n context={\n 'items':items,\n 'catagory':catagory\n }\n return render(request,'ecommercewebapp/search_by_catagory.html',context)\ndef search_by_label(request,label):\n items=Item.objects.filter(label=label)\n context={\n 'items':items,\n 'label':label\n }\n return render(request,'ecommercewebapp/search_by_label.html',context)\ndef search_by_badge(request,badge):\n items=Item.objects.filter(badge=badge)\n context={\n 'badge':badge,\n 'items':items\n }\n return render(request,'ecommercewebapp/search_by_badge.html',context)\ndef item_list_view_by_tags(request,tag_slug):\n tag=get_object_or_404(Tag,slug=tag_slug)\n items=Item.objects.all()\n items=items.filter(tags__in=[tag])\n context={\n 'tag':tag,\n 'items':items\n }\n return render(request,'ecommercewebapp/items_list_by_tags.html',context)\ndef search(request):\n try:\n q=request.GET.get('q')\n except:\n q='Please enter the query'\n if q:\n query=SearchQuery(q)\n vector=SearchVector('catagory',weight='A')+SearchVector(StringAgg('tags__name',weight='B',delimiter=','))\n rate=SearchRank(vector,query)\n items=Item.objects.annotate(rank=rate).order_by('-rank')\n context={\n 'items':items,\n 'query':q\n }\n return render(request,'ecommercewebapp/search.html',context)\n else:\n items=Item.objects.all()\n context={\n 'query':q,\n 'items':items\n }\n return render(request,'ecommercewebapp/nosearch.html',context)\n\n@login_required\ndef add_to_cart(request,slug):\n item=get_object_or_404(Item,slug=slug)\n order_item,created=OrderItem.objects.get_or_create(\n item=item,\n user=request.user,\n ordered=False\n )\n order_qs=Order.objects.filter(user=request.user,Ordered=False)\n print(order_qs)\n if order_qs.exists():\n order=order_qs[0]\n if 
order.items.filter(item__slug=slug).exists():\n order_item.quantity+=1\n order_item.save()\n return redirect('ecommercewebapp:order_summary')\n else:\n order.items.add(order_item)\n return redirect('ecommercewebapp:product',slug=slug)\n else:\n start_date=timezone.now()\n order_date=timezone.now()\n order=Order.objects.create(user=request.user,order_date=order_date,start_date=start_date)\n order.items.add(order_item)\n return redirect('ecommercewebapp:order_summary')\ndef remove_single_item_from_cart(request,slug):\n item=get_object_or_404(Item,slug=slug)\n order_item = OrderItem.objects.filter(item=item, user=request.user, ordered=False)[0]\n order_qs=Order.objects.filter(user=request.user,Ordered=False)\n if order_qs.exists():\n order=order_qs[0]\n if order.items.filter(item__slug=slug).exists():\n if order_item.quantity>1:\n order_item.quantity-=1\n order_item.save()\n return redirect(\"ecommercewebapp:order_summary\")\n else:\n order_item.quantity=1\n return redirect(\"ecommercewebapp:order_summary\")\ndef remove_from_cart(request,slug):\n item=get_object_or_404(Item,slug=slug)\n order_qs=Order.objects.filter(user=request.user,Ordered=False)\ndef delete_item(request,slug):\n item=get_object_or_404(Item,slug=slug)\n order_item=OrderItem.objects.get(item=item,user=request.user,ordered=False)\n order=Order.objects.filter(user=request.user,Ordered=False)[0]\n order.items.remove(order_item)\n order_item.delete()\n if not order.items.all():\n return redirect(\"ecommercewebapp:home\")\n else:\n return redirect(\"ecommercewebapp:order_summary\")\n\n\nclass ordersummary(LoginRequiredMixin,View):\n def get(self,*args,**kwargs):\n try:\n order = Order.objects.get(user=self.request.user, Ordered=False)\n context={\n 'object':order\n }\n return render(self.request, 'ecommercewebapp/order_summary.html', context)\n except ObjectDoesNotExist:\n redirect('/')\nclass checkout(CreateView):\n def get(self,*args,**kwargs):\n try:\n order=Order.objects.get(user=self.request.user,Ordered=False)\n form=checkoutform()\n copform=couponform()\n context={\n 'form':form,\n 'order':order,\n 'couponform':copform\n }\n shipping_address_qs=Address.objects.filter(user=self.request.user,address_type='S',default=True)\n if shipping_address_qs.exists():\n context.update({\n 'default_shipping_address':shipping_address_qs\n })\n billing_address_qs=Address.objects.filter(user=self.request.user,address_type='B',default=True)\n if billing_address_qs.exists():\n context.update({\n\n 'default_billing_address':billing_address_qs[0]\n })\n return render(self.request, 'ecommercewebapp/checkout.html', context)\n except ObjectDoesNotExist:\n messages.info(self.request,'You donot have an active order')\n return redirect('ecommercewebapp:checkout')\n\n\n def post(self,*args,**kwargs):\n form=checkoutform(self.request.POST or None, self.request.FILES or None, )\n if form.is_valid():\n form.instance.user=self.request.user\n form.save(commit=True)\n return redirect('ecommercewebapp:home')\n else:\n print(form.errors)\n return render(self.request,'ecommercewebapp/checkout.html',{'errors': form.errors, 'form':form})\ndef load_cities(request):\n country_id=request.GET.get('country')\n cities=City.objects.filter(country=country_id)\n return render(request,'ecommercewebapp/city_dropdown.html', {'cities':cities})\ndef get_coupon(request,code):\n try:\n couponcode=Coupon.objects.get(code=code)\n return couponcode\n except ObjectDoesNotExist:\n return None\nclass addCouponView(View):\n def post(self,*args,**kwargs):\n 
form=couponform(self.request.POST or None)\n if form.is_valid():\n try:\n code=form.cleaned_data.get('code')\n order=Order.objects.get(user=self.request.user,Ordered=False)\n order.coupon=get_coupon(self.request,code)\n if order.coupon==None:\n messages.info(self.request,'this copoun does not exists')\n else:\n messages.info(self.request, 'code has been redeemed')\n order.save()\n return redirect('ecommercewebapp:checkout')\n except ObjectDoesNotExist:\n messages.info(self.request,'you do not have an active order')\n return redirect('ecommercewebapp:checkout')\n else:\n print(form.errors)\n return render(self.request, 'ecommercewebapp/checkout.html', {'errors': form.errors, 'form': form})\n\n","sub_path":"ecommerce/ecommercewebapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308222645","text":"import setuptools\nwith open(r'README.md', 'r', encoding='utf-8') as fh:\n\tlong_description = fh.read()\n\nsetuptools.setup(\n\tname='vka',\n\tversion='1.2.12',\n\tauthor='Major4ik',\n\tauthor_email='2772771882@mail.ru',\n\tdescription='module for the vk api wrapper',\n\tlong_description=long_description,\n\tlong_description_content_type='text/markdown',\n\turl='https://github.com/MrCreEper002/vka',\n\tinclude_package_data=True,\n\tpackages=['vka', 'vka/base', 'vka/base/buiders', 'vka/chatbot', 'vka/chatbot/wrappers'],\n\tinstall_requires=[\n\t\t'loguru==0.6.0',\n\t\t'bs4==0.0.1',\n\t],\n\tclassifiers=[\n\t\t'Programming Language :: Python :: 3.10',\n\t\t'Programming Language :: Python :: 3.11',\n\t\t\"License :: OSI Approved :: MIT License\",\n\t\t\"Operating System :: OS Independent\",\n\t],\n\tpython_requires='>=3.10',\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80555020","text":"# -*- coding: utf-8 -*-\nfrom model.Aluno import Aluno, Disciplina\n\n\nclass Repositorio:\n def __init__(self, teste=True):\n self.alunos = []\n if teste:\n self.base_de_testes()\n\n def base_de_testes(self):\n historicos = [\n [Disciplina(\"Fenômenos Térmicos\", \"BCJ0205-13\", 4, \"E\", \"02 de 2014\"),\n Disciplina(\"Fenômenos Mecânicos\", \"BCJ0208-13\", 5, \"E\", \"02 de 2014\"),\n Disciplina(\"Fenômenos Eletromagnéticos\", \"BCJ0209-13\", 5, \"E\", \"02 de 2014\"),\n Disciplina(\"Processamento da Informação\", \"BCM0505-13\", 5, \"E\", \"02 de 2014\")],\n\n [Disciplina(\"Fenômenos Térmicos\", \"BCJ0205-13\", 4, \"E\", \"02 de 2014\"),\n Disciplina(\"Fenômenos Mecânicos\", \"BCJ0208-13\", 5, \"E\", \"02 de 2014\"),\n Disciplina(\"Fenômenos Eletromagnéticos\", \"BCJ0209-13\", 5, \"E\", \"02 de 2014\"),\n Disciplina(\"Processamento da Informação\", \"BCM0505-13\", 5, \"E\", \"02 de 2014\")]\n\n ]\n\n self.alunos = [\n Aluno(\"André Rodrigues Barbosa\", \"UFABC\", \"11001814\", \"1\", historicos[0]),\n Aluno(\"João Da Silva\", \"UTI\", \"1\", \"2\", historicos[1])\n ]\n\n def inserir_aluno(self, aluno):\n print(\"Repo: Inserindo aluno\", aluno)\n if self._aluno_ja_existe(aluno):\n return False\n else:\n self.alunos.append(aluno)\n # print(self.alunos)\n return True\n\n def remover_aluno(self, aluno):\n if self._aluno_ja_existe(aluno):\n a_remover = self._get_aluno_by_rbuha(aluno.rbuha)\n self.alunos.remove(a_remover)\n\n def editar_aluno(self, aluno_anterior, novo_aluno):\n # print(\"Editando aluno. 
Antes %s\\n\\tDepois %s.\" %(aluno_anterior, novo_aluno))\n        a_editar = self._get_index_of_rbuha(aluno_anterior.rbuha)\n        # print(\"Index = %d\" % a_editar)\n        if a_editar != -1:\n            # print(\"Efetuando alteracao\")\n            self.alunos[a_editar] = novo_aluno\n            return True\n        else:\n            # print(\"Aluno não encontrado. Inserindo\")\n            return self.inserir_aluno(novo_aluno)\n\n    def obtem_aluno_por_rbuha(self, rbuha):\n        # print(\"Repo: obtem aluno por rbuha\")\n        return self._get_aluno_by_rbuha(rbuha)\n\n    def _aluno_ja_existe(self, aluno):\n        return any(map(lambda a: a.rbuha == aluno.rbuha, self.alunos))\n\n    def _get_aluno_by_rbuha(self, rbuha):\n        # print(\"Repo _get_aluno_by_rbuha(%s)\" %rbuha)\n        for a in self.alunos:\n            a_rbuha = a.rbuha\n            # print(\"checando %s\" %a_rbuha)\n            if a_rbuha == rbuha:\n                return a\n        # print(\"Todos os elementos verificados.\")\n        # print(self.alunos)\n        return None\n\n    def _get_index_of_rbuha(self, rbuha_desejado):\n        # print(\"\\t\\tget_index_of_rbuha. Desejado: {}\".format(rbuha_desejado))\n        for i in range(len(self.alunos)):\n            rbuha_aluno = self.alunos[i].rbuha\n            # print(\"\\t\\tI = %s, RBUHA=%s\" %(i, rbuha_aluno))\n            if rbuha_aluno == rbuha_desejado:\n                # print(\"\\t\\tencontrado\")\n                return i\n        # print(\"\\t*\\t*Não encontrado.*\\t*\")\n        return -1\n\n\nif __name__ == \"__main__\":\n    print(\"Iniciando testes.\")\n\n    repo = Repositorio()\n\n    aluno = Aluno(\"t1\", \"uf1\", \"0\", \"01\", None)\n    aluno2 = Aluno(\"t2\", \"x\", \"1\", \"02\", None)\n    aluno3 = Aluno(\"editado\", \"editado\", \"editado\", aluno.rbuha, None)\n\n    testeNone = repo.obtem_aluno_por_rbuha(aluno)\n    if not testeNone is None:\n        print(\"Falha 1. Obtido aluno!\", testeNone)\n\n    print(repo.alunos)\n\n    repo.inserir_aluno(aluno)\n    testeAluno = (repo.obtem_aluno_por_rbuha(aluno), repo.obtem_aluno_por_rbuha(aluno2))\n\n    if not ((testeAluno[0] == aluno) and (testeAluno[1] == None)):\n        print(\"Falha 2. testeAluno != aluno\")\n\n    print(repo.alunos)\n\n    print(\"\\n Insere aluno 2 \")\n    repo.inserir_aluno(aluno2)\n    print(repo.alunos)\n    print(\"\\nRemove aluno2\")\n    repo.remover_aluno(aluno2)\n    print(repo.alunos)\n\n    print(\"\\nEditar aluno1 para aluno3\")\n    repo.inserir_aluno(aluno2)\n    print(repo.alunos)\n    repo.editar_aluno(aluno, aluno3)\n    testeEdita = repo.obtem_aluno_por_rbuha(aluno3.rbuha)\n    print(\"Apos editar obtive o aluno: \", testeEdita)\n    if not (testeEdita.nome == aluno3.nome):\n        print(\"Falha 3. testeEdita não é uma edição de aluno.\", testeEdita)\n\n    print(repo.alunos)\n\n    repo.remover_aluno(aluno2)\n    testeAlunoRemovido = repo.obtem_aluno_por_rbuha(aluno2)\n    if testeAlunoRemovido is not None:\n        print(\"Falha 4. 
Obtido um aluno.\")\n\n    print(repo.alunos)\n\n    print(\"Sucesso\")\n","sub_path":"src/model/Repositorio.py","file_name":"Repositorio.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"140817229","text":"import os\nimport sys\nimport yaml\n\n\ndef _find_project_env(path):\n    env_file_name = '%s/%s_env.yml' % (os.path.dirname(path), os.path.basename(path))\n    env_file_name_dot = '%s/.%s_env.yml' % (os.path.dirname(path), os.path.basename(path))\n\n    if os.path.exists(env_file_name):\n        return path, env_file_name\n    elif os.path.exists(env_file_name_dot):\n        return path, env_file_name_dot\n    elif path != '/':\n        return _find_project_env(os.path.dirname(path))\n    else:\n        return None, None\n\n\ndef _format_val(val):\n    return str(val)\n\n\ndef load_env():\n    \"\"\"\n    Loads environment variables from project folder ../folder_name_env.yml file\n    \"\"\"\n    app_dir, _file = _find_project_env(os.getcwd())\n    if _file:\n        os.environ[\"CRATIS_APP_PATH\"] = app_dir\n\n        with open(_file) as f:\n            for key, val in yaml.safe_load(f).items():\n                val = _format_val(val)\n                os.environ[key] = val\n\n    sys.path += (os.environ.get('CRATIS_APP_PATH', '.'), )\n\n    if os.path.exists(os.environ.get('CRATIS_APP_PATH', '.') + os.sep + 'settings.py'):\n        os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"settings\")\n    else:\n        os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cratis.settings\")","sub_path":"cratis/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"82696214","text":"import json\nimport os\nimport shutil\nimport tempfile\nfrom unittest import TestCase\nfrom uuid import uuid4\n\nimport yaml\n\nimport galaxy.model\nfrom galaxy.tool_util.cwl import (\n    to_cwl_job,\n    tool_proxy as real_tool_proxy,\n    workflow_proxy,\n)\nfrom galaxy.tool_util.cwl.parser import (\n    _to_cwl_tool_object,\n    tool_proxy_from_persistent_representation,\n)\nfrom galaxy.tool_util.cwl.representation import USE_FIELD_TYPES\nfrom galaxy.tool_util.parser.cwl import CWL_DEFAULT_FILE_OUTPUT\nfrom galaxy.tool_util.parser.factory import get_tool_source\nfrom galaxy.tools.parameters import populate_state\nfrom galaxy.tools.parameters.wrapped import WrappedParameters\nfrom .. 
import tools_support\nfrom ..unittest_utils import galaxy_mock\n\n\nTESTS_DIRECTORY = os.path.dirname(__file__)\nCWL_TOOLS_DIRECTORY = os.path.abspath(os.path.join(TESTS_DIRECTORY, \"cwl_tools\"))\n\n\ndef tool_proxy(*args, **kwd):\n if 'uuid' not in kwd:\n kwd['uuid'] = str(uuid4())\n return real_tool_proxy(*args, **kwd)\n\n\ndef test_tool_proxy():\n \"\"\"Test that tool proxies load some valid tools correctly.\"\"\"\n tool_proxy(_cwl_tool_path(\"v1.0/v1.0/cat1-testcli.cwl\"))\n tool_proxy(_cwl_tool_path(\"v1.0/v1.0/cat3-tool.cwl\"))\n tool_proxy(_cwl_tool_path(\"v1.0/v1.0/env-tool1.cwl\"))\n tool_proxy(_cwl_tool_path(\"v1.0/v1.0/sorttool.cwl\"))\n tool_proxy(_cwl_tool_path(\"v1.0/v1.0/bwa-mem-tool.cwl\"))\n tool_proxy(_cwl_tool_path(\"v1.0/v1.0/parseInt-tool.cwl\"))\n\n\ndef test_tool_source_records():\n record_output_path = _cwl_tool_path(\"v1.0/v1.0/record-output.cwl\")\n tool_source = get_tool_source(record_output_path)\n inputs = _inputs(tool_source)\n assert len(inputs) == 1, inputs\n\n output_data, output_collections = _outputs(tool_source)\n assert len(output_data) == 1\n assert len(output_collections) == 1\n\n\ndef test_serialize_deserialize():\n path = _cwl_tool_path(\"v1.0/v1.0/cat5-tool.cwl\")\n tool = tool_proxy(path)\n expected_uuid = tool._uuid\n print(tool._tool.tool)\n rep = tool.to_persistent_representation()\n tool = tool_proxy_from_persistent_representation(rep)\n assert tool._uuid == expected_uuid\n print(tool)\n tool.job_proxy({\"file1\": \"/moo\"}, {})\n print(tool._tool.tool)\n\n with open(path) as f:\n tool_object = yaml.safe_load(f)\n tool_object = json.loads(json.dumps(tool_object))\n tool = _to_cwl_tool_object(tool_object=tool_object, uuid=expected_uuid)\n assert tool._uuid == expected_uuid\n\n\ndef test_job_proxy():\n bwa_parser = get_tool_source(_cwl_tool_path(\"v1.0/v1.0/bwa-mem-tool.cwl\"))\n bwa_inputs = {\n \"reference\": {\n \"class\": \"File\",\n \"location\": _cwl_tool_path(\"v1.0/v1.0/chr20.fa\"),\n \"size\": 123,\n \"checksum\": \"sha1$hash\"\n },\n \"reads\": [\n {\n \"class\": \"File\",\n \"location\": _cwl_tool_path(\"v1.0/v1.0/example_human_Illumina.pe_1.fastq\")\n },\n {\n \"class\": \"File\",\n \"location\": _cwl_tool_path(\"v1.0/v1.0/example_human_Illumina.pe_2.fastq\")\n }\n ],\n \"min_std_max_min\": [\n 1,\n 2,\n 3,\n 4\n ],\n \"minimum_seed_length\": 3\n }\n bwa_proxy = bwa_parser.tool_proxy\n bwa_id = bwa_parser.parse_id()\n\n job_proxy = bwa_proxy.job_proxy(\n bwa_inputs,\n {},\n \"/\",\n )\n\n cmd = job_proxy.command_line\n print(cmd)\n\n bind_parser = get_tool_source(_cwl_tool_path(\"v1.0/v1.0/binding-test.cwl\"))\n binding_proxy = bind_parser.tool_proxy\n binding_id = bind_parser.parse_id()\n\n job_proxy = binding_proxy.job_proxy(\n bwa_inputs,\n {},\n \"/\",\n )\n\n cmd = job_proxy.command_line\n assert bwa_id != binding_id, bwa_id\n\n\ndef test_cores_min():\n sort_parser = get_tool_source(_cwl_tool_path(\"v1.0/v1.0/sorttool.cwl\"))\n bwa_parser = get_tool_source(_cwl_tool_path(\"v1.0/v1.0/bwa-mem-tool.cwl\"))\n\n assert sort_parser.parse_cores_min() == 1\n assert bwa_parser.parse_cores_min() == 2\n\n\ndef test_success_codes():\n exit_success_parser = get_tool_source(_cwl_tool_path(\"v1.0/v1.0/exit-success.cwl\"))\n\n stdio, _ = exit_success_parser.parse_stdio()\n assert len(stdio) == 2\n stdio_0 = stdio[0]\n assert stdio_0.range_start == float(\"-inf\")\n assert stdio_0.range_end == 0\n\n stdio_1 = stdio[1]\n assert stdio_1.range_start == 2\n assert stdio_1.range_end == float(\"inf\")\n\n bwa_parser = 
get_tool_source(_cwl_tool_path(\"v1.0/v1.0/bwa-mem-tool.cwl\"))\n stdio, _ = bwa_parser.parse_stdio()\n\n assert len(stdio) == 2\n stdio_0 = stdio[0]\n assert stdio_0.range_start == float(\"-inf\")\n assert stdio_0.range_end == -1\n\n stdio_1 = stdio[1]\n assert stdio_1.range_start == 1\n assert stdio_1.range_end == float(\"inf\")\n\n\ndef test_serialize_deserialize_workflow_embed():\n # Test inherited hints and requirements from workflow -> tool\n # work here.\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines2-wf.cwl\"))\n step_proxies = proxy.step_proxies()\n tool_proxy = step_proxies[0].tool_proxy\n assert tool_proxy.requirements, tool_proxy.requirements\n\n\ndef test_reference_proxies():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines1-wf.cwl\"))\n proxy.tool_reference_proxies()\n\n\ndef test_subworkflow_parsing():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines10-wf.cwl\"))\n assert len(proxy.tool_reference_proxies()) == 2\n\n assert len(proxy.output_labels) == 1\n assert \"count_output\" in proxy.output_labels, proxy.output_labels\n\n galaxy_workflow_dict = proxy.to_dict()\n steps = galaxy_workflow_dict[\"steps\"]\n assert len(steps) == 2 # One input, one subworkflow\n\n subworkflow_step = steps[1]\n assert subworkflow_step[\"type\"] == \"subworkflow\"\n\n\ndef test_checks_is_a_tool():\n \"\"\"Test that tool proxy cannot be created for a workflow.\"\"\"\n exception = None\n try:\n tool_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines1-wf.cwl\"))\n except Exception as e:\n exception = e\n\n assert exception is not None\n assert \"CommandLineTool\" in str(exception), str(exception)\n\n\ndef test_workflow_of_files_proxy():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines1-wf.cwl\"))\n step_proxies = proxy.step_proxies()\n assert len(step_proxies) == 2\n\n galaxy_workflow_dict = proxy.to_dict()\n\n assert len(proxy.runnables) == 2\n\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n wc_step = galaxy_workflow_dict[\"steps\"][1]\n exp_step = galaxy_workflow_dict[\"steps\"][2]\n assert wc_step[\"input_connections\"]\n assert exp_step[\"input_connections\"]\n\n\ndef test_workflow_embedded_tools_proxy():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines2-wf.cwl\"))\n step_proxies = proxy.step_proxies()\n assert len(step_proxies) == 2\n print(step_proxies[1].requirements)\n print(step_proxies[1]._step.embedded_tool.requirements)\n galaxy_workflow_dict = proxy.to_dict()\n\n assert len(proxy.runnables) == 2\n print(proxy.runnables[1])\n\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n wc_step = galaxy_workflow_dict[\"steps\"][1]\n exp_step = galaxy_workflow_dict[\"steps\"][2]\n assert wc_step[\"input_connections\"]\n assert exp_step[\"input_connections\"]\n\n\ndef test_workflow_scatter():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines3-wf.cwl\"))\n\n step_proxies = proxy.step_proxies()\n assert len(step_proxies) == 1\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 2\n\n # TODO: For CWL - deactivate implicit scattering Galaxy does\n # and force annotation in the workflow of scattering? 
Maybe?\n wc_step = galaxy_workflow_dict[\"steps\"][1]\n assert wc_step[\"input_connections\"]\n\n assert \"inputs\" in wc_step\n wc_inputs = wc_step[\"inputs\"]\n assert len(wc_inputs) == 1\n file_input = wc_inputs[0]\n assert file_input[\"scatter_type\"] == \"dotproduct\", wc_step\n\n assert len(wc_step[\"workflow_outputs\"]) == 1\n\n\ndef test_workflow_outputs_of_inputs():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/any-type-compat.cwl\"))\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n input_step = galaxy_workflow_dict[\"steps\"][0]\n\n assert len(input_step[\"workflow_outputs\"]) == 1\n\n\ndef test_workflow_scatter_multiple_input():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines4-wf.cwl\"))\n\n step_proxies = proxy.step_proxies()\n assert len(step_proxies) == 1\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n\n\ndef test_workflow_multiple_input_merge_flattened():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines7-wf.cwl\"))\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n\n tool_step = galaxy_workflow_dict[\"steps\"][2]\n assert \"inputs\" in tool_step\n inputs = tool_step[\"inputs\"]\n assert len(inputs) == 1\n input = inputs[0]\n assert input[\"merge_type\"] == \"merge_flattened\"\n\n\ndef test_workflow_step_value_from():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/step-valuefrom-wf.cwl\"))\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n\n print(galaxy_workflow_dict[\"steps\"])\n tool_step = [s for s in galaxy_workflow_dict[\"steps\"].values() if s[\"label\"] == \"step1\"][0]\n assert \"inputs\" in tool_step\n inputs = tool_step[\"inputs\"]\n assert len(inputs) == 1\n assert \"value_from\" in inputs[0], inputs\n\n\ndef test_workflow_input_without_source():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/step-valuefrom3-wf.cwl\"))\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n\n tool_step = galaxy_workflow_dict[\"steps\"][2]\n assert \"inputs\" in tool_step\n inputs = tool_step[\"inputs\"]\n assert len(inputs) == 3, inputs\n assert inputs[2].get(\"value_from\")\n\n\ndef test_workflow_input_default():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/pass-unconnected.cwl\"))\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n\n tool_step = galaxy_workflow_dict[\"steps\"][2]\n\n assert \"inputs\" in tool_step\n inputs = tool_step[\"inputs\"]\n assert len(inputs) == 2, inputs\n assert inputs[1]\n\n\ndef test_search_workflow():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/search.cwl#main\"))\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 5\n\n\ndef test_workflow_simple_optional_input():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0_custom/int-opt-io-wf.cwl\"))\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 2\n\n input_step = galaxy_workflow_dict[\"steps\"][0]\n assert input_step['type'] == \"parameter_input\", input_step\n assert input_step['tool_state']['parameter_type'] == \"field\", input_step\n\n\ndef test_boolean_defaults():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.2/tests/conditionals/cond-wf-002_nojs.cwl\"))\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n bool_input = galaxy_workflow_dict[\"steps\"][0]\n assert 
bool_input[\"label\"] == \"test\", bool_input\n bool_tool_state = bool_input[\"tool_state\"]\n assert bool_tool_state[\"optional\"]\n assert bool_tool_state[\"default\"][\"value\"] is False\n\n\ndef test_workflow_file_optional_input():\n proxy = workflow_proxy(_cwl_tool_path(\"v1.0/v1.0/count-lines11-wf.cwl\"))\n\n galaxy_workflow_dict = proxy.to_dict()\n assert len(galaxy_workflow_dict[\"steps\"]) == 3\n\n input_step = galaxy_workflow_dict[\"steps\"][0]\n # TODO: make this File? - implemented in Galaxy now\n assert input_step['type'] == \"parameter_input\", input_step\n assert input_step['tool_state']['optional'] is True, input_step\n\n\ndef test_load_proxy_simple():\n cat3 = _cwl_tool_path(\"v1.0/v1.0/cat3-tool.cwl\")\n tool_source = get_tool_source(cat3)\n\n # Behavior was changed - too verbose?\n # description = tool_source.parse_description()\n # assert description == \"Print the contents of a file to stdout using 'cat' running in a docker container.\", description\n\n input_sources = _inputs(tool_source)\n assert len(input_sources) == 1\n\n input_source = input_sources[0]\n assert input_source.parse_help() == \"The file that will be copied using 'cat'\"\n assert input_source.parse_label() == \"Input File\"\n\n outputs, output_collections = tool_source.parse_outputs(None)\n assert len(outputs) == 1\n\n output1 = outputs['output_file']\n assert output1.format == CWL_DEFAULT_FILE_OUTPUT, output1.format # Have Galaxy auto-detect\n\n _, containers = tool_source.parse_requirements_and_containers()\n assert len(containers) == 1\n\n\ndef test_cwl_strict_parsing():\n md5sum_non_strict_path = _cwl_tool_path(\"v1.0_custom/md5sum_non_strict.cwl\")\n threw_exception = False\n try:\n get_tool_source(md5sum_non_strict_path).tool_proxy\n except Exception:\n threw_exception = True\n\n assert threw_exception\n get_tool_source(md5sum_non_strict_path, strict_cwl_validation=False).tool_proxy\n\n\ndef test_load_proxy_bwa_mem():\n bwa_mem = _cwl_tool_path(\"v1.0/v1.0/bwa-mem-tool.cwl\")\n tool_source = get_tool_source(bwa_mem)\n tool_id = tool_source.parse_id()\n assert tool_id == \"bwa-mem-tool.cwl\", tool_id\n _inputs(tool_source)\n # TODO: test repeat generated...\n\n\ndef test_representation_id():\n cat3 = _cwl_tool_path(\"v1.0/v1.0/cat3-tool.cwl\")\n with open(cat3) as f:\n representation = yaml.safe_load(f)\n representation[\"id\"] = \"my-cool-id\"\n\n uuid = str(uuid4())\n proxy = tool_proxy(tool_object=representation, tool_directory=\"/\", uuid=uuid)\n tool_id = proxy.galaxy_id()\n # assert tool_id == \"my-cool-id\", tool_id\n assert tool_id == uuid, tool_id\n id_proxy = tool_proxy_from_persistent_representation(proxy.to_persistent_representation())\n tool_id = id_proxy.galaxy_id()\n assert tool_id == uuid, tool_id\n assert proxy._uuid == id_proxy._uuid\n # assert tool_id == \"my-cool-id\", tool_id\n\n\ndef test_env_tool1():\n env_tool1 = _cwl_tool_path(\"v1.0/v1.0/env-tool1.cwl\")\n tool_source = get_tool_source(env_tool1)\n _inputs(tool_source)\n\n\ndef test_wc2_tool():\n env_tool1 = _cwl_tool_path(\"v1.0/v1.0/wc2-tool.cwl\")\n tool_source = get_tool_source(env_tool1)\n _inputs(tool_source)\n datasets, collections = _outputs(tool_source)\n assert len(datasets) == 1, datasets\n output = datasets[\"output\"]\n assert output.format == \"expression.json\", output.format\n\n\ndef test_optional_output():\n optional_output2_tool1 = _cwl_tool_path(\"v1.0/v1.0/optional-output.cwl\")\n tool_source = get_tool_source(optional_output2_tool1)\n datasets, collections = _outputs(tool_source)\n assert 
len(datasets) == 2, datasets\n output = datasets[\"optional_file\"]\n assert output.format == CWL_DEFAULT_FILE_OUTPUT, output.format\n\n\ndef test_sorttool():\n env_tool1 = _cwl_tool_path(\"v1.0/v1.0/sorttool.cwl\")\n tool_source = get_tool_source(env_tool1)\n\n assert tool_source.parse_id() == \"sorttool.cwl\"\n\n inputs = _inputs(tool_source)\n assert len(inputs) == 2\n bool_input = inputs[0]\n file_input = inputs[1]\n assert bool_input.parse_input_type() == \"param\"\n assert bool_input.get(\"type\") == \"boolean\"\n\n assert file_input.parse_input_type() == \"param\"\n assert file_input.get(\"type\") == \"data\", file_input.get(\"type\")\n\n output_data, output_collections = _outputs(tool_source)\n assert len(output_data) == 1\n assert len(output_collections) == 0\n\n\ndef test_scheadef_tool():\n tool_path = _cwl_tool_path(\"v1.0/v1.0/schemadef-tool.cwl\")\n tool_source = get_tool_source(tool_path)\n _inputs(tool_source)\n\n\ndef test_params_tool():\n tool_path = _cwl_tool_path(\"v1.0/v1.0/params.cwl\")\n tool_source = get_tool_source(tool_path)\n _inputs(tool_source)\n\n\ndef test_cat1():\n cat1_tool = _cwl_tool_path(\"v1.0/v1.0/cat1-testcli.cwl\")\n tool_source = get_tool_source(cat1_tool)\n inputs = _inputs(tool_source)\n\n assert len(inputs) == 3, inputs\n file_input = inputs[0]\n\n assert file_input.parse_input_type() == \"param\"\n assert file_input.get(\"type\") == \"data\", file_input.get(\"type\")\n\n # User needs to specify if want to select boolean or not.\n if not USE_FIELD_TYPES:\n null_or_bool_input = inputs[1]\n assert null_or_bool_input.parse_input_type() == \"conditional\"\n else:\n field_input = inputs[1]\n assert field_input.parse_input_type() == \"param\"\n assert field_input.get(\"type\") == \"field\", field_input.get(\"type\")\n\n output_data, output_collections = _outputs(tool_source)\n assert len(output_data) == 1\n assert len(output_collections) == 1\n\n\ndef test_tool_reload():\n cat1_tool = _cwl_tool_path(\"v1.0/v1.0/cat1-testcli.cwl\")\n tool_source = get_tool_source(cat1_tool)\n _inputs(tool_source)\n\n # Test reloading - had a regression where this broke down.\n cat1_tool_again = _cwl_tool_path(\"v1.0/v1.0/cat1-testcli.cwl\")\n tool_source = get_tool_source(cat1_tool_again)\n _inputs(tool_source)\n\n\nclass CwlToolObjectTestCase(TestCase, tools_support.UsesApp, tools_support.UsesTools):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n self.app = galaxy_mock.MockApp()\n self.history = galaxy.model.History()\n self.trans = galaxy_mock.MockTrans(history=self.history)\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_default_data_inputs(self):\n self._init_tool(tool_path=_cwl_tool_path(\"v1.0/v1.0/default_path.cwl\"))\n print(\"TOOL IS %s\" % self.tool)\n hda = self._new_hda()\n errors = {}\n cwl_inputs = {\n \"file1\": {\"src\": \"hda\", \"id\": self.app.security.encode_id(hda.id)}\n }\n inputs = self.tool.inputs_from_dict({\"inputs\": cwl_inputs, \"inputs_representation\": \"cwl\"})\n print(inputs)\n print(\"pre-populated state is %s\" % inputs)\n populated_state = {}\n populate_state(self.trans, self.tool.inputs, inputs, populated_state, errors)\n print(\"populated state is %s\" % inputs)\n wrapped_params = WrappedParameters(self.trans, self.tool, populated_state)\n input_json = to_cwl_job(self.tool, wrapped_params.params, self.test_directory)\n print(inputs)\n print(\"to_cwl_job is %s\" % input_json)\n # assert False\n\n def _new_hda(self):\n hda = galaxy.model.HistoryDatasetAssociation(history=self.history)\n 
hda.visible = True\n        hda.dataset = galaxy.model.Dataset()\n        self.trans.model.context.add(hda)\n        self.trans.model.context.flush()\n        return hda\n\n\ndef _outputs(tool_source):\n    return tool_source.parse_outputs(object())\n\n\ndef get_cwl_tool_source(path):\n    path = _cwl_tool_path(path)\n    return get_tool_source(path)\n\n\ndef _inputs(tool_source=None, path=None):\n    if tool_source is None:\n        path = _cwl_tool_path(path)\n        tool_source = get_tool_source(path)\n\n    input_pages = tool_source.parse_input_pages()\n    assert input_pages.inputs_defined\n    page_sources = input_pages.page_sources\n    assert len(page_sources) == 1\n    page_source = page_sources[0]\n    input_sources = page_source.parse_input_sources()\n    return input_sources\n\n\ndef _cwl_tool_path(path):\n    return os.path.join(CWL_TOOLS_DIRECTORY, path)\n","sub_path":"test/unit/tools/test_cwl.py","file_name":"test_cwl.py","file_ext":"py","file_size_in_byte":19436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"303849312","text":"## Get Characters that appear at least two times\n# Developer: nickumia\n# 04.06.2021\n\nimport sys\nimport time\n\nVERBOSE = False\n\ndef count_chars(test_string):\n    '''\n    Frequency counter function\n    '''\n    # Start time\n    start = time.time()\n\n    # Dictionary to hold frequency count\n    # Only the alphabet characters are supported\n    char_frequency = {key:0 for key in \n        [chr(i) for i in range(ord('a'), ord('z')+1)]}\n\n    for char in test_string:\n        try:\n            char_frequency[char.lower()] += 1\n        except KeyError:\n            print(\"Input string contains non-supported characters. \" + \n                \"Try again with only characters in the range of [a-z].\")\n\n    char_al_two = []\n    for char in char_frequency:\n        if char_frequency[char] >= 2:\n            char_al_two.append(char)\n            if VERBOSE:\n                print(\"Character \\'%s\\' occurred %d times\" % \n                    (char, char_frequency[char]))\n\n    # End time\n    end = time.time()\n    return char_al_two, (end-start)\n\n\n\n## MAIN\nif __name__ == \"__main__\":\n    # Input string to test\n    input_string = input(\"Enter a character string: \")\n    result,duration = count_chars(input_string)\n    print(result)\n    print(sys.getsizeof(result), \"bytes\")\n    print(duration, \"secs\")\n\n    # -> Result list\n    # -> Memory usage in terms of bytes\n    # The memory usage is the size of the dictionary and\n    # (optionally) the size of the result list\n    # -> Time\n    # The time cost is time it costs to visit every character\n    # in the input string. 
O(n) where n = number of characters.","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231101954","text":"#-*- coding: utf-8 -*-\nfrom random import randint, choice\nfrom functions import Function, OneVariableFunction, TwoVariableFunction\nfrom sets import one_variable_function_set, two_variable_function_set, get_terminal\nimport tree_creation\nfrom copy import deepcopy\nfrom constants import MAX_DEPTH, VARIABLE_SET\n\n\nclass Tree(object):\n\n    def __init__(self, tree_struct, tree_map):\n        self.init_tree = list(tree_struct)\n        self.tree_map = dict(tree_map)\n        self.tree_del = deepcopy(tree_struct)\n        self.childs_counter = 1 #счетчик количества вершин у поддерева\n        self.children = None\n        self.vertex_counter = 0\n        self.index = 0\n        self.fitness = 100\n\n    @staticmethod\n    def tree_map_to_string(tree_map):\n        string = \"\"\n        for key in tree_map.keys():\n            if isinstance(tree_map[key], Function):\n                string += str(key) + \": \" + tree_map[key].function_name\n            else:\n                string += str(key) + \": \" + str(tree_map[key])\n            string += \"; \"\n        return string\n\n    def check_var_existence(self):\n        for key in self.tree_map.keys():\n            if self.tree_map[key] in VARIABLE_SET:\n                return True\n        return False\n\n    def find_children(self):\n        self.children.tree_map[0] = self.tree_map[self.index]\n        self._find_children([self.index], [])\n\n    def _find_children(self, queue, visited):\n        if len(queue) == 0:\n            return\n        vertex = queue[0]\n        del queue[0]\n        self.children.init_tree.append([])\n        if vertex < len(self.init_tree):\n            for v in self.init_tree[vertex]:\n                if v not in visited:\n                    queue.append(v)\n                    visited.append(v)\n                    self.children.init_tree[len(self.children.init_tree)-1].append(self.childs_counter)\n                    self.children.tree_map[self.childs_counter] = self.tree_map[v]\n                    self.childs_counter += 1\n        self._find_children(queue, visited)\n\n    def delete_subtree(self):\n        deleted_vertexes = []\n        if self.index < len(self.init_tree):\n            for v in self.init_tree[self.index]:\n                if v < len(self.init_tree):\n                    deleted_vertexes = self._calculate_deleted_vertexes(v, self.init_tree[v], deleted_vertexes)\n        deleted_vertexes.sort(reverse=True)\n        for ver in deleted_vertexes:\n            del self.tree_del[ver]\n        self.tree_del[self.index] = []\n        self._recalculate_vertexes_number()\n        self._delete_last_empty()\n\n    def _calculate_deleted_vertexes(self, index, children, deleting):\n        if index < len(self.init_tree) and index not in deleting:\n            deleting.append(index)\n            for v in children:\n                if v < len(self.init_tree):\n                    self._calculate_deleted_vertexes(v, self.init_tree[v], deleting)\n        return deleting\n\n    def _recalculate_vertexes_number(self):\n        temp_map = dict(self.tree_map)\n        self.tree_map = {0: temp_map[0]}\n        count = 1\n        i = 0\n        while i < len(self.tree_del):\n            j = 0\n            while j < len(self.tree_del[i]):\n                self.tree_map[count] = temp_map[self.tree_del[i][j]]\n                self.tree_del[i][j] = count\n                self.vertex_counter = count\n                count += 1\n                j += 1\n            i += 1\n\n    def _delete_last_empty(self):\n        i = len(self.tree_del) - 1\n        while i >= 0:\n            if self.tree_del[i] == []:\n                del self.tree_del[i]\n            else:\n                return\n            i -= 1\n\n    @staticmethod\n    def _get_last_vertex_number(tree):\n        last_vertex_number = 0\n        for vertex in tree.tree_map.keys():\n            if vertex > last_vertex_number:\n                last_vertex_number = vertex\n        return last_vertex_number\n\n    def _add_parent_residue(self, new_tree, current_position, saved_vertexes, next_vertex_number):\n        while current_position < 
len(self.tree_del):\n new_tree.init_tree.append([])\n last_position = len(new_tree.init_tree)-1\n if last_position in saved_vertexes:\n saved_vertexes.remove(last_position)\n continue\n for v in self.tree_del[current_position]:\n new_tree.init_tree[len(new_tree.init_tree) - 1].append(next_vertex_number)\n new_tree.tree_map[next_vertex_number] = self.tree_map[v]\n next_vertex_number += 1\n current_position += 1\n return new_tree\n\n def _copy_constant_tree_part(self):\n new_tree = Tree([], {})\n new_tree.tree_map[0] = self.tree_map[0]\n n = self.index\n for i in range(0, n):\n new_tree.init_tree.append([])\n if i < len(self.tree_del):\n for j in range(0, len(self.tree_del[i])):\n new_tree.init_tree[len(new_tree.init_tree) - 1].append(self.tree_del[i][j])\n new_tree.tree_map[self.tree_del[i][j]] = self.tree_map[self.tree_del[i][j]]\n return new_tree\n\n def _add_child(self, new_tree, next_vertex_number):\n index = self.index\n saved_vertexes = [] #новые добавленные вершины текущего шага\n saved_vertexes2 = []\n last_adding_parent_vertex = len(self.tree_del)\n new_vertex_counter = 0\n for vs in self.children.init_tree:\n new_tree.init_tree.append([])\n if self.children.init_tree.index(vs) > 0:\n new_vertex_counter += 1\n if len(new_tree.init_tree)-1 in saved_vertexes2:\n saved_vertexes2.remove(len(new_tree.init_tree)-1)\n if len(vs) > 0:\n for v in vs:\n new_tree.init_tree[len(new_tree.init_tree)-1].append(next_vertex_number)\n new_tree.tree_map[next_vertex_number] = self.children.tree_map[v]\n saved_vertexes.append(next_vertex_number)\n next_vertex_number += 1\n if len(saved_vertexes2) > 0:\n if self.children.init_tree.index(vs) == len(self.children.init_tree)-1:\n new_tree.init_tree.append([])\n saved_vertexes2.remove(len(new_tree.init_tree)-1)\n saved_vertexes2 = list(saved_vertexes)\n continue\n i = index + 1\n if len(saved_vertexes) == 0:\n m = i\n else:\n m = min(saved_vertexes) - new_vertex_counter\n while i < m and i <= self.vertex_counter:\n new_tree.init_tree.append([])\n if i < len(self.tree_del):\n for k in self.tree_del[i]:\n new_tree.init_tree[len(new_tree.init_tree) - 1].append(next_vertex_number)\n new_tree.tree_map[next_vertex_number] = self.tree_map[k]\n next_vertex_number += 1\n last_adding_parent_vertex = i\n i += 1\n index = m - 1\n saved_vertexes2 = list(saved_vertexes)\n saved_vertexes = []\n last_adding_parent_vertex += 1\n new_tree = deepcopy(self._add_parent_residue(new_tree, last_adding_parent_vertex, saved_vertexes2, next_vertex_number))\n return new_tree\n\n def add_child_to_tree(self):\n \"\"\"Добавляет дерево-потомок к текущему дереву-родителю.\n Корень дерева-потомка добавляется на заданный индекс текущего дерева.\n :return Новое дерево с присоединенным поддеревом\"\"\"\n index = self.index\n new_tree = deepcopy(self._copy_constant_tree_part())\n next_vertex_number = Tree._get_last_vertex_number(new_tree) + 1\n new_tree.tree_map[index] = self.children.tree_map[0]\n\n self._add_child(new_tree, next_vertex_number)\n\n if Tree.get_depth(new_tree.init_tree, 0, 0, [], []) > MAX_DEPTH:\n return Tree([], {})\n return new_tree\n\n def mutate_from_func_to_func(self):\n if len(self.init_tree) <= 1:\n return\n position = randint(0, len(self.init_tree)-1)\n if isinstance(self.tree_map[position], OneVariableFunction):\n self.tree_map[position] = choice(one_variable_function_set)\n elif isinstance(self.tree_map[position], TwoVariableFunction):\n self.tree_map[position] = choice(two_variable_function_set)\n else:\n self.mutate_from_func_to_func()\n\n def 
mutate_from_term_to_term(self):\n try:\n if len(self.init_tree) <= 1:\n return\n if len(self.tree_map.keys()) <= 2:\n position = 1\n else:\n position = randint(1, len(self.tree_map.keys())-1)\n if isinstance(self.tree_map[position], Function):\n self.mutate_from_term_to_term()\n else:\n self.tree_map[position] = get_terminal()\n except RuntimeError:\n return\n\n def mutate_from_func_to_term(self):\n if len(self.init_tree) <= 1:\n return\n else:\n position = randint(0, len(self.init_tree)-1)\n self.index = position\n self.tree_del = deepcopy(self.init_tree)\n self.delete_subtree()\n self.tree_map[position] = get_terminal()\n self.init_tree = self.tree_del\n\n def mutate_from_term_to_func(self):\n try:\n if len(self.tree_map.keys()) < 2:\n return\n if len(self.tree_map.keys()) < 3:\n position = 1\n else:\n position = randint(1, len(self.tree_map.keys())-1)\n if isinstance(self.tree_map[position], Function):\n self.mutate_from_term_to_func()\n else:\n depth = Tree.get_depth(self.init_tree, 0, 0, [], [])\n if depth < MAX_DEPTH:\n creator = tree_creation.TreeCreator(3)\n creator.create(False)\n child = creator.tree\n self.children = child\n\n self.index = position\n self.tree_del = deepcopy(self.init_tree)\n self._recalculate_vertexes_number()\n\n t = self.add_child_to_tree()\n self.tree_del = deepcopy(t.tree_del)\n self.init_tree = list(t.init_tree)\n self.tree_map = t.tree_map\n else:\n return\n except RuntimeError:\n return\n except:\n print(\"mutate_from_term_to_func EXCEPT\")\n print(self.tree_map)\n print(position)\n exit()\n\n @staticmethod\n def get_depth(tree_struct, current_depth, index, visited, depths):\n visited.append(index)\n if index < len(tree_struct):\n for vertex in tree_struct[index]:\n if not (vertex in visited):\n Tree.get_depth(tree_struct, current_depth+1, vertex, visited, depths)\n if len(depths) == 0:\n return 0\n return max(depths)\n else:\n depths.append(current_depth)\n return\n\n def get_coefficients(self):\n coefficients = []\n for keys in self.tree_map.keys():\n if not isinstance(self.tree_map[keys], Function):\n if not self.tree_map[keys] in VARIABLE_SET:\n coefficients.append(self.tree_map[keys])\n return coefficients\n","sub_path":"IO/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":11668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"2706271","text":"\"\"\"\nThis module implements the real valued (non convolutionary) network for the conv dataset\n\"\"\"\nimport torch\n\nfrom discrete_nn.models.base_model import BaseModel\nfrom discrete_nn.layers.Flatten import Flatten\n\n\nclass ConvReal(BaseModel):\n \"\"\"\n Real valued convolutional network\n \"\"\"\n\n def __init__(self):\n super().__init__()\n # defining all the network's layers\n self.netlayers = torch.nn.Sequential(\n\n torch.nn.Conv2d(1, 32, 5, stride=1, bias=False, padding=2),\n torch.nn.BatchNorm2d(32, track_running_stats=False),\n torch.nn.Tanh(),\n torch.nn.MaxPool2d(2),\n #\n torch.nn.Dropout(p=0.2),\n torch.nn.Conv2d(32, 64, 5, stride=1, bias=False, padding=2),\n torch.nn.BatchNorm2d(64, track_running_stats=False),\n torch.nn.Tanh(),\n torch.nn.MaxPool2d(2),\n #\n Flatten(),\n torch.nn.Dropout(p=0.3),\n torch.nn.Linear(3136, 512, bias=False),\n torch.nn.BatchNorm1d(512, track_running_stats=False),\n torch.nn.Tanh(),\n #\n torch.nn.Linear(512, 10)\n )\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-3, weight_decay=1e-6)\n self.loss_funct = torch.nn.CrossEntropyLoss()\n\n def forward(self, x):\n # takes image 
vector\n return self.netlayers(x)\n\n def set_net_parameters(self, param_dict):\n new_state_dict = {\n \"netlayers.0.weight\": param_dict[\"L1_Conv_W\"],\n \"netlayers.1.weight\": param_dict[\"L1_BatchNorm_W\"],\n \"netlayers.1.bias\": param_dict[\"L1_BatchNorm_b\"],\n \"netlayers.5.weight\": param_dict[\"L2_Conv_W\"],\n \"netlayers.6.weight\": param_dict[\"L2_BatchNorm_W\"],\n \"netlayers.6.bias\": param_dict[\"L2_BatchNorm_b\"],\n \"netlayers.11.weight\": param_dict[\"L3_Linear_W\"],\n \"netlayers.12.weight\": param_dict[\"L3_BatchNorm_W\"],\n \"netlayers.12.bias\": param_dict[\"L3_BatchNorm_b\"],\n \"netlayers.14.weight\": param_dict[\"L4_Linear_W\"],\n \"netlayers.14.bias\": param_dict[\"L4_Linear_b\"].reshape(-1)\n }\n self.load_state_dict(new_state_dict, strict=False)\n\n def get_net_parameters(self):\n \"\"\":returns a dictionary with the trainable parameters\"\"\"\n internal_dict = {name: value for name, value in self.named_parameters()}\n\n repr_dict = dict()\n repr_dict[\"L1_Conv_W\"] = internal_dict[\"netlayers.0.weight\"]\n repr_dict[\"L1_BatchNorm_W\"] = internal_dict[\"netlayers.1.weight\"]\n repr_dict[\"L1_BatchNorm_b\"] = internal_dict[\"netlayers.1.bias\"]\n repr_dict[\"L2_Conv_W\"] = internal_dict[\"netlayers.5.weight\"]\n repr_dict[\"L2_BatchNorm_W\"] = internal_dict[\"netlayers.6.weight\"]\n repr_dict[\"L2_BatchNorm_b\"] = internal_dict[\"netlayers.6.bias\"]\n repr_dict[\"L3_Linear_W\"] = internal_dict[\"netlayers.11.weight\"]\n repr_dict[\"L3_BatchNorm_W\"] = internal_dict[\"netlayers.12.weight\"]\n repr_dict[\"L3_BatchNorm_b\"] = internal_dict[\"netlayers.12.bias\"]\n repr_dict[\"L4_Linear_W\"] = internal_dict[\"netlayers.14.weight\"]\n repr_dict[\"L4_Linear_b\"] = internal_dict[\"netlayers.14.bias\"].reshape(-1, 1)\n return repr_dict\n\n","sub_path":"discrete_nn/models/conv/real.py","file_name":"real.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636437782","text":"import pandas as pd\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n\ndef load_data(file):\n return pd.read_excel(file)\n\n\ndef remove_tags(text):\n new = ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \", text).split())\n return new\n\n\ndef remove_stopwords(text):\n stop_words = set(stopwords.words('english') + ['RT'])\n word_tokens = word_tokenize(text)\n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n sent = ' '.join(filtered_sentence)\n return sent\n\n\ndef cleaning(data):\n\n # removed tags\n data['Text'] = data['Text'].astype(str).apply(remove_tags)\n\n # remove stopwords\n data['Text'] = data['Text'].astype(str).apply(remove_stopwords)\n\n data = data.loc[data[\"Text\"] != '']\n\n return data\n\n\nif __name__ == '__main__':\n inp = 'Combined_all_datasets2.xlsx'\n out = 'Combined_all_datasets_cleaned2.xlsx'\n\n data = load_data(inp).dropna()\n data = cleaning(data)\n data.to_excel(out)\n\n","sub_path":"sma/utils/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"576216413","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n\"\"\"\ntest_amin\n\"\"\"\n\nfrom apibase import randtool\nimport paddle\nimport pytest\nimport numpy as np\n\n\ndef 
cal_amin(x, jud=True, axis=None, keepdim=False):\n    \"\"\"\n    calculate amin\n    \"\"\"\n    res = np.amin(x, axis=axis, keepdims=keepdim)\n    if axis is None and keepdim is False:\n        res = np.array([res])\n    grad = np.where(x == res, 1, 0)\n    grad = grad / np.sum(grad, axis=axis, keepdims=True)\n    return (res, grad) if jud else res\n\n\ndef cal_api(x, dtype=\"float32\", axis=None, keepdim=False):\n    \"\"\"\n    calculate api\n    \"\"\"\n    x = x.astype(dtype)\n    dynamic_grad = None\n    xp = paddle.to_tensor(x, stop_gradient=False, dtype=dtype)\n    dynamic_res = paddle.amin(xp, axis=axis, keepdim=keepdim)\n    if dtype == \"float32\" or dtype == \"float64\":\n        dynamic_res.backward()\n        dynamic_grad = xp.grad.numpy()\n\n    paddle.enable_static()\n    main_program, startup_program = paddle.static.Program(), paddle.static.Program()\n    with paddle.utils.unique_name.guard():\n        with paddle.static.program_guard(main_program=main_program, startup_program=startup_program):\n            data0 = paddle.static.data(name=\"s0\", shape=x.shape, dtype=dtype)\n            feed = {\"s0\": x}\n            out = paddle.amin(data0, axis=axis, keepdim=keepdim)\n            if dtype == \"float32\" or dtype == \"float64\":\n                data0.stop_gradient = False\n                grad = paddle.static.gradients(out, data0)\n            exe = paddle.static.Executor()\n            exe.run(startup_program)\n            if dtype == \"float32\" or dtype == \"float64\":\n                static_res = exe.run(main_program, feed=feed, fetch_list=[out] + grad)\n            else:\n                static_res = exe.run(main_program, feed=feed, fetch_list=[out])\n    paddle.disable_static()\n    assert np.allclose(dynamic_res.numpy(), static_res[0])\n    if dtype == \"float32\" or dtype == \"float64\":\n        assert np.allclose(dynamic_grad, static_res[1])\n    return dynamic_res.numpy(), dynamic_grad\n\n\n@pytest.mark.api_base_amin_vartype\ndef test_amin_base():\n    \"\"\"\n    base\n    \"\"\"\n    x = randtool(\"int\", -4, 40, (10,))\n    types0 = [\"int32\", \"int64\"]\n    types1 = [\"float32\", \"float64\"]\n    for dtype in types0:\n        np_res = cal_amin(x, jud=False)\n        res, grad = cal_api(x, dtype=dtype)\n        assert np.allclose(np_res, res)\n\n    for dtype in types1:\n        np_res, np_grad = cal_amin(x)\n        res, grad = cal_api(x, dtype=dtype)\n        assert np.allclose(np_res, res)\n        assert np.allclose(np_grad, grad)\n\n\n@pytest.mark.api_base_amin_parameters\ndef test_amin0():\n    \"\"\"\n    default\n    \"\"\"\n    x = randtool(\"float\", -4, 14, (2, 4))\n    np_res, np_grad = cal_amin(x)\n    res, grad = cal_api(x)\n    assert np.allclose(np_res, res)\n    assert np.allclose(grad, np_grad)\n\n\n@pytest.mark.api_base_amin_parameters\ndef test_amin1():\n    \"\"\"\n    x: 3d-tensor\n    \"\"\"\n    x = randtool(\"float\", -4, 14, (2, 5, 4))\n    np_res, np_grad = cal_amin(x)\n    res, grad = cal_api(x)\n    assert np.allclose(np_res, res)\n    assert np.allclose(grad, np_grad)\n\n\n@pytest.mark.api_base_amin_parameters\ndef test_amin2():\n    \"\"\"\n    x: 4d-tensor\n    \"\"\"\n    x = randtool(\"float\", -40, 140, (3, 2, 5, 4))\n    np_res, np_grad = cal_amin(x)\n    res, grad = cal_api(x)\n    assert np.allclose(np_res, res)\n    assert np.allclose(grad, np_grad)\n\n\n@pytest.mark.api_base_amin_parameters\ndef test_amin3():\n    \"\"\"\n    x: 4d-tensor\n    axis=-1\n    keepdim=True\n    \"\"\"\n    x = randtool(\"float\", -40, 140, (3, 2, 4, 5))\n    np_res, np_grad = cal_amin(x, axis=-1, keepdim=True)\n    res, grad = cal_api(x, axis=-1, keepdim=True)\n    assert np.allclose(np_res, res)\n    assert np.allclose(grad, np_grad)\n\n\n@pytest.mark.api_base_amin_parameters\ndef test_amin4():\n    \"\"\"\n    x: 4d-tensor\n    axis=2\n    keepdim=True\n    \"\"\"\n    x = randtool(\"float\", -40, 140, (3, 2, 5, 4))\n    np_res, np_grad = cal_amin(x, axis=2, 
keepdim=True)\n res, grad = cal_api(x, axis=2, keepdim=True)\n assert np.allclose(np_res, res)\n assert np.allclose(grad, np_grad)\n\n\n@pytest.mark.api_base_amin_parameters\ndef test_amin5():\n \"\"\"\n x: 4d-tensor\n axis=(1, 2)\n keepdim=True\n \"\"\"\n x = randtool(\"float\", -40, 140, (3, 2, 5, 4))\n np_res, np_grad = cal_amin(x, axis=(1, 2), keepdim=True)\n res, grad = cal_api(x, axis=(1, 2), keepdim=True)\n assert np.allclose(np_res, res)\n assert np.allclose(grad, np_grad)\n","sub_path":"test/tools/tool-test-op-correctness/cases/test_amin.py","file_name":"test_amin.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"535243338","text":"def parse_home():\n\n\n a = {'uno':' unob', 'dos': 'dosa', 'tres': 'tresb','cuatro':'cuatroa'}\n\n\n b = {'unoc':' unod', 'dos': 'dosb', 'tresc': 'tresd','cuatro':'cuatrob'}\n\n c={}\n \n \n n=0\n for i,j in a.items():\n for k,m in b.items():\n\n if i == k:\n aux={j:m}\n c.update(aux)\n\n print(a) \n print(b) \n print(c) \n\ndef run():\n parse_home()\n\nif __name__ == '__main__':\n run()","sub_path":"pruebas.py","file_name":"pruebas.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615769451","text":"# -*- coding: utf-8 -*-\n#scrapy crawl zhihulive -o file2.csv -t csv\nfrom scrapy.exceptions import CloseSpider\nimport scrapy\nfrom zhihulives_crawler.items import LiveItem #, SpeakerItem\nimport json\nimport time\nfrom flatten_json import flatten\nimport s3fs\nimport pandas as pd\nimport logging\nfrom scrapy.spidermiddlewares.httperror import HttpError\n\nlogger = logging.getLogger('httperror')\n\ndef to_flat(dic, mask):\n new = {k:v for k,v in dic.items() if k in mask}\n return new\n\ndef get_time(t):\n return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))\n\ndef get_duration(l_json):\n if l_json['live_type']=='audio':\n return round(l_json['audio_duration']/60000,2)\n if l_json['live_type']=='video':\n try:\n return round(l_json['video']['formal_video_tape']['duration']/60,2)\n except:\n return l_json['duration']/60\n\ndef get_cospeakers(l_json):\n try:\n cos = [i['member']['id'] for i in l_json['cospeakers']]\n except:\n cos = None\n return str(cos)\n\n\n \n \n \n \n\n#all_speaker_ids = []\n#hot_liveids = {}\n\nclass ZhihuliveSpider(scrapy.Spider):\n #df = pd.read_csv('https://s3.amazonaws.com/zhihubow/live_monitor/zhihulive.csv')\n df_format = pd.read_csv('https://s3.amazonaws.com/zhihubow/format/zhihulive_format.csv')\n handle_httpstatus_list = [500]\n name = 'zhihulive_ab'\n allowed_domains = ['zhihu.com']\n #live_api_url = 'https://api.zhihu.com/lives?limit=10&offset=0'\n \n start_urls = ['https://api.zhihu.com/lives?limit=10&offset=0']\n timestamp = get_time(time.time())\n '''\n def start_requests(self):\n \n starturl_7 = 'https://api.zhihu.com/lives/hot/weekly?limit=1000&offset=0'\n starturl_30 = 'https://api.zhihu.com/lives/hot/monthly?limit=1000&offset=0'\n\n starturl = 'https://api.zhihu.com/lives?limit=10&offset=0'\n \n yield scrapy.Request(url=starturl_7, callback=self.parse_7, priority=1) \n yield scrapy.Request(url=starturl_30, callback=self.parse_30, priority=1) \n yield scrapy.Request(url=starturl, callback=self.parse)'''\n \n def parse(self, response):\n #self.log('visited: '+response.url)\n #pass\n try:\n text = json.loads(response.text)\n live_ids = [i['id'] for i in text['data']]\n live_url = 'https://api.zhihu.com/lives/{}'\n 
'''\n global all_speaker_ids\n speaker_ids = [i['speaker']['member']['id'] for i in text['data']]\n all_speaker_ids = all_speaker_ids + speaker_ids\n speaker_url = 'https://api.zhihu.com/people/{}'\n '''\n #parse live\n for live_id in live_ids:\n if response.status == 500:\n pass\n else:\n url = live_url.format(live_id)\n yield scrapy.Request(url = url, callback = self.parse_live, dont_filter=True)\n \n #check if the page ended, if not end scroll, if end remove duplicates of speakers and parse speakers\n if (text['paging']['is_end'] == False):\n new_url = text['paging']['next']\n yield scrapy.Request(url = new_url, callback = self.parse)\n \n except json.decoder.JSONDecodeError:\n new_url = response.url.split('offset=')[0]+'offset='+str(int(response.url.split('offset=')[1])+10)\n yield scrapy.Request(url = new_url, callback = self.parse)\n '''\n else:\n all_speaker_ids = list(set(all_speaker_ids))\n for speaker_id in all_speaker_ids:\n url = speaker_url.format(speaker_id)\n yield scrapy.Request(url = url, callback = self.parse_speaker)'''\n \n \n \n def parse_live(self, response):\n #global timestamp\n \n #live_url = 'https://api.zhihu.com/lives/{}'\n l_json = json.loads(response.text)\n #live = LiveItem()\n mask = ['access_new_live'\n , 'alert'\n , 'anonymous_purchase'\n , 'artwork'\n , 'attachment_count'\n #,'audio'\n , 'audio_duration'\n , 'audition_message_count'\n , 'buyable'\n , 'can_delete_message'\n , 'can_speak'\n , 'chapter_description'\n , 'chapter_status'\n , 'conv_id'\n #, 'created_at'\n , 'description'\n #, 'description_html'\n , 'duration'\n #, 'ends_at'\n , 'ends_in'\n , 'fee'\n , 'feedback_score'\n #, 'folding_message'\n , 'has_audition'\n , 'has_authenticated'\n , 'has_feedback'\n , 'has_shutdown_permission'\n , 'id'\n , 'in_promotion'\n , 'income'\n , 'is_admin'\n , 'is_anonymous'\n , 'is_audition_open'\n , 'is_commercial'\n , 'is_liked'\n , 'is_live_owner'\n , 'is_muted'\n , 'is_public'\n , 'is_refundable'\n , 'is_subscriber'\n , 'liked'\n , 'liked_num'\n , 'listened_progress'\n , 'live_subscription'\n , 'live_type'\n , 'outline'\n , 'product_list'\n , 'purchasable'\n , 'recommendation'\n , 'reply_message_count'\n , 'review'\n , 'role'\n , 'seats'\n , 'sku_id'\n , 'source'\n , 'speaker_audio_message_count'\n , 'speaker_message_count'\n #, 'starts_at'\n , 'status'\n , 'subject'\n , 'tags'\n , 'type'\n , 'vip_only']\n \n live = flatten(to_flat(l_json, mask))\n \n live['created_at'] = get_time(l_json['created_at'])\n live['starts_at'] = get_time(l_json['starts_at'])\n live['ends_at'] = get_time(l_json['ends_at'])\n if 'fee_end_time' in live:\n live['fee_end_time'] = get_time(l_json['fee']['end_time'])\n else:\n live['fee_end_time'] = None\n \n live['speaker_id'] = l_json['speaker']['member']['id']\n live['cospeakers'] = get_cospeakers(l_json)\n live['audio_video_duration'] = get_duration(l_json)\n \n live['crawl_time'] = self.timestamp\n #live['is_hot_monthly'] = get_hot(live_id ,hot_liveids, 'monthly')\n #live['is_hot_weekly'] = get_hot(live_id ,hot_liveids, 'weekly')\n '''\n if l_json['id'] in hot30: \n live['is_hot_monthly'] = 1\n else:\n live['is_hot_monthly'] = 0\n \n if l_json['id'] in hot7:\n live['is_hot_weekly'] = 1\n else:\n live['is_hot_weekly'] = 0'''\n\n live = LiveItem(live)\n \n yield live\n df_append = pd.DataFrame([live], columns=live.keys())\n self.df_format = self.df_format.append(df_append).drop_duplicates()\n\n def errback_httpbin(self, failure):\n if failure.check(HttpError):\n new_url = 
failure.request.url.split('offset=')[0]+'offset='+str(int(failure.request.url.split('offset=')[1])+10)\n yield scrapy.Request(url = new_url, callback = self.parse)\n\n\n def closed( self, reason ):\n bytes_to_write = self.df_format.to_csv(None,index = False,header = False).encode()\n # credentials are resolved from the environment / boto config rather than being hardcoded in the source\n fs = s3fs.S3FileSystem()\n fs.ls('zhihubow')\n with fs.open('s3://zhihubow/live_monitor/zhihulive.csv', 'ab') as f:\n f.write(bytes_to_write)\n\n\n speaker_id_basic = pd.read_csv('https://s3.amazonaws.com/zhihubow/speaker_monitor/speaker_id.csv')\n speaker_id_append = pd.DataFrame(self.df_format['speaker_id'].drop_duplicates(), columns=['speaker_id'])\n speaker_id_new = speaker_id_basic.append(speaker_id_append).drop_duplicates()\n\n bytes_to_write = speaker_id_new.to_csv(None,index = False).encode()\n fs = s3fs.S3FileSystem()\n fs.ls('zhihubow')\n with fs.open('s3://zhihubow/speaker_monitor/speaker_id.csv', 'wb') as f:\n f.write(bytes_to_write)\n\n\n\n \n\n\n\n","sub_path":"zhihulives_crawler/spiders/zhihulive.py","file_name":"zhihulive.py","file_ext":"py","file_size_in_byte":8245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"448123463","text":"#OpenCV 4.20\n#Intel Realsense 2\n\n#dlib cnn face detector is too slow\n#the 128D descriptor from an image is the same as the one from its aligned version\n#the 128D descriptor from the 5-point landmarks is not equal to the one from the 68-point landmarks\n\n#opencv dnn face detection + dlib face landmarks is less stable\n#than dlib face detector + dlib face landmarks\n\nimport pyrealsense2 as rs\nimport numpy as np\nimport cv2\n\nimport dlib\n\nfrom openFaceAlign import AlignDlib\n\nfrom sklearn.svm import SVC\nimport pandas as pd\n\nhjd = np.loadtxt(\"/home/hjd/hjd.txt\")\nliudehua = np.loadtxt(\"/home/hjd/liudehua.txt\")\nwangyuan = np.loadtxt(\"/home/hjd/wangyuan.txt\")\nzhangxueyou = np.loadtxt(\"/home/hjd/zhangxueyou.txt\")\nzhouhuajian = np.loadtxt(\"/home/hjd/zhouhuajian.txt\")\n\ndfhjd = pd.DataFrame(hjd)\ndfhjd[\"Label\"] = \"hjd\"\ndfliudehua = pd.DataFrame(liudehua)\ndfliudehua[\"Label\"] = \"Unknown\"\ndfwangyuan = pd.DataFrame(wangyuan)\ndfwangyuan[\"Label\"] = \"Unknown\"\ndfzhangxueyou = pd.DataFrame(zhangxueyou)\ndfzhangxueyou[\"Label\"] = \"Unknown\"\ndfzhouhuajian = pd.DataFrame(zhouhuajian)\ndfzhouhuajian[\"Label\"] = \"Unknown\"\n\ntotalpd = pd.concat([dfhjd, dfliudehua, dfwangyuan, dfzhangxueyou, dfzhouhuajian], ignore_index=True)\n\nrecognizer = SVC(C=1E6, kernel=\"rbf\", probability=True)\nrecognizer.fit(totalpd.iloc[:, 0:-1], totalpd.iloc[:, -1])\n\nCAMWIDTH = 1280\nCAMHEIGHT = 720\n\ndetector = dlib.get_frontal_face_detector()\npredictor68 = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\npredictor = dlib.shape_predictor(\"shape_predictor_5_face_landmarks.dat\")\ndlibcnnfacedetector = dlib.cnn_face_detection_model_v1(\"mmod_human_face_detector.dat\")\ndlibfacerec = dlib.face_recognition_model_v1(\"dlib_face_recognition_resnet_model_v1.dat\")\n\ncaffeNet = cv2.dnn.readNetFromCaffe(\"deploy.prototxt\", \"res10_300x300_ssd_iter_140000_fp16.caffemodel\")\ntfNet = cv2.dnn.readNetFromTensorflow(\"opencv_face_detector_uint8.pb\", \"opencv_face_detector.pbtxt\")\ntorchNet = cv2.dnn.readNetFromTorch(\"nn4.small2.v1.t7\")\n\n# Configure depth and color streams\npipeline = rs.pipeline()\nconfig = rs.config()\n\n#the depth stream is enabled but its data is not used for recognition\nconfig.enable_stream(rs.stream.depth, CAMWIDTH, CAMHEIGHT, rs.format.z16, 
30)\nconfig.enable_stream(rs.stream.color, CAMWIDTH, CAMHEIGHT, rs.format.bgr8, 30)\n\n#calculate display width and height\nDSPWIDTH = 720\nDSPHEIGHT = 720\n\n# Start streaming\npipeline.start(config)\n\nwin = dlib.image_window()\n\nalign = AlignDlib()\n\ntry:\n while True:\n\n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame:\n continue\n\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_frame.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n #flip image\n color_image = cv2.flip(color_image, 1)\n\n color_image = color_image[0:720, 280:280+DSPWIDTH]\n\n rgb_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)\n\n #print(crop_img.shape, crop_img.dtype)\n\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n\n # Stack both images horizontally\n #images = np.vstack((color_image, depth_colormap))\n\n blob = cv2.dnn.blobFromImage(cv2.resize(color_image, (300, 300)), 1.0,\n (300, 300), (104.0, 177.0, 123.0), False, False)\n\n #print(blob.shape, blob.dtype)\n\n tfNet.setInput(blob)\n detections = tfNet.forward()\n #print(detections.shape, detections.dtype, detections.shape[2])\n\n images = color_image\n #images = cv2.resize(color_image, (300, 300))\n\n w = DSPWIDTH\n h = DSPHEIGHT\n\n win.clear_overlay()\n win.set_image(rgb_image)\n\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with the\n # prediction\n confidence = detections[0, 0, i, 2]\n # filter out weak detections by ensuring the `confidence` is\n # greater than the minimum confidence\n if confidence > 0.9:\n #print(detections[0, 0, i])\n # compute the (x, y)-coordinates of the bounding box for the\n # object\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # tmpshape = predictor(rgb_image, dlib.rectangle(startX, startY, endX, endY))\n # # print(shape)\n # # win.add_overlay(shape)\n # tmpface_descriptor = dlibfacerec.compute_face_descriptor(rgb_image, tmpshape)\n # tmparr = np.asarray(tmpface_descriptor)\n # with open(\"/home/hjd/opencvFace2.txt\", \"ab\") as f:\n # np.savetxt(f, tmparr, newline=\" \")\n # f.write(b\"\\n\")\n\n # aligedFace = align.align(96,rgb_image,align.findLandmarks(tmpshape))\n # #calculate 128d with openface\n # #color_image[startY:endY, startX:endX]\n # faceBlob = cv2.dnn.blobFromImage(aligedFace, 1.0 / 255,\n # (96, 96),\n # (0, 0, 0), swapRB=False, crop=False)\n # torchNet.setInput(faceBlob)\n # vec = torchNet.forward()\n # vec = vec.flatten()\n # #print(vec.shape, vec.dtype)\n # with open(\"/home/hjd/openfacealign.txt\", \"ab\") as f:\n # np.savetxt(f, vec, newline=\" \")\n # f.write(b\"\\n\")\n\n # face_chip = dlib.get_face_chip(rgb_image, tmpshape)\n # faceBlob = cv2.dnn.blobFromImage(face_chip, 1.0 / 255,\n # (96, 96),\n # (0, 0, 0), swapRB=False, crop=False)\n # torchNet.setInput(faceBlob)\n # vec = torchNet.forward()\n # vec = vec.flatten()\n # # print(vec.shape, vec.dtype)\n # with open(\"/home/hjd/dlibalign.txt\", \"ab\") as f:\n # np.savetxt(f, vec, newline=\" \")\n # f.write(b\"\\n\")\n\n # draw the bounding box of the face along with the associated\n # probability\n text = \"{:.2f}%\".format(confidence * 100)\n y = startY - 10 if startY - 10 > 10 else 
startY + 10\n cv2.rectangle(images, (startX, startY), (endX, endY),\n (0, 0, 255), 2)\n cv2.putText(images, text, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\n resized = cv2.resize(rgb_image, (180, 180))\n dets = detector(resized, 1)\n #dets = dlibcnnfacedetector(resized)\n for k, d in enumerate(dets):\n # print(\"Detection {}: Left: {} Top: {} Right: {} Bottom: {}\".format(\n # k, d.left(), d.top(), d.right(), d.bottom()))\n\n # Get the landmarks/parts for the face in box d.\n shape = predictor(rgb_image, dlib.rectangle(\n d.left()*4, d.top()*4, d.right()*4, d.bottom()*4))\n\n # shape2 = predictor68(rgb_image, dlib.rectangle(\n # d.left() * 4, d.top() * 4, d.right() * 4, d.bottom() * 4))\n\n # print(\"Part 0: {}, Part 1: {} ...\".format(shape.part(0),\n # shape.part(1)))\n\n # Draw the face landmarks on the screen.\n win.add_overlay(shape)\n win.add_overlay(dlib.rectangle(d.left()*4, d.top()*4, d.right()*4, d.bottom()*4))\n\n face_descriptor = dlibfacerec.compute_face_descriptor(rgb_image, shape)\n tmparr1 = np.asarray(face_descriptor)\n\n print(recognizer.predict(tmparr1.reshape(1, 128)))\n\n # with open(\"/home/hjd/dlibFace.txt\", \"ab\") as f:\n # np.savetxt(f, tmparr1, newline=\" \")\n # f.write(b\"\\n\")\n\n # face_descriptor2 = dlibfacerec.compute_face_descriptor(color_image, shape2)\n # tmparr2 = np.asarray(face_descriptor2)\n #\n # print(np.linalg.norm(tmparr1 - tmparr2))\n\n #aligned face descriptor equals top\n # face_chip = dlib.get_face_chip(rgb_image, shape)\n # face_descriptor2 = dlibfacerec.compute_face_descriptor(face_chip)\n # tmparr2 = np.asarray(face_descriptor2)\n # print(np.linalg.norm(tmparr1 - tmparr2)) == 0\n\n # Show images\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', images)\n\n key = cv2.waitKey(1)\n # Press esc or 'q' to close the image window\n if key & 0xFF == ord('q') or key == 27:\n cv2.destroyAllWindows()\n break\n\nfinally:\n\n # Stop streaming\n pipeline.stop()","sub_path":"testSVC.py","file_name":"testSVC.py","file_ext":"py","file_size_in_byte":9061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"278070132","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, models, fields, registry\nimport logging\nimport json\nimport ast\n\n_logger = logging.getLogger(__name__)\n\nclass pos_cache_database(models.Model):\n _name = \"pos.cache.database\"\n\n res_id = fields.Char('Id')\n res_model = fields.Char('Model')\n data = fields.Text('Data')\n\n @api.model\n def load_master_data(self, condition={}):\n database = {}\n domain = []\n for model, load in condition.items():\n if load == True:\n database[model] = []\n domain.append(model)\n caches = self.search_read(\n [('res_model', 'in', tuple(domain))], ['res_id', 'res_model', 'data', 'write_date'])\n if caches:\n for cache in caches:\n vals = json.loads(cache['data'])\n vals['write_date'] = cache['write_date']\n database[cache['res_model']].append(vals)\n return database\n else:\n return False\n\n @api.model\n def get_datas_updated(self, write_date):\n _logger.info('BEGIN get_datas_update')\n _logger.info(write_date)\n datas = []\n caches = self.search_read(\n [('write_date', '>', write_date)], ['res_id', 'res_model', 'data', 'write_date'])\n for cache in caches:\n val = json.loads(cache['data'])\n val['write_date'] = cache['write_date']\n val['model'] = cache['res_model']\n datas.append(val)\n return datas\n\n\n @api.multi\n def get_fields_by_model(self, model_name):\n params = 
self.env['ir.config_parameter'].sudo().get_param(model_name)\n if not params:\n list_fields = self.env[model_name].fields_get()\n fields_load = []\n for k, v in list_fields.items():\n if v['type'] not in ['one2many', 'binary']:\n fields_load.append(k)\n return fields_load\n else:\n params = ast.literal_eval(params)\n return params.get('fields', [])\n\n @api.multi\n def get_domain_by_model(self, model_name):\n params = self.env['ir.config_parameter'].sudo().get_param(model_name)\n if not params:\n return []\n else:\n params = ast.literal_eval(params)\n return params.get('domain', [])\n\n @api.model\n def insert_data(self, datas, model, first_install=False):\n if first_install:\n for data in datas:\n self.create({\n 'res_id': str(data['id']),\n 'res_model': model,\n 'data': json.dumps(data)\n })\n else:\n for data in datas:\n last_caches = self.search([('res_id', '=', str(data['id'])), ('res_model', '=', model)])\n if last_caches:\n last_caches.write({\n 'data': json.dumps(data)\n })\n else:\n self.create({\n 'res_id': str(data['id']),\n 'res_model': model,\n 'data': json.dumps(data)\n })\n return True\n\n def sync_to_pos(self, data):\n if data['model'] == 'product.product':\n data['price'] = data['list_price']\n sessions = self.env['pos.session'].sudo().search([\n ('state', '=', 'opened')\n ])\n self.insert_data([data], data['model'])\n for session in sessions:\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.sync.data', session.user_id.id), data]])\n return True\n\n @api.model\n def remove_record(self, data):\n self.search([('res_id', '=', str(data['id'])), ('res_model', '=', data['model'])]).unlink()\n sessions = self.env['pos.session'].sudo().search([\n ('state', '=', 'opened')\n ])\n data['deleted'] = True\n for session in sessions:\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.sync.data', session.user_id.id), data]])\n return True\n\n @api.model\n def save_parameter_models_load(self, model_datas):\n # when pos loaded, all params (model name, fields list, context dict will store to backend\n # and use for cache data loaded to pos\n set_param = self.env['ir.config_parameter'].sudo().set_param\n for model_name, value in model_datas.items():\n set_param(model_name, value)\n return True\n\n def get_all_stock_by_stock_id(self, stock_location_id, stock_location_ids=None):\n if stock_location_ids is None:\n stock_location_ids = []\n stock_location_ids = stock_location_ids\n stock_location_ids.append(stock_location_id)\n stock = self.env['stock.location'].browse(stock_location_id)\n for stock in stock.child_ids:\n stock_location_ids.append(stock.id)\n if stock.child_ids:\n self.get_all_stock_by_stock_id(stock.id, stock_location_ids)\n if len(stock_location_ids) == 1:\n stock_location_ids.append(0)\n return stock_location_ids\n\n @api.model\n def get_product_available_all_stock_location(self, stock_location_id):\n _logger.info('{get_product_available_all_stock_location}')\n sql = \"\"\"\n with\n uitstock as (\n select\n t.name product, sum(product_qty) sumout, m.product_id, m.product_uom \n from stock_move m \n left join product_product p on m.product_id = p.id \n left join product_template t on p.product_tmpl_id = t.id\n where\n m.state like 'done' \n and m.location_id in (select id from stock_location where usage like 'internal') \n and m.location_dest_id not in (select id from stock_location where usage like 'internal') \n group by product_id,product_uom, t.name order by t.name asc\n ),\n instock as (\n select\n t.list_price purchaseprice, t.name product, 
sum(product_qty) sumin, m.product_id, m.product_uom\n from stock_move m\n left join product_product p on m.product_id = p.id\n left join product_template t on p.product_tmpl_id = t.id\n where \n m.state like 'done' and m.location_id not in (select id from stock_location where usage like 'internal')\n and m.location_dest_id in (select id from stock_location where usage like 'internal')\n group by product_id,product_uom, t.name, t.list_price order by t.name asc\n ) \n select\n i.product, sumin-coalesce(sumout,0) AS stock, sumin, sumout, purchaseprice, ((sumin-coalesce(sumout,0)) * purchaseprice) as stockvalue\n from uitstock u \n full outer join instock i on u.product = i.product\n \"\"\"\n\n @api.model\n def get_on_hand_by_stock_location(self, stock_location_id):\n stock_ids = self.get_all_stock_by_stock_id(stock_location_id, [])\n if len(stock_ids) > 1:\n stock_datas = self.get_product_available_filter_by_stock_location_ids(tuple(stock_ids))\n else:\n stock_datas = self.get_product_available_filter_by_stock_location_id(\n stock_location_id)\n if stock_datas == {}:\n return False\n else:\n return stock_datas\n\n @api.model\n def get_product_available_filter_by_stock_location_id(self, stock_location_id):\n _logger.info('{get_product_available_filter_by_stock_location_id}')\n sql = \"\"\"\n with\n uitstock as (\n select\n t.name product, sum(product_qty) sumout, m.product_id, m.product_uom \n from stock_move m \n left join product_product p on m.product_id = p.id \n left join product_template t on p.product_tmpl_id = t.id\n where\n m.state like 'done'\n and t.type = 'product' \n and m.location_id in (select id from stock_location where id=%s) \n and m.location_dest_id not in (select id from stock_location where id=%s) \n group by product_id,product_uom, t.name order by t.name asc\n ),\n instock as (\n select\n t.list_price purchaseprice, t.name product, sum(product_qty) sumin, m.product_id, m.product_uom\n from stock_move m\n left join product_product p on m.product_id = p.id\n left join product_template t on p.product_tmpl_id = t.id\n where \n m.state like 'done' and m.location_id not in (select id from stock_location where id=%s)\n and m.location_dest_id in (select id from stock_location where id=%s)\n group by product_id,product_uom, t.name, t.list_price order by t.name asc\n ) \n select\n i.product_id, i.product, sumin-coalesce(sumout,0) AS stock, sumin, sumout, purchaseprice, ((sumin-coalesce(sumout,0)) * purchaseprice) as stockvalue\n from uitstock u \n full outer join instock i on u.product = i.product\n \"\"\" % (stock_location_id, stock_location_id, stock_location_id, stock_location_id)\n self.env.cr.execute(sql)\n results = self.env.cr.fetchall()\n pos_data = {}\n for result in results:\n if result[0]:\n pos_data[result[0]] = result[2]\n return pos_data\n\n @api.model\n def get_product_available_filter_by_stock_location_ids(self, stock_location_ids):\n _logger.info('begin get_product_available_filter_by_stock_location_ids')\n sql_out = \"\"\"\n select\n sum(product_qty) sumout, m.product_id, t.name product, m.product_uom \n from stock_move m \n left join product_product p on m.product_id = p.id \n left join product_template t on p.product_tmpl_id = t.id\n where\n t.available_in_pos is True\n and m.state like 'done'\n and t.type = 'product' \n and m.location_id in (select id from stock_location where id in %s) \n and m.location_dest_id not in (select id from stock_location where id in %s)\n group by product_id,product_uom, t.name order by t.name asc\n \"\"\" % (stock_location_ids, 
stock_location_ids)\n self.env.cr.execute(sql_out)\n results_out = self.env.cr.fetchall()\n sql_in = \"\"\"\n select\n sum(product_qty) sumin, m.product_id, t.name product, t.list_price purchaseprice, m.product_uom\n from stock_move m\n left join product_product p on m.product_id = p.id\n left join product_template t on p.product_tmpl_id = t.id\n where \n t.available_in_pos is True\n and m.state like 'done' and m.location_id not in (select id from stock_location where id in %s)\n and m.location_dest_id in (select id from stock_location where id in %s)\n group by product_id,product_uom, t.name, t.list_price order by t.name asc\n \"\"\" % (stock_location_ids, stock_location_ids)\n self.env.cr.execute(sql_in)\n results_in = self.env.cr.fetchall()\n dict_in = {}\n for result in results_in:\n dict_in[result[1]] = result[0]\n dict_out = {}\n for result in results_out:\n dict_out[result[1]] = result[0]\n for product_id, qty_in in dict_in.items():\n dict_in[product_id] = dict_in[product_id] - dict_out.get(product_id, 0)\n return dict_in\n","sub_path":"pos_retail/models/pos/pos_cache_database.py","file_name":"pos_cache_database.py","file_ext":"py","file_size_in_byte":11843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"600114416","text":"import urllib, re, random, os, math, sys\nfrom PIL import Image\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\n#check if destination folder exists, if not create it\ndef checkDownloadFolder(imgDirectory):\n if not os.path.exists(imgDirectory):\n os.makedirs(imgDirectory)\n\ndef getAngle(shorterCathetus, longerCathetus):\n return np.arctan(float(shorterCathetus)/float(longerCathetus))\n\n#distribution data taken from paper \"Handwriting Performance, Self-Reports, and Perceived Self-Efficacy Among Children With Dysgraphia\"\n#table page 188/7 using \"Computerized Penmanship Evaluation Tool\"\ndef getTimePerformance(isWithoutDysgraphia):\n lowBound, upBound = 0, 5 #taken from hpsq questionnaire\n mean, standardDeviation = 2.36, 0.45 #case without Dysgraphia (since more samples are required, it is here as default value) \n\n if isWithoutDysgraphia == 1: #case children With Dysgraphia\n mean, standardDeviation = 1.00, 0.66 #changes mean and DS values\n \n return np.random.normal(mean, standardDeviation, None)\n \ndef getStatisticalValues(isWithoutDysgraphia):\n #for future works could take into account: TotalTime, onPaperTime, inAirTime, meanPressure, meanWritingVelocity \n #it now calculates TotalTimeOnly\n while True:\n res = round(getTimePerformance(isWithoutDysgraphia), 3)\n if res > 0.20: \n #this check discards implausibly fast ('flash') writing times\n break\n return res\n\ndef printTrack(wordNumber, rangeArray, numberOfSamples, text):\n #this just adds a fancy animation\n animationSet = \"|/-\\\\\"\n animChar = animationSet[int(wordNumber) % len(animationSet)]\n #end of eyecandy effect\n\n sys.stdout.write(\"\\r\" + animChar + \" \" + wordNumber + \" of \" + rangeArray + \": getting \" + numberOfSamples + \" img(s) for word '\" + text + \"'\" + \" \" * 20)\n sys.stdout.flush()\n\n#input: image output: binarized image\ndef binarizeImage(image):\n return image.convert('1')\n\n#scales input image to imageSize\ndef scaleImage(image):\n maxWidth, maxHeight = 512, 128\n imageSize = maxWidth, maxHeight\n whiteCanvas = Image.new(\"RGB\", imageSize, \"white\")\n imgTmp = image\n \n #this scales only if it is bigger than imageSize\n imgTmp.thumbnail(imageSize, Image.ANTIALIAS) \n\n 
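# The Odoo model above assembles its SQL with Python '%' string interpolation;
# psycopg2 (the driver behind Odoo's cr) supports bound parameters instead,
# which avoids quoting bugs and SQL injection. A minimal sketch with a plain
# psycopg2 connection -- the DSN and the id tuple are placeholders, and a
# Python tuple passed as a single parameter is rendered as an SQL list, so
# "id in %s" works without interpolating text into the query.
import psycopg2

conn = psycopg2.connect("dbname=test")  # placeholder DSN
cur = conn.cursor()
cur.execute(
    "select id from stock_location where id in %s",
    ((1, 2, 3),),  # one bound parameter: the tuple (1, 2, 3)
)
print(cur.fetchall())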
whiteCanvas.paste(imgTmp, (0, (maxHeight - imgTmp.height)))\n\n return whiteCanvas \n\n\n#This takes the image as input and gives as result the image formatted:\n#A formatted image is before scaled to a fixed dimension and subsequently binarized\ndef formatImage(isWithoutDysgraphia, img):\n imgTmp = scaleImage(img) \n imgTmp = binarizeImage(imgTmp) \n \n return imgTmp\n\ndef getHtmlFromURL(url):\n website = urllib.urlopen(url)\n return website.read()\n\ndef extractImages(html):\n #looks for the img elements which starts with \"data:image\" string: \n #this because the images we are interested to get have this string at the beginning\n pat = re.compile (r']*src=\"data:image/([^\"]+)')\n return pat.findall(html)\n\ndef computeReferenceDimensions(referenceArray):\n refHeight, refWidth, refAngle = 0, 0, 0\n\n for dim in referenceArray:\n H, W = dim\n refHeight = refHeight + H\n refWidth = refWidth + W\n\n refHeight = refHeight / len(referenceArray)\n refWidth = refWidth / len(referenceArray)\n\n refAngle = getAngle(refHeight, refWidth)\n\n #print(\"Average refHeight: \" + str(refHeight) + \", refWidth \" + str(refWidth) + \", Angle \" + str(refAngle))\n\n return refHeight, refWidth, refAngle\n\n\n\n\n''' ----------------------------------------------------------------------------------------- '''\n\nimgDirectory = os.getcwd() + \"/img/\"\n#image folder destination\ncheckDownloadFolder(imgDirectory) \n\n#creating an empty array which has to be fullfilled with words from words.txt file\narrayWord = []\nwith open('10k.txt','r') as f:\n for line in f:\n for word in line.split():\n arrayWord.append(word) \n\n# prototype of the request:\n# https://www.cs.toronto.edu/~graves/handwriting.cgi?text=texthere&style=&bias=0.9&samples=5\n# var \n# text=texthere string which has to be written\n# style= if empty, it should be random\n# bias=0.9 [0,1] 1 digit after comma only\n# samples=5 [1,5]\n\nwith open(\"db.txt\", \"w\") as resultFile:\n #counter variable is used just to print how many requestes have been done\n counter = 0\n\n resultFile.write(\"imageName, isWithoutDysgraphia,diffAngle, performanceTime \\n\\n\")\n #iterate the arrayWord\n for word in range(len(arrayWord)):\n\n refHeight = 0\n refWidth = 0\n refAngle = 0\n text = arrayWord[word] #retrieve the word from array\n\n samples = 5\n\n for isWithoutDysgraphia in xrange(1, -1, -1):\n\n bias=str(isWithoutDysgraphia) #choosing the bias with values between 0 or 1 \n #samples = 1 if isWithoutDysgraphia else 2; #this gives 1 sample if isWithoutDysgraphia, 2 samples if lowQuality\n\n #print on screen status of requests\n printTrack(str(counter), str(len(arrayWord)), str(samples), text)\n\n #compose the url\n url = \"https://www.cs.toronto.edu/~graves/handwriting.cgi?text=\" + text + \"&style=&bias=\" + bias + \"&samples=\" + str(samples)\n\n html = getHtmlFromURL(url)\n\n imgs = extractImages(html)\n\n #this array holds the 2-tuples of reference images' dimensions, needed to compute an average value\n referenceArray = list() \n\n #retrieve and save into \"/img\" folder\n for i in range(len(imgs)):\n imageName = text + \"_\" + str(bias) + \"_[\" + str(i) + \"].png\"\n imagePath = os.path.join(imgDirectory, imageName)\n urllib.urlretrieve(\"data:image/\" + imgs[i], imagePath)\n\n imgFile = Image.open(imagePath)\n\n diffAngle = 0\n\n if(isWithoutDysgraphia):\n referenceArray.append(imgFile.size) #saves reference height and width\n\n if(i is samples-1):\n refHeight, refWidth, refAngle = computeReferenceDimensions(referenceArray)\n #print(\"\\nrefWidth: \" + 
str(refWidth) + \", refHeight: \" + str(refHeight) + \", Angle of ref img is \" + str(refAngle))\n else:\n #get the diagonal angle after the image was stretched referring to the highQuality witdh: after that calculate the angles (highQ and lowQ) and save the slope as difference between them in the name or cvs\n lowQHeight, lowQWidth = imgFile.size #save height and width\n scaleFactor = float(refWidth) / float(lowQWidth)\n diffAngle = round(math.degrees(np.absolute(refAngle - getAngle(lowQHeight * scaleFactor, lowQWidth * scaleFactor)))%360, 3)\n #print(\"Scale factor: \" + str(scaleFactor)) \n #print(\"\\nlowQWidth: \" + str(lowQWidth) + \", lowQHeight: \" + str(lowQHeight) + \", Angle of lowQ img is \" + str(diffAngle))\n \n imgFile = formatImage(isWithoutDysgraphia, imgFile)\n if imgFile.mode != 'RGB':\n imgFile = imgFile.convert('RGB')\n imgFile.save(imagePath)\n\n perfTime = getStatisticalValues(isWithoutDysgraphia)\n resultFile.write(imageName + \",\" + str(bias) + \",\" + str(diffAngle) + \",\" + str(perfTime) + \"\\n\")\n\n counter+= 1\nf.close()","sub_path":"imageScraper/imageScraper.py","file_name":"imageScraper.py","file_ext":"py","file_size_in_byte":7691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269676136","text":"# coding:utf-8\n\"\"\"\n@author:XuMing\n\"\"\"\n\nfrom gensim import corpora, models\nimport ConfigParser\nimport jieba\nimport jieba.analyse\nimport analysis\n\nconf = ConfigParser.ConfigParser()\nconf.read(\"arg.cfg\")\nstopwords_path = conf.get(\"path\", \"stopwords_path\")\nsegmented_path = conf.get(\"path\", \"segmented_path\")\ntest_path = conf.get(\"path\", \"test_path\")\nraw_path = conf.get('path', 'raw_path')\nresult_path = conf.get('path', 'result_path')\ntopic_num = 20\n\n\ndef get_stopwords_set(file_name):\n with open(file_name, 'r') as f:\n return set([line.strip().decode('utf-8') for line in f])\n\n\ndef get_words_list(file_name, stop_word_file):\n stop_words_set = get_stopwords_set(stop_word_file)\n word_list = []\n with open(file_name, 'r') as f:\n for line in f:\n tmp_list = list(jieba.cut(line.strip(), cut_all=False))\n word_list.append([i for i in tmp_list if i not in stop_words_set])\n return word_list\n\n\ndef extract_theme(raw_file, stop_word_file, num_topics=10):\n result = []\n # 列表,每个元素也是列表,即分词后的词语列表\n word_list = get_words_list(raw_file, stop_word_file)\n # 生成文档的词典,每个此与一个整形索引值对应\n word_dict = corpora.Dictionary(word_list)\n # 词频统计,转化为空间向量格式\n corpus_list = [word_dict.doc2bow(text) for text in word_list]\n lda = models.ldamodel.LdaModel(corpus=corpus_list, id2word=word_dict, num_topics=num_topics, alpha='auto')\n for pattern in lda.show_topics(num_topics=num_topics, num_words=1, formatted=False):\n result.append(pattern[1][0][0].encode('utf-8'))\n return result\n\n\ndef main():\n files = analysis.get_files(raw_path)\n f_word_result = file(result_path + \"/theme_result.txt\", \"w+\")\n f_word_result.write(\"主题词提取\" + \"\\n\")\n for f in files:\n f_word_result.write('\\n' + f.split(\"\\\\\")[-1][:-4].decode('gbk').encode('utf-8') + \":\\n\")\n topics = extract_theme(f, stopwords_path, 100)\n topic_list = []\n for t in topics:\n if t not in topic_list and len(topic_list) < topic_num:\n topic_list.append(t)\n f_word_result.write(t + '\\n')\n print(f + \" ok.\")\n f_word_result.close()\n\n\nif __name__ == '__main__':\n 
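# getStatisticalValues in imageScraper.py above resamples in a while-loop
# until the value exceeds 0.20; scipy's truncated normal expresses the same
# idea in a single draw. A sketch using the script's own parameters
# (mean 2.36, sd 0.45, lower cutoff 0.20).
from scipy.stats import truncnorm

mean, sd, lower = 2.36, 0.45, 0.20
a = (lower - mean) / sd                          # cutoff in standard units
sample = truncnorm.rvs(a, float("inf"), loc=mean, scale=sd)
print(round(sample, 3))                          # always > 0.20 by construction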
main()\n","sub_path":"theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213319513","text":"#!/usr/bin/env python3\n\nimport sys\nimport random\n\nallowed_chars = 'ABCDEFGHJKLMNPQRSTUVWXYZ23456789'\ncode_length = 16\ncode_required = 200\n\ndef gen():\n code = ''\n for _ in range(code_length):\n code = code + random.choice(allowed_chars)\n return code\n\nif __name__ == '__main__':\n for _ in range(code_required):\n print(gen())\n\n","sub_path":"show-me-the-code/0001/redeem.py","file_name":"redeem.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300098256","text":"import sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5 import uic\r\n\r\n#UI파일 연결\r\n#단, UI파일은 Python 코드 파일과 같은 디렉토리에 위치해야한다.\r\nform_class1 = uic.loadUiType(\"untitled.ui\")[0]\r\nform_class2 = uic.loadUiType(\"phoneNumber.ui\")[0]\r\n\r\n#화면을 띄우는데 사용되는 Class 선언\r\nclass mainWindow(QMainWindow,form_class1) :\r\n can_num = 0\r\n pet_num = 0\r\n phone_number = \"\"\r\n def __init__(self) :\r\n super().__init__()\r\n self.setupUi(self)\r\n self.pbtn_test_c.clicked.connect(self.test_c)\r\n self.pbtn_test_p.clicked.connect(self.test_p)\r\n self.pbtn_next_page.clicked.connect(self.next_page)\r\n\r\n def ready(self) :\r\n self.phone_number=phone_window.current_PN\r\n self.can_num=0\r\n self.pet_num=0\r\n self.label_intro.setText(\"쓰레기를 넣어 주세요.\\nphone number : \"+self.phone_number)\r\n\r\n def test_c(self) :\r\n self.can_num +=1\r\n self.label_can_num.setText(str(self.can_num)+'개')\r\n\r\n def test_p(self) :\r\n self.pet_num +=1\r\n self.label_pet_num.setText(str(self.pet_num)+'개')\r\n\r\n def next_page(self) :\r\n phone_window.ready()\r\n main_window.close()\r\n phone_window.show()\r\n \r\n\r\nclass phoneWindow(QMainWindow, form_class2) :\r\n current_PN = \"\"\r\n def __init__(self) :\r\n super().__init__()\r\n self.setupUi(self)\r\n\r\n self.button_0.clicked.connect(lambda: self.buttonClick(0))\r\n self.button_1.clicked.connect(lambda: self.buttonClick(1))\r\n self.button_2.clicked.connect(lambda: self.buttonClick(2))\r\n self.button_3.clicked.connect(lambda: self.buttonClick(3))\r\n self.button_4.clicked.connect(lambda: self.buttonClick(4))\r\n self.button_5.clicked.connect(lambda: self.buttonClick(5))\r\n self.button_6.clicked.connect(lambda: self.buttonClick(6))\r\n self.button_7.clicked.connect(lambda: self.buttonClick(7))\r\n self.button_8.clicked.connect(lambda: self.buttonClick(8))\r\n self.button_9.clicked.connect(lambda: self.buttonClick(9))\r\n self.button_bs.clicked.connect(self.bsClick)\r\n self.button_enter.clicked.connect(self.enClick)\r\n\r\n def ready(self) :\r\n self.current_PN=\"\"\r\n self.phone_number.setText(str(self.current_PN))\r\n\r\n def buttonClick(self, num) :\r\n self.current_PN = self.current_PN + str(num)\r\n self.phone_number.setText(str(self.current_PN))\r\n\r\n def bsClick(self) :\r\n self.current_PN = self.current_PN[:-1]\r\n self.phone_number.setText(str(self.current_PN))\r\n\r\n def enClick(self) :\r\n phone_window.close()\r\n main_window.ready()\r\n main_window.show()\r\n\r\nif __name__ == \"__main__\" :\r\n #QApplication : 프로그램을 실행시켜주는 클래스\r\n app = QApplication(sys.argv) \r\n\r\n #WindowClass의 인스턴스 생성\r\n phone_window = phoneWindow() \r\n main_window = mainWindow()\r\n\r\n #프로그램 화면을 보여주는 코드\r\n phone_window.show()\r\n\r\n #프로그램을 이벤트루프로 진입시키는(프로그램을 작동시키는) 코드\r\n 
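# redeem.py above draws code characters with random.choice, which is not a
# cryptographically secure source; for redeem codes Python's secrets module
# (3.6+) is the usual choice. A sketch with the same alphabet and length as
# the script.
import secrets

allowed_chars = 'ABCDEFGHJKLMNPQRSTUVWXYZ23456789'
code = ''.join(secrets.choice(allowed_chars) for _ in range(16))
print(code)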
app.exec_()","sub_path":"ui/uitest.py","file_name":"uitest.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"524375582","text":"\"\"\"\n第 0005 题:你有一个目录,装了很多照片,把它们的尺寸变成都不大于 iPhone5 分辨率的大小。\n\"\"\"\n\nimport glob\nfrom PIL import Image\n\n\ndef get_path_list():\n return glob.glob(r\".\\test_pictures\\*.jpg\")\n\n\ndef picture_process(pathname, num):\n with Image.open(pathname) as f:\n img = f.resize((640, 1136))\n img.save(r\".\\re_pictures\\\\\" + str(num) + r\".jpg\", 'jpeg')\n\n\ndef main():\n path_list = get_path_list()\n n = 1\n for pathname in path_list:\n picture_process(pathname, n)\n n += 1\n print('process done.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"0005/ip5Img.py","file_name":"ip5Img.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45024451","text":"from django.conf.urls import url\nfrom appsindico.reservations.views import my_reservations_list, cancel, select_room, new_room_reservation,\\\n reservations_control, reservation_delete, authorize_reservation, rooms_control, new_room, room_edit, room_delete, \\\n room_rules\n\nurlpatterns = [\n url(r'^minhas-reservas/$', my_reservations_list, name='my_reservations_list'),\n url(r'^cancelar/(\\d+)/$', cancel, name='cancel'),\n url(r'^reservar/$', select_room, name='select_room'),\n url(r'^nova-reserva/(?P[\\w-]+)/$', new_room_reservation, name='new_room_reservation'),\n\n\n url(r'^controle-de-reservas/$', reservations_control, name='reservations_control'),\n url(r'^excluir-reserva/(\\d+)/$', reservation_delete, name='reservation_delete'),\n url(r'^autorizar-reserva/(\\d+)/$', authorize_reservation, name='authorize_reservation'),\n url(r'^controle-de-ambiente/$', rooms_control, name='rooms_control'),\n url(r'^novo-ambiente/$', new_room, name='new_room'),\n url(r'^alterar-ambiente/(\\d+)/$', room_edit, name='room_edit'),\n url(r'^excluir-ambiente/(\\d+)/$', room_delete, name='room_delete'),\n url(r'^regras-de-ambiente/(?P[\\w-]+)/$', room_rules, name='room_rules'),\n]","sub_path":"appsindico/reservations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"77382790","text":"import poplib\nimport smtplib\nfrom email.parser import Parser\nfrom email.header import decode_header\nfrom email.utils import parseaddr\nfrom html.parser import HTMLParser\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\n\nclass MyHTMLParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.links = []\n\n def handle_starttag(self, tag, attrs):\n # print \"Encountered the beginning of a %s tag\" % tag\n if tag == \"a\":\n if len(attrs) == 0:\n pass\n else:\n for (variable, value) in attrs:\n if variable == \"href\":\n self.links.append(value)\n\n\n# from datetime import datetime\n# email = '18129832245@163.com'\n# passwd = 'Lin241507'\n# pop_server = 'pop3.163.com'\n\n\ndef get_email(email, password, server):\n server = poplib.POP3_SSL(server, '995')\n # 可以打开或关闭调试信息:\n # server.set_debuglevel(1)\n # 可选:打印POP3服务器的欢迎文字:\n # print(server.getwelcome().decode('utf-8'))\n server.user(email)\n server.pass_(password)\n\n # 邮件数量和占有空间\n # stat()返回 消息的数量 和 消息的总大小\n # print('Messages: %s. 
Size: %s' % server.stat())\n\n # list()返回:\n # 服务器的响应\n # 消息列表\n # 消息的大小\n _, mails, _ = server.list()\n\n # 查看返回列表\n # print(mails)\n\n # 获取最新的一封邮件,索引号为1开始\n # retr(index)返回:服务器响应 消息所有行 消息字节数\n index = len(mails)\n _, lines, _ = server.retr(index)\n\n # lines存储邮件原始文本每行并进行解析\n msg_content = b'\\r\\n'.join(lines).decode('utf-8')\n msg = Parser().parsestr(msg_content)\n # 可以根据邮件索引从服务器删除邮件\n # server.dele(index)\n # 关闭邮件\n server.quit()\n resu = print_info(msg)\n return resu\n\n\n# 编码设置\ndef guess_charset(my_msg):\n charset = my_msg.get_charset()\n if charset is None:\n content_type = my_msg.get('Content-Type', '').lower()\n pos = content_type.find('charset=')\n if pos >= 0:\n charset = content_type[pos + 8:].strip()\n return charset\n\n\ndef decode_str(s):\n value, charset = decode_header(s)[0]\n if charset:\n value = value.decode(charset)\n return value\n\n\n# indent用于缩进显示,递归打印\ndef print_info(my_msg, indent=0):\n if indent == 0:\n for header in ['From', 'To', 'Subject']:\n value = my_msg.get(header, '')\n if value:\n if header == 'Subject':\n value = decode_str(value)\n else:\n hdr, addr = parseaddr(value)\n name = decode_str(hdr)\n value = u'%s <%s>' % (name, addr)\n # print('%s%s: %s' % (' ' * indent, header, value))\n if my_msg.is_multipart():\n parts = my_msg.get_payload()\n for n, part in enumerate(parts):\n print('%spart %s' % (' ' * indent, n))\n print('%s-----------------------------------' % ' ' * indent)\n print_info(part, indent + 1)\n pass\n else:\n content_type = my_msg.get_content_type()\n if content_type == 'text/plain' or content_type == 'text/html':\n content = my_msg.get_payload(decode=True)\n charset = guess_charset(my_msg)\n if charset:\n content = content.decode(charset)\n # print('%sText: %s' % (' ' * indent, content + '...'))\n html_code = content\n hp = MyHTMLParser()\n hp.feed(html_code)\n hp.close()\n # print(hp.links)\n captcha = hp.links[0].split('=')\n # print(captcha)\n print(captcha[-1])\n return captcha[-1]\n else:\n print('%sAttachment: %s' % (' ' * indent, content_type))\n\n\n# 发送邮件\ndef send_email(info=''):\n # 邮件发送人\n sender = {\n 'email': '18129832245@163.com',\n 'psw': 'Lin241507',\n }\n\n # 邮件接收者\n receivers = ['leo.lin@longsys.com', '18129832245@163.com']\n\n # 邮件内容\n text = '此邮件由量产云平台自动化测试系统自动发送,请勿回复!\\r\\n' + info\n # 邮件附件\n\n # 三个参数:第一个为文本内容,第二个 plain 设置文本格式,第三个 utf-8 设置编码\n message = MIMEText(text, 'plain', 'utf-8')\n message['From'] = Header(sender['email']) # 发送者\n message['To'] = Header(','.join(receivers)) # 接收者\n\n # 邮件标题\n subject = '量产云平台自动化测试-' + info\n message['Subject'] = Header(subject, 'utf-8')\n\n try:\n smtpObj = smtplib.SMTP('smtp.163.com')\n # smtpObj.set_debuglevel(1)\n smtpObj.login(sender['email'], sender['psw'])\n smtpObj.sendmail(sender['email'], receivers, message.as_string())\n print(\"邮件发送成功\")\n except smtplib.SMTPException as e:\n print(\"Error: 无法发送邮件\")\n print(e)\n\n\nif __name__ == '__main__':\n send_email()\n","sub_path":"1.0/pop_email.py","file_name":"pop_email.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"181733189","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuteurs : Valentin Noel et Romain Pascual\nCette classe permet la représentation du Picross sous deux formes :\n - une première forme qui correspond aux nombres de cases à colorier par ligne\n ou par colonne sous la forme de liste de liste.\n Cette modélisation est obtenue par lecture d'un fichier au format picross.\n - la seconde forme est la modélisation 
sous forme booléenne, elle fait appelle \n à la classe Fnc\n\"\"\"\n\nfrom Variable import *\nfrom Litteral import *\nfrom Fnc import *\nfrom Resolu import *\nfrom SATSolveur import *\n\nimport os\n\nclass Picross:\n def __init__(self):\n # Attributs\n ## Attribut lu dans le fichier\n self.nb_ligne = None\n self.nb_colonne = None\n self.bloc_ligne = []\n self.bloc_colonne = []\n\n ## Attributs de modelisation\n self.list_clause = []\n self.fnc = None\n\n def fromArray(self, arr):\n \"\"\"Construit les blocs du picross en fonction du damier contenu dans arr\"\"\"\n self.nb_ligne = len(arr)\n self.nb_colonne = len(arr[0])\n self.bloc_ligne = []\n self.bloc_colonne = []\n for i in range(0, self.nb_ligne):\n taille_bloc = 0\n line = []\n for j in range(0, self.nb_colonne):\n if arr[i][j] == 0:\n taille_bloc += 1\n else:\n if taille_bloc != 0:\n line.append(taille_bloc)\n taille_bloc = 0\n if taille_bloc != 0:\n line.append(taille_bloc)\n self.bloc_ligne.append(line)\n for j in range(0, self.nb_colonne):\n taille_bloc = 0\n line = []\n for i in range(0, self.nb_ligne):\n if arr[i][j] == 0:\n taille_bloc += 1\n else:\n if taille_bloc != 0:\n line.append(taille_bloc)\n taille_bloc = 0\n if taille_bloc != 0:\n line.append(taille_bloc)\n self.bloc_colonne.append(line)\n\n def fromFile(self, nom_fichier):\n \"\"\"\n Cette méthode permet de récupérer les blocs décrits dans\n le fichier au format picross et de les transformer en listes d'entiers\n qui traduisent les longueurs des blocs.\n \"\"\"\n \n num_ligne = 0\n num_colonne = 0\n try:\n f = open(nom_fichier, \"r\")\n except FileNotFoundError:\n raise Exception(\"Le fichier n'a pas été trouvé. Nom du fichier :\"+nom_fichier)\n for line in f.readlines():\n comp_line = line.split()\n if len(comp_line) == 0:#ligne vide\n pass\n elif comp_line[0] == \"c\":#commentaire\n pass\n elif comp_line[0] == \"picross\":#la taille\n if len(comp_line) == 2:#picross carre\n if comp_line[1].isdigit():\n self.nb_ligne = int(comp_line[1])\n self.nb_colonne = self.nb_ligne\n else:\n raise Exception(\"La taille n'est pas un entier valide\")\n elif len(comp_line) == 3:#picross rectangle\n if comp_line[1].isdigit() and comp_line[2].isdigit():#On verifie que les tailles sont des entiers\n self.nb_ligne = int(comp_line[1])\n self.nb_colonne = int(comp_line[2])\n else:\n raise Exception(\"La taille n'est pas un entier valide\")\n else:#format non valide (on ne gere pas une dimension >= 3)\n raise Exception(\"Donnez une taille contenant un ou deux entiers\")\n elif self.nb_ligne == None or self.nb_colonne == None:#On verifie que la taille du picross a deja ete specifie\n raise Exception(\"Specifiez la taille avant de donner les blocs\")\n else:#On recupere les blocs\n #On verifie que toutes les tailles sont des entiers\n all_num = True\n for c in comp_line:\n all_num &= c.isdigit()\n if not all_num:\n raise Exception(\"La taille des blocs doivent etre des entiers\")\n else:\n if comp_line[-1] != \"0\":\n raise Exception(\"Les lignes qui specifient la taille des blocs doivent se terminer avec '0'\")\n else:\n comp_line = [int(comp_line[i]) for i in range (0, len(comp_line)-1)]#-1 pour enlever le 0\n if(num_ligne < self.nb_ligne):#On en est encore aux blocs des lignes\n self.bloc_ligne.append(comp_line)\n num_ligne += 1\n elif num_colonne < self.nb_colonne:\n self.bloc_colonne.append(comp_line)\n num_colonne += 1\n else:\n raise Exception(\"Vous specifiez trop de blocs par rapport a la taille du picross\")\n if num_ligne < self.nb_ligne or num_colonne < self.nb_colonne:\n raise 
Exception(\"Vous n'avez pas specifie assez de blocs par rapport a la taille du picross\")\n\n def save(self, nom_file_out):\n \"\"\"Enregistre les blocs du picross\"\"\"\n f = open(nom_file_out, \"w+\")\n f.write(\"picross \"+str(self.nb_ligne)+\" \"+str(self.nb_colonne)+\"\\n\")\n sep = ' '\n for i in range(0,self.nb_ligne):\n bloc = self.bloc_ligne[i]\n f.write(sep.join([str(bloc[k]) for k in range(0, len(bloc))])+\" 0\\n\")\n for j in range(0,self.nb_colonne):\n bloc = self.bloc_colonne[j]\n f.write(sep.join([str(bloc[k]) for k in range(0, len(bloc))])+\" 0\\n\")\n f.close()\n\n def computeFncModelisation1(self):\n \"\"\"\n Cette méthode permet de construire les variables et les clauses (en fnc)\n liées à la modélisation 1. Cette méthode doit être utilisée après la méthode\n fromFile ou après la méthode fromArray.\n \"\"\"\n if (len(self.bloc_ligne) == 0 or len(self.bloc_colonne) == 0):\n raise Exception(\"Il faut d'abord construire les blocs avant de construire les clauses\")\n \n list_sequences = self.bloc_ligne+self.bloc_colonne\n n = len(self.bloc_ligne)\n m = len(self.bloc_colonne)\n\n \"\"\"On creer les variables du damier\"\"\"\n variables_damier = []\n for i in range(0, n):\n ligne = []\n for j in range(0, m):\n ligne.append(VariableCase(\"x_\"+str(i+1)+\",\"+str(j+1),i+1,j+1))\n variables_damier.append(ligne)\n\n \"\"\"On creer les variables blocs\"\"\"\n variables_blocs = []\n for s in range(0, len(list_sequences)):\n sequence = []\n for q in range(0, len(list_sequences[s])):\n bloc = []\n if s < n:\n nb_position = n\n else:\n nb_position = m\n for i in range(0, nb_position):\n bloc.append(VariableBloc(\"b_\"+str(s+1)+\",\"+str(q+1)+\",\"+str(i+1),s+1,q+1,i+1))\n sequence.append(bloc)\n variables_blocs.append(sequence)\n\n \"\"\"On creer les clauses\"\"\"\n\n \"\"\"Pour toute sequence, pour tout q, le bloc existe\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])):#pour tout bloc de la sequence\n clause = Clause()\n nb_var = len(variables_blocs[s][q])\n for i in range(0, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque position possible\n clause.add_litteral(Litteral(True, variables_blocs[s][q][i]))\n self.list_clause.append(clause)\n\n \"\"\"Pour toute sequence, pour tout q, le bloc est unique\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])):#pour tout bloc de la sequence\n for p in range(0, len(variables_blocs[s][q])-list_sequences[s][q]+1):#pour chaque position possible\n nb_var = len(variables_blocs[s][q])\n for i in range(p+1, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque position possibles a partir de p+1\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_blocs[s][q][p]))\n clause.add_litteral(Litteral(False, variables_blocs[s][q][i]))\n self.list_clause.append(clause)\n\n \"\"\"Pour toute sequence, deux blocs consecutifs sont dans l'ordre et ne se chevauchent pas\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])-1):#pour tout bloc de la sequence sauf le dernier\n nb_var = len(variables_blocs[s][q])\n for p in range(0, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque position possible\n for i in range(0, min(p+list_sequences[s][q]+1, len(variables_blocs[s][q]))):#pour chaque position jusqu'a la fin du bloc + 1 ou la fin du damier\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_blocs[s][q][p]))\n 
clause.add_litteral(Litteral(False, variables_blocs[s][q+1][i]))\n self.list_clause.append(clause)\n\n \"\"\"Pour tout bloc, les cases qui le composent valent 1\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])):#pour tout bloc de la sequence\n nb_var = len(variables_blocs[s][q])\n for p in range(0, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque positionf\n for i in range(p, p+list_sequences[s][q]):#pour chaque position a l'interieur du bloc\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_blocs[s][q][p]))\n if s < len(self.bloc_ligne):#si c'est une ligne\n clause.add_litteral(Litteral(True, variables_damier[s][i]))\n else:#si c'est une colonne\n clause.add_litteral(Litteral(True, variables_damier[i][s-n]))\n self.list_clause.append(clause)\n\n \"\"\"Pour chaque case coloree, elle appartient a un bloc ligne\"\"\"\n for i in range(0, n):\n for j in range(0, m):\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_damier[i][j]))\n for q in range(0, len(variables_blocs[i])):#pour tout bloc de la sequence\n for p in range(max(0, j-list_sequences[i][q]+1), min(j+1, len(variables_blocs[i][q])-list_sequences[i][q]+1)):#pour chaque position entre la case-len(bloc) et min(case, n - len(bloc))\n clause.add_litteral(Litteral(True, variables_blocs[i][q][p]))\n self.list_clause.append(clause)\n\n \"\"\"Pour chaque case colorée, elle appartient a un bloc colonne\"\"\"\n for i in range(0, n):\n for j in range(0, m):\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_damier[i][j]))\n for q in range(0, len(variables_blocs[j+n])):#pour tout bloc de la sequence\n for p in range(max(0, i-list_sequences[j+n][q]+1), min(i+1, len(variables_blocs[j+n][q])-list_sequences[j+n][q]+1)):#pour chaque position entre la case-len(bloc) et min(case, m - len(bloc)\n clause.add_litteral(Litteral(True, variables_blocs[j+n][q][p]))\n self.list_clause.append(clause)\n\n self.fnc = Fnc(self.list_clause,True,self.nb_ligne,self.nb_colonne)\n\n def toFncModelisation1(self):\n self.computeFncModelisation1()\n return self.fnc\n\n def saveClause(self, f, clause):\n \"\"\"Est utilisee dans l'optimisation pour enregistrer les clauses une a une\"\"\"\n for v in clause.get_variables():\n if v.nom not in self.dict_variable.keys():\n if isinstance(v,VariableCase):\n self.dict_variable[v.nom] = (v.colonne-1) + self.nb_colonne * (v.ligne-1) +1\n elif isinstance(v,VariableBloc):\n self.nb_var += 1\n self.dict_variable[v.nom] = self.nb_var\n ligne = []\n sep = \" \"\n for l in clause.get_litteraux():\n num_var = str(self.dict_variable[l.variable.nom])\n if l.positif:\n ligne.append(num_var)\n else:\n ligne.append(\"-\"+num_var)\n ligne.append(\"0\")\n f.write(\"\\n\"+sep.join(ligne))\n\n self.nb_clause += 1\n return self.dict_variable\n\n def toFncModelisationOptimise(self, nom_file_out):\n \"\"\"\n Idem que computeFncModelisation1, mais on enregistre directement dans le DIMACS\n (optimisation de la RAM)\n \"\"\"\n f = open(nom_file_out, \"w+\")\n self.nb_clause = 0\n self.dict_variable = {}\n self.nb_var = self.nb_ligne * self.nb_colonne + 1# utilisé ici uniquement pour les variables blocs\n\n\n nb_clause = len(self.list_clause)\n\n\n if (len(self.bloc_ligne) == 0 or len(self.bloc_colonne) == 0):\n raise Exception(\"Il faut d'abord construire les blocs avant de construire les clauses\")\n\n list_sequences = self.bloc_ligne+self.bloc_colonne\n n = len(self.bloc_ligne)\n m = len(self.bloc_colonne)\n\n \"\"\"On creer les 
variables du damier\"\"\"\n variables_damier = []\n for i in range(0, n):\n ligne = []\n for j in range(0, m):\n ligne.append(VariableCase(\"x_\"+str(i+1)+\",\"+str(j+1),i+1,j+1))\n variables_damier.append(ligne)\n\n \"\"\"On creer les variables blocs\"\"\"\n variables_blocs = []\n for s in range(0, len(list_sequences)):\n sequence = []\n for q in range(0, len(list_sequences[s])):\n bloc = []\n if s < n:\n nb_position = n\n else:\n nb_position = m\n for i in range(0, nb_position):\n bloc.append(VariableBloc(\"b_\"+str(s+1)+\",\"+str(q+1)+\",\"+str(i+1),s+1,q+1,i+1))\n sequence.append(bloc)\n variables_blocs.append(sequence)\n\n \"\"\"On creer les clauses\"\"\"\n\n \"\"\"Pour toute sequence, pour tout q, le bloc existe\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])):#pour tout bloc de la sequence\n clause = Clause()\n nb_var = len(variables_blocs[s][q])\n for i in range(0, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque position possible\n clause.add_litteral(Litteral(True, variables_blocs[s][q][i]))\n #self.list_clause.append(clause)\n self.saveClause(f, clause)\n\n \"\"\"Pour toute sequence, pour tout q, le bloc est unique\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])):#pour tout bloc de la sequence\n for p in range(0, len(variables_blocs[s][q])-list_sequences[s][q]+1):#pour chaque position possible\n nb_var = len(variables_blocs[s][q])\n for i in range(p+1, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque position possibles a partir de p+1\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_blocs[s][q][p]))\n clause.add_litteral(Litteral(False, variables_blocs[s][q][i]))\n #self.list_clause.append(clause)\n self.saveClause(f, clause)\n\n \"\"\"Pour toute sequence, deux blocs consecutifs sont dans l'ordre et ne se chevauchent pas\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])-1):#pour tout bloc de la sequence sauf le dernier\n nb_var = len(variables_blocs[s][q])\n for p in range(0, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque position possible\n for i in range(0, min(p+list_sequences[s][q]+1, len(variables_blocs[s][q]))):#pour chaque position jusqu'a la fin du bloc + 1 ou la fin du damier\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_blocs[s][q][p]))\n clause.add_litteral(Litteral(False, variables_blocs[s][q+1][i]))\n #self.list_clause.append(clause)\n self.saveClause(f, clause)\n\n \"\"\"Pour tout bloc, les cases qui le composent valent 1\"\"\"\n for s in range(0, len(variables_blocs)):#pour toute sequence\n for q in range(0, len(variables_blocs[s])):#pour tout bloc de la sequence\n nb_var = len(variables_blocs[s][q])\n for p in range(0, min(nb_var-list_sequences[s][q]+1, nb_var)):#pour chaque positionf\n for i in range(p, p+list_sequences[s][q]):#pour chaque position a l'interieur du bloc\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_blocs[s][q][p]))\n if s < len(self.bloc_ligne):#si c'est une ligne\n clause.add_litteral(Litteral(True, variables_damier[s][i]))\n else:#si c'est une colonne\n clause.add_litteral(Litteral(True, variables_damier[i][s-n]))\n #self.list_clause.append(clause)\n self.saveClause(f, clause)\n\n \"\"\"Pour chaque case coloree, elle appartient a un bloc ligne\"\"\"\n for i in range(0, n):\n for j in range(0, m):\n clause = Clause()\n clause.add_litteral(Litteral(False, 
variables_damier[i][j]))\n for q in range(0, len(variables_blocs[i])):#pour tout bloc de la sequence\n for p in range(max(0, j-list_sequences[i][q]+1), min(j+1, len(variables_blocs[i][q])-list_sequences[i][q]+1)):#pour chaque position entre la case-len(bloc) et min(case, n - len(bloc))\n clause.add_litteral(Litteral(True, variables_blocs[i][q][p]))\n #self.list_clause.append(clause)\n self.saveClause(f, clause)\n\n \"\"\"Pour chaque case colorée, elle appartient a un bloc colonne\"\"\"\n for i in range(0, n):\n for j in range(0, m):\n clause = Clause()\n clause.add_litteral(Litteral(False, variables_damier[i][j]))\n for q in range(0, len(variables_blocs[j+n])):#pour tout bloc de la sequence\n for p in range(max(0, i-list_sequences[j+n][q]+1), min(i+1, len(variables_blocs[j+n][q])-list_sequences[j+n][q]+1)):#pour chaque position entre la case-len(bloc) et min(case, m - len(bloc)\n clause.add_litteral(Litteral(True, variables_blocs[j+n][q][p]))\n #self.list_clause.append(clause)\n self.saveClause(f, clause)\n\n f.close()\n f = open(nom_file_out, \"r\")\n content = f.read()\n f.seek(0, 0)\n line = \"p cnf \"+str(self.nb_var)+\" \"+str(self.nb_clause)\n f.close()\n f = open(nom_file_out, \"w+\")\n f.write(line.rstrip('\\r\\n') + content)\n f.close()\n\n\n def solve(self):\n print(\"\\nResolution du Picross\")\n\n print(\"\\nModelisation du picross\")\n path = os.path.join(\"trash\", \"temp.txt\")\n self.toFncModelisationOptimise(path)\n \"\"\"fnc_picross = self.toFncModelisation1()\n\n print(\"\\nEcriture du DIMACS correspondant\")\n path = os.path.join(\"trash\", \"temp.txt\")\n fnc_picross.writeDIMACS(path)\"\"\"\n\n print(\"\\nResolution du DIMACS en cours, veuillez patienter\")\n sat_solveur= SATSolveur(path)\n path = os.path.join(\"trash\", \"tempSol.txt\")\n sat_solveur.solve(path)\n\n print(\"\\nReconstruction du picross\")\n resolu = Resolu(path,self.nb_ligne,self.nb_colonne)\n resolu.read_solution()\n resolu.construit_damier()\n path = os.path.join(\"trash\", \"tempSolution.txt\")\n resolu.write_resolu(path, \"\")\n\n print(\"\\nAffichage du picross\")\n import MyImage\n image = MyImage.MyImage()\n image.fromPicross(path)\n image.show()\n\nif __name__ == \"__main__\":\n path = os.path.join(\"picross\", \"valide\", \"carre\", \"1.txt\")\n print(\"Test de lecture d'un fichier au format picross\")\n print(\"--> lecture du fichier :\", path)\n \n picross = Picross()\n picross.fromFile(path)\n print(\"--> lecture du fichier terminée\")\n \n print(\"--> les blocs lignes attendus ([1,1,1], [5], [3], [1,1] et [3]) sont obtenus :\",\n picross.bloc_ligne == [[1,1,1], [5], [3], [1,1], [3]])\n \n print(\"--> les blocs colonne attendus ([2], [4], [3,1], [4] et [2]) sont obtenus :\",\n picross.bloc_colonne == [[2], [4], [3,1], [4], [2]])\n \n print(\"\\nTest de la lecture du format picross terminée.\")\n","sub_path":"Picross.py","file_name":"Picross.py","file_ext":"py","file_size_in_byte":21475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"417824026","text":"import math\n\ndef tempConversion(a,b=\"Celcius\"):\n if b==\"Celcius\":\n x=a*(9/5)+32\n x=round(x,1)\n return x\n elif b==\"F\":\n x=(a-32)*5/9\n x=round(x,1)\n return x\n\ndef factorPair():\n factors=[b]\n for i in range(1,a+1):\n if b*i==a:\n factors.append(i)\n factors.sort()\n return factors\n\ndef toRadians(y):\n x=(y*math.pi/180) \n return x\n\ndef cosineLaw(a,b,c,oppositeside=True):\n if oppositeside==True:\n stepc1=math.pow(a,2)+math.pow(b,2)\n stepc2=(2*a*b)\n 
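# Picross.toFncModelisationOptimise above streams clauses to a file and then
# rewrites the whole file to prepend the "p cnf <vars> <clauses>" header. A
# sketch of the same DIMACS layout written in a single pass once the clause
# list is known; clauses are lists of signed ints and 0 terminates each clause.
def write_dimacs(path, num_vars, clauses):
    with open(path, "w") as f:
        f.write("p cnf %d %d\n" % (num_vars, len(clauses)))
        for clause in clauses:
            f.write(" ".join(str(lit) for lit in clause) + " 0\n")

# (x1 or not x2) and (x2 or x3)
write_dimacs("tiny.cnf", 3, [[1, -2], [2, 3]])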
stepc3=toRadians(c)\n stepc4=math.cos(stepc3)\n stepc5=(stepc2*stepc4)\n stepc6=(stepc1)-(stepc5)\n stepc7=math.sqrt(stepc6)\n return stepc7\n elif oppositeside==False:\n d0=toRadians(c)\n print(d0)\n d1=math.sin(d0)*a/b\n print(d1)\n d2=math.asin(d1)\n print(d2)\n dc=d2*180/math.pi\n d3=180-dc-c\n print(d3)\n d4=toRadians(d3)\n print(d4)\n d5=(math.sin(d4)*180/math.pi)*b/(math.sin(d0)*180/math.pi)\n return d5\n\n\n\n\n\n\n\n\n\ndef solution(numbers):\n x=numbers[1]\n return x \n\ndef quadratic(a,b,c):\n stepx1=(b*-1)\n stepx2=math.pow(b,2)-(4*a*c)\n stepx2=math.sqrt(stepx2)\n stepx3=(stepx1)+(stepx2) \n stepx4=(stepx3)/(2*a)\n stepy1=(b*-1)\n stepy2=math.pow(b,2)-(4*a*c)\n stepy2=math.sqrt(stepy2)\n stepy3=(stepy1)-(stepy2) \n stepy4=(stepy3)/(2*a)\n numlist=[stepy4,stepx4]\n numlist.sort()\n return numlist\n\n","sub_path":"assignment_test.py","file_name":"assignment_test.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"218873266","text":"from socket import *\r\nimport time\r\nserverPort = 80\r\n\r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\nserverSocket.bind(('',serverPort))\r\ncount = 0\r\n\r\n\r\nserverSocket.listen(1)\r\n\r\nprint ('The TCP server is ready to receive')\r\nconnectionSocket, addr = serverSocket.accept()\r\nwhile count < 3000:\r\n sentence = connectionSocket.recv(1024).decode()\r\n print (sentence)\r\n count = count+1\r\n \r\n\r\nconnectionSocket.close()\r\n","sub_path":"Labb2 del D -1.py","file_name":"Labb2 del D -1.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"606962576","text":"\"\"\"\nDjango settings for remind_me project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport djcelery\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nPROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nproject_path = lambda x: os.path.join(PROJECT_PATH, x)\n\ndjcelery.setup_loader()\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 't$%rhdq&eu1e#084#7691b=q@7zyp1aczhq0afqi#yr=$928c4'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'notifications',\n 'djcelery',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use 
absolute paths, not relative paths.\n project_path('templates'),\n)\n\n\nROOT_URLCONF = 'remind_me.urls'\n\nWSGI_APPLICATION = 'remind_me.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# Configuration of Gmail SMTP to send emails\nEMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST='smtp.gmail.com'\nEMAIL_PORT=587\nEMAIL_HOST_USER='YOUR_GMAIL_EMAIL'\nEMAIL_HOST_PASSWORD='YOUR_GMAIL_PASSWORD'\nEMAIL_USE_TLS=True\n\n# SMS gateway configuration of Infobip for sending email\nSMS_GATEWAY = {\n 'USER': 'SMS_GATEWAY_USER',\n 'PASSWORD': 'SMS_GATEWAY_PASSWORD',\n 'URL': 'http://api.infobip.com/api/v3/sendsms/xml',\n}\n\n# Configurable settings for Celery & Amqp. Complete configuration at: remind_me/celery.py\nCELERY_CONFIGURATION = {\n 'USER': 'AMQP_USER',\n 'PASSWORD': 'AMQP_PASSWORD',\n 'HOST': 'AMQP_HOST',\n 'PORT': '5672', # Default port of RabbitMQ\n}","sub_path":"remind_me/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"245942695","text":"from __future__ import annotations\nfrom typing import Optional, List\nfrom dataclasses import dataclass, fields\nfrom datetime import date\nimport logging\nimport functools\nimport re\n\nfrom RecordLib.crecord import Sentence\nfrom RecordLib.crecord.helpers import date_or_none\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Charge:\n \"\"\"\n Track information about a charge\n \"\"\"\n\n offense: str\n grade: str\n statute: str\n sequence: Optional[str] = None # I think this really ought to be not optional\n disposition: Optional[str] = None\n disposition_date: Optional[date] = None\n sentences: Optional[List[Sentence]] = None\n # TODO sequence should not be optional.\n otn: Optional[str] = None\n\n @staticmethod\n def grade_GTE(grade_a: str, grade_b: str) -> bool:\n \"\"\"\n Greater-than-or-equal-to ordering for charge grades.\n\n Args:\n grade_a: A grade like \"M1\", \"F2\", \"S\", etc.\n grade_b: A grade like \"M1\", \"F2\", \"S\", etc.\n \n Returns: \n True if grade_a is the same grade as or more serious than grade_b \n \n Examples:\n grade_GTE(\"M1\", \"S\") == True\n grade_GTE(\"S\",\"\") == False\n \"\"\"\n grades = [\"\", \"S\", \"M\", \"IC\", \"M3\", \"M2\", \"M1\", \"F\", \"F3\", \"F2\", \"F1\"]\n try:\n i_a = grades.index(grade_a)\n except ValueError:\n logger.error(\n f\"Couldn't understand the first grade, {grade_a}, so assuming it has low seriousness.\"\n )\n i_a = 0\n try:\n i_b = grades.index(grade_b)\n except:\n logger.error(\n f\"Couldn't understand the second grade, {grade_b}, so assuming it has low seriousness.\"\n )\n i_b = 0\n return i_a >= i_b\n\n @staticmethod\n def from_dict(dct: dict) -> Charge:\n try:\n if dct.get(\"sentences\"):\n dct[\"sentences\"] = [Sentence.from_dict(s) for s in dct.get(\"sentences\")]\n else:\n dct[\"sentences\"] = []\n if dct.get(\"disposition_date\"):\n dct[\"disposition_date\"] = date_or_none(dct.get(\"disposition_date\"))\n return 
Charge(**dct)\n        except Exception as err:\n            logger.error(str(err))\n            return None\n\n    @staticmethod\n    def reduce_merge(charges: List[Charge]) -> List[Charge]:\n        \"\"\"\n        Given a list of charges, reduce the list by merging charges with the same sequence number.\n\n        In a Docket, there's often a number of records relating to a single charge. These records explain\n        how a charge proceeded through the case. When we parse a docket, if we find lots of records of \n        charges, we need to reduce them into a list where each charge only appears once.\n        \"\"\"\n\n        def reducer(accumulator, charge):\n            \"\"\"\n            Add charge to accumulator, if the charge is new. Otherwise combine charge with its pre-existing charge.\n            \"\"\"\n            if len(accumulator) == 0:\n                return [charge]\n            new_charges = []\n            is_new = True\n            for ch in accumulator:\n                if (\n                    isinstance(charge.sequence, str)\n                    and charge.sequence == ch.sequence\n                    and charge.sequence.strip() != \"\"\n                ):\n                    ch.combine_with(charge)\n                    is_new = False\n            if is_new:\n                accumulator.append(charge)\n            return accumulator\n\n        reduced = functools.reduce(reducer, charges, [])\n        return reduced\n\n    @staticmethod\n    def combine(ch1: Optional[Charge], ch2: Optional[Charge]) -> Charge:\n        \"\"\"\n        Combine two charges, using the most complete information from both.\n        \"\"\"\n\n        def pick_more_complete(thing1, thing2):\n            if thing1 in [None, \"\"]:\n                # this means that None in thing2 would override \"\" in thing1. Is that good?\n                return thing2\n            return thing1\n\n        if ch1 is None:\n            return ch2\n        if ch2 is None:\n            return ch1\n\n        return Charge(\n            **{\n                field.name: pick_more_complete(\n                    getattr(ch1, field.name), getattr(ch2, field.name)\n                )\n                for field in fields(ch1)\n            }\n        )\n\n    def combine_with(self, charge) -> Charge:\n        \"\"\"\n        Combine this Charge with another, filling in missing info, or updating certain fields.\n        \"\"\"\n        for attr in self.__dict__.keys():\n            if getattr(self, attr) is None and getattr(charge, attr) is not None:\n                setattr(self, attr, getattr(charge, attr))\n            elif (\n                isinstance(getattr(self, attr), str)\n                and getattr(self, attr).strip() == \"\"\n            ) and (\n                isinstance(getattr(charge, attr), str)\n                and (getattr(charge, attr).strip() != \"\")\n            ):\n                setattr(self, attr, getattr(charge, attr))\n            elif attr == \"disposition\":\n                if re.search(r\"nolle|guilt|dismiss|withdraw\", charge.disposition, re.I):\n                    # the new charge has a disposition that should be saved as the final disposition of this charge.\n                    self.disposition = charge.disposition\n                    self.disposition_date = getattr(charge, \"disposition_date\", None)\n\n        return self\n\n    def is_unresolved(self) -> bool:\n        \"\"\"Is this charge still an active charge? \n\n\n        \"\"\"\n        unresolved_dispositions = re.compile(\"proceed to court\", re.I)\n        if (\n            self.disposition == \"\"\n            or self.disposition is None\n            or unresolved_dispositions.search(self.disposition)\n        ):\n            return True\n        else:\n            return False\n\n    def is_conviction(self) -> bool:\n        \"\"\"Is this charge a conviction?\n\n        There are lots of different dispositions, and this helps identify if a disp. counts as a conviction or not.\n        \"\"\"\n        if self.disposition is None:\n            # logger.warning(\"No disposition.\")\n            return False\n        if re.match(\"^Guilty\", self.disposition.strip()) or re.match(\n            \"nolo contendere\", self.disposition.strip(), re.I\n        ):\n            return True\n        else:\n            return False\n\n    def get_statute_chapter(self) -> Optional[float]:\n        \"\"\" Get the Chapter in the PA Code that this charge is related to. \n        \"\"\"\n        patt = re.compile(r\"^(?P<chapt>\\d+)\\s*§\\s(?P<section>
\\d+).*\")\n        match = patt.match(self.statute)\n        if match:\n            return float(match.group(\"chapt\"))\n        else:\n            return None\n\n    def get_statute_section(self) -> Optional[float]:\n        \"\"\" Get the Statute section of the PA code, to which this charge is related.\n        \"\"\"\n        patt = re.compile(r\"^(?P<chapt>\\d+)\\s*§\\s(?P<section>
\\d+\\.?\\d*).*\")\n        match = patt.match(self.statute)\n        if match:\n            return float(match.group(\"section\"))\n        else:\n            return None\n\n    def get_statute_subsections(self) -> str:\n        \"\"\" Get the subsection, if any, to which this charge relates\n        \"\"\"\n        patt = re.compile(\n            r\"^(?P<chapt>\\d+)\\s*§\\s(?P<section>
\\d+\\.?\\d*)\\s*§§\\s*(?P<subsections>[\\(\\)A-Za-z0-9\\.\\*]+)\\s*.*\"\n        )\n        match = patt.match(self.statute)\n        if match:\n            return match.group(\"subsections\")\n        else:\n            return \"\"\n\n","sub_path":"RecordLib/crecord/charge.py","file_name":"charge.py","file_ext":"py","file_size_in_byte":7555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"544796863","text":"import pytest\nimport src.exercise\n\ndef test_exercise():\n    input_values = [\"Hello there!\"]\n    output = []\n\n    def mock_input(s):\n        output.append(s)\n        return input_values[0]\n\n    src.exercise.input = mock_input\n    src.exercise.print = lambda s : output.append(s)\n\n    src.exercise.main()\n\n    assert output == ['Write a message...', input_values[0]]\n","sub_path":"tests/test_exercise.py","file_name":"test_exercise.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"173216365","text":"\r\nfrom flask import Flask, json, request\r\nimport file_utils\r\n\r\n\r\napp = Flask(__name__)\r\n\r\nconf = file_utils.get_conf()\r\n\r\nprint(conf)\r\n\r\n@app.route(\"/\")\r\ndef hello():\r\n    return \"Hello World!123\"\r\n\r\n@app.route('/publish/<file_id>', methods=['GET'])\r\ndef get_gis_url(file_id):\r\n    try:\r\n        publish_state = file_utils.read_json(conf['data_path'], file_id, 'publish.json')\r\n        return json.jsonify(publish_state)\r\n    except :\r\n        return \"NOT found -- by Flask\", 404\r\n\r\n@app.route('/publish', methods=['POST'])\r\ndef file_publish():\r\n    print(request.values)\r\n    if 'file' in request.files :\r\n        afile = request.files['file']\r\n        if not ('.' in afile.filename and afile.filename.rsplit('.', 1)[1] == 'zip'):\r\n            return 'NOT allowed filetype'\r\n        file_utils.save_zip_file(afile, root_path = conf['data_path'])\r\n    else:\r\n        fileurl = request.values.get('fileurl', '')\r\n        file_utils.download_file(fileurl, root_path = conf['data_path'])\r\n\r\n@app.route('/publish/<fileid>', methods=['DELETE'])\r\ndef del_file(fileid):\r\n    pass\r\n\r\n@app.route('/fakepage')\r\ndef fake():\r\n    print('ya...fake')\r\n    return 'U r redirected here.'\r\n\r\n@app.route('/arcgis/fakepage')\r\ndef fake2():\r\n    print('ya...fake222')\r\n    return 'U r redirected here222.'\r\n\r\n\r\n\r\n\r\n# . 
venv/Scripts/activate\r\n# python dev_server.py\r\nif __name__ == \"__main__\":\r\n app.run()\r\n","sub_path":"python_services/gis/publish_app.py","file_name":"publish_app.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146305308","text":"\n\n# The LDA algorithm\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn.datasets as dt\nfrom scipy import linalg as la\n\ndef lda(data,labels,redDim):\n\n # Centre data\n data -= data.mean(axis=0)\n nData = np.shape(data)[0]\n nDim = np.shape(data)[1]\n \n Sw = np.zeros((nDim,nDim))\n Sb = np.zeros((nDim,nDim))\n \n C = np.cov(np.transpose(data))\n \n # Loop over classes\n classes = np.unique(labels)\n for i in range(len(classes)):\n # Find relevant datapoints\n indices = np.squeeze(np.where(labels==classes[i]))\n d = np.squeeze(data[indices,:])\n classcov = np.cov(np.transpose(d))\n Sw += np.float(np.shape(indices)[0])/nData * classcov\n \n Sb = C - Sw\n # Now solve for W and compute mapped data\n # Compute eigenvalues, eigenvectors and sort into order\n evals,evecs = la.eig(Sw,Sb)\n indices = np.argsort(evals)\n indices = indices[::-1]\n evecs = evecs[:,indices]\n evals = evals[indices]\n w = evecs[:,:redDim]\n newData = np.dot(data,w)\n return newData,w\n\n\niris = dt.load_iris()\ndata = iris.data\ntarget = iris.target\n\nnew_data, w = lda(data, target, 2)\n\nplt.figure(1)\nplt.scatter(new_data[:,0], new_data[:,1], c=target)\nplt.show()","sub_path":"lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"23371542","text":"#~ # coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport pafy\nfrom six.moves.urllib.request import urlopen\n\nfrom .. 
import errors\nfrom ..base import FS\nfrom ..enums import ResourceType\nfrom ..info import Info\nfrom ..iotools import RawWrapper\n\n\nclass YoutubeFS(FS):\n    \n    \"\"\"A filesystem for reading Youtube Playlists and Videos.\n\n    Arguments:\n        url (str): The YouTube URL for a Playlist or a Video\n\n    \"\"\"\n\n    _meta = {\n        'case_insensitive': False,\n        'invalid_path_chars': '\\0\"\\[]+|<>=;?*',\n        'network': True,\n        'read_only': True,\n        'thread_safe': True,\n        'unicode_paths': True,\n        'virtual': False,\n    }\n\n    def __init__(self, url, playlist=True):\n        super(YoutubeFS, self).__init__()\n        self.playlist = playlist\n        self.url = url\n        self._cache = {}\n        if playlist:\n            self._title = pafy.get_playlist(self.url)['title']\n        else:\n            self._title = pafy.new(self.url).title\n\n    def __str__(self):\n        return 'YoutubeFS: %s' % self._title\n\n    def _get_name(self, pafyobj):\n        name = '%s.%s' % (pafyobj.title, pafyobj.getbest().extension)\n        name = name.replace('/', '')\n        name = name.replace('\\\\', '')\n        return name\n\n    def listdir(self, path):\n        _path = self.validatepath(path)\n\n        if _path in [u'.', u'/', u'./']:\n            if self.playlist:\n                parser = pafy.get_playlist(self.url)\n                outlist = []\n                for entry in parser['items']:\n                    name = self._get_name(entry['pafy'])\n                    self._cache[self.validatepath(u'/%s' % name)] = entry['playlist_meta']['encrypted_id']\n                    outlist.append(u'%s' % name)\n                return outlist\n            else:\n                parser = pafy.new(self.url)\n                name = self._get_name(parser)\n                self._cache[self.validatepath(u'/%s' % name)] = self.url\n                return [name]\n        else:\n            if _path in self._cache:\n                raise errors.DirectoryExpected(path)\n            else:\n                raise errors.ResourceNotFound(path)\n\n    def getinfo(self, path, namespaces=None):\n        _path = self.validatepath(path)\n        namespaces = namespaces or ('basic')\n\n        if _path in [u'', u'.', u'/', u'./']:\n\n            info = Info({\n                \"basic\":\n                {\n                    \"name\": '',\n                    \"is_dir\": True\n                },\n                \"details\":\n                {\n                    \"type\": int(ResourceType.directory)\n                }\n            })\n            return info\n        else:\n            if _path in self._cache:\n                name = _path[1:]\n                pafyobj = pafy.new(self._cache[_path])\n                if not 'details' in namespaces:\n                    info = Info({\n                        \"basic\":\n                        {\n                            \"name\": name,\n                            \"is_dir\": False\n                        }})\n                else:\n                    stream = pafyobj.getbest()\n                    info = Info({\n                        \"basic\":\n                        {\n                            \"name\": pafyobj.title,\n                            \"is_dir\": False\n                        },\n                        \"details\":\n                        {\n                            \"type\": int(ResourceType.file),\n                            \"size\":stream.get_filesize(),\n                        }\n                    })\n                return info\n            else:\n                raise errors.ResourceNotFound(path)\n\n    def openbin(self, path, mode=u'r', *args, **kwargs):\n        _path = self.validatepath(path)\n\n        if mode == 'rt':\n            raise ValueError('rt mode not supported in openbin')\n\n        if mode == 'h':\n            raise ValueError('h mode not supported in openbin')\n\n        if not 'r' in mode:\n            raise errors.Unsupported()\n\n        try:\n            pafyobj = pafy.new(self._cache[_path])\n            url = pafyobj.getbest().url\n            response = urlopen(url)\n        except:\n            raise errors.ResourceNotFound(path)\n\n        class HTTPFile(RawWrapper):\n\n            def writable(self):\n                return False\n\n            def seekable(self):\n                return False\n\n            def flush(self):\n                return\n\n        return HTTPFile(response, mode=mode, *args, **kwargs)\n\n    def makedir(self, *args, **kwargs):\n        raise errors.Unsupported()\n\n    def remove(self, *args, **kwargs):\n        raise errors.Unsupported()\n\n    def removedir(self, *args, **kwargs):\n        raise errors.Unsupported()\n\n    def setinfo(self, *args, **kwargs):\n        raise 
errors.Unsupported()\n","sub_path":"fs/youtube/youtubefs.py","file_name":"youtubefs.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364930599","text":"from datetime import date, timedelta, datetime\nimport time\nimport requests\nimport json\n\ndef convert(s):\n\tcurrent_time = datetime.now()\n\tnew_time = current_time\n\ts = s.split(' ')\n\tif s[1][0:3]==\"day\":\n\t\tnew_time = current_time - timedelta(days=int(s[0]))\n\telif s[1][0:4]==\"week\":\n\t\tnew_time = current_time - timedelta(weeks=int(s[0]))\n\telif s[1][0:4]==\"hour\":\n\t\tnew_time = current_time - timedelta(hours=int(s[0]))\n\telif s[1][0:4]==\"year\":\n\t\tnew_time = current_time - timedelta(days=365*int(s[0]))\n\telif s[1][0:5]==\"month\":\n\t\tnew_time = current_time - timedelta(days=30*int(s[0]))\n\telif s[1][0:6]==\"second\":\n\t\tnew_time = current_time - timedelta(seconds=int(s[0]))\n\telif s[1][0:6]==\"minute\":\n\t\tnew_time = current_time - timedelta(minutes=int(s[0]))\n\treturn str(new_time)\n\ndef set_time_field(qdict):\n\tqdict['postedOn'] = convert(qdict['postedOn']).replace(\" \",\" T\")[:-7]\n\treturn qdict\n\ndef set_category_field(new_json):\n\tcategory = new_json['subCategory']\n\tflag = 0\n\tif category==\"Garbage\" or category==\"Garbage and Unsanitary Practices - Others\":\n\t\tnew_json[\"myCategory\"] = \"Garbage\"\n\t\tflag = 1\n\tif category==\"Bad Roads\" or category==\"Maintenance of Roads and Footpaths - Others\" or category==\"Potholes\" or category==\"Footpaths\":\n\t\tnew_json[\"myCategory\"] = \"Bad Roads and Footpath\"\n\t\tflag = 1\n\tif category==\"Need New Streetlights\" or category==\"Repair of streetlights\":\n\t\tnew_json[\"myCategory\"] = \"Streetlights\"\n\t\tflag = 1\n\tif category==\"Maintenance of Lakes\" or category==\"Lakes - Others\":\n\t\tnew_json[\"myCategory\"] = \"Lakes\"\n\t\tflag = 1\n\tif category==\"Trees Parks and Playgrounds - Others\" or category==\"Parks and playgrounds\":\n\t\tnew_json[\"myCategory\"] = \"Trees, Parks and Playgrounds\"\n\t\tflag = 1\n\tif category==\"Overflow of Storm Water Drains\" or category==\"Flooding of Roads and Footpaths\" or category==\"No Sewage Drains\" or category==\"Sewage and Storm Water Drains - Others\":\n\t\tnew_json[\"myCategory\"] = \"Sewage Drains\"\n\t\tflag = 1\n\tif category==\"Water Supply\":\n\t\tnew_json[\"myCategory\"] = \"Water Supply\"\n\t\tflag = 1\n\tif category==\"Water Leakage\":\n\t\tnew_json[\"myCategory\"] = \"Water Leakage\"\n\t\tflag = 1\n\tif category==\"Electricity and Power Supply – Others\" or category==\"Electricity\":\n\t\tnew_json[\"myCategory\"] = \"Electricity\"\n\t\tflag = 1\n\tif category==\"Illegal posters and Hoardings\" or category==\"Hoardings\":\n\t\tnew_json[\"myCategory\"] = \"Hoardings\"\n\t\tflag = 1\n\tif category==\"Air Pollution\":\n\t\tnew_json[\"myCategory\"] = \"Air Pollution\"\n\t\tflag = 1\n\tif category==\"Noise Pollution\":\n\t\tnew_json[\"myCategory\"] = \"Noise Pollution\"\n\t\tflag = 1\n\tif category==\"Mosquitos\":\n\t\tnew_json[\"myCategory\"] = \"Mosquitos\"\n\t\tflag = 1\n\tif category==\"Stray Dogs\":\n\t\tnew_json[\"myCategory\"] = \"Stray Dogs\"\n\t\tflag = 1\n\tif flag==0:\n\t\tnew_json[\"myCategory\"] = new_json[\"subCategory\"]\n\treturn new_json","sub_path":"utils_icmc.py","file_name":"utils_icmc.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556900339","text":"from 
tkinter import filedialog\nfrom PIL import *\nfrom PIL import Image\nimport numpy as np\nimport PIL\nimport tkinter as tk\nfrom PIL import ImageTk\nimport ImageReader as ir\n\nmethod = ir.HU\n\ndef searchdirectory(img, imgbox, dirtextbox, ermsgbox=None, size=(300, 400)):\n    directory = filedialog.askopenfilename()\n    dirtextbox.delete(0, len(dirtextbox.get()))\n    dirtextbox.insert(0, directory)\n    loaddirectory(img, dirtextbox.get(), imgbox, ermsgbox=ermsgbox, size=size)\n\ndef reset():\n    size = 0\n\n    if method == ir.HU:\n        size = 7\n    elif method == ir.R:\n        size = 10\n    elif method == ir.ZERNIKE:\n        size = 12\n    momentsdb = np.zeros(shape=(1, 36, size), dtype=float)\n\n    np.save(method, momentsdb)\n\n\ndef loaddirectory(imge, dir, imgbox, ermsgbox=None, size=(300, 400)):\n    try:\n        pilimg = Image.open(dir)\n        pilimg.thumbnail(size, PIL.Image.ANTIALIAS)\n        imge = ImageTk.PhotoImage(pilimg)\n        imgbox.config(image=imge)\n        imgbox.image = imge\n        if isinstance(ermsgbox, tk.Label):\n            ermsgbox.config(text=\"Successfully opened file\", fg=\"green\")\n    except:\n        if isinstance(ermsgbox, tk.Label):\n            ermsgbox.config(text=\"CANNOT OPEN FILE\", fg=\"red\")\n\n\ndef launch():\n    global method\n    global dirtextbox\n    try:\n        im = Image.open(dirtextbox.get())\n        print(\"working...\")\n        ir.ImageReader().launch(dirtextbox.get(), methoddb=method)\n    except: # TODO: make exception specific\n        direrrmsg.config(text=\"CANNOT OPERATE ON FILE\", fg=\"red\")\n\n\ndef savesample(chardirtextbox, character):\n    image = Image.open(chardirtextbox.get())\n    imagereader = ir.ImageReader()\n    image = imagereader.to_greyscale(image)\n    rectangles = imagereader.rectangles(imagereader.blob_coloring_8_connected(np.asarray(image)))\n    charimg = image.crop(\n        (rectangles[0][1] - 1, rectangles[0][0] - 1, rectangles[0][3] + 2, rectangles[0][2] + 2)).resize(\n        ir.SAMPLE_SHAPE)\n    #charimg.show() # show greyscale image, unnecessary.\n    imagereader.storesample(np.asarray(charimg), character, methoddb=method)\n    print(\"Saved\", character)\n\n\ndef onselect(evt, textbox):\n    w = evt.widget\n    index = int(w.curselection()[0])\n    value = w.get(index)\n    number = ir.ImageReader().getsamplecount(value)\n    text = value + \" has \" + str(number) + \" samples stored.\"\n    textbox.config(text=text)\n\n\ndef savesampleset(chardirtextbox, orderbox):\n    order = orderbox.get()\n    image = Image.open(chardirtextbox.get())\n    imagereader = ir.ImageReader()\n    image = imagereader.to_greyscale(image)\n    rectangles = imagereader.rectangles(imagereader.blob_coloring_8_connected(np.asarray(image)))\n    print(len(rectangles))\n    for i in range(len(rectangles)):\n        charimg = image.crop(\n            (rectangles[i][1] - 1, rectangles[i][0] - 1, rectangles[i][3] + 2, rectangles[i][2] + 2)).resize(\n            ir.SAMPLE_SHAPE)\n        imagereader.storesample(np.asarray(charimg), order[i], methoddb=method)\n    print(\"Saved set\")\n\n\ndef options_window(): # use .state to check if it is already up\n    global charimg\n    options = tk.Toplevel(root)\n\n    scrollframe = tk.Frame(options)\n    infoframe = tk.Frame(options, padx=20)\n\n    scrollbar = tk.Scrollbar(scrollframe)\n    characterslist = tk.Listbox(scrollframe, yscrollcommand=scrollbar.set)\n    for i in range(10):\n        characterslist.insert(tk.END, str(int(i)))\n    for i in range(26):\n        characterslist.insert(tk.END, chr(65 + int(i)))\n\n    scrollbar.config(command=characterslist.yview)\n\n    characterslist.pack(side=tk.LEFT, fill=tk.BOTH)\n    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n\n    infotext = tk.Label(infoframe)\n    characterslist.bind('<<ListboxSelect>>', lambda e: onselect(e, infotext))\n    infotext.pack()\n\n
    middleframe = tk.Frame(options)\n\n    searchframe = tk.Frame(middleframe)\n\n    charimgframe = tk.Frame(options, height=50, width=50, padx=40)\n\n    pilimg1 = Image.open(\"sample.jpg\")\n    pilimg1.thumbnail(ir.SAMPLE_SHAPE, PIL.Image.ANTIALIAS)\n    charimg = ImageTk.PhotoImage(pilimg1)\n    charimagebox = tk.Label(charimgframe, image=charimg)\n    charimagebox.image = charimg # keep img as reference to avoid a certain bug\n    charimagebox.pack(expand=1)\n\n    chardirtextbox = tk.Entry(searchframe, text=\"\")\n    chardirselect = tk.Button(searchframe, text=\"Search\",\n                              command=lambda: searchdirectory(\"charimg\", charimagebox, chardirtextbox,\n                                                              ermsgbox=chardirerrmsg, size=ir.SAMPLE_SHAPE))\n    chardirload = tk.Button(searchframe, text=\"Load\",\n                            command=lambda: loaddirectory(\"charimg\", dirtextbox, chardirtextbox.get(),\n                                                          ermsgbox=chardirerrmsg, size=ir.SAMPLE_SHAPE))\n    chardirerrmsg = tk.Label(searchframe, text=\"\")\n\n    chardirtextbox.grid(row=0, column=0)\n    chardirselect.grid(row=0, column=1)\n    chardirload.grid(row=0, column=2)\n    chardirerrmsg.grid(row=1, column=0)\n\n    searchframe.pack()\n    methodstropt = tk.StringVar(launchframe)\n    methodstropt.set(\"HU\")\n    methodselectopt = tk.OptionMenu(middleframe, methodstropt, \"HU\", \"R\", \"ZERNIKE\", command=method_select)\n    savebutton1 = tk.Button(middleframe, text=\"Save sample\", command=lambda: savesample(chardirtextbox, characterslist.get(characterslist.curselection())))\n    orderbox = tk.Entry(middleframe)\n    orderbox.insert(tk.END, \"ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\")\n    infotext2 = tk.Label(middleframe, text=\"Enter the order of characters below\")\n    savebutton2 = tk.Button(middleframe, text=\"Save sample set\", command=lambda: savesampleset(chardirtextbox, orderbox))\n    methodselectopt.pack()\n    savebutton1.pack()\n    savebutton2.pack()\n    infotext2.pack()\n    orderbox.pack()\n    resetbutton = tk.Button(options, text=\"Reset samples\", command=lambda: reset())\n\n    scrollframe.grid(row=0, column=0)\n    infoframe.grid(row=0, column=1)\n    charimgframe.grid(row=0, column=2)\n    resetbutton.grid(row=1, column=2)\n    middleframe.grid(row=0, column=3)\n\n\ndef method_select(value):\n    global method\n    if value == \"HU\":\n        method = ir.HU\n    elif value == \"R\":\n        method = ir.R\n    elif value == \"ZERNIKE\":\n        method = ir.ZERNIKE\n\n\nroot = tk.Tk()\n\n# #\n# USER FRAME #\n# #\nuserframe = tk.Frame(root)\n\n# #\n# LAUNCH FRAME #\n# #\nlaunchframe = tk.Frame(userframe)\n\n# #\n# DIRECTORY FRAME #\n# #\ndirframe = tk.Frame(launchframe)\ndirtextbox = tk.Entry(dirframe, text=\"\")\ndirselect = tk.Button(dirframe, text=\"Search\",\n                      command=lambda: searchdirectory(\"img\", imagebox, dirtextbox, ermsgbox=direrrmsg))\ndirload = tk.Button(dirframe, text=\"Load\",\n                    command=lambda: loaddirectory(\"img\", dirtextbox.get(), imagebox, ermsgbox=direrrmsg))\ndirerrmsg = tk.Label(dirframe, text=\"\")\n\ndirtextbox.grid(row=0, column=0)\ndirselect.grid(row=0, column=1)\ndirload.grid(row=0, column=2)\ndirerrmsg.grid(row=1, column=0)\n\ndirframe.grid(row=0, columnspan=2)\n\nmethodstr = tk.StringVar(launchframe)\nmethodstr.set(\"HU\")\n\nmethodselect = tk.OptionMenu(launchframe, methodstr, \"HU\", \"R\", \"ZERNIKE\", command=method_select)\nlaunchbutton = tk.Button(launchframe, text=\"Launch\", command=lambda: launch())\n\nmethodselect.grid(row=1, column=0)\nlaunchbutton.grid(row=1, column=1)\n\nlaunchframe.pack()\n\nbottomframe = tk.Frame(userframe)\n\noptionsframe = tk.Frame(bottomframe, bd=20)\noptionsbutton = tk.Button(optionsframe, text=\"Edit lookup database\", command=lambda: 
options_window())\noptionsbutton.pack()\noptionsframe.grid(row=0, column=0)\nbottomframe.pack()\n\n# #\n# IMAGE FRAME #\n# #\nimgframe = tk.Frame(root, height=400, width=300)\npilimg = Image.open(\"sample.jpg\")\npilimg.thumbnail((300, 400), PIL.Image.ANTIALIAS)\nimg = ImageTk.PhotoImage(pilimg)\nimagebox = tk.Label(imgframe, image=img)\nimagebox.image = img # keep img as reference to avoid a certain bug\n\n\nimagebox.pack(expand=1)\nimgframe.pack_propagate(False)\n\nimgframe.grid(row=0, column=0)\nuserframe.grid(row=0, column=1)\n# imagebox.bind('', func)\nroot.mainloop()\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106656318","text":"\"\"\"Base class for frustration\"\"\"\n\nimport math\nimport numpy as np\nfrom collections import deque, Counter\n\nclass Frustration(object):\n def __init__(self, hp):\n self.hp = hp\n self.value = 0.\n self.mem_size = hp.mem_size\n self.table = [None]\n self.count = 0\n self.tau = 0.0001\n self.jump = False\n self.limit = 0.1\n\n def update(self, score, top_score):\n score = score.item()\n top_score = top_score.item()\n self.append_table(top_score)\n self.update_count()\n self.set_tau()\n if self.tau != 0.0001:\n self.set_value()\n self.set_jump(score)\n else:\n self.jump = False\n\n def append_table(self, item):\n if self.table[-1] == item:\n self.table.append(item)\n else:\n self.table = [item]\n\n def update_count(self):\n self.count = len(self.table)\n\n def set_tau(self):\n \"\"\"Return most common n element (n=1) as a list. First entry in list is\n at index 0.\n \"\"\"\n if self.count > self.mem_size:\n r = self.count/self.mem_size\n self.tau = r-1.\n else:\n self.tau = 0.0001\n\n def set_value(self):\n \"\"\"Sets the frustration value based on tau. The function has a slow\n slope, and then rises until 0.9. 
It's a function with attractive\n        properties in range[0, 1].\n        \"\"\"\n        argument = (4*self.tau)-2.0\n        exp1 = math.tanh(argument)+1\n        self.value = exp1*0.5\n\n    def set_jump(self, score):\n        \"\"\"As the frustration increases, the probability of a \"jump\" increases\n        thus getting unstuck.\n        \"\"\"\n        p0 = 1.-self.value\n        p1 = self.value\n        np.random.seed()\n        jump = np.random.choice([0, 1], 1, p=[p0, p1])\n        self.jump = bool(jump) # Convert float to boolean\n        #if jump:\n        #    diff = abs(score-self.table[-1])/abs(self.table[-1])\n        #    self.jump = diff <= self.limit\n        #else:\n        #    self.jump = jump\n\n\n\n#\n","sub_path":"backend/algorithms/neuro1_backend/frustration.py","file_name":"frustration.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"86760436","text":"from django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom resources.models import Resource\n\n\nclass Comment(models.Model):\n    resource = models.ForeignKey(Resource, on_delete=models.CASCADE)\n    body = models.TextField()\n    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        ordering = ('-created_at',)\n\n    def __str__(self):\n        return '{user} - {created_at}'.format(user=self.created_by, created_at=self.created_at)\n\n\nclass Report(models.Model):\n    body = models.TextField()\n    comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n    reviewed = models.BooleanField(default=False)\n    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n    created_at = models.DateTimeField(default=timezone.now)\n","sub_path":"apps/comments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}\n{"seq_id":"243324193","text":"import math\nimport numpy as np\nimport os\nimport sys\nimport IPython\nimport shutil\nimport random\n\n\ndef delete_all_dir_file(path):\n    if not os.path.isdir(path):\n        os.makedirs(path)\n        print(\"Make dir to \" + path)\n    else:\n        filelist = os.listdir(path) # list all file names in this directory\n        for f in filelist:\n            filepath = os.path.join(path, f) # map the file name to an absolute path\n            if os.path.isfile(filepath): # check whether it is a file or a folder\n                os.remove(filepath) # if it is a file, delete it directly\n                print(str(filepath) + \" removed!\")\n            # else:\n            #     # if it is a folder\n            #     shutil.rmtree(filepath)\n            #     print(str(filepath) + \" removed!\")\n    print(\"remove all old files in \" + path)\n\n\ndef delete_all_folder(path):\n    if not os.path.isdir(path):\n        os.makedirs(path)\n        print(\"Make dir to \" + path)\n    else:\n        filelist = os.listdir(path) # list all file names in this directory\n        for f in filelist:\n            filepath = os.path.join(path, f) # map the file name to an absolute path\n            # if os.path.isfile(filepath): # check whether it is a file or a folder\n            #     os.remove(filepath) # if it is a file, delete it directly\n            #     print(str(filepath) + \" removed!\")\n            if not os.path.isfile(filepath):\n                # if it is a folder\n                shutil.rmtree(filepath)\n                print(str(filepath) + \" removed!\")\n    print(\"remove all old files in \" + path)\n\ndef extract_ground_points_from_plane_file(pointcloud, planefile, threshold):\n    [a, b, c, d] = np.loadtxt(planefile)\n    plane = np.array([a, b, c, d])\n    pointcloud = np.array(pointcloud)\n    #\n    # ground = []\n    # deground = []\n    # for i in range(pointcloud.shape[0]):\n    #     x = pointcloud[i][0]\n    #     y = pointcloud[i][1]\n    #     z = pointcloud[i][2]\n    #     dis = abs(a * x + b * y + c * z + d) / math.sqrt(a * a 
+ b * b + c * c)\n # if dis < threshold:\n # ground.append([x, y, z])\n # else:\n # deground.append([x, y, z])\n pc = np.append(pointcloud, np.ones((pointcloud.shape[0], 1)), axis=1)\n dis_matrix = np.dot(pc, plane) / math.sqrt(a * a + b * b + c * c)\n # deground_idx = np.where(dis_matrix > threshold)\n # deground_points = pointcloud[deground_idx]\n\n ground_idx = np.where(dis_matrix <= threshold)\n ground_points = pointcloud[ground_idx]\n\n return ground_points, ground_idx\n\n\ndef find_points_idx_in_bbox(pointcloud, bbox_center, bbox_size, bbox_angle, T_cam_velo, cover_ground=False):\n # if cover_ground=False --> idx_y has - 0.5 i\n THRESHOLD = [0.6, 0.3, 0.7]\n [l, w, h] = bbox_size + THRESHOLD\n pointcloud_dif = pointcloud - bbox_center\n pointcloud_trans = np.transpose(np.dot(roty4(-1 * bbox_angle), np.dot(np.linalg.inv(T_cam_velo),\n np.transpose(np.append(pointcloud_dif, np.ones((pointcloud_dif.shape[0], 1)), axis=1)))))[:, :3]\n idx_x = np.logical_and(pointcloud_trans[:, 0] <= l / 2.0, pointcloud_trans[:, 0] >= -l / 2.0)\n idx_y = np.logical_and(pointcloud_trans[:, 1] <= w / 2.0 - 0.5, pointcloud_trans[:, 1] >= -w / 2.0)\n idx_z = np.logical_and(pointcloud_trans[:, 2] <= h / 2.0, pointcloud_trans[:, 2] >= -h / 2.0)\n idx = np.logical_and(idx_x, np.logical_and(idx_y, idx_z))\n return idx\n\n\ndef rotx(t):\n \"\"\"Rotation about the x-axis.\"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[1, 0, 0],\n [0, c, -s],\n [0, s, c]])\n\n\ndef roty(t):\n \"\"\"Rotation about the y-axis.\"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n\n\ndef roty4(t):\n \"\"\"Rotation about the y-axis.\"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s, 0],\n [0, 1, 0, 0],\n [-s, 0, c, 0],\n [0, 0, 0, 1]])\n\n\ndef rotz(t):\n \"\"\"Rotation about the z-axis.\"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n\n\ndef filter_camera_angle(points):\n \"\"\"\n Filter camera angles (45 degrees) for KiTTI Datasets\n inputs:\n points (np.array): [#points, >=3]\n orders: [x,y,z]\n T_cam_velo: transformation matrix from cam to lidar\n return:\n pts in the camera angle (45degrees) (lidar frame)\n \"\"\"\n bool_in = np.logical_and((points[:, 1] < points[:, 0]), (-points[:, 1] < points[:, 0]))\n\n return points[bool_in]\n\n\ndef angle2class(angle, num_class):\n ''' Convert continuous angle to discrete class\n [optinal] also small regression number from\n class center angle to current angle.\n\n angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... 
(N-1)*(2pi/N)\n return is class of int32 of 0,1,...,N-1 and a number such that\n class*(2pi/N) + number = angle\n '''\n angle = angle % (2 * np.pi)\n assert (angle >= 0 and angle <= 2 * np.pi)\n angle_per_class = 2 * np.pi / float(num_class)\n shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi)\n class_id = int(shifted_angle / angle_per_class)\n residual_angle = shifted_angle - (class_id * angle_per_class + angle_per_class / 2)\n return class_id, residual_angle\n\n\ndef class2angle(pred_cls, residual, num_class, to_label_format=True):\n ''' Inverse function to angle2class '''\n angle_per_class = 2 * np.pi / float(num_class)\n angle_center = pred_cls * angle_per_class\n angle = angle_center + residual\n if to_label_format and angle > np.pi:\n angle = angle - 2 * np.pi\n return angle\n\n\ndef get_calibration(calibfile):\n ''' Read in a calibration file and parse into a dictionary.\n Ref: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py\n '''\n data = {}\n with open(calibfile, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n\n try:\n key, value = line.split(':', 1)\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n except ValueError:\n key = line.split(\" \")[0]\n value = line.split(\" \")[1:]\n try:\n data[key] = np.array([float(x) for x in value])\n except ValueError:\n pass\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n\n T_velo_cam = data['Tr_velo_cam'].reshape((3, 4))\n R_rect = data['R_rect'].reshape((3, 3))\n T_velo_cam = np.dot(R_rect, T_velo_cam)\n T_velo_cam = np.append(T_velo_cam, [[0, 0, 0, 1]], axis=0)\n T_cam_velo = np.linalg.inv(T_velo_cam)\n return data, T_cam_velo\n\n\ndef load_bin(file_name):\n scan = np.fromfile(file_name, dtype=np.float32)\n scan = scan.reshape((-1, 4))\n point_cloud_array = scan[:, :3]\n # pc = o3d.geometry.PointCloud()\n # pc.points = o3d.utility.Vector3dVector(point_cloud_array)\n return point_cloud_array\n\n\ndef load_kitti_tracking_label(self, label_filename, frame):\n lines = [line.rstrip() for line in open(label_filename)]\n label = []\n for i in range(len(lines)):\n frame_i = int(lines[i].split(' ')[0])\n label_i = read_line(lines[i])\n if frame_i == frame and label_i['type'] in self.type_whitelist:\n label.append(label_i)\n return label\n\n\ndef get_color_vec(color='green', point_num=0):\n color_dict = {'green': 1,\n 'blue': 2,\n 'red': 3}\n color_arr = np.zeros((point_num, 3))\n if color_dict[color] == 1:\n color_arr[:, 1] = 1\n elif color_dict[color] == 2:\n color_arr[:, 2] = 1\n else:\n color_arr[:, 0] = 1\n\n return color_arr\n\n\ndef get_random_color_vec(point_num=0):\n color_arr = np.array([random.random(), random.random(), random.random()]) + np.zeros((point_num, 3))\n return color_arr\n\n\ndef read_line(line):\n data = line.split(' ')\n track_id = float(data[1])\n data[0] = data[2] # type\n data[1:] = [float(x) for x in data[3:]]\n\n label = {\n 'track_id': track_id,\n # extract label, truncation, occlusion\n 'type': data[0], # 'Car', 'Pedestrian', ...\n 'truncation': data[1], # truncated pixel ratio [0..1]\n 'occlusion': int(data[2]), # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown\n 'alpha': data[3], # object observation angle [-pi..pi]\n\n # extract 2d bounding box in 0-based coordinates\n 'xmin': data[4], # left\n 'ymin': data[5], # top\n 'xmax': data[6], # right\n 'ymax': data[7], # bottom\n 'box2d': np.array([data[4], data[5], data[6], data[7]]),\n\n # extract 
3d bounding box information\n 'h': data[8], # box height\n 'w': data[9], # box width\n 'l': data[10], # box length (in meters)\n 't': (data[11], data[12], data[13]), # location (x,y,z) in camera coord.\n 'ry': data[14] # yaw angle (around Y-axis in camera coordinates) [-pi..pi]\n }\n return label\n\nif __name__ == '__main__':\n DATA = '/home/skwang/data/KITTI_object_tracking/training'\n\n sequence = fn[0].split('/')[-2]\n labelfile = os.path.join(self.root, \"label_02\", sequence + \".txt\")\n","sub_path":"point_cloud_compression/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534089308","text":"import pygame\nimport random\nimport traceback\nfrom abc import ABCMeta, abstractmethod\n\n\nblack = (0, 0, 0)\ngreen = (0, 255, 0)\nwhite = (255, 255, 255)\nlight_grey = (200, 200, 200)\ngrey = (128, 128, 128)\ndark_grey = (64, 64, 64)\nzone_color = (200, 100, 50)\n\n\ndef random_color():\n return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n\n\nclass Position:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def in_zone(self, zone):\n return (zone[1][0] >= self.x >= zone[0][0]) and (zone[1][1] >= self.y >= zone[0][1])\n\n def __str__(self):\n return f\"({self.x}, {self.y})\"\n\n\nclass RegisterException(Exception):\n pass\n\n\nclass AbstractZoneContent(metaclass=ABCMeta):\n\n @abstractmethod\n def set_zone(self): pass\n\n def set_screen(self, screen):\n self.screen = screen\n\n @abstractmethod\n def update(self): pass\n\n @abstractmethod\n def draw(self): pass\n\n def register(self, methods):\n self.methods = {}\n for method in methods:\n if type(method) == dict:\n self.methods.update(method)\n else:\n self.methods.update({method: None})\n\n for method in self.methods:\n if not hasattr(self, method):\n raise RegisterException(\n f\"Le méthode '{method}' n'existe pas dans la classe: {self.__class__.__name__}\")\n\n def has_registered(self, method):\n return method in self.methods\n\n def on_mouse_enter(self):\n self.mouse_entered = True\n\n def on_mouse_move(self):\n pass\n\n def on_mouse_exit(self):\n self.mouse_entered = False\n\n\nclass AbstractZone(AbstractZoneContent, metaclass=ABCMeta):\n\n @abstractmethod\n def add(self): pass\n\n\nclass Window(AbstractZone):\n\n def __init__(self, screen, bg_color, zone):\n self.screen = screen\n self.bg_color = bg_color\n self.zone = zone\n self.zones = []\n self.rules = []\n self.methods = []\n self.mouse_entered = False\n self.mouse = Position(0, 0)\n\n def set_zone(self, zone):\n self.zone = zone\n self.optimize()\n\n def add(self, nom, zone, rules):\n zone.set_screen(self.screen)\n zone.nom = nom\n self.zones.append(zone)\n self.rules.append(rules)\n\n def get_zone(self, nom):\n for zone in self.zones:\n if zone.nom == nom:\n return zone\n\n def optimize(self):\n\n def iif(condition, si_vrai, si_faux):\n return si_vrai if condition else si_faux\n\n def nfz(fonction, valeur, si_null_ou_erreur):\n try:\n return eval(f\"{fonction}({valeur})\")\n except Exception as e:\n print(\"Error:\", e)\n return si_null_ou_erreur\n\n def wait(formule):\n return formule\n\n def fisactive(nom_zone):\n return self.get_zone(nom_zone).active\n\n def ftop(nom_zone):\n if self.get_zone(nom_zone).active:\n return self.get_zone(nom_zone).zone[0][1]\n else:\n return 0\n\n def fbottom(nom_zone):\n if self.get_zone(nom_zone).active:\n return self.get_zone(nom_zone).zone[1][1]\n else:\n return 0\n\n def 
fleft(nom_zone):\n if self.get_zone(nom_zone).active:\n return self.get_zone(nom_zone).zone[0][0]\n else:\n return 0\n\n def fright(nom_zone):\n if self.get_zone(nom_zone).active:\n return self.get_zone(nom_zone).zone[1][0]\n else:\n return 0\n\n width, height = self.zone[1]\n left, top, right, bottom = 0, 0, 0, 0\n\n for i, zone in enumerate(self.zones):\n if zone.active:\n # print(f\"optimize {zone.nom}\", end=\": \")\n left = int(eval(self.rules[i].get(\"left\", \"0\")))\n top = int(eval(self.rules[i].get(\"top\", \"0\")))\n right = int(eval(self.rules[i].get(\"right\", \"width\")))\n bottom = int(eval(self.rules[i].get(\"bottom\", \"height\")))\n\n new_zone = ((left, top), (right, bottom))\n # print(new_zone, end=\" \")\n zone.set_zone(new_zone)\n # print(\"OK\")\n\n def update(self):\n if pygame.display.get_active():\n for zone in self.zones:\n zone.update()\n\n def draw(self):\n if pygame.display.get_active():\n self.screen.fill(self.bg_color, rect=self.zone)\n for zone in self.zones:\n zone.draw()\n\n def on_click(self):\n resTotal = []\n for zone in self.zones:\n if zone.has_registered(\"on_click\") and self.mouse.in_zone(zone.zone):\n res = zone.on_click()\n if res:\n resTotal += res\n return resTotal\n\n def on_mouse_enter(self):\n self.mouse_entered = True\n\n def on_mouse_move(self, mouse_position):\n self.mouse = Position(*mouse_position)\n for zone in self.zones:\n if self.mouse.in_zone(zone.zone):\n if zone.has_registered(\"on_mouse_enter\") and not zone.mouse_entered:\n zone.on_mouse_enter()\n if zone.has_registered(\"on_mouse_move\"):\n zone.on_mouse_move(mouse_position)\n else:\n if zone.has_registered(\"on_mouse_exit\") and zone.mouse_entered:\n zone.on_mouse_exit()\n\n def on_mouse_exit(self):\n self.mouse_entered = False\n for zone in self.zones:\n if zone.has_registered(\"on_mouse_exit\") and zone.mouse_entered:\n zone.on_mouse_exit()\n\n\nclass Zone(AbstractZone):\n index = 0\n\n def __init__(self, bg_color, zone=None):\n self.index = Zone.index\n Zone.index += 1\n self.objets = []\n self.active = True\n self.color = zone_color\n self.bg_color = bg_color\n self.screen = None\n self.mouse_entered = False\n self.mouse = Position(0, 0)\n self.methods = []\n if zone:\n self.set_zone(zone)\n else:\n self.set_zone(((0, 0), (0, 0)))\n\n def set_zone(self, zone):\n self.zone = zone\n for objet in self.objets:\n objet.set_zone(self.zone)\n\n def add(self, objet):\n objet.set_screen(self.screen)\n objet.set_zone(self.zone)\n self.objets.append(objet)\n\n def update(self):\n if not self.active:\n return\n\n for objet in self.objets:\n objet.update()\n\n def draw(self):\n if not self.active:\n return\n\n left, top = self.zone[0]\n width, height = self.zone[1]\n zone = (self.zone[0], (width-left, height-top))\n\n if self.bg_color:\n self.screen.fill(self.bg_color, rect=zone)\n if self.color:\n pygame.draw.rect(self.screen, self.color, zone, 1)\n for objet in self.objets:\n objet.draw()\n\n def on_click(self):\n resTotal = []\n for objet in self.objets:\n if objet.has_registered(\"on_click\") and self.mouse.in_zone(objet.zone):\n res = objet.on_click()\n if res:\n resTotal += res\n return resTotal\n\n def on_mouse_enter(self):\n self.mouse_entered = True\n\n def on_mouse_move(self, mouse_position):\n self.mouse = Position(*mouse_position)\n for objet in self.objets:\n if self.mouse.in_zone(objet.zone):\n if objet.has_registered(\"on_mouse_enter\") and not objet.mouse_entered:\n objet.on_mouse_enter()\n if objet.has_registered(\"on_mouse_move\"):\n 
objet.on_mouse_move(mouse_position)\n else:\n if objet.has_registered(\"on_mouse_exit\") and objet.mouse_entered:\n objet.on_mouse_exit()\n\n def on_mouse_exit(self):\n for objet in self.objets:\n if objet.has_registered(\"on_mouse_exit\") and objet.mouse_entered:\n objet.on_mouse_exit()\n\n self.mouse_entered = False\n\n\ndef main():\n pygame.init()\n screen = None\n try:\n assert isinstance(Window(screen, 0, 0), AbstractZoneContent), \"Problème de la classe StarField\"\n assert isinstance(Zone(0), AbstractZoneContent), \"Problème de la classe Zones\"\n except Exception:\n traceback.print_exc()\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"jeux/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":8529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"523093630","text":"from merge import merge_sort\nfrom BST import *\nfrom avl import *\n\ndef cross_product(p1, p2):\n return p1[0] * p2[1] - p2[0] * p1[1]\n\ndef difference(p1, p2):\n return (p2[0] - p1[0], p2[1] - p1[1])\n\n# positive result => right turn (clockwise)\n# negative result => left turn (counter-clockwise)\ndef direction(p0, p1, p2):\n return cross_product(difference(p2,p0), difference(p1,p0))\n\nif __name__ == \"__main__\":\n assert direction( (1,1), (2,2), (2,3) ) < 0\n assert direction( (1,1), (2,2), (3,2) ) > 0\n assert direction( (0,1), (2,1), (1,1) ) == 0\n\n# Does segment p1-p2 straddle the line <-p3-p4->?\ndef straddle(p1,p2,p3,p4):\n d1 = direction(p3,p4,p1)\n d2 = direction(p3,p4,p2)\n return (d1 > 0 and d2 < 0) or (d1 < 0 and d2 > 0)\n\nif __name__ == \"__main__\":\n assert straddle((0,0), (2,2), (0,1), (2,1))\n assert straddle((0,1), (2,1), (0,0), (2,2))\n assert not straddle((1,1), (2,1), (0,0), (2,2))\n\ndef on_segment(pi, pj, pk):\n return min(pi[0],pj[0]) <= pk[0] <= max(pi[0],pj[0]) \\\n and min(pi[1],pj[1]) <= pk[1] <= max(pi[1],pj[1])\n\nif __name__ == \"__main__\":\n pass \n\ndef segments_intersect(p1,p2,p3,p4):\n return (straddle(p1,p2,p3,p4) and straddle(p3,p4,p1,p2)) \\\n or (direction(p3,p4,p1) == 0 and on_segment(p3,p4,p1)) \\\n or (direction(p3,p4,p2) == 0 and on_segment(p3,p4,p2)) \\\n or (direction(p1,p2,p3) == 0 and on_segment(p1,p2,p3)) \\\n or (direction(p1,p2,p4) == 0 and on_segment(p1,p2,p4)) \n\nif __name__ == \"__main__\":\n assert segments_intersect((0,0),(2,2), (0,1),(2,1))\n assert segments_intersect((1,1),(2,2), (0,1),(2,1))\n assert not segments_intersect((1,3),(2,2), (0,1),(2,1))\n assert not segments_intersect((2,0),(4,2), (0,1),(2,1))\n\ndef left_end(seg):\n if seg[0][0] <= seg[1][0]:\n return seg[0]\n else:\n return seg[1]\n\ndef right_end(seg):\n if seg[0][0] <= seg[1][0]:\n return seg[1]\n else:\n return seg[0]\n\ndef segment_less(s1, s2):\n left1 = left_end(s1)\n right1 = right_end(s1)\n left2 = left_end(s2)\n right2 = right_end(s2)\n # I'm cheating by using division. 
-Jeremy\n m1 = (right1[1] - left1[1]) / (right1[0] - left1[0])\n m2 = (right2[1] - left2[1]) / (right2[0] - left2[0])\n y1 = left1[1] + m1 * (current_x - left1[0])\n y2 = left2[1] + m2 * (current_x - left2[0])\n return y1 < y2\n\n\ndef create_endpoint(point, other_point):\n assert point[0] != other_point[0]\n if point[0] < other_point[0]:\n side = 0 # left\n else:\n side = 1 # right\n return (point[0], side, point[1])\n\ncurrent_x = 0\n\ndef any_segments_intersect(S):\n global current_x\n endpoints = []\n segment_of = {}\n\n for (p1,p2) in S:\n assert p1[0] != p2[0] # don't allow vertical segments\n e = create_endpoint(p1,p2)\n segment_of[e] = (p1,p2)\n endpoints.append(e)\n\n e = create_endpoint(p2,p1)\n segment_of[e] = (p2,p1)\n endpoints.append(e)\n\n endpoints = merge_sort(endpoints)\n\n # To do: replace with AVLTree. -Jeremy\n T = BinarySearchTree(root=None, less=segment_less)\n #T = AVLTree(root=None, less=segment_less)\n\n for p in endpoints:\n current_x = p[0]\n s = segment_of[p]\n if p[1] == 0: # p is a left endpoint\n sn = T.insert(s)\n above = T.successor(sn)\n below = T.predecessor(sn)\n if above and \\\n segments_intersect(s[0],s[1], above.key[0], above.key[1]):\n return (True, (s[0],s[1]), (above.key[0], above.key[1]))\n if below and \\\n segments_intersect(s[0],s[1], below.key[0], below.key[1]):\n return (True, (s[0],s[1]), (below.key[0], below.key[1]))\n else: # p is a right endpoint\n sn = T.search(s)\n if sn:\n above = T.successor(sn)\n below = T.predecessor(sn)\n if above and below and \\\n segments_intersect(above.key[0], above.key[1],\n below.key[0], below.key[1]):\n return (True, (above.key[0], above.key[1]), (below.key[0], below.key[1]))\n T.delete_node(sn)\n\n return (False, None, None)\n\n\nif __name__ == \"__main__\":\n segments = [((0,0),(2,2)),\\\n ((2,3),(0,4)),\\\n ((3,1),(1,1)),\\\n ((4,4),(5,1))]\n assert any_segments_intersect(segments)[0]\n\n segments = [((1,3),(2,2)), \\\n ((0,1),(2,1)), \\\n ((2,0),(4,2))]\n assert not any_segments_intersect(segments)[0]\n\n segments = [((1,3),(4,2)),\\\n ((3,4),(2,6)),\\\n ((3,8),(6,8)),\\\n ((5,4),(4,4)),\\\n ((4,7),(7,4)),\\\n ((5,5),(7,6)),\\\n ((5,2),(7,5)),\\\n ((5,3),(8,2)),\\\n ((6,2),(9,5))]\n\n assert any_segments_intersect(segments)[0]\n\n # this test case fails\n# segments = [((48, 102), (239, 466)), ((135, 104), (409, 574)), ((251, 114), (501, 556)), ((351, 99), (615, 564)), ((469, 123), (677, 583)), ((560, 403), (607, 71))]\n# print any_segments_intersect(segments)[0]\n\nif __name__ == \"__main__\":\n print('all tests passed!')\n","sub_path":"C343Fall2015-master 2/project3/segment_intersection.py","file_name":"segment_intersection.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301045831","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/devel/thasso/git/github/gemtools/python/test/gem_utils_tests.py\n# Compiled at: 2013-04-17 04:25:01\nimport gem.utils as gu, gem.gemtools as gt\nfrom testfiles import testfiles\nimport tempfile, os, sys\ntest_mapping = testfiles['test.map']\ntest_zipped_mapping = testfiles['test.map.gz']\ntest_fastq = testfiles['test.fastq']\n\ndef test_pipeing_simple_process_with_file_handle():\n of = open(test_mapping, 'rb')\n p = gu.run_tools([['cat', '-'], ['wc', '-l']], input=of)\n lines = 0\n while True:\n s = p.stdout.readline()\n if s is None or len(s) == 
0:\n break\n lines = int(s.strip())\n\n assert lines == 10\n assert p.wait() == 0\n of.close()\n return\n\n\ndef test_pipeing_simple_process_with_file():\n p = gu.run_tools([['cat', '-'], ['wc', '-l']], input=test_mapping)\n lines = 0\n while True:\n s = p.stdout.readline()\n if s is None or len(s) == 0:\n break\n lines = int(s.strip())\n\n assert lines == 10\n assert p.wait() == 0\n return\n\n\ndef test_pipeing_simple_process_with_file_output_file():\n f, out = tempfile.mkstemp()\n os.close(f)\n p = gu.run_tools([['cat', '-'], ['wc', '-l']], input=test_mapping, output=out)\n lines = 0\n assert p.wait() == 0\n assert os.path.exists(out)\n with open(out) as (f):\n lines = int(f.readline().strip())\n assert lines == 10\n os.remove(out)\n\n\ndef test_pipeline_fastaq_input():\n ff = gt.InputFile(test_mapping)\n p = gu.run_tools([['cat', '-']], input=ff, force_debug=True)\n lines = 0\n while True:\n s = p.stdout.readline()\n if s is None or len(s) == 0:\n break\n lines += 1\n\n assert p.wait() == 0\n assert lines == 40\n return\n\n\ndef test_pipeline_map_input():\n ff = gt.InputFile(test_mapping)\n p = gu.run_tools([['cat', '-']], input=ff, force_debug=True, write_map=True, clean_id=True)\n lines = 0\n while True:\n s = p.stdout.readline()\n if s is None or len(s) == 0:\n break\n lines += 1\n\n assert p.wait() == 0\n assert lines == 10\n return","sub_path":"pycfiles/Gemtools-1.7.1.tar/gem_utils_tests.py","file_name":"gem_utils_tests.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384323898","text":"#!/usr/bin/python\n\n###################################\n# module: linprog.py\n# Krista Gurney\n# A01671888\n###################################\n\nfrom line_eq import line_eq\nfrom maker import make_line_eq\nfrom maker import make_var, make_const, make_prod\nfrom maker import make_pwr, make_plus\nfrom maker import make_point2d\nfrom const import const\nfrom var import var\nfrom prod import prod\nfrom pwr import pwr\n# from poly12 import is_pwr_1\nfrom plus import plus\nfrom tof import tof\nfrom consts import is_const_line\nimport sys\n\n\n\n### sample line equations\nlneq1 = make_line_eq(make_var('y'),\n make_const(2))\nlneq2 = make_line_eq(make_var('y'),\n make_var('x'))\nlneq3 = make_line_eq(make_var('y'),\n make_var('y'))\nlneq4 = make_line_eq(make_var('y'),\n make_prod(make_const(2.0),\n make_pwr('x', 1.0)))\nlneq5 = make_line_eq(make_var('y'),\n make_prod(make_const(5.0),\n make_pwr('y', 1.0)))\nlneq6 = make_line_eq(make_var('y'),\n make_plus(make_prod(make_const(5.0),\n make_pwr('x', 1.0)),\n make_const(4.0)))\nlneq7 = make_line_eq(make_var('y'),\n make_plus(make_prod(make_const(5.0),\n make_pwr('y', 1.0)),\n make_const(4.0)))\nlneq8 = make_line_eq(make_var('y'),\n make_plus(make_prod(make_const(3.0),\n make_pwr('x', 1.0)),\n make_const(-4.0)))\n\n\ndef line_intersection(lneq1, lneq2):\n # Case 1: 2 const lines\n if is_const_line(lneq1):\n if is_const_line(lneq2):\n if lneq1.get_lhs().get_name() == 'x':\n x = lneq1.get_rhs().get_val()\n y = lneq2.get_rhs().get_val()\n elif lneq1.get_lhs().get_name() == 'y':\n y = lneq1.get_rhs().get_val()\n x = lneq2.get_rhs().get_val()\n else:\n raise Exception('line_intersection: ' + str(lneq1))\n else:\n y = lneq1.get_rhs().get_val()\n x = tof(lneq2.get_rhs())(y)\n elif is_const_line(lneq2):\n #Case 2: 1 const line y = 1 ;y = x -1\n y = lneq2.get_rhs().get_val()\n x = tof(lneq1.get_rhs())(y)\n elif isinstance(lneq1.get_rhs(), pwr):#y = 1x; y = -1x +6\n 
eq1_coeff = get_line_coeffs(lneq1)\n eq2_coeff = get_line_coeffs(lneq2)\n if isinstance(lneq2.get_rhs(), plus):\n if isinstance(lneq2.get_rhs().get_elt2(), const):\n eq2_const = lneq2.get_rhs().get_elt2().get_val()\n x = eq2_const/(eq1_coeff - eq2_coeff)\n y = tof(lneq1.get_rhs())(x)\n elif isinstance(lneq1.get_rhs(), plus):#y = -0.2x+10; y =0.2x+5\n eq1_coeff = get_line_coeffs(lneq1)\n eq2_coeff = get_line_coeffs(lneq2)\n if isinstance(lneq2.get_rhs(), plus):\n # m1*x + b1 = m2*x + b2 => x = (b2 - b1)/(m1 - m2); e.g. -0.2x+10 = 0.2x+5 gives x = 12.5\n x = (lneq2.get_rhs().get_elt2().get_val() - lneq1.get_rhs().get_elt2().get_val())/(eq1_coeff - eq2_coeff)\n y = tof(lneq1.get_rhs())(x)\n else:\n raise Exception(\"Unknown plus equation\")\n elif isinstance(lneq1.get_rhs(), prod):#y = 0.5x; y = -0.75x +3\n eq1_coeff = get_line_coeffs(lneq1)\n eq2_coeff = get_line_coeffs(lneq2)\n if isinstance(lneq2.get_rhs(), plus):\n eq2_const = lneq2.get_rhs().get_elt2().get_val()\n x = eq2_const/(eq1_coeff - eq2_coeff)\n y = tof(lneq1.get_rhs())(x)\n elif isinstance(lneq2.get_rhs(), pwr):#y = -x, y = x\n x = 0.0\n y = 0.0\n else:\n raise Exception(\"Unknown prod equation\")\n\n else:\n raise Exception('line_intersection: ' + 'unknown equations')\n\n return make_point2d(x, y)\n\ndef get_line_coeffs(lneq):\n if isinstance(lneq.get_rhs(), prod):\n if isinstance(lneq.get_rhs().get_mult1(), const):\n return lneq.get_rhs().get_mult1().get_val()\n else:\n raise Exception(\"Unknown product\")\n elif isinstance(lneq.get_rhs(), pwr):\n return 1.0\n elif isinstance(lneq.get_rhs(), plus):\n if isinstance(lneq.get_rhs().get_elt1(), prod):\n if isinstance(lneq.get_rhs().get_elt1().get_mult1(), const):\n return lneq.get_rhs().get_elt1().get_mult1().get_val()\n else:\n raise Exception('Unknown mult1')\n else:\n raise Exception('Unknown prod')\n else:\n raise Exception('Unknown line equation')\n\n\n\ndef maximize_obj_fun(f, corner_points):\n # start from -inf so negative objective values are handled correctly\n currentMax = float('-inf')\n for points in corner_points:\n value = f(points.get_x().get_val(), points.get_y().get_val())\n if value > currentMax:\n currentMax = value\n max_point = make_point2d(points.get_x().get_val(), points.get_y().get_val())\n\n return max_point\n\ndef minimize_obj_fun(f, corner_points):\n # start from +inf instead of an arbitrary large constant\n currentMin = float('inf')\n for points in corner_points:\n value = f(points.get_x().get_val(), points.get_y().get_val())\n if value < currentMin:\n currentMin = value\n min_point = make_point2d(points.get_x().get_val(), points.get_y().get_val())\n\n return min_point\n\n\n## write your answer to problem 1a as x, y, mv\ndef opt_prob_1a():\n f1 = lambda x, y: 2 * x + y\n ln1 = make_line_eq(make_var('x'), make_const(1.0))\n ln2 = make_line_eq(make_var('y'), make_const(1.0))\n ln3 = make_line_eq(make_var('x'), make_const(5.0))\n ln4 = make_line_eq(make_var('y'), make_const(5.0))\n ln5 = make_line_eq(make_var('y'), make_plus(make_prod(make_const(-1.0),\n make_pwr('x', 1.0)),\n make_const(6.0)))\n\n cp_1 = line_intersection(ln1, ln5)\n cp_2 = line_intersection(ln1, ln2)\n cp_3 = line_intersection(ln4, ln5)\n\n corner_points = [cp_1, cp_2, cp_3]\n\n max_xy = maximize_obj_fun(f1, corner_points)\n max_val = f1(max_xy.get_x().get_val(), max_xy.get_y().get_val())\n print(max_xy, max_val)\n\n## write your answer to problem 1b as x, y, mv\ndef opt_prob_1b():\n f1 = lambda x, y: x/2 + y\n ln1 = make_line_eq(make_var('y'), make_const(2.0))\n ln2 = make_line_eq(make_var('x'), make_const(0.0))\n ln3 = make_line_eq(make_var('y'), make_pwr('x', 1.0))\n ln4 = make_line_eq(make_var('y'), make_plus(make_prod(make_const(-1.0),\n make_pwr('x', 1.0)),\n make_const(6.0)))\n\n cp_1 = line_intersection(ln3, ln4)\n cp_2 = 
line_intersection(ln1, ln3)\n cp_3 = line_intersection(ln1, ln4)\n\n corner_points = [cp_1, cp_2, cp_3]\n\n min_xy = minimize_obj_fun(f1, corner_points)\n min_val = f1(min_xy.get_x().get_val(), min_xy.get_y().get_val())\n print(min_xy, min_val)\n\n\n## write your answer to problem 1c as x, y, mv\ndef opt_prob_1c():\n f1 = lambda x, y: 3 * x - 2*y\n ln1 = make_line_eq(make_var('y'), make_prod(make_const(-1.0), make_pwr('x', 1.0)))\n ln2 = make_line_eq(make_var('y'), make_pwr('x', 1.0))\n ln3 = make_line_eq(make_var('y'), make_plus(make_prod(make_const(1.0 / 2.0),\n make_pwr('x', 1.0)),\n make_const(5.0 / 4.0)))\n\n cp_1 = line_intersection(ln1, ln2)#y = -x; y = x\n cp_2 = line_intersection(ln1, ln3)\n cp_3 = line_intersection(ln2, ln3)\n\n corner_points = [cp_1, cp_2, cp_3]\n\n max_xy = maximize_obj_fun(f1, corner_points)\n max_val = f1(max_xy.get_x().get_val(), max_xy.get_y().get_val())\n print(max_xy, max_val)\n\n\n\n \n \n\n\n","sub_path":"linprog.py","file_name":"linprog.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"558390106","text":"import sys\nsys.path.insert(0, '../host_communication')\n\nimport serial as s\nimport uart # enable_uart()\n\nDEVICE1 = \"/dev/ttyO5\"\nTEST_WORD = \"Hello\"\n\n\ndef init():\n uart.enable_uart()\n\n\ndef main():\n # Ask user to setup for test\n print(\"Please do the following:\\n\"\n + \" 1. Short the RX and TX pins of UART5 together on the beaglebone.\\n\")\n raw_input(\"Press ENTER when you're ready to begin the test: \")\n\n # Initialize ports\n init()\n P1 = s.Serial(DEVICE1, 9600, timeout=0.1)\n\n # Send and read hello in one direction\n P1.write(TEST_WORD)\n print(\"Tx'ed %r\" % TEST_WORD)\n result = P1.readline()\n\n # Check value\n print(\"Rx'ed %r\" % result)\n if result == TEST_WORD:\n print(\"SUCCESS! The board is working!\")\n else:\n print(\"FAILURE!!! 
Something on the board/BBB is not working.\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/test_UART05.py","file_name":"test_UART05.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364466879","text":"#!/usr/bin/env python\n\nimport json\nimport logging\nimport os\nimport time\n\nfrom container_pipeline.lib import dj # noqa\nfrom django.utils import timezone\n\nfrom container_pipeline.lib.log import load_logger\nfrom container_pipeline.lib.openshift import Openshift, OpenshiftError\nfrom container_pipeline.utils import BuildTracker\nfrom container_pipeline.workers.base import BaseWorker\nfrom container_pipeline.models import Build, BuildPhase\n\n\nclass DeliveryWorker(BaseWorker):\n \"\"\"\n Delivery Worker tags the image built by Build Worker using the\n `desired-tag` field in index entry\n \"\"\"\n NAME = 'Delivery worker'\n\n def __init__(self, logger=None, sub=None, pub=None):\n super(DeliveryWorker, self).__init__(logger, sub, pub)\n self.build_phase_name = 'delivery'\n self.openshift = Openshift(logger=self.logger)\n\n def handle_job(self, job):\n \"\"\"Handles a job meant for delivery worker\"\"\"\n # TODO: this needs to be addressed after addressing CentOS#278\n self.job = job\n self.setup_data()\n self.set_buildphase_data(\n build_phase_status='processing',\n build_phase_start_time=timezone.now()\n )\n self.logger.info('Starting delivery for job: {}'.format(self.job))\n\n success = self.deliver_build()\n\n if success:\n self.handle_delivery_success()\n else:\n self.handle_delivery_failure()\n\n def deliver_build(self):\n \"\"\"\n Runs an `oc build` with the `run_delivery.sh` script as a part of build\n template. It mainly changes the tag of the image from a test tag\n generated by build process to the tag desired by user as mentioned in\n `desired-tag` field in cccp.yml\n \"\"\"\n project_hash_key = self.job[\"project_hash_key\"]\n\n try:\n self.openshift.login()\n # start the 'delivery' build\n delivery_id = self.openshift.build(project_hash_key, 'delivery')\n except OpenshiftError as e:\n self.logger.error(e)\n return False\n else:\n if not delivery_id:\n return False\n\n delivery_status = self.openshift.wait_for_build_status(\n project_hash_key, delivery_id, 'Complete', status_index=2)\n logs = self.openshift.get_build_logs(\n project_hash_key, delivery_id, \"delivery\")\n delivery_logs_file = os.path.join(\n self.job['logs_dir'], 'delivery_logs.txt')\n self.set_buildphase_data(build_phase_log_file=delivery_logs_file)\n self.export_logs(logs, delivery_logs_file)\n return delivery_status\n\n def handle_delivery_success(self):\n \"\"\"\n - Marks project build as complete\n - Sends job details to RPM tracking piece and deletes the job from the\n tube\n \"\"\"\n # Mark project build as complete\n BuildTracker(self.job['namespace'], logger=self.logger).complete()\n self.logger.debug('Marked project build: {} as complete.'.format(\n self.job['namespace']))\n self.logger.debug('Putting job details to master_tube for tracker\\'s'\n ' consumption')\n\n self.set_buildphase_data(\n build_phase_status='complete',\n build_phase_end_time=timezone.now()\n )\n self.set_build_data(\n build_status='complete',\n build_end_time=timezone.now()\n )\n # sending notification as delivery complete and also adding this into\n # tracker.\n self.job['action'] = 'notify_user'\n self.queue.put(json.dumps(self.job), 'master_tube')\n\n # Put some delay to avoid mismatch in uploading job details 
to\n # master_tube\n time.sleep(10)\n self.job['action'] = 'tracking'\n self.queue.put(json.dumps(self.job), 'master_tube')\n\n def handle_delivery_failure(self):\n \"\"\"\n Puts the job back to the delivery tube for later attempt at delivery\n and requests to notify the user about failure to deliver\n \"\"\"\n self.job[\"build_status\"] = False\n self.job['action'] = \"notify_user\"\n self.queue.put(json.dumps(self.job), 'master_tube')\n self.logger.warning(\n \"Delivery is not successful. Notifying the user.\")\n # data = {\n # 'action': 'notify_user',\n # 'namespace': self.job[\"namespace\"],\n # 'build_status': False,\n # 'notify_email': self.job['notify_email'],\n # 'delivery_logs_file': os.path.join(\n # self.job['logs_dir'], 'delivery_logs.txt'),\n # 'logs_dir': self.job['logs_dir'],\n # 'project_name': self.job[\"project_name\"],\n # 'job_name': self.job['jobid'],\n # 'test_tag': self.job['test_tag']}\n # self.notify(data)\n\n\nif __name__ == \"__main__\":\n load_logger()\n logger = logging.getLogger('delivery-worker')\n worker = DeliveryWorker(logger, sub='start_delivery',\n pub='delivery_failed')\n worker.run()\n","sub_path":"container_pipeline/workers/delivery.py","file_name":"delivery.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"477470753","text":"from fuzzywuzzy import process\nimport pandas as pd\nimport os\nimport pickle\n\npackage_dir = os.path.dirname(__file__)\nprint(package_dir)\n# put the movieId into the row index!\nmovies_path = package_dir + '/data/ml-latest-small/movies.csv'\nmovies = pd.read_csv(movies_path, index_col=0) \nratings_path = package_dir + '/data/ml-latest-small/ratings.csv'\nratings = pd.read_csv(ratings_path)\nmodel_path = package_dir + '/data/ml-latest-small/model.pickle'\nwith open(model_path, 'rb') as file:\n model = pickle.load(file)\n\nmovie_average_rating = None\n\nmovie_item_matrix = None\n\n\ndef lookup_movie(search_query, titles):\n \"\"\"\n given a search query, uses fuzzy string matching to search for similar \n strings in a pandas series of movie titles\n\n returns a list of search results. 
Each result is a tuple that contains \n the title, the matching score and the movieId.\n \"\"\"\n matches = process.extractBests(search_query, titles)\n # [(title, score, movieId), ...]\n return matches\n\nif __name__ == '__main__':\n results = lookup_movie('star wars', movies['title'])\n print(results)\n\n","sub_path":"movierecommender/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160175251","text":"import sys\n\nimport pygame\n\nfrom .colours import WHITE\nfrom .constants import TITLE, DISPLAY_SIZE, BOARD_SIZE\nfrom .img import get_img\nfrom .util import snap_coordinates, horizontal_win, vertical_win, diagonal_win, first_digit\n\n\ndef init_display():\n pygame.display.set_caption(TITLE)\n return pygame.display.set_mode(DISPLAY_SIZE)\n\n\ndef has_won(board):\n if horizontal_win(board) or vertical_win(board) or diagonal_win(board):\n return True\n\n return False\n\n\ndef display_winner(player):\n if player:\n name = \"Naught\"\n else:\n name = \"Cross\"\n\n print(\"{0} Won!\".format(name))\n\n\ndef run():\n player = True\n board = [[None] * BOARD_SIZE for i in range(BOARD_SIZE)]\n\n pygame.init()\n display = init_display()\n display.fill(WHITE)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n pos = snap_coordinates(pygame.mouse.get_pos())\n x = first_digit(pos[0])\n y = first_digit(pos[1])\n\n if board[x][y] is None:\n display.blit(get_img(player), pos)\n board[x][y] = player\n\n if has_won(board):\n display_winner(player)\n\n player ^= True\n\n pygame.display.update()\n","sub_path":"tictactoe/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"380808914","text":"from sqlalchemy.orm import sessionmaker\nfrom obj.objs import step, Base, createTables, engine, value, component\n\n\nclass dbOpt:\n def __init__(self):\n # self.engine = create_engine(\"sqlite:///test.db\", echo=False)\n self.Session = sessionmaker(engine, autoflush=False, autocommit=False)\n self.ses = self.Session()\n\n self.query = self.ses.query\n self.add = self.ses.add\n self.delete = self.ses.delete\n self.update = self.ses.merge\n self.merge = self.ses.merge\n self.flush = self.ses.flush\n self.commit = self.ses.commit\n\n # flush & commit\n def fc(self):\n self.flush()\n self.commit()\n\n\nclass defque:\n def __init__(self):\n self.opt = dbOpt()\n\n def getStepsByCompentId(self, cid):\n i = self.opt.ses.query(value, step).filter(value.stepid == step.id, step.cid == cid)\n return i\n\n def getStepsBySuitId(self, sid):\n i = self.opt.ses.query(value, step, component).filter(value.stepid == step.id, step.cid == component.id,\n component.suiteid == sid)\n return i","sub_path":"obj/dbOpt.py","file_name":"dbOpt.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615845117","text":"#-*- coding:utf-8 -*-\nimport json\nfrom elasticsearch import Elasticsearch\nfrom flask import Flask, render_template,redirect, url_for,request\nimport sys\nfrom lxml import etree\nimport urllib2\nreload(sys)\nsys.setdefaultencoding('utf-8')\napp = Flask(__name__)\nes = Elasticsearch()\n\n@app.route(\"/\")\ndef main_page():\n return render_template('index.html')\n\n@app.route(\"/WtoF\")\ndef WtoF():\n 
response = urllib2.urlopen(\n 'https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&oquery=%EC%84%9C%EC%9A%B8%ED%8A%B9%EB%B3%84%EC%8B%9C%EB%82%A0%EC%94%A8&ie=utf8&query=%EC%84%9C%EC%9A%B8%ED%8A%B9%EB%B3%84%EC%8B%9C%EB%82%A0%EC%94%A8')\n html = response.read()\n tree = etree.HTML(html)\n temperture = tree.xpath('//div[@class=\\'fl\\']/em/text()')[0]\n specification = tree.xpath('//div[@class=\\'fl\\']/em/strong/text()')[0]\n dust = tree.xpath('//p/a[@class=\\'_fine_dust_exp_open\\']/span/text()')[0]\n date = tree.xpath('//h4/span/text()')[0]\n factor = []\n if (int(temperture) < 6):\n factor.append(\"temperture_low\")\n elif (int(temperture) > 27):\n factor.append(\"temperture_high\")\n if '비' in specification.encode('utf-8'):\n factor.append(\"rain\")\n elif '눈' in specification.encode('utf-8'):\n factor.append(\"snow\")\n if '흐림 ' in specification.encode('utf-8'):\n factor.append(\"fog\")\n if '나쁨 ' in dust.encode('utf-8'):\n factor.append(\"yellow_dust\")\n if (int(date.split('.')[1]) == 4 or int(date.split('.')[1]) == 11 or int(date.split('.')[1]) == 18 or int(\n date.split('.')[1]) == 25):\n factor.append(\"friday\")\n if (int(date.split('.')[1]) == 5 or int(date.split('.')[1]) == 6 or int(date.split('.')[1]) == 12 or int(\n date.split('.')[1]) == 13 or int(date.split('.')[1]) == 19 or int(date.split('.')[1]) == 20 or int(\n date.split('.')[1]) == 26 or int(date.split('.')[1]) == 27):\n factor.append(\"weekend\")\n res = []\n size = len(factor)\n if (size == 1):\n search_results = es.search(index=\"analysis\", doc_type='ANOVA', body={\"sort\": {\"F\": {\"order\": \"desc\"}},\n \"query\": {\"bool\": {\"must\": [{\"match\": {\n \"factor\": factor[0]}}, {\n \"match\": {\n \"positive\": \"true\"}}],\n \"filter\": [{\"range\": {\n \"1-p\": {\n \"gte\": 0.99}}}]}}},\n size=800)\n results_1 = search_results['hits']['hits']\n exist = []\n max_1 = 0\n temp = []\n for i in range(0, len(results_1)):\n if (results_1[i]['_source']['food'] in exist): # 겹치면 삭제\n continue\n else:\n exist.append(results_1[i]['_source']['food'])\n temp.append(results_1[i])\n max_1 = max(max_1, results_1[i]['_source']['F'])\n results_1 = temp\n for result in results_1:\n result['_source']['F'] = (result['_source']['F'] * 100) / max_1\n res.append(result['_source'])\n\n elif (size > 1):\n search_results_1 = es.search(index=\"analysis\", doc_type='ANOVA', body={\"sort\": {\"F\": {\"order\": \"desc\"}},\n \"query\": {\"bool\": {\"must\": [{\"match\": {\n \"factor\": factor[0]}}, {\n \"match\": {\n \"positive\": \"true\"}}]\n \n \n }}},\n size=800)\n #search_results_2 = es.search(index=\"analysis\", doc_type='ANOVA', body={\"sort\": {\"F\": {\"order\": \"desc\"}},\n # \"query\": {\"bool\": {\"must\": [{\"match\": {\n # \"factor\": factor[1]}}, {\n # \"match\": {\n # \"positive\": \"true\"}}],\n # \"filter\": [{\"range\": {\n # \"1-p\": {\n # \"gte\": 0.99}}}]}}},\n # size=800)\n search_results_2 = es.search(index=\"analysis\", doc_type='ANOVA', body={\"sort\": {\"F\": {\"order\": \"desc\"}},\n \"query\": {\"bool\": {\"must\": [{\"match\": {\n \"factor\": factor[1]}}, {\n \"match\": {\n \"positive\": \"true\"}}]\n \n }}},\n size=800)\n\n results_1 = search_results_1['hits']['hits']\n results_2 = search_results_2['hits']['hits']\n\n exist = []\n max_1 = 0\n temp = []\n for i in range(0, len(results_1)):\n if (results_1[i]['_source']['food'] in exist): # 겹치면 삭제\n continue\n else:\n exist.append(results_1[i]['_source']['food'])\n temp.append(results_1[i])\n max_1 = max(max_1, results_1[i]['_source']['F'])\n 
results_1 = temp\n\n exist = []\n max_2 = 0\n temp = []\n for i in range(0, len(results_2)):\n if (results_2[i]['_source']['food'] in exist): # 겹치면 삭제\n continue\n else:\n exist.append(results_2[i]['_source']['food'])\n temp.append(results_2[i])\n max_2 = max(max_2, results_2[i]['_source']['F'])\n results_2 = temp\n\n\n for result in results_1:\n result['_source']['F'] = (result['_source']['F'] * 100) / max_1\n for result in results_2:\n result['_source']['F'] = (result['_source']['F'] * 100) / max_2\n\n k = 0\n res = []\n for i in range(0, len(results_1)):\n for j in range(0, len(results_2)):\n if (results_1[i]['_source']['food'] == results_2[j]['_source']['food']):\n results_1[i]['_source']['F'] = results_1[i]['_source']['F'] + results_2[j]['_source']['F']\n res.append(results_1[i]['_source'])\n\n # 하나 속성만 가질 때는 안보여주는 것으로 하지\n res = sorted(res, key=lambda k: k['F'], reverse=True)\n return render_template('WtoF.html',res=res,fac = factor)\n\n@app.route(\"/FtoW\", methods = ['POST', 'GET'])\ndef FtoW():\n res =[]\n if request.method == 'POST':\n if(request.form):\n search_results = es.search(index=\"category\", doc_type='food',\n body={\"query\":{\"match\":{\"food\": request.form.get('search_text')}}},\n size=9)\n results_1 = search_results['hits']['hits']\n for result in results_1:\n res.append(result['_source'])\n return render_template('FtoW.html', res=res)\n\n@app.route(\"/chart/\")\ndef chartmaking(data):\n #recommend_dic={\"thunder_lightning\":\"천둥치는 날\",\"halo\":\"안개 조금있는날\",\"temperture_high\":\"더운 날\", \"temperture_low\":\"추운 날\", \"snow\":\"눈오는 날\", \"yellow_dust\":\"황사 주의\", \"fog\":\"안개 많은날\", \"rain\":\"비오는 날\"}\n recommend_dic={0:\"천둥치는 날\",1:\"안개 조금있는날\",2:\"더운 날\", 3:\"추운 날\", 4:\"눈오는 날\", 5:\"황사 주의\", 6:\"안개 많은날\", 7:\"비오는 날\"}\n weather_dic = {\"thunder_lightning\":0,\"halo\":1,\"temperture_high\":2, \"temperture_low\":3, \"snow\":4, \"yellow_dust\":5, \"fog\":6, \"rain\":7}\n str=data\n search_results1 = es.search(index=\"analysis\", doc_type='ANOVA', body={\"query\":{\"bool\": {\"must\": {\"match\": {\"food\": str}}}}}, size=100)\n results1 = search_results1['hits']['hits']\n max_1 = 0\n\n search_results2 = es.search(index=\"instagram\", doc_type='numOfFoodPostsPerDay',\n body={\"sort\": {\"date\": {\"order\": \"asc\"}}, \"query\": {\"bool\": {\"must\":{\"match\": {\"food\": str}}}}},\n size=1000)\n\n results2 = search_results2['hits']['hits']\n max_1 = 0\n daylist = []\n countlist = []\n likeslist = []\n for i, result in enumerate(results2):\n if i == 1000:\n break\n likes = max(max_1, result['_source']['likes'])\n count = result['_source']['count']\n day = result['_source']['date']\n daylist.append(int(day))\n countlist.append(count)\n likeslist.append(likes)\n\n data4 = [0,0,0,0,0,0,0,0]\n data5= [0,0,0,0,0,0,0,0]\n for i, result in enumerate(results1):\n\n ax_1 = max(max_1, result['_source']['F'])\n positive=result['_source']['positive']\n factor = result['_source']['factor']\n ax_2=ax_1\n try:\n index=weather_dic[factor.strip()]\n value_recommend = recommend_dic[index]\n if positive.strip()=='false':\n ax_2 = (-1) * ax_1\n ax_1=0\n data4[index]=ax_1\n data5[index]=ax_2\n\n except:\n continue\n recommend_list=[]\n no_recommend_list=[]\n newdic_recommend=[]\n newdic_no_recommend=[]\n for index,value in enumerate(data5):\n if value>4:\n newdic_recommend.append(value)\n\n elif value<0:\n newdic_no_recommend.append(value)\n #newdic_recommend.sort(reverse=True)\n\n newdic_recommend2 = sorted(newdic_recommend, reverse=True)\n\n newdic_no_recommend2 = 
sorted(newdic_no_recommend, reverse=False)\n #newdic_no_recommend.sort(reverse=False)\n for element in newdic_recommend2:\n recommend_list.append((recommend_dic[data5.index(element)]))\n for element in newdic_no_recommend2:\n no_recommend_list.append(recommend_dic[data5.index(element)])\n\n\n return render_template('chart.html',name=data,data1=daylist,data2=countlist,data3=likeslist,data4=data4,data5=data4,recommend=recommend_list,no_recommend=no_recommend_list)\n\n@app.route('/result',methods = ['POST', 'GET'])\ndef routind():\n res = []\n if request.method == 'POST':\n if(request.form):\n size = len(request.form)\n if (size == 1):\n search_results = es.search(index=\"analysis\", doc_type='ANOVA', body={\"sort\": {\"F\": {\"order\": \"desc\"}},\"query\":{\"bool\" : { \"must\" : [{\"match\":{\"factor\": request.form.keys()[0]}},{\"match\":{\"positive\" : \"true\"}}],\"filter\" : [{\"range\":{\"1-p\":{\"gte\" : 0.99}}}]}}}, size=800)\n results_1 = search_results['hits']['hits']\n exist = []\n max_1 = 0\n temp = []\n for i in range(0, len(results_1)):\n if (results_1[i]['_source']['food'] in exist): # 겹치면 삭제\n continue\n else:\n exist.append(results_1[i]['_source']['food'])\n temp.append(results_1[i])\n max_1 = max(max_1, results_1[i]['_source']['F'])\n results_1 = temp\n for result in results_1:\n result['_source']['F'] = (result['_source']['F'] * 100) / max_1\n res.append(result['_source'])\n\n elif (size == 2):\n search_results_1 = es.search(index=\"analysis\", doc_type='ANOVA', body={\"sort\": {\"F\": {\"order\": \"desc\"}},\"query\":{\"bool\" : { \"must\" : [{\"match\":{\"factor\": request.form.keys()[0]}},{\"match\":{\"positive\" : \"true\"}}]}}}, size=800)\n search_results_2 = es.search(index=\"analysis\", doc_type='ANOVA', body={\"sort\": {\"F\": {\"order\": \"desc\"}},\"query\":{\"bool\" : { \"must\" : [{\"match\":{\"factor\": request.form.keys()[1]}},{\"match\":{\"positive\" : \"true\"}}]}}}, size=800)\n results_1 = search_results_1['hits']['hits']\n results_2 = search_results_2['hits']['hits']\n\n exist = []\n max_1 = 0\n temp = []\n for i in range(0, len(results_1)):\n if(results_1[i]['_source']['food'] in exist): #겹치면 삭제\n continue\n else:\n exist.append(results_1[i]['_source']['food'])\n temp.append(results_1[i])\n max_1 = max(max_1,results_1[i]['_source']['F'])\n results_1 = temp\n\n exist = []\n max_2 = 0\n temp = []\n for i in range(0, len(results_2)):\n if(results_2[i]['_source']['food'] in exist): # 겹치면 삭제\n continue\n else:\n exist.append(results_2[i]['_source']['food'])\n temp.append(results_2[i])\n max_2 = max(max_2, results_2[i]['_source']['F'])\n results_2 = temp\n\n\n\n for result in results_1:\n result['_source']['F'] = (result['_source']['F']*100)/max_1\n for result in results_2:\n result['_source']['F'] = (result['_source']['F'] * 100)/max_2\n\n k=0\n res = []\n for i in range(0,len(results_1)):\n for j in range(0, len(results_2)):\n if(results_1[i]['_source']['food'] == results_2[j]['_source']['food']):\n results_1[i]['_source']['F'] = results_1[i]['_source']['F'] + results_2[j]['_source']['F']\n res.append(results_1[i]['_source'])\n\n #하나 속성만 가질 때는 안보여주는 것으로 하지\n res = sorted(res, key=lambda k: k['F'],reverse=True)\n return render_template('WtoF.html', res=res, fac = request.form.keys())\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host = \"0.0.0.0\")","sub_path":"weateher/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"588696610","text":"import sqlite3\nfrom os import _exit\nfrom datetime import datetime\nimport hashlib\nfrom Defaults import root\n\n\ndef criarTabelas():\n\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS usuario (\n idUser INTEGER PRIMARY KEY,\n login TEXT NOT NULL,\n senha TEXT NOT NULL\n );\"\"\")\n\n\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS highscores (\n idHigh INTEGER PRIMARY KEY,\n score INT NOT NULL,\n data DATE NOT NULL,\n idUser INTEGER,\n FOREIGN KEY (idUser) REFERENCES usuario(idUser)\n );\"\"\")\n\n\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS skin (\n idSkin INTEGER PRIMARY KEY, \n idUser INTEGER UNIQUE,\n FOREIGN KEY (idUser) REFERENCES usuario(idUser)\n );\"\"\")\n\n\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS base(\n idBase INTEGER PRIMARY KEY,\n r TEXT NOT NULL,\n g TEXT NOT NULL,\n b TEXT NOT NULL,\n idSkin INTEGER UNIQUE,\n FOREIGN KEY (idSkin) REFERENCES usuario(idSkin)\n )\"\"\")\n\n\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS borda(\n idBase INTEGER PRIMARY KEY,\n r TEXT NOT NULL,\n g TEXT NOT NULL,\n b TEXT NOT NULL,\n idSkin INTEGER UNIQUE,\n FOREIGN KEY (idSkin) REFERENCES usuario(idSkin)\n )\"\"\")\n\n\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS sessao(\n id INTEGER NOT NULL\n )\"\"\")\n\n\n\ndef cadastro(login, senha):\n\n senha = doHash(senha)\n\n checkLogin = f\"\"\"\n SELECT login, idUser FROM usuario\n WHERE login == \"{login}\";\n \"\"\"\n\n cadastrar = f\"\"\"\n INSERT INTO usuario \n VALUES (null, \"{login}\", \"{senha}\");\n \"\"\"\n\n cursor.execute(checkLogin)\n resultado = cursor.fetchall()\n\n if resultado == [] and login != \"\" and senha != \"\":\n cursor.execute(cadastrar)\n conn.commit()\n\n cursor.execute(checkLogin)\n res = cursor.fetchall()\n\n iduser = res[0][1]\n\n cursor.execute(f\"\"\"\n INSERT INTO skin \n VALUES (null, {iduser})\n \"\"\")\n\n cursor.execute(f\"\"\"\n INSERT INTO base \n VALUES (null, \"255\", \"255\", \"255\", {iduser})\n \"\"\")\n\n cursor.execute(f\"\"\"\n INSERT INTO borda \n VALUES (null, \"255\", \"255\", \"255\", {iduser})\n \"\"\")\n\n conn.commit()\n return f\"O login {login} foi cadastrado com sucesso.\"\n\n elif login == \"\":\n return \"Digite um login.\"\n\n elif senha == \"\":\n return \"Digite uma senha.\"\n\n else:\n return \"Este login ja existe, digite outro.\"\n\n\ndef login(login, senha, keep):\n global iduser\n\n if login == None:\n return False\n\n if senha == None:\n return False\n\n selectLogin = f\"\"\"\n SELECT idUser, login, senha FROM usuario\n WHERE login == \"{login}\";\n \"\"\"\n\n cursor.execute(selectLogin)\n resultado = cursor.fetchall()\n\n if resultado != []:\n\n if senha == resultado[0][2] or doHash(senha) == resultado[0][2]:\n iduser = resultado[0][0]\n if keep:\n logSessao()\n return True\n\n else:\n return \"Senha incorreta, tente novamente\"\n\n else:\n return \"Este login não existe, digite outro\"\n\n\ndef mudarSkin(base, borda):\n \n cursor.execute(f\"\"\"\n UPDATE base\n SET r = \"{base[0]}\", g = \"{base[1]}\", b = \"{base[2]}\"\n WHERE idSkin = {iduser}\n \"\"\")\n conn.commit()\n\n cursor.execute(f\"\"\"\n UPDATE borda\n SET r = \"{borda[0]}\", g = \"{borda[1]}\", b = \"{borda[2]}\"\n WHERE idSkin = {iduser}\n \"\"\")\n conn.commit()\n\n return True\n \n\ndef getCor(tabela):\n cursor.execute(f\"\"\"\n SELECT r, g, b FROM {tabela}\n WHERE idSkin = {iduser};\n \"\"\")\n\n resultado = cursor.fetchall()\n\n rgb = []\n for i in range (0, len(resultado[0])):\n rgb.append(int(resultado[0][i]))\n \n return rgb\n\n\ndef 
logSessao():\n cursor.execute(f\"\"\"INSERT INTO sessao\n VALUES({iduser})\"\"\")\n\n conn.commit()\n\n\ndef checkSessao():\n cursor.execute(\"\"\"\n SELECT login, senha FROM usuario \n INNER JOIN sessao\n ON sessao.id = usuario.idUser\n \"\"\")\n\n res = cursor.fetchall()\n \n if res != []:\n return login(res[0][0], res[0][1], False)\n\n else:\n return False\n\ndef delSessao():\n cursor.execute(\"\"\"\n DELETE FROM sessao\"\"\")\n conn.commit()\n\n\ndef uploadScore(score):\n\n cursor.execute(f\"\"\"\n INSERT INTO highscores\n VALUES (null, \"{score}\", DATE(\"now\"), \"{iduser}\")\n \"\"\")\n conn.commit()\n\n\ndef mostrarScores(tempo):\n \n cursor.execute(f\"\"\"\n SELECT MAX(idUser) FROM usuario\n \"\"\")\n\n topScores = []\n\n if tempo == 0:\n condicao = f\"AND highscores.data BETWEEN DATE('now', '-7 days') AND DATE('now')\"\n\n elif tempo == 1:\n condicao = f\"AND highscores.data BETWEEN DATE('now', '-30 days') AND DATE('now')\"\n\n elif tempo == 2:\n condicao = \"\"\n\n\n\n for i in range(1, cursor.fetchall()[0][0] + 1):\n\n cursor.execute(f\"\"\"\n SELECT usuario.login, MAX(highscores.score), highscores.idUser, highscores.data FROM highscores\n INNER JOIN usuario\n ON usuario.idUser = highscores.idUser\n WHERE highscores.idUser = {i} {condicao}\"\"\")\n\n score = cursor.fetchall()[0]\n \n\n if score[0] and score[1] and score[3]:\n data = score[3].split(\"-\")\n troca = data[0]\n data[0] = data[2]\n data[2] = troca \n data = \"/\".join(data)\n \n if score[2] != iduser:\n score = [score[0], score[1], False, data]\n else:\n score = [score[0], score[1], iduser, data]\n\n topScores.append(score)\n \n \n for y in range(0, len(topScores)):\n for i in range(0, len(topScores)-1):\n\n if topScores[i][1] < topScores[i+1][1]:\n troca = topScores[i]\n topScores[i] = topScores[i+1]\n topScores[i+1] = troca\n\n \n return topScores\n \n \ndef jogoSair():\n conn.close()\n _exit(0)\n\n\ndef doHash(string):\n h1 = hashlib.sha256()\n h2 = hashlib.md5()\n h3 = hashlib.sha256()\n\n h1.update(bytes(string, encoding=\"utf-8\"))\n\n h2.update(bytes(str(h1.digest())+\"fazendohash\", encoding=\"utf-8\"))\n\n h3.update(bytes(\"string3?\"+str(h2.digest())+\"string2\", encoding=\"utf-8\"))\n\n\n return str(h3.digest())\n\n\nconn = sqlite3.connect(root+\"banco\")\ncursor = conn.cursor()\ncriarTabelas()","sub_path":"Arqs/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225267827","text":"import webapp2\n\nclass Redirect(webapp2.RequestHandler):\n url = 'http://facebook.com/mikegoodspeed'\n def get(self):\n redirect = '' % self.url\n self.response.headers['Content-Type'] = 'text/html'\n self.response.out.write(redirect)\n\napp = webapp2.WSGIApplication([('/', Redirect)])\n","sub_path":"redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253671964","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models, migrations\nfrom django.core.management import call_command\n\ndef load_categories(apps, schema_editor):\n call_command('loaddata', 'categories.yaml', app_label='ielts_app')\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)),\n ('name', models.CharField(max_length=16)),\n ('ratio', models.IntegerField(blank=True)),\n ('task_type', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='SpeakingQuest',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('question', models.CharField(max_length=512)),\n ('round_off_question', models.CharField(max_length=128, blank=True)),\n ('task_type', models.IntegerField()),\n ('category', models.ForeignKey(to='ielts_app.Category')),\n ],\n ),\n migrations.CreateModel(\n name='SpeakingSampleAnswers',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('recording', models.FileField(upload_to=b'/writing/', blank=True)),\n ('transcript', models.CharField(max_length=2048, blank=True)),\n ('speaking_quest_id', models.ForeignKey(to='ielts_app.SpeakingQuest')),\n ],\n ),\n migrations.CreateModel(\n name='Vocabulary',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('phrase', models.CharField(max_length=48)),\n ('sentences', models.CharField(max_length=512)),\n ('task_type', models.IntegerField()),\n ('category', models.ManyToManyField(to='ielts_app.Category')),\n ],\n ),\n migrations.CreateModel(\n name='WritingSampleAnswers',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('answer', models.CharField(max_length=384)),\n ],\n ),\n migrations.CreateModel(\n name='WritingTask',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('content', models.CharField(max_length=768)),\n ('image_url', models.ImageField(upload_to=b'/writing/%Y/%m/%d', blank=True)),\n ('task_type', models.IntegerField()),\n ('category', models.ForeignKey(to='ielts_app.Category')),\n ],\n ),\n migrations.AddField(\n model_name='writingsampleanswers',\n name='writing_task_id',\n field=models.ForeignKey(to='ielts_app.WritingTask'),\n ),\n migrations.RunPython(load_categories),\n ]\n","sub_path":"ielts_app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"516259815","text":"import random\nfrom time import sleep\nfrom selenium import webdriver\nfrom concurrent.futures import ThreadPoolExecutor\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Boolean\nfrom sqlalchemy.orm import sessionmaker\nfrom captcha import solve\n\n\n\nengine = create_engine('sqlite:///allaccounts.db', connect_args={'check_same_thread': False})\n\nBase = declarative_base()\n\n\nclass User(Base):\n __tablename__ = 'users'\n\n id = Column(Integer, primary_key=True)\n email = Column(String, unique=True)\n password = Column(String)\n #orakuru = Column(Boolean)\n #waultswap = Column(Boolean)\n #centric_swap = Column(Boolean)\n def __repr__(self):\n return f'User {self.email}'\n\n\nBase.metadata.create_all(engine)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\nprint(session.query(User).count())\nserver = True\nif server:\n from pyvirtualdisplay import Display\n\n# PROXY = 
'95.216.10.237:5008'\n# PROXY = '209.205.212.35:444'\n\n\ncoins = ['centric swap']\n\ndef get_chromedriver():\n # driver = uc.Chrome()\n # chrome_options = uc.ChromeOptions()\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n \n chrome_options.add_argument('--disable-blink-features=AutomationControlled')\n chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n chrome_options.add_experimental_option('useAutomationExtension', False)\n\n\n # chrome_options.add_argument(f'--proxy-server={PROXY}')\n chrome_options.add_extension('nick_proxy.zip')\n # chrome_options.add_extension('obaid_proxy.zip')\n # chrome_options.add_extension('./ext.crx')\n # chrome_prefs = {}\n # chrome_options.experimental_options[\"prefs\"] = chrome_prefs\n # chrome_prefs[\"profile.default_content_settings\"] = {\"images\": 2}\n # chrome_prefs[\"profile.managed_default_content_settings\"] = {\"images\": 2}\n # driver = uc.Chrome(options=chrome_options)\n # if not server:\n # driver.set_window_size(1285, 788)\n driver = webdriver.Chrome(executable_path='./chromedriver', options=chrome_options)\n return driver\n\n\ndef main(email):\n try:\n logged_in = False\n if server:\n display = Display(visible=False, size=(2600, 800))\n display.start()\n driver = get_chromedriver()\n driver.get('https://coinmarketcap.com/')\n WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//button[text()=\"Log In\"]'))).click()\n # user = session.query(User).first()\n print('result wait ' + email)\n result = solve()\n # print(result)\n if not(result and result != 'error'):\n print('error 2captcha')\n return\n code = result['code']\n driver.find_element_by_xpath('//input[@type=\"email\"]').send_keys(email)\n driver.find_element_by_xpath('//input[@type=\"password\"]').send_keys('Allah786$\\n')\n sleep(3)\n driver.execute_script(\n 'document.getElementById(\"g-recaptcha-response\").innerHTML = \"%s\"'\n % code\n )\n\n _ = 0\n while 1:\n driver.execute_script(f'___grecaptcha_cfg.clients[0].B.B.callback(\"{code}\")')\n sleep(3)\n avatar = driver.find_elements_by_xpath('//div[@class=\"avatar-img \"]')\n if avatar:\n logged_in = True\n break\n print('waiting')\n _ += 1\n if _ > 10:\n print('login error too much time loading ' + email)\n driver.save_screenshot('new.png')\n return\n\n if logged_in:\n print('logged in')\n i = 0\n errors = 0\n while i < len(coins):\n try:\n if errors > 1:\n print('ERROR STOPING ' + email)\n errors = 0\n i += 1\n continue\n user = session.query(User).filter_by(email=email).first()\n search_button = WebDriverWait(driver, 5).until(\n EC.presence_of_element_located((By.XPATH, '//div[text()=\"Search\"]')))\n driver.execute_script(\"arguments[0].click();\", search_button)\n# search_button.click()\n sleep(random.randint(1, 3))\n search = WebDriverWait(driver, 5).until(\n EC.presence_of_element_located((By.XPATH, '//input[@spellcheck=\"false\"]')))\n search.send_keys(coins[i])\n sleep(random.randint(2, 5))\n search.send_keys('\\n')\n sleep(random.randint(3, 4))\n print('SEARCH DONE ' + email)\n if not driver.find_elements_by_xpath('//span[@class=\"icon-Star-Filled\"]/..'):\n star = WebDriverWait(driver, 5).until(\n EC.element_to_be_clickable((By.XPATH, '//span[@class=\"icon-Star\"]/..')))\n star.click()\n sleep(random.randint(1, 3))\n print('STAR DONE ' + email)\n else:\n print('STAR NOT DONE ' + email)\n# driver.save_screenshot('star.png')\n 
driver.execute_script(f'window.scrollBy(0,2000);')\n sleep(random.randint(2, 4))\n if driver.find_elements_by_xpath('//button[contains(text(),\"Good\")]'):\n good = WebDriverWait(driver, 5).until(\n EC.element_to_be_clickable((By.XPATH, '//button[contains(text(),\"Good\")]')))\n good.click()\n sleep(random.randint(1, 3))\n print('GOOD DONE ' + email)\n else:\n print('GOOD NOT DONE ' + email)\n# driver.save_screenshot('good.png')\n\n# if coins[i] == 'centric swap':\n# user.centric_swap = True\n# elif coins[i] == 'orakuru':\n# user.orakuru = True\n# else:\n# print('WRONG COIN STOPPPPPPPP')\n# session.commit()\n i += 1\n driver.get('https://coinmarketcap.com/')\n except Exception as e:\n print('error occurred ', e)\n errors += 1\n else:\n print('ERROR not logged in')\n return\n except Exception as e:\n print(e)\n finally:\n if server:\n display.stop()\n try:\n driver.quit()\n except:\n pass\n\nif __name__ == '__main__':\n u = session.query(User).all()\n# main(u[0].email)\n# exit()\n random.shuffle(u)\n with ThreadPoolExecutor(max_workers=40) as ex:\n for i in u[:40]:\n ex.submit(main, i.email)\n","sub_path":"login_only.py","file_name":"login_only.py","file_ext":"py","file_size_in_byte":7231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352372961","text":"\"\"\"\nThis module provides a data loader that stores all data in RAM.\nThis approach is worthwhile for smaller datasets, as it bypasses an I/O bottleneck, \nbut it is ill-advised to use the data loader for data sets spanning more than a couple of Gb.\n\"\"\"\nimport numpy as np\nimport torch\nimport cv2\nfrom os.path import join, isdir\nfrom os import listdir\nfrom tqdm import tqdm\nfrom colorama import Fore\n\n\nclass RAMDataSet(torch.utils.data.Dataset):\n \"\"\"Dataset that loads all images into RAM, not recommendable for anything bigger than a couple of Gb\"\"\"\n\n def __init__(self, path, height, width, *args, flatten=False, inv=False, suffix='.png', norm_light=True,\n preload=True, black_white=True):\n \"\"\"\n :param path: path to images\n :param height: preferred height\n :param width: preferred width\n :param args: other arguments for the super-class :class:`torch.utils.data.Dataset`\n :param flatten: indicates whether to flatten the images\n :param inv: indicates whether to invert the images\n :param suffix: file ending of images, e.g. 
'.png'\n :param norm_light: indicates whether to normalize lighting\n :param preload: indicates imidiate preloading of data\n :param black_white: indicates whether to reduce batch and targets to black and white images\n \"\"\"\n super().__init__(*args)\n self.print_c = Fore.YELLOW\n self.BW = black_white\n self.img_path = path\n self.norm_light = norm_light\n self.paths = self.__search_imgs(path, suffix=suffix)\n self.width = width\n self.height = height\n self.flatten = flatten\n self.inv = inv\n self.imgs = None\n if preload:\n self.load_all()\n\n def __getitem__(self, idx):\n \"\"\"\n :param idx: image index\n :return: batch and targets tensors\n \"\"\"\n if self.BW:\n if not isinstance(idx, int):\n return torch.from_numpy(self.imgs[idx]),\\\n torch.from_numpy(self.imgs[idx])\n return torch.from_numpy(self.imgs[idx].reshape(1, 1, *self.imgs.shape[2:])),\\\n torch.from_numpy(self.imgs[idx].reshape(1, 1, *self.imgs.shape[2:]))\n else:\n if not isinstance(idx, int):\n return torch.from_numpy(self.imgs[idx][:, 0, :, :][:, None, :, :]),\\\n torch.from_numpy(self.imgs[idx][:, 1:, :, :])\n return torch.from_numpy(self.imgs[idx][0, :, :].reshape(1, 1, *self.imgs.shape[2:])),\\\n torch.from_numpy(self.imgs[idx][1:, :, :].reshape(1, 2, *self.imgs.shape[2:]))\n\n def __len__(self):\n return self.imgs.shape[0]\n\n def __load_img(self, idx):\n return RAMDataSet.__load_img_static(self.paths[idx], self.height, self.width, self.norm_light,\n self.inv, self.flatten, self.BW)\n\n @staticmethod\n def __load_img_static(img_path, height, width, norm_light, inv, flatten, BW=True):\n img = cv2.imread(img_path)\n img = cv2.resize(img, (height, width))\n if BW:\n img = img.mean(axis=2).astype(np.uint8)\n img = img.reshape(1, height, width).astype(np.float32) / 255.\n else:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)\n img = img.transpose(2, 0, 1).astype(np.float32) / 255.\n if BW and norm_light:\n img -= img.min()\n img /= img.max()\n if BW and inv:\n img = 1.0-img\n if flatten:\n return img.flatten()\n return img\n\n def __search_imgs(self, path, suffix='.png'):\n paths = []\n for p in listdir(path):\n if isdir(join(path, p)):\n paths += self.__search_imgs(join(path, p), suffix)\n elif p.endswith(suffix):\n paths.append(join(path, p))\n return paths\n\n def load_all(self):\n \"\"\"Loading all images into RAM.\"\"\"\n n_fmaps = 1 if self.BW else 3\n if not self.flatten:\n self.imgs = np.empty((len(self.paths), n_fmaps, self.height, self.width), dtype=np.float32)\n else:\n self.imgs = np.empty((len(self.paths), n_fmaps * self.height * self.width), dtype=np.float32)\n for i in tqdm(range(len(self)), bar_format=f'{self.print_c}preloading data set{Fore.RESET} '+\n \"{l_bar}%s{bar}%s{r_bar}\" % (self.print_c, Fore.RESET)):\n self.imgs[i] = self.__load_img(i)\n\n\nclass RAMDataSetIter:\n \"\"\"Iterator for RAMDataSet, does not need reinitialization after an epoch has finished (can be reset).\"\"\"\n\n def __init__(self, dset, shuffle=True, s_batch=16):\n \"\"\"\n :param dset: data set to iterate on\n :param shuffle: indicates whether to shuffle the data\n :param s_batch: size of batch\n \"\"\"\n self.dset = dset\n self.s_batch = s_batch\n self.idcs = np.arange(len(dset))\n self.shuffle = shuffle\n if self.shuffle:\n self.__shuffle()\n self.__idx = 0\n\n def __next__(self):\n if self.__idx >= len(self.dset):\n raise StopIteration\n self.__idx += self.s_batch\n idcs = self.idcs[self.__idx-self.s_batch:min(len(self.dset), self.__idx)]\n return self.dset[idcs]\n\n def __shuffle(self):\n self.idcs = 
np.random.permutation(self.idcs)\n\n def next(self):\n \"\"\"\n :return: next batch and targets from dataset\n \"\"\"\n return self.__next__()\n\n def reset(self):\n \"\"\"Resetting the iterator and shuffling again, if shuffling was specified.\"\"\"\n self.__idx = 0\n if self.shuffle:\n self.__shuffle()\n","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489084073","text":"import cv2,threading, time\nimport queue \n\n# bufferless VideoCapture\nclass CustomVideoCapture:\n def __init__(self, name):\n self.cap = cv2.VideoCapture(name)\n self.q = queue.Queue()\n t = threading.Thread(target=self._reader)\n t.daemon = True\n t.start()\n\n # read frames as soon as they are available, keeping only most recent one\n def _reader(self):\n while True:\n ret, frame = self.cap.read()\n if not ret:\n break\n if not self.q.empty():\n try:\n self.q.get_nowait() # discard previous (unprocessed) frame\n except queue.Empty:\n pass\n self.q.put(frame)\n \n def read(self):\n return self.q.get()\n\n def get_frame_info(self):\n frame_rate = self.cap.get(5)\n width = self.cap.get(3) # float `width`\n height = self.cap.get(4)\n # print before returning; these lines were unreachable after the return\n print('Camera FPS: {}'.format(frame_rate))\n print('Frame Dimension: {} x {}'.format(height,width))\n return int(height),int(width)\n ","sub_path":"utils/videocapture.py","file_name":"videocapture.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"151188653","text":"#copyright@2017 Aashirya Kausik\r\nprint('Welcome to my calculator')\r\ndef calculator():\r\n\t\"\"\"\r\n\tThis function is for the calculator that is capable of performing the 4 basic operations of mathematics.\r\n\t\r\n\t\"\"\"\r\n\toperation1 = 'Addition'\r\n\toperation2 = 'Subtraction'\r\n\toperation3 = 'Multiplication'\r\n\toperation4 = 'Division'\r\n\tprint('1. Addition')\r\n\tprint('2. Multiplication')\r\n\tprint('3. Subtraction')\r\n\tprint('4. Division')\r\n\toperation = int(input('Which operation(1-4)?' 
))\r\n\ta = int(input(\"Enter a number:\" ))\r\n\tb = int(input(\"Enter a number:\" ))\r\n\tif operation == 1:\r\n\t\ts = a + b\r\n\t\tprint(' sum is ', s)\r\n\r\n\telif operation == 3:\r\n\t\tr = a - b\r\n\t\tprint(' result is', r)\r\n\r\n\telif operation == 2:\r\n\t\tp = a*b\r\n\t\tprint('product is', p)\r\n\r\n\telif operation == 4:\r\n\t\td = a / b  # true division; floor division (//) gave wrong results for e.g. 7/2\r\n\t\tprint('result is', d)\r\n\r\ncalculator()","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119095877","text":"from django_cron import CronJobBase, Schedule\nfrom relay_manager.models import RelayNodes\nimport dpostools.api\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass CheckPeers(CronJobBase):\n RUN_EVERY_MINS = 60\n schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\n code = 'relay_manager_check_peers'\n\n def do(self):\n \"\"\"\n Check all relay nodes registered in the DB and report to sentry if one is significantly behind\n \"\"\"\n\n MAX_DIF = 100\n\n arknetwork = dpostools.api.Network('Ark')\n\n # here we compile the info on all our nodes, + get the network height from ALL relay nodes.\n for node in RelayNodes.objects.all():\n if node.type == 'Ark':\n arknetwork.add_peer('http://{IP}:{PORT}'.format(\n IP=node.ip,\n PORT=node.port\n ))\n\n # status contains info on all our relay nodes\n status = arknetwork.status()\n\n # now we go over our registered nodes again and check their status\n for node in RelayNodes.objects.all():\n\n # we compare based on height (if they are significantly behind, i.e. 100 blocks = 800 seconds)\n height = status['peer_status'][node.ip]['height']\n if abs(int(height) - int(status['network_height'])) > MAX_DIF:\n logger.warning('Peer {PEER} is behind. Owner: {OWNER}, Email: {EMAIL}'.format(\n PEER=node.ip,\n OWNER=node.owner,\n EMAIL=node.email,\n ))\n\n # and based on their own status, which they report as \"ok\".\n # sometimes a node doesn't realise on its own something is wrong.\n\n ok = status['peer_status'][node.ip]['status']\n\n if ok != 'ok':\n logger.warning('Peer {PEER} not ok. 
Owner: {OWNER}, Email: {EMAIL}'.format(\n PEER=node.ip,\n OWNER=node.owner,\n EMAIL=node.email,\n ))\n\n","sub_path":"relay_manager/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496483529","text":"from django.urls import path\n\nfrom index import views\n\napp_name = 'cmfz'\n\nurlpatterns = [\n path('index/', views.index, name='index'),\n path('login/', views.login, name='login'),\n path('check_user/', views.check_user, name='check_user'),\n path('login_form/', views.login_form, name='login_form'),\n path('logout/', views.logout, name='logout'),\n]\n","sub_path":"index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631023862","text":"#!/usr/bin/env python \n\nimport inkex, cubicsuperpath, simplepath, cspsubdiv, os.path, serial, time, sys, string, gettext, datetime, simplestyle\n\n\n#copied from grbl_serial.py\ndef findPort():\n\t# Find a GRBL board connected to a USB port.\n\ttry:\n\t\tfrom serial.tools.list_ports import comports\n\texcept ImportError:\n\t\tcomports = None\n\t\treturn None\n\tif comports:\n\t\tcomPortsList = list(comports())\n\t\tfor port in comPortsList:\n\t\t\tdesc = port[1].lower()\n\t\t\tisUsbSerial = \"usb\" in desc and \"serial\" in desc\n\t\t\tisArduino = \"arduino\" in desc \n\t\t\tisCDC = \"cdc\" in desc # desc is lowercased above, so the uppercase \"CDC\" test could never match\n\t\t\tif isUsbSerial or isArduino or isCDC:\n\t\t\t\treturn port[0]\n\t\t\t\t\n\treturn None\n\ndef testPort(comPort):\n\t'''\n\tReturn a SerialPort object for the first port with a GRBL board.\n\tYOU are responsible for closing this serial port!\n\t'''\n\tif comPort is not None:\n\t\ttry:\n\t\t\tserialPort = serial.Serial()\n\t\t\tserialPort.baudrate = 9600\n\t\t\tserialPort.timeout = 1.0\n\t\t\tserialPort.rts = False\n\t\t\tserialPort.dtr = True\n\t\t\tserialPort.port = comPort\n\t\t\tserialPort.open()\n\t\t\ttime.sleep(2)\n\t\t\twhile True:\n\t\t\t\tstrVersion = serialPort.readline()\n\t\t\t\tif len(strVersion) == 0:\n\t\t\t\t\tbreak\n\t\t\t\tif strVersion and strVersion.startswith('Grbl'):\n\t\t\t\t\treturn serialPort\n\t\t\tserialPort.close()\n\t\texcept serial.SerialException:\n\t\t\tpass\n\t\treturn None\n\telse:\n\t\treturn None\n\n# Return a GrblSerial object\ndef openPort(self):\n\tfoundPort = findPort()\n\tserialPort = testPort(foundPort)\n\tif serialPort:\n\t\tg = GrblSerial(serialPort, 1)\n\t\tif (self.options.setup == \"old\"):\n\t\t\tg.command('$0=340\\r')\n\t\t\tg.command('$1=340\\r')\n\t\t\tg.command('$4=150\\r')\n\t\t\tg.command('$5=300\\r')\n\t\t\tg.command('$8=10\\r')\n\t\telif (self.options.setup == \"new\"):\n\t\t\tg.command('$0=128\\r')\n\t\t\tg.command('$1=128\\r')\n\t\t\tg.command('$4=400\\r')\n\t\t\tg.command('$5=800\\r')\n\t\t\tg.command('$8=100\\r') \n\t\telse :\n\t\t\tg.command('$$\\r')\n\t\treturn g\n\treturn None\n\ndef escaped(s):\n\tr = ''\n\tfor c in s:\n\t\tif ord(c) < 32:\n\t\t\tr = r + ('<%02X>' % ord(c))\n\t\telse:\n\t\t\tr = r + c\n\treturn r\n\nclass GrblSerial(object):\n\tdef __init__(self, port, doLog):\n\t\tself.port = port\n\t\tself.doLog = doLog\n\n\tdef log(self, type, text):\n\t\tts = datetime.datetime.now()\n\t\ttry:\n\t\t\twith open(\"costycnc-serial.log\", \"a\") as myfile:\n\t\t\t\tmyfile.write('--- %s\\n%s\\n%s\\n' % (ts.isoformat(), type, escaped(text)))\n\t\texcept:\n\t\t\tinkex.errormsg(gettext.gettext(\"Error logging serial data.\"))\n\n\tdef 
close(self):\n\t\tif self.port is not None:\n\t\t\ttry:\n\t\t\t\tself.port.close()\n\t\t\texcept serial.SerialException:\n\t\t\t\tpass\n\n\tdef write(self, data):\n\t\tif self.doLog:\n\t\t\tself.log('SEND', data)\n\t\tself.port.write(data)\n\n\tdef readline(self):\n\t\tdata = self.port.readline()\n\t\tinkex.debug(data)\n\t\tif self.doLog:\n\t\t\tself.log('RECV', data)\n\t\treturn data\n\t\n\tdef query(self, cmd):\n\t\tif (self.port is not None) and (cmd is not None):\n\t\t\tresponse = ''\n\t\t\ttry:\n\t\t\t\tself.write(cmd)\n\t\t\t\tresponse = self.readline()\n\t\t\t\tnRetryCount = 0\n\t\t\t\twhile (len(response) == 0) and (nRetryCount < 100):\n\t\t\t\t\tif self.doLog:\n\t\t\t\t\t\tself.log('QUERY', 'read %d' % nRetryCount)\n\t\t\t\t\tresponse = self.readline()\n\t\t\t\t\tnRetryCount += 1\n\t\t\t\t\tif self.doLog:\n\t\t\t\t\t\tself.log('QUERY', 'response is '+response)\n\t\t\t\t# swallow 'ok'\n\t\t\t\tnRetryCount = 0\n\t\t\t\tok = self.readline()\n\t\t\t\twhile (len(ok) == 0) and (nRetryCount < 100):\n\t\t\t\t\tok = self.readline()\n\t\t\t\t\tnRetryCount += 1\n\t\t\texcept serial.SerialException:\n\t\t\t\tinkex.errormsg(gettext.gettext(\"Error reading serial data.\"))\n\t\t\treturn response\n\t\telse:\n\t\t\treturn None\n\n\tdef command(self, cmd):\n\t\tif (self.port is not None) and (cmd is not None):\n\t\t\ttry:\n\t\t\t\tself.write(cmd)\n\t\t\t\tresponse = self.readline()\n\t\t\t\tnRetryCount = 0\n\t\t\t\twhile (len(response) == 0) and (nRetryCount < 30):\n\t\t\t\t\t# get new response to replace null response if necessary\n\t\t\t\t\tresponse = self.readline()\n\t\t\t\t\tnRetryCount += 1\n\t\t\t\tif 'ok' in response.strip():\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tif (response != ''):\n\t\t\t\t\t\tinkex.errormsg('Error: Unexpected response from GRBL.') \n\t\t\t\t\t\tinkex.errormsg(' Command: ' + cmd.strip())\n\t\t\t\t\t\tinkex.errormsg(' Response: ' + str(response.strip()))\n\t\t\t\t\telse:\n\t\t\t\t\t\tinkex.errormsg('GRBL Serial Timeout after command: %s)' % cmd.strip())\n\t\t\t\t\t\tsys.exit()\n\t\t\texcept:\n\t\t\t\tinkex.errormsg('Failed after command: ' + cmd)\n\t\t\t\tsys.exit()\n\n#finished copy from grbl_serial.py\n\nclass MyEffect(inkex.Effect):\n\tdef __init__(self):\n\t\tinkex.Effect.__init__(self) \n\t\tself.OptionParser.add_option(\"-t\", \"--setup\", action=\"store\", type=\"string\",\n\t\t\t\t\t\t\t\t\t dest=\"setup\", default=\"new\", help=(\"Settings\"))\t\t\t\n\tdef effect(self): \n\t\topenPort(self)\nif __name__ == '__main__':\n\te = MyEffect()\n\te.affect()\n\n\n# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99\n","sub_path":"extensions/costycnc-parameter.py","file_name":"costycnc-parameter.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27730585","text":"# Convert a Binary Search Tree into a Skewed tree in increasing or decreasing order\r\nclass Node:\r\n\tdef __init__(self,key):\r\n\t\tself.left = None\r\n\t\tself.right = None\r\n\t\tself.value = key\r\n\r\n# A function to insert a new node with the given key value\r\ndef insert(root,node):\r\n\tif root is None:\r\n\t\troot=node\r\n\telse:\r\n\t\tif root.value>(self.__supports_unicast+self.__supports_local_admin);\n ##\n #####\n self.__layer1protocol = layer1protocol if layer1protocol is not None else Layer1Protocol.default;\n self.__hub = hub if hub else StubHub();\n ##\n \n ##########\n ### Properties\n @property\n @typechecked(group=__name__)\n def 
Layer1Address(self)->Optional[type]:\n        \"\"\" Returns type retrieved by layer1protocol lookup, may return None \"\"\"\n        return self.__Layer1AddressType;\n    ##\n    \n    @property\n    @typechecked(group=__name__)\n    def layer1protocol(self)->Layer1Protocol:\n        \"\"\" Returns Layer1Protocol e.g. Ethernet\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET);\n        >>> pr = lan.layer1protocol;\n        >>> isinstance(pr,Layer1Protocol)\n        True\n        >>> expected = Layer1Protocol.ETHERNET;\n        >>> lan = Lan(layer1protocol=expected);\n        >>> pr = lan.layer1protocol;\n        >>> (pr==expected)\n        True\n        \"\"\"\n        return self.__layer1protocol;\n    ##\n    @property\n    @typechecked(group=__name__)\n    def is_running(self)->bool:\n        \"\"\" Returns true/false if running\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET); \n        >>> lan.is_running\n        False\n        \"\"\"\n        return self.__hub.is_running;\n    ##\n    \n    @property\n    @typechecked(group=__name__)\n    def hub(self)->Hub:\n        \"\"\" Returns hub object that this lan is using\n        \n        >>> from hub import Hub\n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET); \n        >>> h = lan.hub;\n        >>> isinstance(h,Hub)\n        True\n        \"\"\"\n        return self.__hub;\n    ##\n    \n    @property\n    @typechecked(group=__name__)\n    def max_transmission_unit(self)->Optional[int]:\n        \"\"\" Gets the underlying hub's maximum transmission unit\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET); \n        >>> lan.max_transmission_unit == None #default is None\n        True\n        \"\"\"\n        return self.__hub.mtu;\n    ##\n    mtu = max_transmission_unit;#alias\n    ### /Properties\n    ##########\n    \n    ##########\n    ### Public\n    @typechecked(group=__name__)\n    async def start(self,*a,**kw)->None:\n        \"\"\" Starts the lan object\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET);\n        >>> lan.is_running\n        False\n        >>> K.run(lan.start)\n        True\n        >>> K.run(lan.start)\n        False\n        >>> lan.is_running\n        True\n        \"\"\"\n        return await self.__hub.start(*a,**kw);\n    ##\n    \n    @typechecked(group=__name__)\n    async def stop(self)->bool:\n        \"\"\" Stops the lan object\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET); \n        >>> lan.is_running\n        False\n        >>> K.run(lan.start)\n        True\n        >>> lan.is_running;\n        True\n        >>> K.run(lan.stop);\n        True\n        >>> lan.is_running;\n        False\n        >>> K.run(lan.stop);\n        False\n        >>> lan.is_running;\n        False\n        \"\"\"\n        return await self.__hub.stop();\n    ##\n    \n    @typechecked(group=__name__)\n    async def changeHubType(self,HubType)->None:\n        \"\"\" Changes the hub type\n        \n        >>> from hub import SimpleHub;\n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET); \n        >>> h = lan.hub;\n        >>> h.num_channels\n        0\n        >>> addr = lan.allocateRandomAddress();\n        >>> recv = lambda *args,**kwargs: None; #print({'args':args,'kwargs':kwargs});\n        >>> send = lan.bind(layer2protocol=Layer2Protocol.default,layer1address=addr,recv=recv);\n        >>> h.num_channels #both this channel and the broadcast\n        2\n        >>> K.run(lan.changeHubType,SimpleHub);\n        >>> h2=lan.hub;\n        >>> h!=h2\n        True\n        >>> h2.num_channels\n        2\n        \"\"\"\n        assert issubclass(HubType,Hub);\n        old_hub = self.__hub;\n        running = old_hub.is_running;\n        await old_hub.stop();\n        self.__hub = HubType(self.__hub);\n        if running:\n            await self.__hub.start();\n        ##\n    ##\n    \n    @typechecked(group=__name__)\n    def allocateRandomAddress(self,unicast:bool=True)->Layer1Address:\n        \"\"\" Returns a locally managed Layer1Address\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET);\n        >>> a = lan.allocateRandomAddress(True);\n        >>> lan.Layer1Address(a).is_unicast\n        True\n        \"\"\"\n        channels = self.__hub.channels;\n        if len(channels)>self.__num_addresses:\n            raise 
ValueError('No more addresses available');\n        ##\n        while True:\n            addr = self.Layer1Address.generateRandomAddress();\n            if self.__supports_unicast and unicast is not None and addr.is_unicast!=unicast:\n                addr.toggle_unicast();\n            ##\n            if self.__supports_local_admin and not addr.is_local_admin:\n                #because Lan is generating address, it should be locally administered\n                addr.toggle_local_admin();\n            ##\n            if addr not in channels:\n                break;\n            ##\n        ##\n        return addr;\n    ##\n    \n    @typechecked(group=__name__)\n    def allocateSequentialAddress(self,unicast:Optional[bool]=True)->Layer1Address:\n        \"\"\" Returns locally managed Layer1Address starting at the first and incrementing by one\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET); \n        >>> a = lan.allocateRandomAddress(True);\n        >>> lan.Layer1Address(a).is_unicast\n        True\n        \"\"\"\n        channels = self.__hub.channels;\n        if len(channels)>self.__num_addresses:\n            raise ValueError(f'No more addresses available: {len(channels)}>{self.__num_addresses}');\n        ##\n        while True:\n            value = next(self.__address_counter);\n            addr = self.Layer1Address(value);\n            if self.__supports_unicast and unicast is not None and addr.is_unicast!=unicast:\n                addr.toggle_unicast();\n            ##\n            if self.__supports_local_admin and not addr.is_local_admin:\n                #because Lan is generating address, it should be locally administered\n                addr.toggle_local_admin();\n            ##\n            if addr not in channels:\n                break;\n            ##\n        ##\n        return addr;\n    ##\n    \n    @typechecked(group=__name__)\n    def bind(self,*,layer1address:Layer1Address,layer2protocol:Layer2Protocol,recv:Callable, \\\n             record_statistics:bool=False,loopback_on_broadcast=False)->'Lan.Socket':\n        \"\"\" Binds a layer1address,layer2protocol with a specific receive callback.\n        Returns a Socket object to facilitate transmission over the newly bound communication channel \"\"\"\n        return self.Socket(lan=self,layer1address=layer1address,layer2protocol=layer2protocol,\\\n                           recv=recv,record_statistics=record_statistics,loopback_on_broadcast=loopback_on_broadcast);\n    ##\n    \n    class Socket(object):\n        \"\"\" Helper class that represents a bound (layer1address,layer2protocol) communication channel \"\"\"\n        \n        @typechecked(group=__name__)\n        def __init__(self,*,lan:'Lan',layer1address:Optional[Layer1Address],layer2protocol:Layer2Protocol,recv:Callable, \\\n                     record_statistics:bool=False,loopback_on_broadcast=False):\n            \"\"\" Wraps lan interface for easier access to send and recv data \"\"\"\n            self.__lan = lan;\n            self.__address = layer1address if layer1address is not None else lan.allocateRandomAddress();\n            self.__protocol = layer2protocol;\n            self.__name = lan._getChannel(layer2protocol=self.__protocol,layer1address=self.__address);\n            self.__rawRecv = recv;\n            self.__record_statistics=None;\n            self.__loopback_on_broadcast=None;\n            \n            self.statistics = Counter();\n            \n            #now bind the lan callbacks\n            self.__is_bound = True;\n            self.__lanTransmit = partial(self.__lan._publish ,self.__protocol);\n            self.__lanBroadcast = None;\n            self.__lanLoopback = None;\n            \n            self.record_statistics = record_statistics;\n            self.loopback_on_broadcast = loopback_on_broadcast;\n        ##\n        \n        ##########\n        ### Properties\n        @property\n        def lan(self)->'Lan':\n            \"\"\" Returns lan object of the socket interface \"\"\"\n            return self.__lan\n        ##\n        @property\n        def address(self)->Layer1Address:\n            \"\"\" Returns address of the socket on the interface \"\"\"\n            return self.__address;\n        ##\n        @property\n        def protocol(self)->Layer2Protocol:\n            \"\"\" Returns protocol of the socket \"\"\"\n            return self.__protocol;\n        ##\n        @property\n        def 
is_bound(self)->bool:\n \"\"\" Returns true/false if this socket is bound \"\"\"\n return self.__is_bound;\n ##\n \n @property\n def record_statistics(self)->bool:\n \"\"\" Returns true/false if the socket is recording statistics \"\"\"\n return self.__record_statistics;\n ##\n @record_statistics.setter\n def record_statistics(self,value:bool)->None:\n \"\"\" Tells the socket to record/not record statistics \"\"\"\n value=bool(value);\n if self.__record_statistics==value:\n return;\n ##\n if value:\n self.resetStatistics();\n ##\n self.__record_statistics=value;\n \n #rebind Lan's recv with/out stat recording callback\n self.__lanLoopback = self.__lan._bind(layer2protocol=self.__protocol,layer1address=self.__address,\\\n recv=(self.__recordRecv if self.__record_statistics else self.__rawRecv));\n self.__cacheCallbacks();\n ##\n \n @property\n def loopback_on_broadcast(self)->bool:\n \"\"\" Returns true/false if the socket sends messages to self when broadcasting \"\"\"\n return self.__loopback_on_broadcast;\n ##\n @loopback_on_broadcast.setter\n def loopback_on_broadcast(self,value):\n \"\"\" Tells the socket to send/not send messages to self when broadcasting \"\"\"\n if value==self.__loopback_on_broadcast:\n return;\n ##\n self.__loopback_on_broadcast=value;\n\n #Re-bind broadcast callback to ignore or not self\n self.__lanBroadcast = partial(self.__lan._broadcast,self.__protocol,\\\n ignore=(None if self.__loopback_on_broadcast else self.__name));\n self.__cacheCallbacks();\n ##\n ### /Properties\n ##########\n \n ##########\n ### Public\n async def loopback(self,data:Union[bytes,bytearray]):\n \"\"\" Function to send a message back to bound socket, may or may not bypass lower layers to send the message. \"\"\"\n if not self.__is_bound:\n raise ValueError('I/O operation on closed socket');\n ##\n return await self.__cachedLoopback(data);\n ##\n async def transmit(self,layer1address:Layer1Address,data:Union[bytes,bytearray]):\n \"\"\" Function to transmit to a particular address \"\"\"\n if not self.__is_bound:\n raise ValueError('I/O operation on closed socket');\n ##\n return await self.__cachedTransmit(layer1address,data);\n ##\n async def broadcast(self,data:Union[bytes,bytearray]):\n \"\"\" Function to broadcast to all addresses \"\"\"\n if not self.__is_bound:\n raise ValueError('I/O operation on closed socket');\n ##\n return await self.__cachedBroadcast(data);\n ##\n \n def resetStatistics(self)->None:\n \"\"\" Resets any recorded statistics \"\"\"\n self.statistics.clear();\n ##\n \n def unbind(self)->bool:\n \"\"\" Closes the socket, after which the socket can no longer send or receive messages \"\"\"\n if not self.__is_bound:\n return False;\n ##\n self.lan._unbind(layer2protocol=self.__protocol,layer1address=self.__address);\n self.__is_bound = False;\n self.__cacheCallbacks();\n return True;\n ##\n ### /Public\n ##########\n \n ##########\n ### Operators\n def __enter__(self,*a,**kw):\n \"\"\" Context enter \"\"\"\n return self;\n ##\n \n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\" Context exit \"\"\"\n self.unbind();\n ##\n \n def __del__(self):\n \"\"\" Deletes and closes the socket \"\"\"\n self.unbind();\n ##\n ### /Operators\n ##########\n \n ##########\n ### Private\n def __cacheCallbacks(self):\n \"\"\" Private function that updates cached callbacks mainly due to complexities relating to recording statistics \"\"\"\n if self.__is_bound:\n if self.__record_statistics:\n self.__cachedLoopback = self.__recordLoopback ;\n self.__cachedTransmit = 
self.__recordTransmit ;\n                    self.__cachedBroadcast = self.__recordBroadcast;\n                else:\n                    self.__cachedLoopback = self.__lanLoopback ;\n                    self.__cachedTransmit = self.__lanTransmit ;\n                    self.__cachedBroadcast = self.__lanBroadcast; \n                ##\n            else:\n                self.__cachedLoopback = None;\n                self.__cachedTransmit = None;\n                self.__cachedBroadcast = None;\n            ##\n        ##\n        \n        async def __recordRecv(self,*args,**kwargs):\n            \"\"\" Private callback that records statistics relating to the receive callback \"\"\"\n            try:\n                self.statistics['receive']+=1;\n                return await self.__rawRecv(*args,**kwargs);\n            except:\n                self.statistics['receive exception']+=1;\n                raise;\n            ##\n        ##\n        async def __recordLoopback(self,*args):\n            \"\"\" Private callback that records statistics relating to the loopback callback \"\"\"\n            try:\n                self.statistics['loopback']+=1;\n                return await self.__lanLoopback(*args);\n            except:\n                self.statistics['loopback exception']+=1;\n                raise;\n            ##\n        ##\n        async def __recordTransmit(self,address:Layer1Address,*args):\n            \"\"\" Private callback that records statistics relating to the transmit callback \"\"\"\n            try:\n                self.statistics['transmit']+=1;\n                return await self.__lanTransmit(address,*args);\n            except:\n                self.statistics['transmit exception']+=1;\n                raise;\n            ##\n        ##\n        async def __recordBroadcast(self,*args):\n            \"\"\" Private callback that records statistics relating to the broadcast callback \"\"\"\n            try:\n                self.statistics['broadcast']+=1;\n                return await self.__lanBroadcast(*args);\n            except:\n                self.statistics['broadcast exception']+=1;\n                raise;\n            ##\n        ##\n        ### /Private\n        ######\n    ## /class Socket\n    ### /Public\n    ##########\n    \n    ##########\n    ### Operators\n    async def __aenter__(self,*a,**kw):\n        \"\"\" Context enter \"\"\"\n        await self.start(*a,**kw);\n        return self;\n    ##\n    \n    async def __aexit__(self, exc_type, exc_val, exc_tb):\n        \"\"\" Context exit \"\"\"\n        #RISK: should any generated sockets become unbound/closed?\n        await self.stop();\n    ##\n    ### /Operators\n    ##########\n    \n    ##########\n    ### /Private\n    @typechecked(group=__name__)\n    def _getChannel(self,*,layer2protocol:Layer2Protocol,layer1address:Layer1Address):\n        \"\"\" Private function that returns tuple for binding callbacks \"\"\"\n        return (layer2protocol,layer1address);\n    ##\n    \n    @typechecked(group=__name__)\n    def _bind(self,*,layer2protocol:Layer2Protocol,layer1address:Layer1Address,recv:Callable)->Callable:\n        \"\"\" Simulates a connection with a particular protocol by taking in an address of the\n        connection, the protocol/type of the communication, and a receive callback taking in *args\n        If the address is unicast, then the address is also subscribed to the broadcast address.\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET);\n        >>> addr = lan.allocateRandomAddress();\n        >>> async def recv(*a,**kw): return None;\n        >>> loopback = lan._bind(layer2protocol=Layer2Protocol.default,layer1address=addr,recv=recv);\n        >>> K.run(loopback,'a');\n        \"\"\"\n        connection_protocol = layer2protocol;\n        async def wrappedRecv(channel,name,*args):\n            \"\"\" This function is called by the hub, which always supplies channel by the first arg\n            If the protocol matches the one for this connection, then call the callback\n            otherwise ignore it \"\"\"\n            \n            try:\n                protocol,address = channel;\n            except TypeError:\n                #other functions have been bound incorrectly to this hub which should be Lan exclusive\n                return None;\n            ##\n            \n            log.debug('Lan Recv: (address:%s,protocol:%s),name:%s, args:%s',address,protocol,name,args);\n            if protocol==connection_protocol:\n                #make sure that only publishes with 
the correct protocol will call the recv callback\n                return await recv(*args);\n            else:\n                #generally shouldn't ever get here, because protocols are included\n                # in the hub subscribe channel (address,protocol)\n                return None;\n            ##\n        ##\n        connection_channel = self._getChannel(layer2protocol=layer2protocol,layer1address=layer1address);\n        connection_name = connection_channel;\n        log.debug('Binding %s with callback',connection_channel);\n        self.__hub.subscribe(channel=connection_channel,name=connection_channel,callback=wrappedRecv);\n        \n        #subscribing to broadcast if supported\n        if not self.__supports_unicast or self.Layer1Address(layer1address).is_unicast:\n            broadcast_channel = self._getChannel(layer2protocol=layer2protocol,layer1address=self.Layer1Address.broadcast);\n            log.debug('Binding broadcast %s with callback',broadcast_channel);\n            self.__hub.subscribe(channel=broadcast_channel,name=connection_name,callback=wrappedRecv);\n        ##\n        return self.__hub.getCallback(connection_channel,connection_name);\n    ##\n    \n    @typechecked(group=__name__)\n    def _unbind(self,*,layer2protocol:Layer2Protocol,layer1address:Layer1Address)->bool:\n        \"\"\" Unbinds the recv callback from (address,protocol) and if applicable the (broadcast,protocol) bindings\n        \n        >>> lan = Lan(layer1protocol=Layer1Protocol.ETHERNET);\n        >>> addr = lan.allocateRandomAddress();\n        >>> recv = lambda *args,**kwargs: None;\n        >>> send = lan._bind(layer2protocol=Layer2Protocol.default,layer1address=addr,recv=recv);\n        >>> len(tuple(lan.hub.subscribed_channels))\n        2\n        >>> lan._unbind(layer2protocol=Layer2Protocol.default,layer1address=addr);\n        True\n        >>> len(tuple(lan.hub.subscribed_channels))\n        0\n        \"\"\"\n        out = True;\n        connection_channel = self._getChannel(layer2protocol=layer2protocol,layer1address=layer1address);\n        connection_name = connection_channel;\n        log.debug('Unbinding %s',connection_channel);\n        out &= self.__hub.unsubscribe(channel=connection_channel,name=connection_name);\n        if not self.__supports_unicast or self.Layer1Address(layer1address).is_unicast:\n            broadcast_channel = self._getChannel(layer2protocol=layer2protocol,layer1address=self.Layer1Address.broadcast);\n            log.debug('Unbinding broadcast %s',broadcast_channel);\n            out &= self.__hub.unsubscribe(channel=broadcast_channel,name=connection_name);\n        ##\n        return out\n    ##\n    \n    @typechecked(group=__name__)\n    async def _publish(self,layer2protocol:Layer2Protocol,layer1address:Layer1Address,*args,ignore=None)->None:\n        \"\"\" Private function to publish/transmit data to hub \"\"\"\n        channel=self._getChannel(layer2protocol=layer2protocol,layer1address=layer1address);\n        return await self.__hub.publish(channel,*args,ignore=ignore);\n    ##\n    \n    @typechecked(group=__name__)\n    async def _broadcast(self,layer2protocol:Layer2Protocol,*args,ignore=None)->None:\n        \"\"\" Private function to broadcast data to hub \"\"\"\n        channel=self._getChannel(layer2protocol=layer2protocol,layer1address=self.Layer1Address.broadcast);\n        return await self.__hub.publish(channel,*args,ignore=ignore);\n    ##\n    ### /Private\n    ##########\n## /class Lan\n######################################\n\n\n##############################\ntry:\n    MacLan = partial(Lan,layer1protocol=Layer1Protocol.ETHERNET);\nexcept ImportError:\n    MacLan = NotImplemented;\n##\n##############################\n\n\n######################################\nasync def _test(HubType,verbose,success_expected):\n    tests = [];\n    from mac_address import MacAddress;\n    import hub;\n    import abc;\n    ETHERNET = Layer1Protocol.ETHERNET;\n    \n    all_hub_types = set(getattr(hub,x) 
for x in dir(hub) if type(getattr(hub,x))==abc.ABCMeta and issubclass(getattr(hub,x),hub.Hub));\n    hh = set();\n    for ht in all_hub_types:\n        try:\n            h = ht();\n            hh.add(ht);\n        except Exception as e:\n            pass;\n        ##\n    ##\n    all_hub_types = hh;\n    \n    p2a = Layer2Protocol.CHAOS;\n    p2b = Layer2Protocol.default;\n    if verbose:\n        print(f'---------\\nLan Unit tests - {HubType}\\n');\n        formatter = logging.Formatter('{%(filename)s:%(funcName)s:%(lineno)d} %(levelname)s - %(message)s','%m-%d %H:%M:%S');\n        handler = logging.StreamHandler();\n        handler.setFormatter(formatter);\n        [log.removeHandler(h) for h in log.handlers];\n        log.addHandler(handler);\n        log.setLevel(level=logging.DEBUG);\n        log.propagate=False;\n    ##\n    async with Lan(layer1protocol=ETHERNET,hub=HubType()) as lan:\n        try:\n            addr = lan.allocateRandomAddress();\n            addr2 = lan.allocateRandomAddress();\n            X = [];\n            async def recv(*args):\n                nonlocal X;\n                if verbose:\n                    print(f'RECV callback: {len(args)} {args}');\n                ##\n                X.append(args);\n                return;\n            ##\n            \n            #Test swapping types.\n            for ht in all_hub_types:\n                await lan.changeHubType(ht);\n                tests.append(type(lan.hub)==ht);\n            ##\n            await lan.changeHubType(HubType);\n            tests.append(type(lan.hub)==HubType);\n            \n            socket = lan.bind(layer2protocol=p2a,layer1address=addr,recv=recv);\n            loopback = socket.loopback;\n            await loopback('should0');\n            await lan._publish(p2a,addr ,'should1');\n            await lan._publish(p2b,addr ,'nope' );\n            await lan._publish(p2b,addr2,'nope' );\n            await lan._publish(p2a,addr2,'nope' );\n            await lan._publish(p2a,addr ,'should2');\n            \n            await lan._broadcast(p2b,'nope' );\n            await lan._broadcast(p2a,'broadcast3');\n            \n            [log.removeHandler(h) for h in log.handlers];\n            log.setLevel(level=logging.WARNING);\n            [hub.log.removeHandler(h) for h in hub.log.handlers];\n            hub.log.setLevel(level=logging.WARNING);\n            \n            await lan.hub.flush();\n            success = len(X)==4;\n            tests.append(lan.is_running);\n            tests.append(success==success_expected);\n            if verbose>=2:\n                for i,x in enumerate(X):\n                    print(i,x);\n                ##\n            if verbose:\n                if not all(tests):\n                    print(tests);\n                    print(f'{HubType.__name__} did not send all properly.');\n                else:\n                    print(f'Lan test ran properly with {HubType.__name__}.');\n                ##\n            ##\n        except Exception as e:\n            if verbose:\n                print(f'Caught exception: {e}');\n            ##\n            tests.append(False);\n        finally:\n            await lan.stop();\n        ##\n    ##\n    passed = sum(tests);\n    total = len(tests);\n    return {'failed':total-passed,'total':total};\n##\n\nasync def _socketTest(HubType,verbose):\n    ETHERNET = Layer1Protocol.ETHERNET;\n    tests = [];\n    counter = 0;\n    cant_tx = (HubType==StubHub);\n    async def count(*a,**kw):\n        nonlocal counter;\n        counter+=1;\n    ##\n    if verbose:\n        print(f'---------\\nSocket Unit tests - {HubType}\\n');\n        #formatter = logging.Formatter('{%(filename)s:%(funcName)s:%(lineno)d} %(levelname)s - %(message)s','%m-%d %H:%M:%S');\n        #handler = logging.StreamHandler();\n        #handler.setFormatter(formatter);\n        #[log.removeHandler(h) for h in log.handlers];\n        #log.addHandler(handler);\n        #log.setLevel(level=logging.DEBUG);\n        #log.propagate=False;\n    ##\n    \n    async with Lan(layer1protocol=ETHERNET,hub=HubType()) as lan:\n        protocol = Layer2Protocol.default;\n        address = lan.allocateRandomAddress();\n        \n        #////////\n        total = len(tests);\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count) as sock:\n            pass;\n        ##\n        num_tests = len(tests)-total;\n        if verbose and not all(tests[-num_tests:]):\n            print(f'Socket enter/exit failed: {tests[-num_tests:]}');\n        ##\n        \n        #////////\n        total = len(tests);\n        with 
lan.bind(layer2protocol=protocol,layer1address=address,recv=count) as sock:\n            tests.append(sock.is_bound==True);\n            tests.append(sock.record_statistics in (True,False));\n            tests.append(sock.loopback_on_broadcast in (True,False));\n            tests.append(sock.address==address);\n            tests.append(sock.protocol==protocol);\n            tests.append(sock.lan is lan);\n            num_tests = len(tests)-total;\n            if verbose and not all(tests[-num_tests:]):\n                print(f'Socket Init Test #1 failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n            ##\n        ##\n        \n        #////////\n        total = len(tests);\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count,\\\n                      record_statistics=False,loopback_on_broadcast=False) as sock:\n            tests.append(sock.is_bound==True);\n            tests.append(sock.unbind()==True);\n            tests.append(sock.is_bound==False);\n            tests.append(sock.unbind()==False);\n            tests.append(sock.is_bound==False);\n            \n            num_tests = len(tests)-total;\n            if verbose and not all(tests[-num_tests:]):\n                print(f'unbind/is_bound failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n            ##\n        ##\n        \n        #////////\n        total = len(tests);\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count, \\\n                      record_statistics=False,loopback_on_broadcast=False) as sock:\n            res = False;\n            try:\n                del sock;\n                res = True;\n            except Exception:\n                res = False;\n            ##\n            tests.append(res);\n            \n            num_tests = len(tests)-total;\n            if verbose and not all(tests[-num_tests:]):\n                print(f'del socket failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n            ##\n        ##\n        \n        #////////\n        total = len(tests);\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count,record_statistics=True) as sock:\n            tests.append(sock.record_statistics==True);\n        ##\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count,record_statistics=False) as sock:\n            tests.append(sock.record_statistics==False);\n        ##\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count,loopback_on_broadcast=True) as sock:\n            tests.append(sock.loopback_on_broadcast==True);\n        ##\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count,loopback_on_broadcast=False) as sock:\n            tests.append(sock.loopback_on_broadcast==False);\n        ##\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count, \\\n                      record_statistics=False,loopback_on_broadcast=False) as sock:\n            tests.append(sock.record_statistics==False);\n            tests.append(sock.loopback_on_broadcast==False);\n            sock.record_statistics = True;\n            sock.loopback_on_broadcast = True;\n            tests.append(sock.record_statistics==True);\n            tests.append(sock.loopback_on_broadcast==True);\n        ##\n        num_tests = len(tests)-total;\n        if verbose and not all(tests[-num_tests:]):\n            print(f'Init record_stats/loopback_on_broad tests failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n        ## \n        \n        #////////\n        total = len(tests);\n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count,record_statistics=True) as sock:\n            tests.append(sock.statistics['a']==0);\n            sock.statistics['a']=234;\n            sock.resetStatistics();\n            tests.append(sock.statistics['a']==0);\n        ##\n        num_tests = len(tests)-total;\n        if verbose and not all(tests[-num_tests:]):\n            print(f'resetStatistics failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n        ##\n        \n        with lan.bind(layer2protocol=protocol,layer1address=address,recv=count) as sock:\n            #//////// LOOPBACK\n            total = len(tests);\n            counter = 0;sock.resetStatistics();\n            \n            sock.record_statistics = True;\n            counter = 0;sock.resetStatistics();\n            await 
sock.loopback('test');\n tests.append(cant_tx or counter==1);\n tests.append(sock.statistics['loopback']==1);\n tests.append(cant_tx or sock.statistics['receive']==1);\n \n sock.record_statistics = False;\n counter = 0;sock.resetStatistics();\n await sock.loopback('test');\n tests.append(cant_tx or counter==1);\n tests.append(sock.statistics['loopback']==0);\n tests.append(cant_tx or sock.statistics['receive']==0);\n num_tests = len(tests)-total;\n if verbose and not all(tests[-num_tests:]):\n print(f'loopback failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n ##\n \n #//////// TRANSMIT\n total = len(tests);\n counter = 0;sock.resetStatistics();\n \n sock.record_statistics = True;\n counter = 0;sock.resetStatistics();\n await sock.transmit('nobody','test');\n await sock.lan.hub.flush();\n tests.append(cant_tx or counter==0); # 0\n tests.append(sock.statistics['transmit']==1); #1\n tests.append(sock.statistics['receive']==0); #2\n await sock.transmit(sock.address,'test'); \n await sock.lan.hub.flush();\n tests.append(cant_tx or counter==1); #3\n tests.append(sock.statistics['transmit']==2); #4\n tests.append(cant_tx or sock.statistics['receive']==1); #5\n \n sock.record_statistics = False;\n counter = 0;sock.resetStatistics();\n await sock.transmit('nobody','test');\n await sock.lan.hub.flush();\n tests.append(cant_tx or counter==0); #6\n tests.append(sock.statistics['transmit']==0); #7\n tests.append(sock.statistics['receive']==0); #8\n await sock.transmit(sock.address,'test');\n await sock.lan.hub.flush();\n tests.append(cant_tx or counter==1); #9\n tests.append(sock.statistics['transmit']==0); #10\n tests.append(sock.statistics['receive']==0); #11\n \n num_tests = len(tests)-total;\n if verbose and not all(tests[-num_tests:]):\n print(f'transmit failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n ##\n \n #//////// BROADCAST\n total = len(tests);\n \n sock.record_statistics = True;\n sock.loopback_on_broadcast = True;\n counter = 0;sock.resetStatistics();\n await sock.broadcast('test');\n await lan.hub.flush();\n tests.append(cant_tx or counter==1);#0\n tests.append(sock.statistics['broadcast']==1); #1\n tests.append(cant_tx or sock.statistics['receive']==1); #2\n \n sock.record_statistics = False;\n sock.loopback_on_broadcast = True;\n counter = 0;sock.resetStatistics();\n await sock.broadcast('test');\n await lan.hub.flush();\n tests.append(cant_tx or counter==1);#3\n tests.append(sock.statistics['broadcast']==0); #4\n tests.append(cant_tx or sock.statistics['receive']==0); #5\n \n sock.record_statistics = True;\n sock.loopback_on_broadcast = False;\n counter = 0;sock.resetStatistics();\n await sock.broadcast('test');\n await lan.hub.flush();\n tests.append(counter==0);#6\n tests.append(sock.statistics['broadcast']==1); #7\n tests.append(cant_tx or sock.statistics['receive']==0); #8\n \n sock.record_statistics = False;\n sock.loopback_on_broadcast = False;\n counter = 0;sock.resetStatistics();\n await sock.broadcast('test');\n await lan.hub.flush();\n tests.append(counter==0);#9\n tests.append(sock.statistics['broadcast']==0); #10\n tests.append(cant_tx or sock.statistics['receive']==0); #11\n \n num_tests = len(tests)-total;\n if verbose and not all(tests[-num_tests:]):\n print(f'broadcast failed: {[i for i,t in enumerate(tests[-num_tests:]) if not t]}');\n ##\n ##\n ##\n passed = sum(tests);\n total = len(tests);\n return {'failed':total-passed,'total':total};\n##\n\ndef _unittest(verbose=False):\n \"\"\" Unittest for this module \"\"\"\n out = 
True;\n import lib.typeguard as typeguard;\n from hub import StubHub,SimpleHub,QHub,DelayHub;\n import utilities;\n import generator_utilities;\n from lib.curio import Kernel; \n import doctest;\n \n results = [];\n if verbose:\n print(f'Unittest {__name__}');\n ##\n typeguard.groups.update({g:True for g in typeguard.groups});\n \n try:\n typeguard.groups[__name__]=True;\n types = [StubHub,SimpleHub,QHub,DelayHub];\n with Kernel() as K:\n extraglobs = {'K':K};\n for type in types:\n res = K.run(_test,type, verbose, type!=StubHub);\n if verbose:\n print(f'Lan test results for {type} {res}');\n ##\n results.append(res);\n \n res = K.run(_socketTest,type, verbose);\n if verbose:\n print(f'Socket Test results for {type} {res}');\n ##\n results.append(res);\n ##\n x = doctest.testmod(extraglobs=extraglobs,optionflags=doctest.IGNORE_EXCEPTION_DETAIL);\n if verbose:\n print(x);\n ##\n results.append({'failed':x.failed,'total':x.attempted});\n ##\n except Exception as e:\n out = False;\n if verbose:\n raise;\n ##\n ##\n totals = {k:sum(i[k] for i in results) for k in ('failed','total',)};\n return totals;\n##\n\nif __name__==\"__main__\":\n print(_unittest(True));\n##\n","sub_path":"lan.py","file_name":"lan.py","file_ext":"py","file_size_in_byte":34260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137185625","text":"from PDFControl import PDFControl\n\nclass PDFText(PDFControl):\n\n def __init__(self):\n PDFControl.__init__(self)\n self.text = ''\n self.size = 10\n\n def write(self, page, stream):\n stream.write('BT')\n stream.write('/F0 %d Tf' % self.size)\n stream.write('%f %f Td' % (self.x, (page.toY(self.y)))) # Td is the basic positioning operator. It moves the text position\n stream.write('(%s) Tj' % self.text)\n stream.write('ET')\n\n","sub_path":"pdf/05.model/02.control/05.table/02.basic(merge)/py/PDFText.py","file_name":"PDFText.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604496133","text":"\"\"\"Custom layers implementation.\n\nInspired by Bahdanau attention, the following implements a contextual attention\nmechanism. The attention weights specify how well each token of the encoded\nSMILES (e.g. bRNN, raw embedding, conv_output) targets the genes.\n\nNOTE:\ngene_projection and smiles_projection are used to project genes and SMILES into\n common space. Then in forward() these two are added and given through a\n tanh before the alpha_projection is applied to get the attention weights.\nNOTE:\nIn tensorflow, weights were initialized from N(0,0.1). 
Instead, pytorch uses\n U(-stddev, stddev) where stddev=1./math.sqrt(weight.size(1)).\n\"\"\"\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom .utils import Squeeze, Unsqueeze, get_device\n\nDEVICE = get_device()\n\n\ndef dense_layer(\n input_size, hidden_size, act_fn=nn.ReLU(), batch_norm=False, dropout=0.\n):\n return nn.Sequential(\n OrderedDict(\n [\n ('projection', nn.Linear(input_size, hidden_size)),\n (\n 'batch_norm', nn.BatchNorm1d(hidden_size)\n if batch_norm else nn.Identity()\n ),\n ('act_fn', act_fn),\n ('dropout', nn.Dropout(p=dropout)),\n ]\n )\n )\n\n\ndef dense_attention_layer(number_of_features):\n \"\"\"Attention mechanism layer for dense inputs.\n\n Args:\n number_of_features (int): Size to allocate weight matrix.\n Returns:\n callable: a function that can be called with inputs.\n \"\"\"\n return nn.Sequential(\n OrderedDict(\n [\n ('dense', nn.Linear(number_of_features, number_of_features)),\n ('softmax', nn.Softmax())\n ]\n )\n )\n\n\ndef convolutional_layer(\n num_kernel,\n kernel_size,\n act_fn=nn.ReLU(),\n batch_norm=False,\n dropout=0.,\n input_channels=1\n):\n \"\"\"Convolutional layer.\n\n Args:\n num_kernel (int): Number of convolution kernels.\n kernel_size (tuple[int, int]): Size of the convolution kernels.\n act_fn (callable): Functional of the nonlinear activation.\n batch_norm (bool): whether batch normalization is applied.\n dropout (float): Probability for each input value to be 0.\n input_channels (int): Number of input channels (defaults to 1).\n\n Returns:\n callable: a function that can be called with inputs.\n \"\"\"\n return nn.Sequential(\n OrderedDict(\n [\n (\n 'convolve',\n torch.nn.Conv2d(\n input_channels, # channel_in\n num_kernel, # channel_out\n kernel_size, # kernel_size\n padding=[kernel_size[0] // 2, 0] # pad for valid conv.\n )\n ),\n ('squeeze', Squeeze()),\n ('act_fn', act_fn),\n ('dropout', nn.Dropout(p=dropout)),\n (\n 'batch_norm',\n nn.BatchNorm1d(num_kernel) if batch_norm else nn.Identity()\n )\n ]\n )\n )\n\n\ndef gene_projection(num_genes, attention_size, ind_nonlin=nn.Sequential()):\n return nn.Sequential(\n OrderedDict(\n [\n ('projection', nn.Linear(num_genes, attention_size)),\n ('act_fn', ind_nonlin), ('expand', Unsqueeze(1))\n ]\n )\n ).to(DEVICE)\n\n\ndef smiles_projection(\n smiles_hidden_size, attention_size, ind_nonlin=nn.Sequential()\n):\n return nn.Sequential(\n OrderedDict(\n [\n ('projection', nn.Linear(smiles_hidden_size, attention_size)),\n ('act_fn', ind_nonlin)\n ]\n )\n ).to(DEVICE)\n\n\ndef alpha_projection(attention_size):\n return nn.Sequential(\n OrderedDict(\n [\n ('projection', nn.Linear(attention_size, 1, bias=False)),\n ('squeeze', Squeeze()), ('softmax', nn.Softmax(dim=1))\n ]\n )\n ).to(DEVICE)\n","sub_path":"paccmann_predictor/utils/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137791829","text":"# -*- coding: utf-8 -*-\n#\n# This file provide utilities for controlling Video\n#\n#\n\nimport os\nimport time\nimport copy\n\nfrom _classes import AbstractVLC, ExternalProcess, module\nfrom modules import link, exposesignals\nfrom engine.setting import settings\nfrom engine.log import init_log\nfrom libs import rtplib\n\nlog = init_log(\"video\")\n\n\nFILTERS = {\n \"MEDIA_END\": [\"transTo /video/end\", True],\n \"VIDEO_END\": [True]\n }\n\n\nclass VideoVLCPlayer(AbstractVLC):\n \"\"\"\n This class define an VideoPlayer with VLC as 
player backend\n \"\"\"\n\n def __init__(self):\n command = copy.copy(settings.get_path(\"mvlc\"))\n \"\"\":type: str\"\"\"\n arguments = copy.copy(settings.get(\"vlc\", \"options\", \"default\"))\n \"\"\":type: dict\"\"\"\n arguments.update(settings.get(\"vlc\", \"options\", \"video\"))\n log.log(\"raw\", \"Hplayer command : {0}\".format(command.format(**arguments)))\n AbstractVLC.__init__(self, name=\"videovlc\", command=command.format(**arguments), filters=FILTERS)\n\n def check_media(self, media):\n \"\"\"\n Add video to the media path\n \"\"\"\n return AbstractVLC.check_media(self, os.path.join(settings.get(\"path\", \"relative\", \"video\"), media))\n\n#\n# class VlcPlayer(ExternalProcess):\n# \"\"\"\n# Video Lan VLC Player interface\n# \"\"\"\n# def __init__(self, start=True, name='vlcvideo', *args, **kwargs):\n# ExternalProcess.__init__(self, name=name, *args, **kwargs)\n# self.onClose = \"VIDEO_END\"\n# self.media = None\n# self.repeat = False\n# self.preloaded = False\n# if start:\n# self.start()\n#\n# def preload(self, filename=None, repeat=None, mediatype='video'):\n# if filename is not None:\n# media = os.path.join(settings.get_path(\"media\", mediatype), filename)\n# if os.path.isfile(media):\n# self.media = media\n#\n# if repeat is not None:\n# self.repeat = True if repeat else False\n#\n# if self.media is None or not os.path.isfile(self.media):\n# log.warning(\"Media File not found {0}\".format(self.media))\n# self.preloaded = False\n# else:\n# self.preloaded = True\n#\n# def play(self, filename=None, repeat=None):\n# if filename is not None:\n# self.preload(filename, repeat)\n# if self.preloaded:\n# #self.say(\"clear\")\n# self.say(\"add {media}\".format(media=self.media))\n# self.say(\"volume {0}\".format(settings.get(\"sys\", \"vlc_volume\"))) # Set default VLC volume\n# #self.say(\"play\")\n# repeat = 'on' if self.repeat else 'off'\n# self.say(\"repeat {switch}\".format(switch=repeat))\n#\n# def pause(self):\n# self.say(\"pause\")\n#\n# def stop(self):\n# if self.is_running():\n# self.say(\"stop\")\n# time.sleep(0.01)\n# ExternalProcess.stop(self)\n#\n# def set_volume(self, value):\n# self.say(\"volume {0}\".format(value))\n#\n# def volume_up(self):\n# self.say(\"volup\")\n#\n# def volume_down(self):\n# self.say(\"voldown\")\n#\n# Filters = {\n# 'VIDEO_END': [True]\n# }\n#\n#\n# class VlcPlayerOneShot(VlcPlayer):\n#\n# def __init__(self, *args, **kwargs):\n# kwargs['start'] = False\n# VlcPlayer.__init__(self, *args, **kwargs)\n#\n# def play(self, filename=None, repeat=None):\n# if filename is not None:\n# self.preload(filename, repeat)\n# if self.preloaded:\n# self.command = self.executable+' --play-and-exit '\n# if repeat:\n# self.command += ' --repeat '\n# self.command += self.media\n# self.start()\n\nexposesignals(FILTERS)\n\n\n\n# ETAPE AND SIGNALS\n@module('VideoPlayer')\n@link({\"/video/play [media:str] [repeat:bool] [volume:int]\": \"video_play\",\n \"/video/pause\": \"video_pause\",\n \"/video/resume\": \"video_resume\",\n \"/video/toggle\": \"video_toggle\",\n \"/video/stop\": \"video_stop\",\n \"/video/volumeup\": \"video_volume_up\",\n \"/video/volumedown\": \"video_volume_down\",\n \"/video/set_volume [volume:int]\": \"video_set_volume\",\n \"SCENE_STOPPING\": \"video_stop\",\n \"/media/volup\": \"video_volume_up\",\n \"/media/voldown\": \"video_volume_down\"})\n\ndef video_player(flag, **kwargs):\n if kwargs[\"_fsm\"].process is None:\n kwargs[\"_fsm\"].process = VideoVLCPlayer()\n kwargs[\"_fsm\"].process.start()\n\n\n@link({None: \"video_player\"})\ndef 
video_play(flag, **kwargs):\n    if kwargs[\"_fsm\"].process is None:\n        video_player(flag, **kwargs)\n\n    media = flag.args[\"media\"] if 'media' in flag.args else None\n    kwargs[\"_fsm\"].process.load(media)\n    repeat = flag.args[\"repeat\"] if 'repeat' in flag.args else None\n    if repeat is None:\n        repeat = False\n    kwargs[\"_fsm\"].process.repeat(repeat)\n\n    if flag is not None and flag.args is not None and 'abs_time_sync' in flag.args:\n        log.debug('+++ BEFORE SYNC PLAY {0}'.format(rtplib.get_time()))\n        rtplib.wait_abs_time(*flag.args['abs_time_sync'])\n        log.debug('+++ SYNC PLAY {0}'.format(flag.args['abs_time_sync']))\n    kwargs[\"_fsm\"].process.play()\n    # kwargs[\"_etape\"].preemptible.set()\n\n\n@link({None: \"video_player\"})\ndef video_stop(flag, **kwargs):\n    kwargs[\"_fsm\"].process.stop_media()\n\n\n@link({None: \"video_player\"})\ndef video_pause(flag, **kwargs):\n    kwargs[\"_fsm\"].process.pause()\n\n\n@link({None: \"video_player\"})\ndef video_resume(flag, **kwargs):\n    kwargs[\"_fsm\"].process.resume()\n\n\n@link({None: \"video_player\"})\ndef video_toggle(flag, **kwargs):\n    kwargs[\"_fsm\"].process.toggle()\n\n\n@link({None: \"video_player\"})\ndef video_set_volume(flag, **kwargs):\n    if isinstance(kwargs[\"_fsm\"].process, VideoVLCPlayer):\n        kwargs[\"_fsm\"].process.set_volume(flag.args[\"volume\"])\n    else:\n        log.warning(\"Asked to set volume on an unlaunched process (VlcPlayer)\")\n\n\n@link({None: \"video_player\"})\ndef video_volume_up(flag, **kwargs):\n    if isinstance(kwargs[\"_fsm\"].process, VideoVLCPlayer):\n        kwargs[\"_fsm\"].process.volume_up()\n    else:\n        log.warning(\"Asked to volume up on an unlaunched process (VlcPlayer)\")\n\n\n@link({None: \"video_player\"})\ndef video_volume_down(flag, **kwargs):\n    if isinstance(kwargs[\"_fsm\"].process, VideoVLCPlayer):\n        kwargs[\"_fsm\"].process.volume_down()\n    else:\n        log.warning(\"Asked to volume down on an unlaunched process (VlcPlayer)\")\n\n\n'''\nclass HPlayer(ExternalProcess):\n    \"\"\"\n    The HVideoPlayer allow playing a video and control it via HPlayer\n    \"\"\"\n\n    def __init__(self, path, hdmi_audio=False, *_args):\n        \"\"\"\n        :param path: Path to the media\n        :param hdmi_audio: Output audio via hdmi or not\n        \"\"\"\n        VideoPlayer.__init__(self, path)\n        args = list(_args)\n        self._file = os.path.join(settings.get(\"path\", \"media\"), self.path)\n        self._target = liblo.Address(\"127.0.0.1\", settings.get(\"localport\", \"inhplayer\"))\n\n        self._msg = dict()\n        self._msg[\"play\"] = liblo.Message(\"/play\", (\"s\", self._file))\n        self._msg[\"stop\"] = liblo.Message(\"/stop\")\n        self._msg[\"pause\"] = liblo.Message(\"/pause\")\n        self._msg[\"resume\"] = liblo.Message(\"/resume\")\n        self._msg[\"quit\"] = liblo.Message(\"/quit\")\n\n        self._cmd_line = \"{exe} --ahdmi {audio} --media {file} {params} --in {portin}\".format(\n            exe=settings.get(\"path\", \"hplayer\"),\n            file=self._file,\n            audio=hdmi_audio,\n            params=\" \".join(args),\n            portin=settings.get(\"localport\", \"inhplayer\"))\n        self.arguments = shlex.split(self._cmd_line)\n\n    def start(self):\n        \"\"\"\n        This function play the file into the running server\n        \"\"\"\n        VideoPlayer.start(self)\n        liblo.send(self._target, self._msg[\"play\"])\n\n    def stop(self):\n        liblo.send(self._target, self._msg[\"stop\"])\n        liblo.send(self._target, self._msg[\"quit\"])\n        VideoPlayer.stop(self)\n\n    def pause(self):\n        \"\"\"\n        This function send an OSC message to the HPlayer to ask pause\n        \"\"\"\n        liblo.send(self._target, self._msg[\"pause\"])\n\n    def resume(self):\n        \"\"\"\n        This function send an OSC message to the 
HPlayer to ask resume\n \"\"\"\n liblo.send(self._target, self._msg[\"resume\"])\n\n\n\nclass OMXVideoPlayer(ExternalProcess):\n \"\"\"\n The OMX Video Play allow playing a video via omxplayer\n \"\"\"\n\n def __init__(self, path, audio=\"both\", hardware=True, args=()):\n \"\"\"\n :param path: Path to the media\n :param video_output: Audio output to use (local = jack, hdmi = hdmi )\n :param hardware: Use the -hw option to decode audio with hardware\n :param args: Stings args to add to the omxplayer commande\n :return:\n \"\"\"\n VideoPlayer.__init__(self, path)\n t = rtplib.get_time()\n t = int((t[0] & 0xFFFF0000 | t[1] & 0x0000FFFF))\n args = list(args)\n self._fifo_path = os.path.join(settings.get(\"path\", \"tmp\"), \".omx_{0}.fifo\".format(t))\n os.mkfifo(self._fifo_path)\n\n if hardware:\n args.append(\"--hw -b \")\n if audio is True:\n video_output = \"hdmi\"\n elif audio == \"both\":\n video_output = \"both\"\n else:\n video_output = \"local\"\n self._cmd_line = \"{exe} -o {audio} {params} {file} < {fifo}\".format(exe=settings.get(\"path\", \"omxplayer\"),\n audio=video_output,\n params=\" \".join(args),\n file=os.path.join(\n settings.get(\"path\", \"media\"), path),\n fifo=self._fifo_path)\n # self.arguments = shlex.split(self._cmd_line) # TODO : if Shell=True, this line is useless\n self.arguments = self._cmd_line\n VideoPlayer.start(self) # Start player because it will wait the fifo\n log.log(\"raw\", \"Video player arguments : {0}\".format(self.arguments))\n\n def _write_fifo(self, data):\n \"\"\"\n Write to the fifo\n :param data:\n :return:\n \"\"\"\n if self._popen.poll() is None: # Process doesn't finish\n log.log(\"raw\", \"Write in fifo : {0}\".format(data))\n try:\n fifo = os.open(self._fifo_path, os.O_WRONLY)\n os.write(fifo, data)\n os.close(fifo)\n except Exception as e:\n log.error(log.show_exception(e))\n else:\n log.log(\"raw\", \"Ast to rite in fifo : {0}, but process end !\".format(data))\n\n def start(self):\n \"\"\"\n Redifine start which will just launch the player by the fifo\n :return:\n \"\"\"\n self._write_fifo(\".\\n\")\n\n def toggle_play(self):\n \"\"\"\n Toggle the player between play / pause\n :return:\n \"\"\"\n self._write_fifo(\"p\")\n\n def volume_up(self):\n \"\"\"\n Up the volume\n :return:\n \"\"\"\n self._write_fifo(\"+\")\n\n def volume_down(self):\n \"\"\"\n Down the volume\n :return:\n \"\"\"\n self._write_fifo(\"-\")\n\n def show_info(self):\n \"\"\"\n Show information about the current media\n :return:\n \"\"\"\n self._write_fifo(\"z\")\n\n def stop(self):\n \"\"\"\n Asking top stop the omx player\n :return:\n \"\"\"\n try:\n self._write_fifo(\"q\")\n time.sleep(0.250)\n VideoPlayer.stop(self)\n except Exception as e:\n log.error(log.show_exception(e))\n finally:\n os.remove(self._fifo_path)\n log.log(\"raw\", \"Video player correctly end !\")\n\n def _terminate(self):\n \"\"\"\n Send the terminate (15) signal to the process\n :return:\n \"\"\"\n os.killpg(self._popen.pid, signal.SIGTERM) # Send SIGTERM to the player, asking to stop\n\n def _kill(self):\n \"\"\"\n Send the kill (9) signal to the process\n :return:\n \"\"\"\n os.killpg(self._popen.pid, signal.SIGKILL) # Send SIGNKILL to brutaly kill the process\n\nclass VLCPlayer(VideoPlayer):\n \"\"\"\n Video Lan VLC Player interface\n \"\"\"\n\n def __init__(self, path, *args, **kwargs):\n VideoPlayer.__init__(self, path)\n t = rtplib.get_time()\n t = int((t[0] & 0xFFFF0000 | t[1] & 0x0000FFFF))\n self._socket_path = os.path.join(settings.get(\"path\", \"tmp\"), 
\".vlc_{0}.socket\".format(t))\n self._cmd_line = \"{exe} --play-and-exit -I oldrc --rc-unix {socket} --rc-fake-tty {file}\".format(exe=settings.get(\"path\", \"vlc\"),\n file=os.path.join(settings.get(\"path\", \"media\"), path),\n socket=self._socket_path)\n # self.arguments = shlex.split(self._cmd_line) # TODO : if Shell=True, this line is useless\n self.arguments = self._cmd_line\n self.start()\n\n def toggle_play(self):\n log.info(\"VLC PAUSE\")\n try:\n s = socket.socket(socket.AF_UNIX)\n s.connect(self._socket_path)\n s.send(\"pause\")\n except Exception as e:\n log.error(log.show_exception(e))\n finally:\n try:\n s.close()\n except Exception as e:\n log.error(log.show_exception(e))\n\n def stop(self):\n VideoPlayer.stop(self)\n '''\n\n","sub_path":"player/Python/modules/playervideo.py","file_name":"playervideo.py","file_ext":"py","file_size_in_byte":13602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"273561135","text":"from django.conf.urls import url\nfrom .views import GroupDetail, GroupMembers, AddGroupMember, RemoveGroupMember, RemoveUpline\n\napp_name = 'group'\n\nurlpatterns = [\n url(r'^$', GroupDetail.as_view(), name='group-detail'),\n url(r'^(?P\\d+)/$', GroupMembers.as_view(), name='group-members'),\n url(r'^add-member/$', AddGroupMember.as_view(), name='add-group-member'),\n url(r'^remove-member/(?P\\d+)/$', RemoveGroupMember.as_view(), name='remove-group-member'),\n url(r'^remove-upline/$', RemoveUpline.as_view(), name='remove-upline'),\n]","sub_path":"apis/components/group/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604147244","text":"import random\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom cogdl.data import Graph\r\nfrom cogdl.layers import SAGELayer\r\n\r\nfrom cogdl.models import BaseModel\r\n\r\n\r\ndef sage_sampler(adjlist, edge_index, num_sample):\r\n if adjlist == {}:\r\n row, col = edge_index\r\n row = row.cpu().numpy()\r\n col = col.cpu().numpy()\r\n for i in zip(row, col):\r\n if not (i[0] in adjlist):\r\n adjlist[i[0]] = [i[1]]\r\n else:\r\n adjlist[i[0]].append(i[1])\r\n\r\n sample_list = []\r\n for i in adjlist:\r\n list = [[i, j] for j in adjlist[i]]\r\n if len(list) > num_sample:\r\n list = random.sample(list, num_sample)\r\n sample_list.extend(list)\r\n\r\n edge_idx = torch.as_tensor(sample_list, dtype=torch.long).t()\r\n return edge_idx\r\n\r\n\r\nclass Graphsage(BaseModel):\r\n def sampling(self, edge_index, num_sample):\r\n return sage_sampler(self.adjlist, edge_index, num_sample)\r\n\r\n def __init__(self, num_features, num_classes, hidden_size, num_layers, sample_size, dropout, aggr):\r\n super(Graphsage, self).__init__()\r\n assert num_layers == len(sample_size)\r\n self.adjlist = {}\r\n self.num_features = num_features\r\n self.num_classes = num_classes\r\n self.hidden_size = hidden_size\r\n self.num_layers = num_layers\r\n self.sample_size = sample_size\r\n self.dropout = dropout\r\n shapes = [num_features] + hidden_size + [num_classes]\r\n self.convs = nn.ModuleList(\r\n [SAGELayer(shapes[layer], shapes[layer + 1], aggr=aggr) for layer in range(num_layers)]\r\n )\r\n\r\n def mini_forward(self, graph):\r\n x = graph.x\r\n for i in range(self.num_layers):\r\n edge_index_sp = self.sampling(graph.edge_index, self.sample_size[i]).to(x.device)\r\n with graph.local_graph():\r\n graph.edge_index = edge_index_sp\r\n x 
= self.convs[i](graph, x)\r\n if i != self.num_layers - 1:\r\n x = F.relu(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n return x\r\n\r\n def forward(self, *args):\r\n if isinstance(args[0], Graph):\r\n return self.mini_forward(*args)\r\n else:\r\n device = next(self.parameters()).device\r\n x, adjs = args\r\n for i, (src_id, graph, size) in enumerate(adjs):\r\n graph = graph.to(device)\r\n output = self.convs[i](graph, x)\r\n x = output[: size[1]]\r\n if i != self.num_layers - 1:\r\n x = F.relu(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n return x\r\n\r\n def inference(self, x_all, data_loader):\r\n device = next(self.parameters()).device\r\n for i in range(len(self.convs)):\r\n output = []\r\n for src_id, graph, size in data_loader:\r\n x = x_all[src_id].to(device)\r\n graph = graph.to(device)\r\n x = self.convs[i](graph, x)\r\n x = x[: size[1]]\r\n if i != self.num_layers - 1:\r\n x = F.relu(x)\r\n output.append(x.cpu())\r\n x_all = torch.cat(output, dim=0)\r\n return x_all\r\n\r\n\r\nclass SAGE(BaseModel):\r\n def __init__(\r\n self,\r\n in_feats,\r\n out_feats,\r\n hidden_size,\r\n num_layers,\r\n aggr=\"mean\",\r\n dropout=0.5,\r\n norm=None,\r\n activation=None,\r\n normalize=False,\r\n actnn=False,\r\n ):\r\n super(SAGE, self).__init__()\r\n shapes = [in_feats] + [hidden_size] * (num_layers - 1) + [out_feats]\r\n self.num_layers = num_layers\r\n Layer = SAGELayer\r\n if actnn:\r\n try:\r\n from cogdl.layers.actsage_layer import ActSAGELayer\r\n except Exception:\r\n print(\"Please install the actnn library first.\")\r\n exit(1)\r\n Layer = ActSAGELayer\r\n self.layers = nn.ModuleList(\r\n [\r\n Layer(\r\n shapes[i],\r\n shapes[i + 1],\r\n aggr=aggr,\r\n normalize=normalize if i != num_layers - 1 else False,\r\n dropout=dropout,\r\n norm=norm if i != num_layers - 1 else None,\r\n activation=activation if i != num_layers - 1 else None,\r\n )\r\n for i in range(num_layers)\r\n ]\r\n )\r\n\r\n def reset_parameters(self):\r\n for layer in self.layers:\r\n layer.fc.reset_parameters()\r\n\r\n def forward(self, graph):\r\n x = graph.x\r\n for layer in self.layers:\r\n x = layer(graph, x)\r\n return F.log_softmax(x, dim=-1)\r\n\r\n","sub_path":"examples/dgraph/models/graphsage.py","file_name":"graphsage.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"642666752","text":"import matplotlib.pyplot as plt\n\nx = [i for i in range(10)]\nprint(x)\n\ny1 = [2 **x for x in range(10)]\nprint(y1)\nplt.plot(x, y1, 'go-', label='example1')\n\ny2 = [2**x for x in range(9,-1,-1)]\nprint(y2)\nplt.plot(x, y2, 'r--', label='example2')\n\ny3 = [x + y for x, y in zip(y1, y2)]\nprint(y3)\nplt.plot(x, y3, 'b:', label='example3')\n\nplt.legend(loc='best')\nplt.title('Line Chart Example')\nplt.show()\n\n","sub_path":"scratch03/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155741741","text":"'''\ndef malware():\n x = 1\n while x == 1:\n print(\"malware found \")\nmalware()\n'''\nimport datetime\nprint(\"Hello Friend \")\n\nperson = input(\"what's your name ? \");\nprint('Hello', person);\n\nday = input('how was your day ? 
');\nif day == \"good\":\n print(\"good for you \", person);\nelif day == \"great\":\n print(\"that's great\");\nelse:\n print('oh what happened ', person);\nprint(\"well it can only get get better am i right or what lol\");\nprint(\"would you like to know today's date ? answer with Y or N \")\ndate = input();\nif date == \"Y\":\n currentDate = datetime.date.today();\n print(currentDate);\nelif date == 'y':\n currentDate = datetime.date.today();\n print(currentDate);\nelse:\n print('oh ok then ')\npersons_age = input(\"what's your age ? \");\nprint(\"nice so your \", persons_age, \"years old\");\nprint(\"nice when is your birthday ? \");\n\nbday = input('');\nprint('nice well i was created, i cant tell you when i was created because its not even in my syntex');\nprint(\"how many girlfriends do you have ? lol \");\n\ngf = input(\"well ?\" );\n\nif gf == 'yes':\n print(\"wow nice\");\nelif gf == \"yea\":\n print(\"god damn your doing good no your doing great\");\nelse:\n print(\"oh why \", person);\n'''\ndef malware():\n x = 1\n while x == 1:\n print(\"malware found \")\nmalware()\n'''\n'''\n# This sets the condition of x for later use\nx=0\n# This is the main part of the program\ndef numtest():\n print (\"I am the multiplication machine\")\n print (\"I can do amazing things!\")\n c = input (\"Give me a number, a really big number!\")\n c=float(c)\n print (\"The number\", int(c), \"multiplied by itself equals\",(int(c*c)))\n print(\"I am Python 3. I am really cool at maths!\")\n if (c*c)>10000:\n print (\"Wow, you really did give me a big number!\")\n else:\n print (\"The number you gave me was a bit small though. I like bigger stuff than that!\")\n\n# This is the part of the program that causes it to run over and over again.\nwhile x==0:\n numtest()\n again=input(\"Run again? y/n >>>\")\n if x==\"y\":\n print(\"\")\n numtest()\n else:\n print(\"Goodbye\");\n'''\nprint('hello man');\nprint('If your trying to get a girl to like you then this i for you ');\nprint('or ')\nprint('If you ever had a girl friend that your trying to get back then this is just the thing for you !!! ');\nher_name = input('what was her first name ? ');\nhair_color = input('what was her hair color ? ');\neye_color = input('what was her eye color ? ');\nplace_frst_seen = input('where did you first see her ? ');\nmet = input('how did you guys meet each other ? ');\nfrst_one_date = input('where was your first date ? ');\nfrst_two_date = input('when was your first date ? ');\nprint('dont worry all of this has a purpose, you will see when all the questions are finished ');\nfrst_thought_of_her = input('what was your first thought when you saw her for the first time ? ');\nur_number = input('wats ur phone number ? ' \"don't leave this blank this a very important \");\n\nfor i in range(1000):\n print('process is running please wait, the may take a few minutes ');\n\nget_back = input('so are you trying to get back with a ex-girlfriend ? Y/N');\nif get_back == 'Y':\n print('Hi ', her_name,\n ' How are you doing. I have really been think about you lately and was thinking if you wanted to meet up ',\n frst_one_date,\n ' I know your probably still mad at me but i really wanted another chance ', her_name,\n ' just give me on more chance and i '\n ' I promise you wont be mad, sad, or upset ever again. ', her_name, ' I remember the first time i saw you in ',\n place_frst_seen,\n ' I remember how beautiful your ', eye_color, ' eyes and ', hair_color,\n ' hair look. And even up to today your still '\n ' the same ', her_name, ' . 
I remember from ', met,\n ' Thats the first time we met you remember our first date ? it was in ',\n frst_one_date, ' I remember that our first date happened on ', frst_two_date,\n ' that day i will never forget because i love you ',\n her_name,\n '. And all I really want in life right now is another chance with you . ' ' If you want to talk or text well then my '\n ' number is ', ur_number, '. Thanks for taking time out of your day to read this ', her_name);\n#finish the else statment\nelse:\n print('');","sub_path":"idk_new.py","file_name":"idk_new.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42530505","text":"import os\nimport logging.config\nfrom web_tool.settings import LOG_DIR\n\n\ndef init_log(log_dir):\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n\n log_config = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(levelname)s %(asctime)s %(module)s:%(funcName)s:%(lineno)d %(message)s'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOG_DIR, 'web_tool.log'),\n 'maxBytes': 1024 * 1024 * 50,\n 'backupCount': 5,\n 'formatter': 'default',\n },\n },\n 'loggers': {\n 'web_tool': {\n 'handlers': ['console', 'file'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n }\n }\n\n logging.config.dictConfig(log_config)","sub_path":"web_tool/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"587070473","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom Reports.Report import Report\nfrom flask.views import View\n\n\nclass Average(Report, View):\n methods = ['GET']\n\n sql_report = \"\"\"\n SELECT\n s.id as \"s_id\",\n s.is_enable as \"s_enable\",\n s.state as \"s_state\",\n s.is_active as \"s_active\",\n sl.id as \"sl_id\",\n sl.address as \"sl_address\",\n sl.latitude as \"sl_latitude\",\n sl.longitude as \"sl_longitude\",\n s.is_active as \"sl_fast\",\n sa.id as \"sa_id\",\n sa.username as \"sa_username\",\n sa.service as \"sa_service\",\n sa.state as \"sa_status\",\n sa.is_active as \"sa_active\",\n (now() - ss.date_start) as \"ss_await\",\n ss.date_start as \"ss_reload\",\n ss.pokemons as \"ss_pokemons\",\n ss.gyms as \"ss_gyms\",\n ss.pokestops as \"ss_pokestops\",\n s.google_path as \"s_google_path\"\n FROM\n db_pokestats.scanner as s,\n db_pokestats.scanner_account as sa,\n db_pokestats.scanner_location as sl,\n db_pokestats.scanner_statistic as ss\n WHERE\n s.cd_account = sa.id\n and s.cd_location = sl.id\n and s.id = ss.cd_scanner;\n \"\"\"\n\n def __init__(self, config):\n Report.__init__(self, config, \"report_server_average.html\")\n\n def dispatch_request(self):\n return self.render()\n\n def _prepare_data(self):\n result = self._database_execute(self.sql_report)\n\n for row in result:\n row_dict = {\n \"s_id\": row[0],\n \"s_enable\": row[1],\n \"s_state\": row[2],\n \"s_active\": row[3],\n \"sl_id\": row[4],\n \"sl_address\": row[5],\n \"sl_latitude\": row[6],\n \"sl_longitude\": row[7],\n \"sl_fast\": row[8],\n \"sa_id\": row[9],\n \"sa_username\": row[10],\n \"sa_service\": row[11],\n \"sa_state\": row[12],\n \"sa_active\": row[13],\n \"ss_await\": row[14],\n \"ss_reload\": row[15],\n \"ss_pokemons\": 
row[16],\n \"ss_gyms\": row[17],\n \"ss_pokestops\": row[18],\n \"s_google_path\": row[19]\n }\n\n self.data.append(row_dict)\n\n\n","sub_path":"Reports/Servers/Average.py","file_name":"Average.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"159654867","text":"import tensorflow as tf\nimport numpy as np\nimport time\nimport argparse\n\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n\n\ndef args_parser():\n parser = argparse.ArgumentParser(description='Multi-style Generative Network for Real-time Transfer')\n parser.add_argument('model_dir')\n parser.add_argument('style_image')\n parser.add_argument('content_image')\n parser.add_argument('--out', default='result.jpg', help='Path to save transferred result', type=str)\n parser.add_argument('--keep_colors', default=False, action='store_true')\n return parser.parse_args()\n\n\ndef main():\n args = args_parser()\n\n style_image = tf.keras.preprocessing.image.img_to_array(\n tf.keras.preprocessing.image.load_img(args.style_image, target_size=(256,256)))\n\n content_image = tf.keras.preprocessing.image.img_to_array(\n tf.keras.preprocessing.image.load_img(args.content_image))\n\n tf.reset_default_graph()\n eval_graph = tf.Graph()\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with eval_graph.as_default() as g, tf.Session(config=config, graph=eval_graph) as sess:\n tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], args.model_dir)\n\n inputs = g.get_tensor_by_name(\"inputs:0\")\n style = g.get_tensor_by_name(\"style:0\")\n style_gram_out = g.get_tensor_by_name(\"style_gram_out:0\")\n style_gram_in = g.get_tensor_by_name(\"style_gram_in:0\")\n outputs = g.get_tensor_by_name(\"outputs:0\")\n\n c, s = sess.run([tf.expand_dims(content_image, axis=0), tf.expand_dims(style_image, axis=0)])\n\n style_gram = sess.run(style_gram_out, feed_dict={style: s})\n\n start = time.time()\n result = sess.run(tf.squeeze(outputs), feed_dict={inputs: c, style_gram_in: style_gram})\n end = time.time()\n\n print(\"Inference time: {time} seconds\".format(time=end-start))\n\n if args.keep_colors:\n # retain original image color\n def use_original_color(original, result):\n result_hsv = rgb_to_hsv(result)\n orig_hsv = rgb_to_hsv(original)\n oh, os, ov = np.split(orig_hsv, axis=-1, indices_or_sections=3)\n rh, rs, rv = np.split(result_hsv, axis=-1, indices_or_sections=3)\n return hsv_to_rgb(np.concatenate([oh, os, rv], axis=-1))\n\n final_result = use_original_color(content_image.reshape((0, 0, 3)), result)\n else:\n final_result = result\n\n result_image = tf.keras.preprocessing.image.array_to_img(final_result)\n result_image.save(args.out)\n\n\nmain()\n","sub_path":"4-msgnet/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541813035","text":"#!/usr/bin/env python\n\n# Copyright (c) 2002 Brad Stewart\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\tSee the\n# GNU General Public License for 
more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n\n\"\"\"dunno.py - selects a random \"I don't know\" reply \"\"\"\n\nhandler_list = [\"dunno\"]\n\nfrom moobot_module import MooBotModule\n\nclass dunno(MooBotModule):\n\tdef __init__(self):\n\t\tself.regex=\".*\"\n\t\tself.priority = 50\n\n\tdef handler(self, **args):\n\t\t\"\"\"grabs a random reply from the database\"\"\"\n\t\tfrom irclib import Event\n\t\timport database, string, random\n\t\tfrom irclib import nm_to_n\n\t\tif database.type == \"pgsql\":\n\t\t\t# For postgres, we find the number of dunnos, calculate a random\n\t\t\t# offset into the table based on that number (the offset is\n\t\t\t# a zero-index offset), and then select the dunno at that offset\n\t\t\t# into the data table.\n\t\t\trandom.seed()\n\t\t\tnum_query = \"select count(data) from data where type='dunno'\"\n\t\t\tnum_dunnos = database.doSQL(num_query)[0][0]\n\t\t\toffset = random.randint(1, num_dunnos) - 1\n\t\t\tdunno_query = \"select data from data where type='dunno' order \" \\\n\t\t\t\t+ \"by data limit 1 offset \" + str(offset)\n\t\t\tline = database.doSQL(dunno_query)[0][0]\n\t\telif database.type == \"mysql\":\n\t\t\t# For MySQL we simply use the rand() function to pick a random\n\t\t\t# dunno for us\n\t\t\tdunno_query = \"select data from data where type='dunno' order \" \\\n\t\t\t\t+ \"by rand() limit 1\"\n\t\t\tline = database.doSQL(dunno_query)[0][0]\n\n\t\tline = string.replace(line, \"WHO\", nm_to_n(args[\"source\"]))\n\t\ttarget = self.return_to_sender(args)\n\t\treturn Event(\"privmsg\", \"\", target, [ line ])\n","sub_path":"usr/share/moobot/modules/dunno.py","file_name":"dunno.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236221637","text":"import os, pandas as pd, numpy as np, csv\n\nfacs_data = pd.read_csv('facs_parsed.csv')\n\nos.system('rm ../db/affect_state.csv')\ncsv.register_dialect('md', delimiter=',', lineterminator='\\n')\n\nwith open('../db/affect_state.csv', 'a') as csv_f:\n\twriter = csv.writer(csv_f, dialect='md')\n\theaders = ['file', 'affect']\n\twriter.writerow(headers)\n\tfor i in range(len(facs_data['file'])):\n\t\tdata = facs_data.iloc[i]\n\t\t#if data['AU7'] == -1 and data['AU12'] == -1:\n\t\t#\taffect = 'Boredom'\n\t\tif data['AU4'] > -1 and data['AU7'] > -1 and data['AU12'] == -1:\n\t\t\taffect = 'Confusion'\n\t\telif data['AU7'] > -1 and data['AU12'] > -1 and data['AU25'] > -1 and data['AU26'] > -1 and data['AU45'] == -1:\n\t\t\taffect = 'Delight'\n\t\t#elif data['AU12'] > -1:\n\t\t#\taffect = 'Frustration'\n\t\telif data['AU4'] == -1 and data['AU7'] == -1 and data['AU12'] == -1 and data['AU25'] == -1 and data['AU26'] == -1:\n\t\t\taffect = 'Neutral'\n\t\telse:\n\t\t\taffect = 'Undefined'\n\t\twriter.writerow(list((data['file'], affect)))\n\tcsv_f.close()","sub_path":"CK+ _stats/parsers.py/affect_parser.py","file_name":"affect_parser.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615194860","text":"from Experiment import Experiment\nimport numpy as np\n\nclass MultiGeneric(Experiment):\n def __init__(self, learners=[], environment=None, **kwargs):\n super(MultiGeneric, self).__init__(**kwargs)\n self.learners = learners\n self.environment = 
environment\n\n def reset(self):\n self.environment.reset()\n for l in self.learners:\n l.reset()\n\n def run_episode(self,trial, episode):\n self.environment.start_setup()\n actions = {}\n terminals = {}\n step = 0\n learners = self.learners\n for learner in learners:\n state = self.environment.start(learner.id)\n action = learner.start(state)\n actions[learner.id] = action\n terminals[learner.id] = False\n while self.in_step_limit(step) and len(learners):\n # only select learners that are not finished\n for learner in np.random.permutation(learners):\n id = learner.id\n state, reward, terminals[id] = self.environment.step(id, actions[id])\n if not terminals[id]:\n actions[id] = learner.step(reward, state, episode)\n else:\n learner.end(trial, episode, reward)\n step += 1\n learners = [l for l in self.learners if not terminals[l.id]]\n # not all episodes terminal, but over step_limit\n if len(learners):\n for learner in learners:\n learner.end(trial, episode, reward)","sub_path":"src/experiments/MultiGeneric.py","file_name":"MultiGeneric.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"250519392","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n__author__ = 'Sunny'\r\n__mtime__ = '6/21/2016'\r\n\r\n ┏┓ ┏┓\r\n ┏┛┻━━━┛┻┓\r\n ┃ ☃ ┃\r\n ┃ ┳┛ ┗┳ ┃\r\n ┃ ┻ ┃\r\n ┗━┓ ┏━┛\r\n ┃ ┗━━━┓\r\n ┃ 神兽保佑 ┣┓\r\n ┃ 永无BUG! ┏┛\r\n ┗┓┓┏━┳┓┏┛\r\n ┃┫┫ ┃┫┫\r\n ┗┻┛ ┗┻┛\r\n\"\"\"\r\nimport configparser\r\nimport decimal\r\nimport json\r\nimport os\r\nimport socket\r\nimport sys\r\nimport threading\r\nfrom datetime import datetime, timedelta\r\n\r\nimport apscheduler\r\nimport requests\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtCore import QTranslator\r\nfrom PyQt5.QtWidgets import *\r\nfrom apscheduler.triggers.cron import CronTrigger\r\n\r\nimport init_database\r\nfrom Common import Common\r\nfrom Common import config\r\nfrom Common.config import BUFSIZ\r\nfrom View.customer.return_visit_setting import ReturnVisitSetting\r\nfrom View.login.login import Login\r\nfrom View.login.register import Register as Reg_Ui_MainWindow\r\nfrom database.dao.customer.customer_handler import get_return_visit_info\r\nfrom server.MySocket import myClient\r\n\r\ndecimal.__version__\r\napscheduler.__version__\r\n\r\n\r\n# 运行之前要检查配置文件\r\ndef pre_check():\r\n result = False\r\n root = 'config.ini'\r\n\r\n basic_msg = configparser.ConfigParser()\r\n basic_msg.read(root)\r\n\r\n code = None\r\n try:\r\n code = basic_msg.get('msg', 'code')\r\n except Exception as check_exception:\r\n print(check_exception)\r\n pass\r\n\r\n if Common.compare_local_code_with_remote_register(code):\r\n result = True\r\n\r\n return result\r\n\r\n\r\n# 回访设置\r\ndef return_visit():\r\n result_data = get_return_visit_info()\r\n for data in result_data:\r\n msg = \"您于 {} 要回访用户 : {}
    联系方式为 : {}
    车牌号为 : {}\" \\\r\n .format(data[0][:10], data[2], data[1], data[3])\r\n ui = ReturnVisitSetting(msg, record_id=data[4], car_phone=data[1], car_id=data[3], car_user=data[2])\r\n ui.exec_()\r\n\r\n\r\ndef run():\r\n translator = QTranslator()\r\n translator.load(\"qt_zh_CN.qm\")\r\n app = QApplication(sys.argv)\r\n ui = None\r\n # 判断是否在试用期内\r\n # check = TryUse()\r\n check = True\r\n if check:\r\n # 判断注册码是否正确\r\n if pre_check():\r\n # 链接服务器\r\n if Common.config.connect:\r\n try:\r\n myClient.send(\"connect {}\".format(Common.get_store_id()).encode())\r\n data, addr = myClient.recvfrom(BUFSIZ)\r\n Common.linkKey = data.decode()\r\n\r\n def heart_beat():\r\n try:\r\n if config.heartbeatCheck:\r\n myClient.send(\"heartbeat heartbeat\".encode())\r\n else:\r\n config.heartbeatCheck = True\r\n except Exception as run_exception:\r\n print(run_exception)\r\n try:\r\n config.scheduler.shutdown()\r\n except Exception as shutdown_exception:\r\n print(shutdown_exception)\r\n pass\r\n\r\n def scheduler_start(scheduler):\r\n try:\r\n scheduler.start()\r\n except (KeyboardInterrupt, SystemExit, Exception):\r\n scheduler.shutdown()\r\n\r\n trigger = CronTrigger(minute='*')\r\n config.scheduler.add_job(heart_beat, trigger)\r\n schedule = threading.Thread(target=scheduler_start, args=[config.scheduler])\r\n if Common.config.connect:\r\n schedule.start()\r\n\r\n except Exception as main_exception:\r\n print(main_exception)\r\n Common.config.connect = False\r\n\r\n try:\r\n ui = Login()\r\n\r\n except Exception as exception:\r\n print(exception)\r\n pass\r\n else:\r\n ui = Reg_Ui_MainWindow()\r\n Common.skin_change('qss/white.qss')\r\n return_visit()\r\n ui.show()\r\n sys.exit(app.exec_())\r\n\r\n else:\r\n if check == \"online\":\r\n msg = \"请链接网络\"\r\n else:\r\n msg = \"您的试用期已过,详情请联系官方。\"\r\n ui = Reg_Ui_MainWindow()\r\n QtWidgets.QMessageBox.information(ui.pushButton, \"提示\", msg)\r\n ui.close()\r\n sys.exit(app.exec_())\r\n\r\n\r\ndef is_open(port=15775, ip='127.0.0.1'):\r\n c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n try:\r\n c.connect((ip, port))\r\n c.shutdown(2)\r\n c.close()\r\n return True\r\n except Exception as socket_exception:\r\n print(socket_exception)\r\n return False\r\n\r\n\r\ndef try_use():\r\n # now = datetime.now()\r\n url = 'http://119.23.39.238:8500/store/api/time'\r\n try:\r\n req = requests.get(url)\r\n except Exception as try_use_exception:\r\n print(try_use_exception)\r\n return \"online\"\r\n json_data = json.loads(req.text)\r\n now = datetime.strptime(json_data.get(\"data\"), \"%Y-%m-%d %H:%M:%S\")\r\n\r\n if os.path.isfile('secret.conf'):\r\n fp = open('secret.conf', 'rb')\r\n record = fp.readline()\r\n if not record:\r\n return False\r\n else:\r\n record = record.decode()\r\n record_time = datetime.strptime(record, \"%Y-%m-%d %H:%M:%S\")\r\n\r\n if now > (record_time + timedelta(days=30)):\r\n return False\r\n else:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n if not is_open():\r\n try:\r\n init_database.create_all_table()\r\n run()\r\n except Exception as e:\r\n print(e)\r\n pass\r\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32221640","text":"import globals\nimport pygame\nclass entity:\n\n\tdef 
__init__(self,image,pos):\n\t\tself.image=image\n\t\tself.pos=pos\n\t\tself.speed_y=5\n\t\tself.speed_x=0\n\t\tself.rect=pygame.Rect(self.pos[0],self.pos[1],self.image.get_width(),self.image.get_height())\n\tdef should_move(self):\n\t\treturn False\n\n\tdef move (self, entities):\n\t\tself.pos=(self.speed_x+self.pos[0], self.speed_y+self.pos[1])\t\t\n\n\t\tself.rect.top=self.pos[1]\n\t\tfor entity in entities:\n\t\t\tif not self==entity:\n\t\t\t\tif self.rect.colliderect(entity.rect):\n\t\t\t\t\tif isinstance (self,cannonball) and not isinstance(entity,cannon):\n\t\t\t\t\t\tentities.remove(self)\n\t\t\t\t\t\tif isinstance(entity,player):\n\t\t\t\t\t\t\tentity.hp-=1\n\t\t\t\t\tif self.speed_y<=0:\n\t\t\t\t\t\tself.rect.top=entity.rect.top+entity.rect.height\n\t\t\t\t\t\tself.pos=(self.pos[0],self.rect.top)\n\t\t\t\t\t\tself.speed_y=0\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.rect.top=entity.rect.top-self.rect.height\n\t\t\t\t\t\tself.pos=(self.pos[0],self.rect.top)\n\t\t\t\t\t\tself.speed_y=0\n\t\t\t\t\t\tself.remaining_jumps=2\n\t\tself.rect.left=self.pos[0]\n\t\tfor entity in entities:\n\t\t\tif not self==entity:\n\t\t\t\tif self.rect.colliderect(entity.rect):\n\t\t\t\t\tif self.speed_x<=0:\n\t\t\t\t\t\tself.rect.left=entity.rect.left+entity.rect.width\n\t\t\t\t\t\tself.pos=(self.rect.left,self.pos[1])\n\t\t\t\t\t\tself.speed_x=0\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.rect.left=entity.rect.left-self.rect.width\n\t\t\t\t\t\tself.pos=(self.rect.left,self.pos[1])\n\t\t\t\t\t\tself.speed_x=0\n\nclass platform(entity):\n\tdef __init__(self,pos):\t\n\t\tentity.__init__(self, globals.platform,pos)\n\nclass player(entity):\n\tdef __init__(self,pos):\n\t\tentity.__init__(self, globals.character,pos)\n\t\tself.remaining_jumps=0\n\t\tself.hp=10\n\n\tdef should_move(self):\n\t\treturn True\n\n\nclass bottom(platform):\n\tdef __init__(self,):\t\n\t\tentity.__init__(self, globals.bottom,(0,800))\nclass cannonball(entity):\n\tdef __init__(self,pos,speed):\n\t\tentity.__init__(self, globals.cannonball,pos)\n\t\tself.speed_x=speed [0]\n\t\tself.speed_y=speed[1]\n\tdef should_move(self):\n\t\treturn True\n\nclass cannon (entity):\n\tdef __init__(self,pos):\t\n\t\tentity.__init__(self, globals.cannon,pos)\n\t\tself.loops_shot=0","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"355875090","text":"\"\"\"Test out Threat and threat field classes\"\"\"\n\nfrom Threat import GaussThreat, GaussThreatField\nfrom Environment import XYEnvironment\nimport matplotlib.pyplot as plt\nfrom Visualize import draw_threat_field, draw_threat_field_2D, draw_path\nfrom Graph import XYNode, Vertex, Graph\nfrom Search import Astar, reconstruct_path\n\n\ndef main():\n env = XYEnvironment(x_size=10, y_size=10, n_grid_x=30, n_grid_y=30)\n\n threat1 = GaussThreat(location=(2, 2), shape=(0.5, 0.5), intensity=5)\n threat2 = GaussThreat(location=(6, 5), shape=(1.0, 1.0), intensity=5)\n threat3 = GaussThreat(location=(8, 2), shape=(.25, .5), intensity=10)\n threats = [threat1, threat2, threat3]\n\n threat_field = GaussThreatField(threats=threats, offset=2)\n\n threat4 = GaussThreat(location=(2, 8), shape=(0.5, 0.5), intensity=5)\n threat_field.add_threat(threat4)\n env.add_threat_field(threat_field)\n\n print(env)\n\n node_list = {}\n for gridpt in range(env.n_grid):\n mx = gridpt % env.n_grid_x\n my = int(gridpt / env.n_grid_x)\n new_node = XYNode(node_id=gridpt)\n new_node.pos_x = mx * env.grid_sep_x\n 
new_node.pos_y = my * env.grid_sep_y\n node_list[gridpt] = new_node\n print(new_node)\n\n graph = Graph(env=env)\n graph.add_vertex(node_list[0])\n start_vertex = graph.get_vertex(node_list[0])\n print(\"Start vertex: \", start_vertex)\n\n neighbor_list = graph.env.get_neighbors(start_vertex.node)\n print(\"neighbor_list\")\n print(neighbor_list)\n for nbr in neighbor_list:\n print(nbr)\n graph.add_edge(start_vertex.node, nbr, 1.0)\n\n for neighbor in start_vertex.neighbors:\n print(neighbor)\n\n print(\"Run A* search\")\n goal_vertex = Vertex(node=node_list[env.n_grid - 1])\n print(goal_vertex.node)\n\n goal_vertex_found = Astar(graph=graph, start_vertex=start_vertex, goal_vertex=goal_vertex)\n print(goal_vertex_found)\n\n path = [goal_vertex_found.node]\n reconstruct_path(goal_vertex_found, path)\n print(\"Path found :\")\n for waypoint in path:\n print(waypoint)\n\n draw_threat_field(env=env, threat_field=threat_field)\n\n ax2 = draw_threat_field_2D(env=env, threat_field=threat_field)\n\n draw_path(ax=ax2, path=path)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"IPAS_easy/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506048126","text":"'''\nВъв файла sum_of_odds.py напишете програма, която:\n\nЧете едно число n от потребителя.\nНа екрана изкарва сумата на всички нечетни числа между 1 и n включително.\n'''\n\nn = input(\"Enter n: \")\nn = int(n)\n\ni = 1\nsum = 0\n\nwhile i <= n:\n\tif i % 2 == 1:\n\t\tsum += i\n\ti += 1\n\nprint(\"sum: \" + str(sum))","sub_path":"hackbg/programming-51/Programming0/week1/5-Saturday-Tasks/sum_of_odds.py","file_name":"sum_of_odds.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"549546810","text":"import re\n\nclass Token:\n def __init__(self, T_TYPE, value=None):\n self.T_TYPE = T_TYPE\n self.value = value\n def getTokenType(self):\n return self.T_TYPE\n def getTokenValue(self):\n return self.value\n def __str__(self):\n return '[Token: TokenType: {} TokenValue: {}]'.format(self.T_TYPE, self.value)\n def __repr__(self):\n return '[Token: TokenType: {} TokenValue: {}]'.format(self.T_TYPE, self.value)\n\ndef getIndexs(v):\n depth = 0\n values = []\n for i in v:\n if i == \"[\":\n depth += 1\n if len(values) < depth:\n values.append(\"\")\n elif i == \"]\":\n continue\n else:\n values[depth - 1] += i \n #print(values)\n return values\n\ndef checkIndexRef(s):\n vn = \"\"\n n = \"\"\n idx = -1\n for i in s:\n idx += 1\n if i == \"[\":\n break\n else:\n vn += i\n ns = s[idx:]\n if re.match(r\"^(\\[[^\\n\\]]+\\])+$\", ns):\n v = getIndexs(ns)\n return True, vn, v\n\n else:\n return False, None, None\n\ndef getNextChar(s, v):\n try:\n return s[v+1]\n except:\n return None\n\ndef parseFuncCall(s):\n new = \"\"\n last = \"\"\n tmpid = \"\"\n n = 0\n v = -1\n for i in s:\n v += 1\n if i == \" \":\n if tmpid in [\"quote\", \"chr\"] or last in [\"+\", \"-\", \"*\", \"/\", \"%\"] or getNextChar(s, v) in [\"+\", \"-\", \"*\", \"/\", \"%\"]:\n new += i\n else:\n new += i\n\n if i in [\")\", \"]\"] and tmpid in [\"paren\"]:\n if n == 0:\n tmpid = \"\"\n else:\n n -= 1\n elif i == \"(\" and tmpid in [\"paren\", \"\"]:\n n += 1\n elif i == \"[\" and tmpid in [\"paren\", \"\"]:\n n += 1\n elif i == \"\\\"\" and tmpid in [\"quote\"]:\n tmpid = \"\"\n elif i == \"\\\"\" and tmpid in [\"\"]:\n tmpid = \"quote\"\n 
elif i == \"'\" and tmpid in [\"chr\"]:\n tmpid = \"\"\n elif i == \"'\" and tmpid in [\"\"]:\n tmpid = \"chr\"\n else:\n last = i\n\n return new\n\ndef checkFuncCall(s):\n fn = \"\"\n args = \"\"\n idx = 0\n for i in s:\n idx += 1\n if i == \"(\":\n break\n else:\n fn += i\n if is_valid_variable_name(fn):\n args = s[idx:-1]\n args = parseFuncCall(args)\n tmpid = 0\n tmpida = 0\n tmpi = \"\"\n tmp = \"\"\n new = []\n #print(args)\n for i in args:\n if i == \"(\" and tmpi == \"\":\n tmp += i\n tmpid += 1\n elif i == \")\" and tmpi == \"\":\n tmp += i\n tmpid -= 1\n elif i == \"[\" and tmpi == \"\":\n tmp += i\n tmpida += 1\n elif i == \"]\" and tmpi == \"\":\n tmp += i\n tmpida -= 1\n elif i == \"\\\"\" and tmpi == \"\":\n tmp += i\n tmpi = \"quote\"\n elif i == \"\\\"\" and tmpi == \"quote\":\n tmp += i\n tmpi = \"\"\n elif i == \"'\" and tmpi == \"\":\n tmp += i\n tmpi = \"char\"\n elif i == \"'\" and tmpi == \"char\":\n tmp += i\n tmpi = \"\"\n elif tmpid == 0 and tmpida == 0 and i == \",\" and tmpi == \"\":\n new.append(tmp)\n tmp = \"\"\n else:\n tmp += i\n if tmpid != 0:\n return False, None, None\n new.append(tmp)\n #print(new)\n return True, fn, new\n else:\n return False, None, None\n \n\nTT_PLUS = \"TT_PLUS\"\nTT_EQUALS = \"TT_EQUALS\"\nTT_DNEQUAL = \"TT_DNEQUAL\"\nTT_GRTHAN = \"TT_GRTHAN\"\nTT_LTHAN = \"TT_LTHAN\"\nTT_MINUS = \"TT_MINUS\"\nTT_KEYWORD = \"TT_KEYWORD\"\nTT_SEMICOLON = \"TT_SEMICOLON\"\nTT_INTEGER = \"TT_INTEGER\"\nTT_HEX = \"TT_HEX\"\nTT_IDENTIFIER = \"TT_IDENTIFIER\"\nTT_LBRACE = \"TT_LBRACE\"\nTT_RBRACE = \"TT_RBRACE\"\nTT_STRING = \"TT_STRING\"\nTT_DEC = \"TT_DEC\"\nTT_AMPOINT = \"TT_AMPOINT\"\nTT_PTR = \"TT_PTR\"\nTT_FUNCCALL = \"TT_FUNCCALL\"\nTT_DEQUAL = \"TT_DEQUAL\"\nTT_MUL = \"TT_MUL\"\nTT_BYTES = \"TT_BYTES\"\nTT_DIV = \"TT_DIV\"\nTT_MOD = \"TT_MOD\"\nTT_CHAR = \"TT_CHAR\"\nTT_COLON = \"TT_COLON\"\nTT_ASM = \"TT_ASM\"\nTT_ARR = \"TT_ARR\"\nTT_AMP = \"TT_AMP\"\nTT_COMMA = \"TT_COMMA\"\nTT_INDEXREF = \"TT_INDEXREF\"\nTT_STRUCTDEF = \"TT_STRUCTDEF\"\n\ndef is_valid_struct_def(s):\n s = s.split(\".\")\n if len(s) != 2:\n return False\n return is_valid_variable_name(s[0]) and is_valid_variable_name(s[1]) and s[0] not in KEYWORDS.keys() and s[1] not in KEYWORDS.keys()\n\ndef findKeyFromValue(dictionary, v):\n for key, val in dictionary.items():\n if val == v:\n return key\n return None\n \nKEYWORDS = {\n \"INT_DEC\": \"let\",\n \"CONST_DEC\": \"const\",\n \"FN_DEC\": \"fn\",\n \"QUIT\": \"quit\",\n \"RETURN\": \"return\",\n \"IF\": \"if\",\n \"NEW\": \"new\",\n \"ASM\": \"asm\",\n \"ENDIF\": \"endif\",\n \"FREE\": \"free\",\n \"BANK\": \"bank\",\n \"TRUE\": \"true\",\n \"FALSE\": \"false\",\n \"CONST\": \"const\",\n \"FOR\": \"for\",\n \"ELSE\": \"else\",\n \"FROM\": \"from\",\n \"TO\": \"to\",\n \"ENDFOR\": \"endfor\",\n \"WHILE\": \"while\",\n \"ENDWHILE\": \"endwhile\",\n \"BREAK\": \"break\",\n \"STRUCT\": \"struct\",\n \"NEW\": \"new\",\n \"AS\": \"as\",\n}\n\ndef is_valid_variable_name(name):\n return name.isidentifier() and not name in KEYWORDS.keys()\n\n\ndef lex(s):\n tmp = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n ptmpd = 0\n tokens = []\n for i in s:\n #print(list(tmp))\n #print(tmpid)\n\n #print(tmpid)\n\n if tmpid == \"multiline\":\n if i == \"~\":\n tmpid = \"\"\n continue\n continue\n\n if tmpid == \"minus\" and i not in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n tokens.append(Token(TT_MINUS))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"minus\":\n tmp2 += \"-\"\n tmpid = \"\"\n\n if i == \"\\\"\" and tmpid != \"quote\" 
and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tmpid = \"quote\"\n tmp2 += i\n \n elif i == \"\\\"\" and tmpid == \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tmpid = \"\"\n tmp2 += i\n\n elif i == \"(\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"brac\":\n #print(ptmpd)\n tmpid = \"paren\"\n tmp2 += i\n ptmpd += 1\n \n elif i == \")\" and tmpid == \"paren\" and tmpid != \"chr\" and tmpid != \"quote\" and tmpid != \"brac\":\n tmp2 += i\n ptmpd -= 1\n if ptmpd == 0:\n tmpid = \"\"\n\n elif i == \"[\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\":\n tmpid = \"brac\"\n tmp2 += i\n ptmpd += 1\n\n \n elif i == \"]\" and tmpid == \"brac\" and tmpid != \"chr\" and tmpid != \"quote\" and tmpid != \"paren\":\n tmp2 += i\n ptmpd -= 1\n if ptmpd == 0:\n tmpid = \"\"\n\n elif i == \"'\" and tmpid != \"chr\" and tmpid != \"quote\" and tmpid != \"paren\" and tmpid != \"brac\":\n tmpid = \"chr\"\n tmp2 += i\n \n elif i == \"'\" and tmpid == \"chr\" and tmpid != \"quote\" and tmpid != \"paren\" and tmpid != \"brac\":\n tmpid = \"\"\n tmp2 += i\n \n elif i == \" \" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n #print(tmpid)\n tmp = tmp2\n tmp2 = \"\"\n \n elif i == \"\\n\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n # print(\"hey\")\n tmp = tmp2\n tmp2 = \"\"\n \n elif i == \"+\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_PLUS))\n tmpid = \"\"\n tmp2 = \"\"\n\n elif i == \"~\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tmpid = \"multiline\"\n\n elif i == \"&\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_AMP))\n tmpid = \"\"\n tmp2 = \"\"\n\n elif i == \"%\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_MOD))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \":\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_COLON))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \"-\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_MINUS))\n tmpid = \"\"\n tmp2 = \"\"\n\n elif i == \"*\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_MUL))\n tmpid = \"\"\n tmp2 = \"\"\n\n elif i == \"/\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_DIV))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \"{\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_LBRACE))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \"}\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_RBRACE))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \"=\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_EQUALS))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \"!\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_DNEQUAL))\n tmpid = \"\"\n tmp2 = \"\"\n\n elif i == \">\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != 
\"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_GRTHAN))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \"<\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n tokens.append(Token(TT_LTHAN))\n tmpid = \"\"\n tmp2 = \"\"\n \n elif i == \",\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n #tokens.append(Token(TT_COMMA))\n tmp = tmp2\n tmpid = \"comma\"\n \n elif i == \";\" and tmpid != \"quote\" and tmpid != \"chr\" and tmpid != \"paren\" and tmpid != \"brac\":\n #print(\"hi\")\n tmp = tmp2\n tmpid = \"semi\"\n \n else:\n tmp2 += i\n\n #print(tmp)\n\n if len(tmp) >= 4 and re.match(r\"^(i8:)\\d+$\", tmp):\n \n tokens.append(Token(TT_BYTES, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif re.match(r\"^\\d+\\.\\d+$\", tmp):\n print(\"Found double: {}\".format(tmp))\n tokens.append(Token(TT_DEC, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif checkIndexRef(tmp)[0]:\n tokens.append(Token(TT_INDEXREF, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif re.match(r\"^\\[(.*)\\]\\[(.*\\,)*(.*)\\]$\", tmp):\n #print(\"hello\")\n tokens.append(Token(TT_ARR, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif re.match(r\"^[-]?[0-9]+$\", tmp):\n tokens.append(Token(TT_INTEGER, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif re.match(r\"^(0[xX])[A-Fa-f0-9]+$\", tmp):\n tokens.append(Token(TT_HEX, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif re.match(r\"^['][^\\n][']$\", tmp):\n tokens.append(Token(TT_CHAR, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif re.match(r'^[\"][^\\n]*[\"]$', tmp):\n tokens.append(Token(TT_STRING, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif tmp in KEYWORDS.values():\n tokens.append(Token(TT_KEYWORD, KEYWORDS[findKeyFromValue(KEYWORDS, tmp)]))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif len(tmp) >= 2 and tmp[0] == \"@\" and is_valid_variable_name(tmp[1:]):\n tokens.append(Token(TT_AMPOINT, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n 
tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif is_valid_struct_def(tmp):\n tokens.append(Token(TT_STRUCTDEF, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n \n elif len(tmp) >= 2 and tmp[0] == \"$\" and is_valid_variable_name(tmp[1:]):\n tokens.append(Token(TT_PTR, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif is_valid_variable_name(tmp):\n tokens.append(Token(TT_IDENTIFIER, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n\n elif len(tmp) >= 3 and checkFuncCall(tmp)[0]:\n tokens.append(Token(TT_FUNCCALL, tmp))\n if tmpid == \"semi\":\n tokens.append(Token(TT_SEMICOLON))\n tmpid = \"\"\n tmp2 = \"\"\n elif tmpid == \"comma\":\n tokens.append(Token(TT_COMMA))\n tmpid = \"\"\n tmp2 = \"\"\n tmpid = \"\"\n tmp = \"\"\n \n \n return tokens\n ","sub_path":"src/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":14268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"180309249","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHKGovJobs spider created spider on the top of ATSSpider\n\nscrapy crawl hkgovjobs -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www1.jobs.gov.hk/1/0/WebForm/jobseeker/jobsearch/adv_search.aspx\"\n\nSeed URL:\n http://www1.jobs.gov.hk/1/0/WebForm/jobseeker/jobsearch/adv_search.aspx\n\"\"\"\n\nfrom json import dumps, loads\nfrom re import compile\nfrom scrapy.http import FormRequest, Request\nfrom scrapy.selector import Selector\nfrom scrapy.conf import settings\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix, Replace\nfrom brightcorp.lib.utils import extract_first, get_hidden_inputs\n\n\nclass HKGovJobs(ATSSpider):\n\n name = 'hkgovjobs'\n disable_default_field_extractors = True\n page = 1\n job_id = compile(r\"f_getJobCard\\('(.*?)'\\);\")\n\n def __init__(self, *args, **kwargs):\n super(HKGovJobs, self).__init__(*args, **kwargs)\n settings.overrides[\"AUTOTHROTTLE_ENABLED\"] = True\n\n def parse(self, response):\n \"\"\"\n Submit the form and parse jobs data.\n \"\"\"\n form_data = get_hidden_inputs(response)\n form_data.update({\n 'uxExpressFuncRadio': 'jobseeker',\n 'ctl00$ContentPlaceHolder1$uxLangCantonFlu': '-1',\n 'ctl00$ContentPlaceHolder1$uxLangMdnFlu': '-1',\n 'ctl00$ContentPlaceHolder1$uxLangEngFlu': '-1',\n 'ctl00$ContentPlaceHolder1$uxReadWtChiFlu': '-1',\n 'ctl00$ContentPlaceHolder1$uxReadWtEngFlu': '-1',\n 'ctl00$uxSimpKeyword': unicode('關鍵字', 'utf-8'),\n 'ctl00$ContentPlaceHolder1$uxPostedDtFr': unicode('日日/月月/年年年年', 'utf-8'),\n 'ctl00$ContentPlaceHolder1$uxPostedDtTo': unicode('日日/月月/年年年年', 'utf-8'),\n 'ctl00$ContentPlaceHolder1$uxSortBy': 'post_dt_desc',\n 'ctl00$ContentPlaceHolder1$uxQuery': unicode('搜尋', 'utf-8'),\n 'ctl00$ContentPlaceHolder1$uxSalOptGrp': 'uxSalOpt1',\n })\n yield FormRequest(\n callback=self.parse_jobs_list,\n 
formdata=form_data,\n url=response.url,\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n )\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n self.page += 1\n if not self.expected_job_count_set:\n expected_count = sel.xpath(\n '//td/span/span[@id=\"ctl00_ContentPlaceHolder1_uxTotalPage\"]/span[1]/text()'\n ).extract()\n if expected_count:\n self.expected_job_count = expected_count[0]\n\n jobs = sel.xpath('//div[@id=\"uxJobListArea\"]/table//tr[td/@id]')\n for job in jobs:\n job_id = job.xpath('./td/div/@onclick').re(self.job_id)\n table_num = job.xpath('./td/@id').extract()\n if job_id and table_num:\n yield Request(\n callback=self.parse_job_callback(),\n body=dumps({\n 'p_langOpt': '2',\n 'p_liveOnly': '',\n 'p_ordNo': job_id[0],\n }),\n headers={\n 'Content-Type': 'application/json; charset=utf-8',\n 'X-Requested-With': 'XMLHttpRequest',\n },\n method='POST',\n meta={\n 'title': job.xpath(\n './/table//tr/td[2]/span[1]/text()'\n ).extract(),\n 'salary': job.xpath(\n './/table//tr/td[2]/span[2]/text()'\n ).extract(),\n 'location': job.xpath(\n './/table//tr/td[2]/span[3]/text()'\n ).extract(),\n 'url': urljoin(\n response.url,\n '/1/0/WebForm/jobseeker/jobcard/jobcard.aspx?ordno=%s' % job_id[0]\n ),\n 'job_id': job_id,\n },\n url=urljoin(response.url, '/1/0/WebServices/QuickviewWS.asmx/F_GetJobCardDetail')\n )\n\n next_url = sel.xpath(\n '//span[@id=\"ctl00_ContentPlaceHolder1_uxPageNum\"]//a[text()=\"%s\"]/@href' % self.page\n ).extract()\n if next_url:\n next_url = urljoin(response.url, next_url[0])\n yield Request(\n next_url, callback=self.parse_jobs_list,\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n )\n\n def parse_job(self, response):\n try:\n jsonResponse = loads(response.body)\n except:\n return\n\n results = jsonResponse.get('d')\n if results:\n loader = BrightcorpItemLoader(selector=results)\n # List of information found.\n # Based on index, assign values to respective fields\n desc = []\n for index, value in enumerate(results):\n if index == 2:\n loader.add_value('title', value)\n if index == 4:\n loader.add_value('company', value)\n if index == 3:\n loader.add_value('industry', value)\n if index == 5:\n loader.add_value('location', value)\n if index == 6:\n desc.append(\"%s\" % unicode('職責:', 'utf-8'))\n desc.append(value)\n desc.append('
    ')\n if index == 7:\n desc.append(\"%s\" % unicode('資歷:', 'utf-8'))\n desc.append(value)\n desc.append('
    ')\n if index == 8:\n desc.append(\"%s\" % unicode('待遇:', 'utf-8'))\n desc.append(value)\n\n loader.add_value('description', desc)\n loader.add_value('date', results[-2], ConvertDateString('%d/%m/%Y'))\n\n loader.add_value(\n 'referencenumber',\n response.meta.get('job_id'),\n Prefix('%s-' % self.name)\n )\n loader.add_value('url', response.meta.get('url'))\n if not loader.get_output_value('title'):\n loader.add_value('title', response.meta.get('title'))\n if not loader.get_output_value('location'):\n loader.add_value('location', response.meta.get('location'))\n loader.add_value('apply_url', response.meta.get('url'))\n loader.add_value('baseSalary', response.meta.get('salary'))\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/hkgovjobs.py","file_name":"hkgovjobs.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238854816","text":"import argparse\r\nimport sys\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Počítač slov, znaků a řádků')\r\n parser.add_argument(\"jmeno_textoveho_souboru\", help='jmeno souboru, ktery chcete nacist')\r\n parser.add_argument(\"--slova\", help='pocet slov', action=\"store_true\")\r\n parser.add_argument(\"--znaky\", help='pocet znaku', action=\"store_true\")\r\n parser.add_argument(\"--radky\", help='pocet radku', action=\"store_true\")\r\n\r\n arg = parser.parse_args()\r\n\r\n try:\r\n soubor = open(arg.jmeno_textoveho_souboru)\r\n text = soubor.read()\r\n\r\n if arg.znaky:\r\n znaky = len(text)\r\n print(f\"\\n{text} \\nPočet znaků: [{znaky}]\\n\")\r\n soubor.close()\r\n\r\n elif arg.radky:\r\n radky = len(text.split(\"\\n\"))\r\n print(f\"\\n{text}\\nPočet řádků: [{radky}] \\n\")\r\n soubor.close()\r\n\r\n elif arg.slova:\r\n radky = len(text.split(\"\\n\"))\r\n slova = len(text.split(\" \")) + (radky - 1)\r\n print(f\"\\n{text}\\nPočet slov: [{slova}] \\n\")\r\n soubor.close()\r\n\r\n elif arg.slova and arg.znaky:\r\n znaky = len(text)\r\n radky = len(text.split(\"\\n\"))\r\n slova = len(text.split(\" \")) + (radky - 1)\r\n print(f\"\\n{text}\\nPočet znaků: [{znaky}] , počet slov: [{slova}]\\n\")\r\n soubor.close()\r\n\r\n elif arg.radky and arg.slova:\r\n radky = len(text.split(\"\\n\"))\r\n slova = len(text.split(\" \")) + (radky - 1)\r\n print(f\"\\n{text}\\nPočet slov: [{slova}] , počet řádků: [{radky}] \\n\")\r\n soubor.close()\r\n\r\n elif arg.radky and arg.znaky:\r\n znaky = len(text)\r\n radky = len(text.split(\"\\n\"))\r\n print(f\"\\n{text}\\nPočet znaků: [{znaky}] , počet řádků: [{radky}] \\n\")\r\n soubor.close()\r\n\r\n elif arg.slova and arg.znaky and arg.radky:\r\n znaky = len(text)\r\n radky = len(text.split(\"\\n\"))\r\n slova = len(text.split(\" \")) + (radky - 1)\r\n print(f\"\\n{text}\\nPočet znaků: [{znaky}] , počet slov:[{slova}], počet řádků: [{radky}]\\n\")\r\n soubor.close()\r\n\r\n else:\r\n znaky = len(text)\r\n radky = len(text.split(\"\\n\"))\r\n slova = len(text.split(\" \")) + (radky - 1)\r\n print(f\"\\n{text}\\nPočet znaků: [{znaky}], počet slov: [{slova}], počet řádků:[{radky}]\\n\")\r\n soubor.close()\r\n\r\n\r\n\r\n except PermissionError:\r\n print(\"Chyba\")\r\n\r\n except:\r\n print(\"Chyba\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"Cech_WC_oprava.py","file_name":"Cech_WC_oprava.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"122557766","text":"import matplotlib.pyplot as plt\nfrom obspy import read\nfrom obspy.signal.trigger import recursive_sta_lta, trigger_onset\n\nfrom seisnn.utils import get_dir_list\n\npredict_pkl_dir = \"/mnt/tf_data/dataset/2017_02\"\npredict_pkl_list = get_dir_list(predict_pkl_dir)\non = 3.5\noff = 0.5\nfor i, pkl in enumerate(predict_pkl_list):\n trace = read(pkl).traces[0]\n start_time = trace.stats.starttime\n df = trace.stats.sampling_rate\n cft = recursive_sta_lta(trace.data, int(0.2 * df), int(2. * df))\n on_of = trigger_onset(cft, on, off)\n\n # Plotting the results\n\n ax = plt.subplot(211)\n plt.plot(trace.data, 'k')\n ymin, ymax = ax.get_ylim()\n try:\n plt.vlines(on_of[:, 0], ymin, ymax, color='r', linewidth=2)\n plt.vlines(on_of[:, 1], ymin, ymax, color='b', linewidth=2)\n except TypeError:\n pass\n plt.subplot(212, sharex=ax)\n plt.plot(cft, 'k')\n plt.hlines([on, off], 0, len(cft), color=['r', 'b'], linestyle='--')\n plt.xticks(range(0, 3001, 500), range(0, 31, 5))\n plt.xlim()\n plt.show()\n","sub_path":"scripts/prototypes/stalta.py","file_name":"stalta.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"303799708","text":"import dinwaabou\nimport re\nimport unittest\n\nclass TestDinwaabou(unittest.TestCase):\n\n\tdef test_failure(self):\n\t\tnumber = dinwaabou.pick('HK', 'mobile')\n\t\tself.assertIsNotNone(number)\n\t\tself.assertIsNotNone(re.match('\\d+', number))\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"tests/test_dinwaabou.py","file_name":"test_dinwaabou.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588946594","text":"# -*- coding: utf-8 -*-\nimport gym\nimport multiprocessing\nfrom gym.envs.registration import register\nimport numpy as np\n\ndef normalizeInput(config,stabilize_obs = 200, expected_obs = 200):\n env = config.env\n state_dim = config.N_S\n action_dim = config.N_A\n try:\n _ = env.reset()\n except:\n import ipdb; ipdb.set_trace()\n # We want to gather some observation here to get better output than expected.\n # We give no actions. So zero. and get the outputs until its done\n\n state_mu = np.zeros(state_dim)\n state_sigma = 1+np.zeros(state_dim)\n S_ = 0*np.ndarray([expected_obs,state_dim])\n i = 0\n print(\"Gathering some observation to normalize inputs\")\n while i < stabilize_obs:\n s_, r, done, info = env.step(np.zeros(action_dim))\n i = i+1\n i=0\n while i < expected_obs:\n s_, r, done, info = env.step(np.zeros(action_dim))\n S_[i,:] = s_\n i = i+1\n # Use symmetry :\n #TODO CHECK\n S = None\n # # We copy the array\n # leftS = np.copy(S_)\n # rightS = np.copy(S_)\n # # We erase half of it\n # leftRange = range(np.shape(S_)[1]/2,np.shape(S_)[1])\n # rightRange = range(np.shape(S_)[1]/2)\n # leftS[:,rightRange]=0\n # rightS[:,leftRange]=0\n # # We sum both half\n # leftS[:,leftRange] += rightS[:,rightRange]\n # rightS[:,rightRange] += leftS[:,leftRange]\n # # And create back the array\n # S = leftS+rightS\n # S /= 2\n\n state_mu = np.mean(S_)\n state_sigma = np.std(S_)\n return state_mu, state_sigma\n print(\"Done! 
lets start learning baby !\")\n\ndef getEnv(name=\"Regis\",port=5662,action_dim=18,action_repeat=1,random_seed=5662,world=\"\"):\n print(\"Creating env {},\\n\\\n action_dim={},\\n\\\n action_repeat={},\\n\\\n port={},\\n\\\n world={}\".format(name,action_dim,action_repeat,random_seed,world))\n env = None\n #try:\n #env = gym.make(name=\"Regis\")\n #except:\n envName = \"{}{}\".format(name,port)\n # Algorithmic\n # ----------------------------------------\n register(\n id=envName,\n entry_point='gym.envs.webots:Regis',\n kwargs=dict(\n action_dim = int(action_dim),\n repeat = int(action_repeat),\n port = int(port), # TODO UPDATE THE port=5662 FROM THE REGIS UPDATED IN THE DESKTOP COMPUTER\n world = world\n )\n )\n env = gym.make(envName)\n env.seed(random_seed)\n return env\n\nclass Config:\n mode = 'continuous'\n # mode = 'discrete'\n MAX_EP_STEP = 400 #TODO Unused\n GLOBAL_EP = 0\n GLOBAL_NET_SCOPE = 'Global_Net'\n\n NORMALIZE_MEAN = 1.0\n NORMALIZE_STD = 1.0\n N_S = 1\n N_A = 1\n ACTION_BOUND = [-1,1]\n ACTION_GAP = 2\n\n\n env = None\n def __init__(self,game=\"Regis-v\",args=None):\n if args is None:\n raise ValueError('No arguments passed to global config')\n print('Game: {}'.format(game))\n self.GAME = game\n self.INIT_PORT = args[\"communication_port\"]\n self.LR_A = float(args[\"actor_lr\"])\n self.LR_C = float(args[\"critic_lr\"])\n self.ENTROPY_BETA = float(args[\"entropy_beta\"])\n self.GAMMA = float(args[\"gamma\"])\n self.REWARD_SCALING = float(args[\"reward_scaling\"])\n self.MODEL_DIR = args[\"save_dir\"]\n self.MAX_GLOBAL_EP = int(args[\"max_episodes\"])\n self.UPDATE_GLOBAL_ITER = int(args[\"update_batch_iter\"])\n self.ACTOR_NETWORK_TYPE = int(args[\"actor_network_type\"])\n self.CRITIC_NETWORK_TYPE = int(args[\"critic_network_type\"])\n self.INPUT_SPACE_TYPE = int(args[\"input_space_type\"])\n self.TEMPORAL_WINDOW = int(args[\"temporal_window\"])\n\n self.N_WORKERS = int(args[\"n_workers\"])\n self.ACTION_REPEAT = int(args[\"action_repeat\"])\n self.RANDOM_SEED = int(args[\"random_seed\"])\n self.FORGET_WINDOW_SIZE = int(args[\"forget_window_size\"])\n self.WORLD = int(args[\"world\"])\n\n self.env = getEnv(\n name=self.GAME,\n port=self.INIT_PORT,\n action_dim=args[\"action_dim\"],\n action_repeat=self.ACTION_REPEAT,\n random_seed=self.RANDOM_SEED,\n world=self.WORLD)\n self.N_S = self.env.observation_space.shape[0]\n if self.mode == 'discrete': # Note:The action_space of CartPole-v0 does not contain attribute 'shape'\n self.N_A = self.env.action_space.n\n elif self.mode == 'continuous': # Note: The action of Pendulum-v0 is a list with shape (1,)\n self.N_A = self.env.action_space.shape[0]\n self.ACTION_BOUND = [self.env.action_space.low, self.env.action_space.high]\n self.ACTION_GAP = self.env.action_space.high - self.env.action_space.low\n","sub_path":"playground/rl-samples/DDPG-berat/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"307279739","text":"# =================================================================================================\n# All Rights Reserved.\n# =================================================================================================\n# File description:\n# tweet model unit tests\n#\n# =================================================================================================\n# Date Name Description of Change\n# 05-Sep-2021 Wayne Shih Initial create\n# $HISTORY$\n# 
=================================================================================================\n\nfrom datetime import date, timedelta\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom tweets.models import Tweet\nfrom utils.time_helpers import utc_now\n\n\nclass TweetTest(TestCase):\n\n def test_hours_to_now(self):\n kd = User.objects.create_user(username='kd')\n tweet = Tweet.objects.create(user=kd, content='kd is MVP!')\n tweet.created_at = utc_now() - timedelta(hours=10)\n tweet.save()\n self.assertEqual(tweet.hours_to_now, 10)\n\n def test_tweet_model_attributes(self):\n self.assertEqual(hasattr(Tweet, 'id'), True)\n self.assertEqual(hasattr(Tweet, 'user'), True)\n self.assertEqual(hasattr(Tweet, 'user_id'), True)\n self.assertEqual(hasattr(Tweet, 'content'), True)\n self.assertEqual(hasattr(Tweet, 'created_at'), True)\n\n def test_tweet_model(self):\n user = User.objects.create_user(username='curry', password='sc30')\n tweet = Tweet.objects.create(content='logo shot')\n\n self.assertEqual(tweet.user, None)\n self.assertEqual(tweet.created_at.day, date.today().day)\n\n tweet.delete()\n Tweet.objects.create(user=user, content='logo shot for 333333333!')\n\n user.delete()\n\n self.assertEqual(Tweet.objects.all().count(), 1)\n self.assertEqual(Tweet.objects.first().user, None)\n self.assertEqual(Tweet.content.field.max_length, 255)\n\n def test_auto_now_add(self):\n tweet = Tweet.objects.create(content='original tweet')\n old_created_time = tweet.created_at\n tweet.content = 'updated tweet'\n tweet.save()\n self.assertEqual(old_created_time, tweet.created_at)\n\n def test_str(self):\n user = User.objects.create_user(username='lbj23', password='KingJames')\n tweet = Tweet.objects.create(user=user, content='I am the King')\n # print(tweet)\n self.assertEqual(str(tweet.created_at) in str(tweet), True)\n self.assertEqual(tweet.user.username in str(tweet), True)\n self.assertEqual(tweet.content in str(tweet), True)\n","sub_path":"tweets/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485825101","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Mengxuan Chen\n@description:\n 第一个错误的版本\n@revise log:\n 2021.02.10 创建程序\n\"\"\"\n# The isBadVersion API is already defined for you.\n# @param version, an integer\n# @return a bool\n# def isBadVersion(version):\n\nclass Solution(object):\n def firstBadVersion(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n == 1:\n return 1\n left, right = 1, n\n while left <= right:\n mid = left + (right - left) // 2\n if not isBadVersion(mid - 1) and isBadVersion(mid):\n return mid\n elif isBadVersion(mid + 1) and not isBadVersion(mid):\n return mid + 1\n elif isBadVersion(mid):\n right = mid - 1\n else:\n left = mid + 1\n\n # left = 1\n # right = n\n # while (left < right):\n # mid = left + (right - left) / 2\n # if (isBadVersion(mid)):\n # right = mid\n # else:\n # left = mid + 1\n # return left\n\n # i = int( n / 2 )\n # while i < n:\n # if ( isBadVersion(i) == False) and ( isBadVersion(i+1) == True):\n # return int( n / 2 )+1\n # if ( isBadVersion(i) == False) and ( isBadVersion(i+1) == False):\n # i = int( i / 2)\n # if ( isBadVersion(i) == True) and ( isBadVersion(i+1) == True):\n # i = int(n/2) + int(i/2)\n\n","sub_path":"LC 第一个错误的版本.py","file_name":"LC 
第一个错误的版本.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"566508622","text":"class Movement(object):\n \"\"\"docstring for Movement.\"\"\"\n\n def __init__(self, index, data):\n super(Movement, self).__init__()\n self.torso_arr = ['forward', 'backward', 'neutral']\n self.torso_deg_arr = ['small', 'large']\n self.speed_arr = ['none', 'slow', 'fast']\n self.arm_arr = ['forward', 'sides', 'high']\n self.emotion_labels = ['Happiness', 'Sadness', 'Fear', 'Disgust', 'Anger', 'Surprise', 'Interest', 'Neutral']\n self.torso_start = data[0]\n self.torso_start_deg = data[1]\n self.torso_end = data[2]\n self.torso_end_deg = data[3]\n self.torso_speed = data[4]\n if self.torso_start == self.torso_end:\n self.torso_speed = 'none'\n self.left_arm_start = data[5]\n self.left_arm_end = data[6]\n self.left_arm_speed = data[7]\n if self.left_arm_start == self.left_arm_end:\n self.left_arm_speed = 'none'\n self.right_arm_start = data[8]\n self.right_arm_end = data[9]\n self.right_arm_speed = data[10]\n if self.right_arm_start == self.right_arm_end:\n self.left_arm_speed = 'none'\n self.index = index\n self.responses = None\n if self.left_arm_speed == self.right_arm_speed:\n self.symmetric = 'true'\n else:\n self.symmetric = 'false'\n self.arm_speed = self.right_arm_speed\n self.arm_end = self.right_arm_end\n\n\n def get_constraint(self, group):\n return eval(\"self.\"+group)\n\n def check_constraints(self, groups=[], vals=[]):\n for group, val in zip(groups, vals):\n\n if not val in eval(\"self.\"+group):\n return False\n return True\n","sub_path":"Movement.py","file_name":"Movement.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155183892","text":"from tradeSystem import *\n\n# create a trade system object\ntSys = tradeSystem()\n\ntSys.ticList = [\"XLE\"]\n\n# get last 10 years of daily stock data\n#tSys.daGoogleCSV()\n\n# run system to build a tSys.trades data frame\n#tSys.tsRandomTrades()\n#tSys.tsAssetAllocationDalio(period=62)\n#tSys.tsOpeningGap(threshold=0.1,testLen=1000)\ntSys.tsMATrendHold()\n#tSys.tsMATrendTrail(testLen=200)\ntSys.tsMATrendSqeezeTrail(testLen=2000)\n\n# calculate the trade returns and add them to the trades data frame\ntSys.anReturns(period=21)\n\n#tSys.pltTradeGraph()\ntSys.pltTradeHistogram()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"223406180","text":"\"\"\"Scenes class managing all scene data.\"\"\"\n\nimport asyncio\nimport logging\n\nimport binascii\nfrom aiopvapi.helpers.api_base import ApiEntryPoint\nfrom aiopvapi.helpers.constants import URL_SCENES, ATTR_NAME, ATTR_NAME_UNICODE\nfrom aiopvapi.helpers.tools import base64_to_unicode, get_base_path, \\\n unicode_to_base64\n\n_LOGGER = logging.getLogger(\"__name__\")\nATTR_SCENE_DATA = 'sceneData'\n\nSCENE_ROOM_ID = 'roomId'\nSCENE_ID = 'id'\nSCENE_ORDER = 'order'\nSCENE_ICON_ID = 'iconId'\nSCENE_COLOR_ID = 'colorId'\n\n\nclass Scenes(ApiEntryPoint):\n def __init__(self, hub_ip, loop, websession=None):\n ApiEntryPoint.__init__(self, loop, websession,\n get_base_path(hub_ip, URL_SCENES))\n\n @staticmethod\n def sanitize_resources(scenes: dict):\n \"\"\"Cleans up incoming scene data\n\n :param scenes: The dict with scene data to be sanitized.\n :returns: Cleaned up scene dict.\n 
\"\"\"\n try:\n for scene in scenes[ATTR_SCENE_DATA]:\n try:\n scene[ATTR_NAME_UNICODE] = base64_to_unicode(\n scene[ATTR_NAME])\n except binascii.Error:\n pass\n return scenes\n except (KeyError, TypeError):\n _LOGGER.debug(\"no scene data available\")\n return None\n\n @asyncio.coroutine\n def create_scene(self, room_id, name,\n color_id=0, icon_id=0):\n \"\"\"Creates am empty scene.\n\n Scenemembers need to be added after the scene has been created.\n\n :returns: A json object including scene id.\n \"\"\"\n name = unicode_to_base64(name)\n _data = {\"scene\":\n {SCENE_ROOM_ID: room_id,\n ATTR_NAME: name,\n SCENE_COLOR_ID: color_id,\n SCENE_ICON_ID: icon_id\n }}\n _response, status = yield from self.request.post(\n self._base_path, data=_data)\n if status == 200 or status == 201:\n _LOGGER.debug(\"Scene successfully created\")\n return _response\n else:\n _LOGGER.error(\"Error creating scene\")\n return None\n","sub_path":"aiopvapi/scenes.py","file_name":"scenes.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"280726758","text":"import os\nimport re\nimport time\nfrom collections import defaultdict\nfrom multiprocessing import Pool\n\nfrom django.utils.module_loading import import_string\n\nfrom django.conf import settings\nfrom django.db import transaction, IntegrityError\n\nfrom cap.models import Volume, Page, Case\n\n\n### helpers ###\n\n# Set up a Django abstract storage class for reading and writing to a file store -- could be either S3 or local files.\ningest_storage_class = import_string(settings.INGEST_STORAGE['class'])\ningest_storage = ingest_storage_class(**settings.INGEST_STORAGE.get('kwargs', {}))\n\ndef get_file_contents(path):\n print(\"Getting\", path)\n with ingest_storage.open(path) as f:\n print(\"Got\", f.read(100))\n f.seek(0)\n return f.read().decode('utf8')\n\n### code ###\n\ndef ingest_volume(volume_path):\n print(\"Storing volume\", volume_path)\n start_time = time.time()\n times = []\n\n # get volume ID\n vol_barcode = os.path.basename(volume_path.split('_redacted', 1)[0])\n alto_barcode_to_case_map = defaultdict(list)\n\n files = volume_files(volume_path)\n\n # save volume\n volmets_path = files['volume']\n\n try:\n with transaction.atomic():\n volume = Volume.objects.filter(barcode=vol_barcode).first()\n if volume:\n existing_case_ids = set(Case.objects.filter(volume=volume).values_list('barcode', flat=True))\n existing_page_ids = set(Page.objects.filter(volume=volume).values_list('barcode', flat=True))\n else:\n existing_case_ids = existing_page_ids = set()\n volume = Volume(orig_xml=get_file_contents(volmets_path), barcode=vol_barcode)\n volume.save()\n\n print(\"Processing Cases for \" + volume_path)\n # save cases\n for xml_path in files['casemets']:\n case_barcode = vol_barcode + \"_\" + xml_path.split('.xml', 1)[0].rsplit('_', 1)[-1]\n if case_barcode not in existing_case_ids:\n case = Case(orig_xml=get_file_contents(xml_path), volume=volume, barcode=case_barcode)\n case.save()\n\n # store case-to-page matches\n for alto_barcode in set(re.findall(r'file ID=\"alto_(\\d{5}_[01])\"', case.orig_xml)):\n alto_barcode_to_case_map[vol_barcode + \"_\" + alto_barcode].append(case.id)\n\n print(\"Processing Altos for \" + volume_path)\n # save altos\n for xml_path in files['alto']:\n alto_barcode = vol_barcode + \"_\" + xml_path.split('.xml', 1)[0].rsplit('_ALTO_', 1)[-1]\n if alto_barcode not in existing_page_ids:\n page = 
Page(orig_xml=get_file_contents(xml_path), volume=volume, barcode=alto_barcode)\n page.save()\n\n # write case-to-page matches\n if alto_barcode_to_case_map[alto_barcode]:\n page.cases.set(alto_barcode_to_case_map[alto_barcode])\n\n # Add relationship between pages and cases.\n # This could be done instead of the manual building of relationships up above,\n # if the sql was fast enough.\n # build_case_page_join_table(session, volume_id)\n\n except IntegrityError as e:\n print(\"Integrity Error... {} probably already exists: {}\".format(volmets_path, e))\n\n # # write completed volume ID to file so we won't try to import it again if this is re-run\n # with open(ALREADY_READ_FILE_PATH, 'a') as out:\n # out.write(vol_barcode+\"\\n\")\n\n print(\"-- stored in %s: %s\" % (time.time()-start_time, volume_path))\n\ndef ingest_volumes():\n \"\"\"\n This function deploys the list of volumes to the ingest_volume function for processing\n \"\"\"\n\n # # load list of volume IDs we've previously imported\n # if os.path.isfile(ALREADY_READ_FILE_PATH):\n # with open(ALREADY_READ_FILE_PATH) as in_file:\n # already_read = set(in_file.read().split())\n # else:\n # already_read = []\n\n # find list of volumes to import from s3\n # build this as a list so we can pass it to the process Pool\n\n dirs = all_volumes()\n\n volume_paths = []\n for i, volume_path in enumerate(dirs):\n\n # skip dirs that are superseded by the following version\n base_name = volume_path.split('_redacted', 1)[0]\n if i < len(dirs)-1 and dirs[i+1].startswith(base_name):\n continue\n\n # # skip volumes read on previous run\n # vol_id = os.path.basename(volume_path.split('_redacted', 1)[0])\n # if vol_id in already_read:\n # continue\n\n volume_paths.append(volume_path)\n\n # process volume directories in parallel processes\n pool = Pool(15)\n pool.map(ingest_volume, volume_paths)\n\n # keep this around in case we want to debug without using the process pool:\n #for i in volume_paths:\n # ingest_volume(i)\n\ndef volume_files(volume_path):\n \"\"\" This just gets all of the files in the volume directory, and puts them into\n a dictionary with a 'volume' array which has the volume mets and md5 files,\n 'images' for the pics, 'alto' for the alto files, and 'casemets' for the\n case files. 
I have one function to get all of the files rather than a generator\n to step through the results directly because there's a small enough number of files\n per volume to not be a 'huge' memory concern, and as far as wall clock time\n goes, the request is the biggest drag, so having a different request for alto,\n casemets, and volume files would be much slower.\n \"\"\"\n files = defaultdict(list)\n\n # check file paths for these patterns in order until we find one that matches\n regexes = [\n ('alto', re.compile(r'/alto/.*\\.xml$')),\n ('images', re.compile(r'/images/.*\\.(?:jp2|tif)$')),\n ('casemets', re.compile(r'/casemets/.*\\.xml$')),\n ('volume', re.compile(r'\\.xml$')),\n ('md5', re.compile(r'\\.md5$')),\n ]\n\n print(\"Getting Volume Files for \" + volume_path)\n for file_name in ingest_storage.iter_files(volume_path):\n for category, regex in regexes:\n if regex.search(file_name):\n files[category].append(file_name)\n break\n\n # unwrap lists that should only have one entry\n files['volume'] = files.get('volume', [None])[0]\n files['md5'] = files.get('md5', [None])[0]\n\n return files\n\ndef all_volumes():\n \"\"\" \n Gets all of the volume \"directories\" in settings.INGEST_VOLUMES_PATH.\n \"\"\"\n print(\"Getting Volume List\")\n volumes = []\n for i, subdir in enumerate(ingest_storage.iter_subdirs()):\n volumes.append(subdir)\n if settings.INGEST_VOLUME_COUNT > 0 and i >= settings.INGEST_VOLUME_COUNT-1:\n return volumes\n return volumes","sub_path":"capstone/scripts/ingest_files.py","file_name":"ingest_files.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105316586","text":"import urllib.request\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'\n }\nwz='http://www.meitulu.cn'\nwz_load=requests.get(wz,headers=headers).text\nbs=BeautifulSoup(wz_load,'lxml')\nlist=bs.find_all('a',{'target':'_blank','href':re.compile('/item/.*?\\.html')})\n\n#\nfor i in range(20):\n for l in list:\n url2=str(l['href'])[-9:-5]\n url1='http://www.meitulu.cn/item/'\n url=url1+url2\n other1='.html'\n URL=url+other1\n html1 = urllib.request.urlopen(URL).read().decode('utf-8')\n soup1 = BeautifulSoup(html1, features='lxml')\n\n\n # 取出标题,创造文件夹\n title = str(soup1.h1.string)\n os.makedirs('./%s' % title, exist_ok=True)\n # 取出页数\n pp=soup1.find_all('p')[2]\n for p in pp:\n times=int(str(p)[-3:-1])\n\n def save():\n soup=BeautifulSoup(html,features='lxml')\n A=soup.find_all('img',{'src':re.compile('http://image.meitulu.cn/d/file/bigpic/.*?\\.jpg')})\n print('正在下载的是%s' % title)\n print('下载到第{}张图片...还有{}张待下载...'.format(i + 1,(times-int(i+1))))\n for a in A:\n IMAGE_URL=a['src']\n r = requests.get(IMAGE_URL, headers=headers)\n img_name = IMAGE_URL.split('/')[-1]\n with open('./%s/%s' % (title ,img_name), 'wb') as f:\n f.write(r.content)\n for i in range(times):\n if i==0:\n html = requests.get(url + other1, headers=headers).text\n soup = BeautifulSoup(html, features='lxml')\n save()\n\n else:\n other2='_'+str(i+1)+other1\n html = requests.get(url + other2, headers=headers).text\n save()\n print('下载已完成{}/20'.format(i+1))\nprint('**********下载完成!!!!*********')\n\n","sub_path":"meitulu.py","file_name":"meitulu.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"502940278","text":"import numpy as np\nfrom sklearn import svm, preprocessing, cross_validation, neighbors\nimport pandas as pd\nallColumns = ['Date', 'OilOpen', 'OilHigh', 'OilLow', 'OilClose',\n 'OilVolume', 'FXClose', 'TOOpen', 'TOHigh', 'TOLow',\n 'TOClose', 'TOVolume', 'SPOpen', 'SPHigh', 'SPLow',\n 'SPClose', 'SPVolume', 'OilSpreadD', 'TOSpreadD',\n 'SPSpreadD', 'OilChangeD', 'TOChangeD', 'SPChangeD',\n 'OilVolumeD', 'TOVolumeD', 'SPVolumeD', 'FXDiffD',\n 'OilChangeW', 'TOChangeW', 'SPChangeW', 'OilVolumeW',\n 'TOVolumeW', 'SPVolumeW', 'FXDiffW', 'FXDifferenceD',\n 'FXstatusD', 'FXDifferenceW', 'FXstatusW']\n\n\ndef BuildDataSet():\n data_df = pd.read_csv(\"FXComplete0.csv\")\n #print(list(data_df.columns.values))\n data_df = data_df.replace(\"NaN\", 0).replace(\"N/A\", 0).replace(\"inf\", 0).replace(\"-inf\", 0)\n X = data_df.drop([\"Date\", \"FXDifferenceD\", \"FXstatusD\", \"FXDifferenceW\", \"FXstatusW\", \"SPVolume\", \"TOVolume\", \"OilVolume\",\n \"OilOpen\", \"OilHigh\", \"OilLow\", \"TOOpen\", \"TOHigh\", \"TOLow\",\n \"TOClose\", \"SPOpen\", \"SPHigh\", \"SPLow\", \"SPClose\", 'TOVolumeW', 'SPVolumeW',\n 'TOChangeW', 'SPChangeW', 'OilSpreadD', 'TOSpreadD',\n 'SPSpreadD', 'OilChangeD', 'TOChangeD', 'SPChangeD',\n 'OilVolumeD', 'TOVolumeD', 'SPVolumeD', 'FXDiffD', 'TOChangeW'], axis=1)\n FEATURES = list(X.columns.values)\n X = np.array(data_df[FEATURES].values)\n X = preprocessing.scale(X)\n\n # y = data_df[\"FXstatusD\"].values.tolist()\n # Z = np.array(data_df[\"FXDifferenceD\"])\n\n y = data_df[\"FXstatusW\"].values.tolist()\n Z = np.array(data_df[\"FXDifferenceW\"])\n\n return X, y, Z\n\ndef Analysis():\n X,y,Z = BuildDataSet()\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y, test_size= 0.2)\n\n total_invests = 0\n if_strat = 0\n if_not = 0\n\n clf = svm.SVC(kernel=\"linear\")\n #clf = neighbors.KNeighborsClassifier(n_neighbors=7)\n clf.fit(X_train, y_train)\n\n NPcount = 0\n Pcount = 0\n for i in range(len(X_test)):\n if clf.predict(X_test[-i].reshape(1,-1)) == 1:\n total_invests += 1\n if_strat += 1 + (1 * Z[-i])\n if(Z[-i] > 0):\n Pcount += 1\n else:\n NPcount += 1\n posPCT = Pcount/(NPcount+Pcount)*100.0\n\n return clf.score(X_test, y_test), total_invests, if_strat, posPCT\n\ndef AnalysisXTimes(x):\n score = 0\n total_invests = 0\n if_strat = 0\n posPCT = 0\n for i in range(x):\n score_, total_invests_, if_strat_, posPCT_ = Analysis()\n score += score_\n total_invests += total_invests_\n if_strat += if_strat_\n posPCT += posPCT_\n print(i)\n score = score/x\n total_invests = total_invests/x\n if_strat = if_strat/x\n posPCT = posPCT/x\n try:\n pct1W = ((if_strat-total_invests) /total_invests )* 100.0\n except:\n pct1W = 0\n print(\"Score :\" + str(score))\n print(\"PosPCT :\" + str(posPCT))\n print(\"Total invest :\" + str(total_invests))\n print(\"Total gain :\" + str(if_strat))\n print(\"Pctwin :\" + str(pct1W))\n #print(\"C = .005\")\n print(\"7n\")\nAnalysisXTimes(100)\n","sub_path":"ML/FXCLF.py","file_name":"FXCLF.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59574218","text":"import numpy as np\n\n\ndef compute_metrics(pred, processor, wer_metric):\n pred_logits = pred.predictions\n pred_ids = np.argmax(pred_logits, axis=-1)\n\n pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id\n\n pred_str = processor.batch_decode(pred_ids)\n # we do not want to group tokens when computing the metrics\n label_str = 
processor.batch_decode(pred.label_ids, group_tokens=False)\n\n wer = wer_metric.compute(predictions=pred_str, references=label_str)\n\n return {\"wer\": wer}\n","sub_path":"imperio/sonorus/experimental/modules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"575724270","text":"\"\"\"Install python packages using pip\n\nPackages are downloaded/installed in parallel, allowing for faster installs\nwhen using many nodes.\n\nFor example to install the flask and SQLAlchemy packages on all the nodes::\n\n [plugin webapp-packages]\n setup_class = starcluster.plugins.pypackage.PyPackageSetup\n packages = flask, SQLAlchemy\n\nIt can also be used to install the development version of packages from\ngithub, for instance if you want to install the master branch of IPython\nand the latest released version of some dependencies::\n\n [plugin ipython-dev]\n setup_class = starcluster.plugins.pypackage.PyPackageSetup\n install_cmd = pip install -U %s\n packages = pyzmq,\n python-msgpack,\n git+http://github.com/ipython/ipython.git\n\n\"\"\"\nimport time\nfrom threading import Thread\n\nfrom starcluster.clustersetup import DefaultClusterSetup\nfrom starcluster.logger import log\nfrom starcluster.utils import print_timing\n\n\nclass PyPackageSetup(DefaultClusterSetup):\n \"\"\"Install Python packages with pip.\"\"\"\n\n def __init__(self, packages=\"\", install_command=\"pip install -U %s\"):\n super(PyPackageSetup, self).__init__()\n self.install_command = install_command\n self.packages = [p.strip() for p in packages.split(\",\") if p.strip()]\n\n @print_timing(\"PyPackageSetup\")\n def install_packages(self, nodes, dest='all nodes'):\n log.info(\"Installing Python packages on %s:\" % dest)\n commands = [self.install_command % p for p in self.packages]\n for command in commands:\n log.info(\"$ \" + command)\n cmd = \"\\n\".join(commands)\n for node in nodes:\n self.pool.simple_job(node.ssh.execute, (cmd,), jobid=node.alias)\n self.pool.wait(len(nodes))\n\n def run(self, nodes, master, user, user_shell, volumes):\n self.install_packages(nodes)\n\n def on_add_node(self, node, nodes, master, user, user_shell, volumes):\n self.install_packages([node], dest=node.alias)\n\n def on_remove_node(self, node, nodes, master, user, user_shell, volumes):\n pass","sub_path":".starcluster/plugins/pypackage.py","file_name":"pypackage.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556866526","text":"import os\nfrom werkzeug.wsgi import SharedDataMiddleware\nfrom forward_service import ForwardService\n\ndef create_app(with_static=True):\n app = ForwardService()\n if with_static:\n app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {\n '/static': os.path.join(os.path.dirname(__file__), 'static')\n })\n return app\n\nif __name__ == '__main__':\n from werkzeug.serving import run_simple\n app = create_app()\n # for debugging/development, set use_debugger=True, use_reloader=True,\n run_simple('localhost', 5005, app)","sub_path":"src/load_app.py","file_name":"load_app.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"209011203","text":"#q7_display_matrix.py\n#Chua Ming Yu\n#Date created: 20/2/2013\n#Date edited: 20/2/2013\n\nimport random\ndef print_matrix(n):\n m=n\n a=n\n while m!=0:\n n=a\n while n!=0:\n 
print(random.randint(0,1), end=\" \")\n            n-=1\n        print()\n        m-=1\n\nprint_matrix(6)\n","sub_path":"practical 3/q7_display_matrix.py","file_name":"q7_display_matrix.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142524524","text":"# Function that only accepts integer input\ndef inputNumber(mensaje = 'Ingresa un número:', error = 'Error: Tienes que ingresar un numero entero'):\n    while True:\n        try:\n            x = int(input(mensaje))\n        except ValueError:\n            print(\"\\n\" + error + \"\\n\")\n            continue\n        else:\n            return x\n\n# Function that checks that at least 1 character was typed\ndef inputRequired(mensaje = 'Ingresa almenos 1 caracter:', error = 'Error: Tienes que ingresar almenos 1 carácter'):\n    while True:\n        x = input(mensaje)\n        if len(x) < 1:\n            print(\"\\n\" + error + \"\\n\")\n        else:\n            break\n    return x\n\n# Function that only accepts positive numbers\ndef inputPositiveNumber(mensaje = 'Ingresa un número positivo:', error = 'Error: El número ingresado tiene que ser positivo'):\n    while True:\n        x = inputNumber(mensaje)\n        if x < 0:\n            print(\"\\n\" + error + \"\\n\")\n        else:\n            break\n    return x\n\n# Function that only accepts numbers greater than or equal to a given threshold\ndef inputHigherOrEqualNumber(higherOrEqualThan, mensaje = 'default', error = 'default'):\n\n    if mensaje == 'default':\n        # str() is required: higherOrEqualThan is an int, and concatenating it raw raises TypeError\n        mensaje = 'Ingresa un numero mayor a ' + str(higherOrEqualThan) + ':'\n\n    if error == 'default':\n        error = 'Error: El número ingresado debe ser mayor o igual a ' + str(higherOrEqualThan)\n\n    while True:\n        x = inputNumber(mensaje)\n        if x < higherOrEqualThan:\n            print('\\n',error,'\\n')\n        else:\n            break\n    return x\n","sub_path":"modules/utilities/input.utilities.py","file_name":"input.utilities.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119005573","text":"import mord\nfrom sklearn import linear_model, metrics, preprocessing\n\nclass Predictiv_Learner_Mord():\n\n    def do_prediction(self, X_train, X_test, y_train, y_test):\n\n        clf2 = mord.LogisticAT(alpha=1.0)\n        clf2.fit(X_train, y_train)\n        result = clf2.predict(X_test)\n        matrix = metrics.confusion_matrix(y_test, result)\n        print(matrix)\n\n        score = metrics.average_precision_score(y_test, result)\n        print(score)\n\n        # ranking = metrics.label_ranking_average_precision_score(y.values.argmax(axis=1), result)\n        # print('ranking: ', ranking)\n\n        #print('Mean Absolute Error of LogisticAT %s' %\n        #      metrics.mean_absolute_error(clf2.predict(X), y))\n\n        #clf3 = mord.LogisticIT(alpha=1.)\n        #clf3.fit(X, y)\n        #print('Mean Absolute Error of LogisticIT %s' %\n        #      metrics.mean_absolute_error(clf3.predict(X), y))\n\n        #clf4 = mord.LogisticSE(alpha=1.)\n        #clf4.fit(X, y)\n        #print('Mean Absolute Error of LogisticSE %s' %\n        #      metrics.mean_absolute_error(clf4.predict(X), y))","sub_path":"implementation_pointwise/predictiv_learn_mord.py","file_name":"predictiv_learn_mord.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"398165083","text":"import argparse\r\nimport logging\r\nimport os\r\nfrom collections import OrderedDict, namedtuple\r\n\r\nfrom config import Config, getConfig, test_optimizers\r\nimport numpy as np\r\nimport torch\r\nfrom utils import utils\r\nfrom optimize import test_neural, test_normal, train_neural, find_best_lr\r\nfrom utils.result import ResultDict\r\n
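# OptimizerParams below wraps saving/loading of trained neural-optimizer weights\r\nfrom 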
optimizers.optim_helpers import OptimizerParams\r\n\r\n\r\nlogger = logging.getLogger('main')\r\nC = utils.getCudaManager('default')\r\n\r\nparser = argparse.ArgumentParser(description='Pytorch implementation of L2L')\r\nparser.add_argument('--problem', type=str, default='mnist',\r\n choices=['debug', 'quadratic', 'mnist'],\r\n help='problem for the optimizee to solve')\r\n# parser.add_argument('--optimizer', type=str, default='lstm',\r\n# help='type of neural optimizer')\r\nparser.add_argument('--cpu', action='store_true',\r\n help='disable CUDA')\r\n# parser.add_argument('--max_epoch', type=int, default=10000, metavar='N',\r\n# help='number of epoch (default: 10000)')\r\nparser.add_argument('--result_dir', type=str, default='result')\r\nparser.add_argument('--save_dir', type=str, default='')\r\n#parser.add_argument('--desc', type=str, default='')\r\nparser.add_argument('--load_dir', type=str, default='')\r\nparser.add_argument('--retrain', nargs='*', type=str, default=[],\r\n choices=test_optimizers, help='name list of optimizers'\r\n 'that will be forcibly retrained even if loadable.')\r\nparser.add_argument('--retest', nargs='*', type=str, default=[],\r\n choices=test_optimizers, help='name list of optimizers'\r\n 'that will be forcibly retested even if loadable.')\r\nparser.add_argument('--retest_all', action='store_true',\r\n help='all the optimizers will be forcibly retested.')\r\nparser.add_argument('--volatile', action='store_true',\r\n help='supress saving fuctions')\r\nparser.add_argument('--meta_optim', type=str, default='SGD')\r\nparser.add_argument('--lr', type=float, default=1.0)\r\nparser.add_argument('--train_optim', nargs='*', type=str,\r\n default=['obsrv_multi'])\r\nparser.add_argument('--test_optim', nargs='*', type=str,\r\n default=[])\r\nparser.add_argument('--no_mask', action='store_true')\r\nparser.add_argument('--k_obsrv', type=int, default=10)\r\n\r\ndef main():\r\n args = parser.parse_args()\r\n # add arguments that will be used for meta training.\r\n train_args = ['meta_optim', 'lr', 'no_mask', 'k_obsrv']\r\n test_args = ['no_mask', 'k_obsrv']\r\n train_args = {k: v for k, v in vars(args).items() if k in train_args}\r\n test_args = {k: v for k, v in vars(args).items() if k in test_args}\r\n # set CUDA\r\n args.cuda = not args.cpu and torch.cuda.is_available()\r\n C.set_cuda(args.cuda)\r\n # set load dir\r\n load_dir = os.path.join(args.result_dir, args.load_dir)\r\n if args.load_dir and not os.path.exists(load_dir):\r\n raise Exception(f'{load_dir} does NOT exist!')\r\n # set save dir: saving functions will be suppressed if save_dir is None\r\n args.result_dir = None if args.volatile else args.result_dir\r\n save_dir = utils.prepare_dir(args.problem, args.result_dir, args.save_dir)\r\n if not args.test_optim:\r\n args.test_optim = args.train_optim\r\n print('No test optimzers.\\nAll the optimizers set to be trained '\r\n f'will be automatically on the list: {args.train_optim}\\n')\r\n # set problem & config\r\n print(f'Problem: {args.problem}')\r\n cfg = Config(getConfig(args))\r\n cfg.update_from_parsed_args(args)\r\n cfg.save(save_dir)\r\n #import pdb; pdb.set_trace()\r\n problem = cfg.problem.dict\r\n neural_optimizers = cfg.neural_optimizers.dict\r\n normal_optimizers = cfg.normal_optimizers.dict\r\n test_optimizers = cfg.test_optimizers\r\n # TODO: force to call this by wrapping it in class\r\n if args.retest_all:\r\n args.retest = test_optimizers\r\n\r\n params = {}\r\n ##############################################################################\r\n 
print('\\nMeta-training..')\r\n for name in [opt for opt in test_optimizers if opt in neural_optimizers]:\r\n if name not in args.retrain and OptimizerParams.is_loadable(name, load_dir):\r\n params[name] = OptimizerParams.load(name, load_dir).save(name, save_dir)\r\n else:\r\n print(f\"\\nTraining neural optimizer: {name}\")\r\n kwargs = neural_optimizers[name]['train_args']\r\n kwargs.update(cfg.args.get_by_names(train_args))\r\n # print(f\"Module name: {kwargs['optim_module']}\")\r\n params[name] = train_neural(name, save_dir, **problem, **kwargs)\r\n ##############################################################################\r\n print('\\n\\n\\nMeta-testing..')\r\n results = {}\r\n for name in test_optimizers:\r\n # np.random.seed(0)\r\n # if not utils.is_loadable_result(load_dir, name, args.force_retest):\r\n if name not in args.retest and ResultDict.is_loadable(name, load_dir):\r\n results[name] = ResultDict.load(name, load_dir).save(name, save_dir)\r\n else:\r\n if name in normal_optimizers:\r\n print(f'\\nOptimizing with static optimizer: {name}')\r\n kwargs = normal_optimizers[name]\r\n result = test_normal(name, save_dir, **problem, **kwargs)\r\n #import pdb; pdb.set_trace()\r\n lr_list = [1.0, 0.3, 0.1, 0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001, \r\n 0.00003, 0.00001]\r\n best_loss, best_lr = find_best_lr(name, save_dir, lr_list, **problem, **kwargs)\r\n results[name] = result\r\n elif name in neural_optimizers:\r\n print(f'\\n\\nOptimizing with learned optimizer: {name}')\r\n kwargs = neural_optimizers[name]['test_args']\r\n kwargs.update(cfg.args.get_by_names(test_args))\r\n # print(f\"Module name: {kwargs['optim_module']}\")\r\n result = test_neural(name, save_dir, params[name], **problem, **kwargs)\r\n results[name] = result\r\n\r\n ##############################################################################\r\n print('End of program.')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"run_baek/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529150180","text":"#!/usr/bin/env python3\n\n#\n# Copyright (c) 2020 Project CHIP Authors\n# Copyright (c) 2016-2017 Nest Labs, Inc.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n##\n# @file\n# Implements ChipStateLoad class that sets up virtual network topology.\n#\n\nimport json\nimport os\nimport sys\n\nfrom happy.ReturnMsg import ReturnMsg\nfrom happy.Utils import *\n\nimport happy.HappyStateLoad\n\nfrom Chip import Chip\nfrom ChipState import ChipState\n\noptions = {}\noptions[\"quiet\"] = False\noptions[\"json_file\"] = None\n\nLOG_TEXT_PREFIX = \"[localhost] ChipStateLoad: \"\n\n\ndef option():\n return options.copy()\n\n\nclass ChipStateLoad(ChipState):\n def __init__(self, opts=options):\n ChipState.__init__(self)\n\n self.quiet = opts[\"quiet\"]\n self.new_json_file = opts[\"json_file\"]\n\n def __pre_check(self):\n if self.new_json_file is None:\n 
emsg = \"Missing name of file that specifies virtual network topology.\"\n self.logger.error(LOG_TEXT_PREFIX + emsg)\n self.exit()\n\n if not os.path.exists(self.new_json_file):\n emsg = \"Cannot find the configuration file {}\".format(\n self.new_json_file)\n self.logger.error(LOG_TEXT_PREFIX + emsg)\n self.exit()\n\n self.new_json_file = os.path.realpath(self.new_json_file)\n\n emsg = \"Loading Chip Topology from file {}.\".format(self.new_json_file)\n self.logger.debug(LOG_TEXT_PREFIX + emsg)\n\n def __load_JSON(self):\n emsg = \"Import state file {}.\".format(self.new_json_file)\n self.logger.debug(LOG_TEXT_PREFIX + emsg)\n\n try:\n with open(self.new_json_file, 'r') as jfile:\n json_data = jfile.read()\n\n self.chip_topology = json.loads(json_data)\n\n except Exception:\n emsg = \"Failed to load JSON state file: {}\".format(\n self.new_json_file)\n self.logger.error(LOG_TEXT_PREFIX + emsg)\n self.exit()\n\n def __load_network_topology(self):\n emsg = \"Loading network topology.\"\n self.logger.debug(LOG_TEXT_PREFIX + emsg)\n\n options = happy.HappyStateLoad.option()\n options[\"quiet\"] = self.quiet\n options[\"json_file\"] = self.new_json_file\n\n happyLoad = happy.HappyStateLoad.HappyStateLoad(options)\n happyLoad.run()\n\n self.readState()\n\n def __post_check(self):\n pass\n\n def run(self):\n with self.getStateLockManager():\n\n self.__pre_check()\n\n self.__load_JSON()\n\n self.__load_network_topology()\n\n self.__post_check()\n\n return ReturnMsg(0)\n","sub_path":"src/test_driver/happy/lib/ChipStateLoad.py","file_name":"ChipStateLoad.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126563883","text":"import sqlite3\r\nimport pandas as pd\r\nimport numpy as np\r\nimport csv\r\nimport gzip\r\nfrom collections import defaultdict\r\n\r\nif __name__ == '__main__':\r\n conn = sqlite3.connect('data/instacart.db')\r\n c = conn.cursor()\r\n\r\n # Get the orders properly sorted, so we can directly\r\n # group by user_id, order_id and then compute the weights.\r\n q = \"\"\"\r\n SELECT user_id, order_id, days_since_prior_order \r\n FROM orders\r\n ORDER BY order_number\r\n \"\"\"\r\n\r\n orders = pd.read_sql(q, conn)\r\n\r\n # First day is 0\r\n orders.ix[orders.days_since_prior_order == '', 'days_since_prior_order'] = 0\r\n\r\n # Cumsum to obtain total days since *first* order\r\n orders_g = orders.groupby(['user_id'])['days_since_prior_order'].cumsum()\r\n orders['cumulative_days'] = orders_g.astype(int)\r\n # But I need to subtract cumulative_days from the actual day of the \r\n # order we want to compute... 
which will be the maximum\r\n max_cum_days = orders.groupby(['user_id'])['cumulative_days'].max()\r\n max_cum_days = max_cum_days.reset_index()\r\n max_cum_days.columns = ['user_id', 'max_order_day']\r\n orders = pd.merge(orders, max_cum_days, on = \"user_id\", how = 'left')\r\n\r\n # Compute weight\r\n orders['w'] = (np.cos(2 * (orders['max_order_day'] - orders['cumulative_days']) / 365.0 * 3.14) + 1) / 2\r\n\r\n # Remove unwanted columns (for DB storage, let's try not do duplicate)\r\n res = orders\r\n res = res.drop(['days_since_prior_order', 'cumulative_days', 'max_order_day'],\r\n axis = 1)\r\n\r\n # Insert weights into the DB\r\n res.to_sql('order_weights', conn, if_exists = 'replace')\r\n c.execute(\"CREATE INDEX IF NOT EXISTS idx_tmp1 ON order_weights(user_id)\")\r\n c.execute(\"CREATE INDEX IF NOT EXISTS idx_tmp2 ON order_weights(order_id)\")\r\n\r\n\r\n","sub_path":"data/external/repositories_2to3/164369/kaggle-public-master/instacart/compute_weights.py","file_name":"compute_weights.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322218831","text":"import argparse, os, subprocess, sys\nfrom time import gmtime, strftime\n\nimport pandas as pd\nimport numpy as np\n\nimport boto3\n\ndef pip_install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n \ndef spacy_install(package):\n subprocess.call([sys.executable, \"-m\", \"spacy\", \"download\", package])\n \nif __name__=='__main__':\n \n parser = argparse.ArgumentParser()\n # preprocessing arguments\n parser.add_argument('--filename', type=str)\n parser.add_argument('--num-reviews', type=int)\n parser.add_argument('--library', type=str, default='spacy')\n\n args, _ = parser.parse_known_args()\n print('Received arguments {}'.format(args))\n filename = args.filename\n num_reviews = args.num_reviews\n library = args.library\n\n # Load dataset into a pandas dataframe\n input_data_path = os.path.join('/opt/ml/processing/input', filename)\n print('Reading input data from {}'.format(input_data_path))\n data = pd.read_csv(input_data_path, sep='\\t', compression='gzip',\n error_bad_lines=False, dtype='str')\n \n # Remove lines with missing values\n data.dropna(inplace=True)\n \n # Keep only 'num_reviews' rows\n if num_reviews is not None:\n data = data[:num_reviews]\n \n # Drop unwanted columns\n data['review_body'] = data['review_headline'] + ' ' + data['review_body']\n data = data[['review_id', 'product_id', 'star_rating', 'review_body']]\n \n # Add label column\n data['label'] = data.star_rating.map({\n '1': '__label__negative__',\n '2': '__label__negative__',\n '3': '__label__neutral__',\n '4': '__label__positive__',\n '5': '__label__positive__'})\n \n # Tokenize data\n print('Tokenizing reviews')\n \n if library == 'nltk':\n pip_install('nltk')\n import nltk\n nltk.download('punkt')\n data['review_body'] = data['review_body'].apply(nltk.word_tokenize)\n data['review_body'] = data.apply(lambda row: \" \".join(row['review_body']).lower(), axis=1)\n \n elif library == 'spacy':\n pip_install('spacy')\n spacy_install('en_core_web_sm')\n import spacy\n spacy_nlp = spacy.load('en_core_web_sm')\n\n def tokenize(text):\n tokens = spacy_nlp.tokenizer(text)\n tokens = [ t.text for t in tokens ]\n return \" \".join(tokens).lower()\n data['review_body'] = data['review_body'].apply(tokenize)\n \n else:\n print('Incorrect library name: should be nltk or spacy.')\n exit()\n \n # Create output dirs\n bt_output_dir = 
'/opt/ml/processing/output/bt/'\n    fs_output_dir = '/opt/ml/processing/output/fs/'\n    os.makedirs(bt_output_dir, exist_ok=True)\n    os.makedirs(fs_output_dir, exist_ok=True)\n\n    # Save data in TSV format for SageMaker Feature Store\n    fs_output_path = os.path.join(fs_output_dir, 'fs_data.tsv')\n\n    print('Saving SageMaker Feature Store training data to {}'.format(fs_output_path))\n    data.to_csv(fs_output_path, index=False, header=True, sep='\\t')\n\n    # Save data in BlazingText format, with label column at the front\n    bt_output_path = os.path.join(bt_output_dir, 'bt_data.txt')\n\n    data = data[['label', 'review_body']]\n    print('Saving BlazingText data to {}'.format(bt_output_path))\n    np.savetxt(bt_output_path, data.values, fmt='%s')","sub_path":"Chapter 10/feature_store/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"61440844","text":"from sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nimport datetime\nstart = datetime.datetime.now()\n\ndigits = datasets.load_digits()\nX_digits = digits.data / digits.data.max()\ny_digits = digits.target\n\nn_samples = len(X_digits)\n\nX_train = X_digits[:int(.9 * n_samples)]\ny_train = y_digits[:int(.9 * n_samples)]\nX_test = X_digits[int(.9 * n_samples):]\ny_test = y_digits[int(.9 * n_samples):]\n\n# LogisticRegression is imported directly from sklearn.linear_model above\nlogistic = LogisticRegression(solver='lbfgs', max_iter=1000, multi_class='multinomial')\nlogistic.fit(X_train, y_train)\nprint(logistic.score(X_test, y_test))\n# accuracy\nend = datetime.datetime.now()\nprint(end - start)\n# running time\n","sub_path":"proj3/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403070969","text":"from django.db import models\n\n# Create your models here.\nfrom JD_project.helper import make_password\n\n\n# Client user table\nclass JdUser(models.Model):\n    user_name = models.CharField(max_length=50, verbose_name='用户名', blank=True, null=True)\n    user_id = models.CharField(max_length=50, verbose_name='用户ID', primary_key=True)\n    auth_string = models.CharField(max_length=255, verbose_name='用户密码', blank=True, null=True)\n    is_val = models.IntegerField(verbose_name='是否验证', default=0,\n                                 choices=((0, '未验证'), (1, '已验证')), blank=True, null=True)\n    tel = models.CharField(max_length=15, verbose_name='联系电话', blank=True, null=True)\n    asset = models.FloatField(verbose_name='余额', default=0, blank=True, null=True)\n    u_img = models.TextField(verbose_name='用户头像地址', blank=True, null=True)\n    u_bank = models.CharField(max_length=50, verbose_name='银行卡', blank=True, null=True)\n    user_card = models.CharField(max_length=50, verbose_name='身份证号', blank=True, null=True)\n    pay_pwd = models.CharField(max_length=50, verbose_name='支付密码', blank=True, null=True)\n    u_email = models.CharField(max_length=20, verbose_name='邮箱', blank=True, null=True)\n    u_real = models.CharField(max_length=20, verbose_name='真实姓名', blank=True, null=True)\n    u_intg = models.IntegerField(verbose_name='用户积分', default=0)\n    is_delete = models.IntegerField(verbose_name='删除标识位', default=0, choices=((0, '未删除'), (1, '已删除')), blank=True, null=True)\n\n    def __str__(self):\n        return self.user_name\n\n    class Meta:\n        db_table = 'jd_user'\n        verbose_name = '用户信息表'\n        verbose_name_plural = verbose_name\n\n\n# Client user address table\nclass UAddress(models.Model):\n
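    # A user may store several delivery addresses; is_default marks the default one.\n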
    user_id = models.ForeignKey(JdUser, on_delete=models.CASCADE, verbose_name='关联用户')\n    user_address = models.CharField(max_length=255, verbose_name='收货地址', blank=True, null=True)\n    is_default = models.IntegerField(verbose_name='默认地址', default=0, choices=((0, '选择'), (1, '不选择')))\n\n    class Meta:\n        db_table = 'u_address'\n        verbose_name = '地址表'\n        verbose_name_plural = verbose_name\n\n\n# Favorited-goods table\nclass CollectGoods(models.Model):\n    user_id = models.ForeignKey(JdUser, on_delete=models.CASCADE, verbose_name='用户id')\n    clt_id = models.IntegerField(verbose_name='收藏id', default=0, blank=True, null=True)\n    type_id = models.IntegerField(verbose_name='商品/商户',\n                                  default=0,\n                                  choices=((0, '商品'), (1, '商户')), blank=True, null=True)\n\n    class Meta:\n        db_table = 'u_clt'\n        verbose_name = '收藏表'\n        verbose_name_plural = verbose_name\n\n\n\"\"\"\n# Favorited-merchant table\nclass CollectShopper(models.Model):\n    user_id = models.ForeignKey(JdUser, on_delete=models.CASCADE, verbose_name='用户id')\n    m_id = models.IntegerField(verbose_name='商户id')\n\n    class Meta:\n        db_table = 'u_clt_sp'\n        verbose_name = '收藏商户表'\n        verbose_name_plural = verbose_name\n\n\"\"\"\n\n\n# Merchant table\nclass JdShopper(models.Model):\n    m_name = models.CharField(max_length=50, verbose_name='商铺名称', blank=True, null=True)\n    m_username = models.CharField(max_length=50, verbose_name='商主名称', blank=True, null=True)\n    m_id = models.CharField(max_length=20, verbose_name='商户ID', primary_key=True)\n    m_pwd = models.CharField(max_length=200, verbose_name='密码', blank=True, null=True)\n    create_time = models.DateTimeField(verbose_name='注册时间', auto_now_add=True, blank=True, null=True)\n    update_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, blank=True, null=True)\n    is_active = models.IntegerField(verbose_name='登陆状态', default=1,\n                                    choices=((0, '未登录'), (1, '已登录')), blank=True, null=True)\n    m_img = models.TextField(verbose_name='****', blank=True, null=True)\n    m_email = models.CharField(max_length=50, verbose_name='商户邮箱', blank=True, null=True)\n    m_phone = models.CharField(max_length=15, verbose_name='手机号', blank=True, null=True)\n    s_add = models.CharField(max_length=255, verbose_name='地址', blank=True, null=True)\n    is_delete = models.IntegerField(verbose_name='删除标识位', default=0,\n                                    choices=((0, '未删除'), (1, '已删除')), blank=True, null=True)\n    is_val = models.IntegerField(verbose_name='验证标识位', default=1,\n                                 choices=((0, '未验证'), (1, '已验证')), blank=True, null=True)\n    note = models.CharField(max_length=50, verbose_name='备注', blank=True, null=True)\n    clt = models.IntegerField(verbose_name='收藏人数', blank=True, null=True)\n\n    def __str__(self):\n        return self.m_id\n\n    def save(self, force_insert=False, force_update=False, using=None,\n             update_fields=None):\n        # hash the password on first save; hashed values are at least 32 characters long\n        if len(self.m_pwd) < 32:\n            self.m_pwd = make_password(self.m_pwd)\n        super().save()\n\n    def image_tag(self):\n        # render the stored avatar URL as an HTML image tag\n        return u'<img src=\"%s\" />' % self.m_img\n\n    class Meta:\n        db_table = 'jd_shopper'\n        verbose_name = '商户表'\n        verbose_name_plural = verbose_name\n\n\n# Shop category\nclass ShopperType(models.Model):\n    m_id = models.ForeignKey(JdShopper, on_delete=models.CASCADE, verbose_name='关联商户')\n    type_name = models.CharField(max_length=255, verbose_name='商铺分类名称', blank=True, null=True)\n    remark = models.CharField(max_length=255, verbose_name='备注', blank=True, null=True)\n\n    class Meta:\n        db_table = 'shopper_type'\n        verbose_name = '商户分类'\n        verbose_name_plural = 
verbose_name\n","sub_path":"JD_project/apps/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"455294049","text":"#!/usr/bin/env python\nfrom concurrent.futures import ThreadPoolExecutor\nfrom netmiko import ConnectHandler\nfrom netmiko import NetMikoTimeoutException, NetMikoAuthenticationException\nfrom datetime import datetime\nimport traceback\nimport sys\nimport time\nimport re\nimport os\n\n\ndef config_filter_cisco_ios(cfg):\n \"\"\"Filter unneeded items that change from the config.\"\"\"\n\n # Strip the header line\n header_line1_re = r\"^Building configuration.*$\"\n header_line2_re = r\"^Current configuration.*$\"\n header_line3_re = r\"^!Running configuration.*$\"\n\n # Strip the service timestamps comments\n service_timestamps1_re = r\"^! Last configuration change at.*$\"\n service_timestamps2_re = r\"^! NVRAM config last updated at.*$\"\n service_timestamps3_re = r\"^! No configuration change since last restart.*$\"\n\n # Strip misc\n misc1_re = r'^ntp clock-period.*$'\n misc2_re = r'^!Time.*$'\n\n for pattern in [header_line1_re, header_line2_re, header_line3_re,\n service_timestamps1_re, service_timestamps2_re, \n service_timestamps3_re, misc1_re, misc2_re]:\n cfg = re.sub(pattern, \"\", cfg, flags=re.M).lstrip()\n\n return cfg\n\n\ndef run_task(a_device):\n\n try:\n host = a_device['host']\n a_result = {}\n a_result['host'] = host\n\n start_time = datetime.now()\n \n remote_conn = ConnectHandler(device_type=a_device['device_type'],\n host=a_device['host'],\n username=a_device['username'],\n password=a_device['password'],\n port=22,\n secret=a_device['secret'],\n #fast_cli=True\n )\n remote_conn.enable()\n\n #remote_conn.fast_cli = False\n data = remote_conn.send_command('show running-config')\n\n remote_conn.disconnect()\n\n elapsed_time_ssh = datetime.now() - start_time\n a_result['ssh_runtime'] = elapsed_time_ssh\n\n data = config_filter_cisco_ios(data)\n if a_device['device_type'] == 'cisco_ios':\n data = re.sub(r'^\\s*$', \"\", data, flags=re.M)\n\n with open(f\"configs/{host}.cfg\",\"w\") as f:\n f.write(data)\n\n return a_result\n\n except Exception as e:\n print(f'** {host} task closing with error {e}')\n a_result['exception'] = e\n a_result['traceback'] = traceback.format_exc()\n \n return a_result\n\ndef get_devices():\n\n devices = ['no.suchdomain','192.168.204.101','192.168.204.102','192.168.204.103','192.168.204.104']\n #devices = ['no.suchdomain','192.168.204.101']\n\n net_devices = {}\n for host in devices:\n a_device={}\n a_device['host']=host\n a_device['device_type']='cisco_ios'\n a_device['username']='fred'\n a_device['password']='bedrock'\n a_device['secret']=''\n net_devices[host]=a_device\n\n return net_devices\n\n\ndef main():\n\n net_devices = get_devices()\n start_time = datetime.now()\n\n results = []\n with ThreadPoolExecutor(4) as pool:\n results = pool.map(run_task, (a_device for a_device in net_devices.values()))\n\n elapsed_time = datetime.now() - start_time\n\n print('\\n--- Host task times ---')\n for a_result in results:\n if 'exception' in a_result:\n print(f\"{a_result['host']} - {a_result['exception']}\")\n #print(a_result.result['traceback'])\n else:\n print(f\"{a_result['host']} - {a_result['ssh_runtime']}\")\n print(f\"\\nTotal Elapsed time: {format(elapsed_time)}\")\n\n\nif __name__ == \"__main__\":\n 
main()\n\n","sub_path":"running-configs/future-configs.py","file_name":"future-configs.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282709506","text":"import scrapy\nfrom scrapy.http import Request, FormRequest\n\n\nclass EPlanningSpider(scrapy.Spider):\n    name = 'E_Planning'\n    allowed_domains = ['eplanning.ie']\n    start_urls = ['http://www.eplanning.ie/CarlowCC/searchtypes']\n\n    def parse(self, response, **kwargs):\n        relative_received_link = 'SearchListing/RECEIVED'\n        received_link = response.urljoin(relative_received_link)\n        yield Request(url=received_link, callback=parse_form)\n\n        relative_decision_due_link = 'SearchListing/DUE'\n        decision_due_link = response.urljoin(relative_decision_due_link)\n        yield Request(url=decision_due_link, callback=parse_form)\n\n        relative_decision_made_link = 'SearchListing/MADE'\n        decision_made_link = response.urljoin(relative_decision_made_link)\n        yield Request(url=decision_made_link, callback=parse_form)\n\n\ndef parse_form(response):\n    form_xpath = '//div[@class=\"container body-content\"]//form'\n    form_data = {\n        'RdoTimeLimit': '42'\n    }\n    yield FormRequest.from_response(response=response,\n                                    formxpath=form_xpath,\n                                    formdata=form_data,\n                                    callback=parse_page)\n\n\ndef parse_page(response):\n    relative_file_links = response.xpath('//td/a/@href').extract()\n    for relative_file_link in relative_file_links:\n        file_link = response.urljoin(relative_file_link)\n        yield Request(url=file_link, callback=parse_file)\n\n    relative_next_link = response.xpath('//li[@class=\"PagedList-skipToNext\"]//a/@href').extract_first()\n    next_link = response.urljoin(relative_next_link)\n    yield Request(url=next_link, callback=parse_page)\n\n\ndef parse_file(response):\n    agents_style = response.xpath('//input[@title=\"Show Agents Popup\"]/@style').extract_first()\n    # require both style markers before treating the agent details as visible\n    if 'display: inline' in agents_style and 'visibility: visible' in agents_style:\n        name = process_data(response.xpath('//th[normalize-space()=\"Name :\"]/following-sibling::td/text()').extract_first())\n        phone = process_data(response.xpath('//th[normalize-space()=\"Phone :\"]/following-sibling::td/text()').extract_first())\n        fax = process_data(response.xpath('//th[normalize-space()=\"Fax :\"]/following-sibling::td/text()').extract_first())\n        email = process_data(response.xpath('//th[normalize-space()=\"e-mail :\"]/following-sibling::td/text()').extract_first())\n        address_one = process_data(response.xpath('//th[normalize-space()=\"Address :\"]/following-sibling::td/text()').extract_first())\n        address_two = response.xpath('//th[normalize-space()=\"Address :\"]/parent::tr/following-sibling::tr/td/text()')[0:3].extract()\n        address = address_one + ' ' + ' '.join(address_two)\n\n        yield {'Name': name,\n               'Address': address,\n               'Phone': phone,\n               'Fax': fax,\n               'Email': email}\n\n\ndef process_data(data):\n    if data:\n        data = data.strip()\n    else:\n        data = ''\n    return data\n","sub_path":"E_Planning_Spider/E_Planning_Spider/spiders/E_Planning.py","file_name":"E_Planning.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534361134","text":"import requests\nimport json\nr = requests.get(\"https://financialmodelingprep.com/api/v3/stock/real-time-price/AAPL\")\nvar = r.content\nprint(var)\n
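# decode the JSON payload from the raw response bytes\nparsed_data = 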
json.loads(var)\nprint(parsed_data['symbol'])\n\nvar2 = var.split()\nprint(var2[6])\n\n","sub_path":"OOP_Concepts/http_requester.py","file_name":"http_requester.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637676211","text":"# -*- coding: utf-8 -*-\nimport os\nimport tempfile\nimport pytz\n\nimport datetime as dt\nfrom datetime import timedelta\n\nfrom openpyxl import load_workbook\n\nfrom odoo import models, fields, api, exceptions, _, SUPERUSER_ID\n\n\ndef user_time(utc_dt, user_tz):\n local_tz = pytz.timezone(user_tz)\n local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)\n return local_tz.normalize(local_dt)\n\n\nclass TimesheetWizard(models.TransientModel):\n _name = 'timesheet.wizard'\n\n period_start = fields.Date(string='Period Start')\n period_end = fields.Date(string='Period End')\n\n employee_id = fields.Many2one('hr.employee', string='Employee', default=lambda self: self._get_employee_id())\n outsource = fields.Selection(string='Outsource', selection=[\n ('tasc_timesheet.xlsx', 'TASC Outsourcing'),\n ('innovation_timesheet.xlsx', 'Innovation'),\n ('reach_timesheet.xlsx', 'Reach'),\n ])\n\n @api.model\n def _get_employee_id(self):\n employee = self.env['hr.employee'].search([('user_id.id', '=', self.env.user.id)])\n return employee.id\n\n @api.model\n def _get_timesheet_template(self, filename):\n form_template_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'timesheet_template'))\n return os.path.join(form_template_path, filename)\n\n @api.model\n def _get_workbook(self, template_path):\n return load_workbook(template_path)\n\n # TODO TIMEZONE FOR DATETIME\n @api.multi\n def _generate_timesheet(self, workbook, res):\n row = 8\n period_start = fields.Date.from_string(res.period_start)\n period_end = fields.Date.from_string(res.period_end)\n days = (period_end - period_start).days + 1\n\n # R8 is the first cell for the dates hidden\n # R38 is the last cell for the dates hidden\n # G is check IN column\n # H is check OUT column\n wb = workbook\n ws = wb.get_sheet_by_name('main')\n\n ws.cell(\"T1\").value = res.employee_id.name\n ws.cell(\"T2\").value = res.sudo().employee_id.job_id.name\n ws.cell(\"T3\").value = res.employee_id.barcode\n ws.cell(\"T6\").value = period_start\n ws.cell(\"U6\").value = period_end\n ws.cell(\"V1\").value = \"%s/%s\" % (res.sudo().employee_id.job_id.name, res.sudo().employee_id.department_id.name)\n ws.cell(\"V2\").value = res.sudo().employee_id.parent_id.name\n ws.cell(\"V3\").value = \"%s/%s\" % (\n res.sudo().employee_id.parent_id.job_id.name, res.sudo().employee_id.parent_id.department_id.name)\n\n local_tz = pytz.timezone(self.env.user.tz)\n for i in range(0, days):\n day_date = period_start + timedelta(days=i)\n check_in_min = dt.datetime(year=day_date.year, month=day_date.month, day=day_date.day, hour=0, minute=0,\n second=0)\n check_in_min = pytz.utc.localize(check_in_min, is_dst=False).astimezone(local_tz)\n check_in_max = dt.datetime(year=day_date.year, month=day_date.month, day=day_date.day, hour=23, minute=59,\n second=59)\n check_in_max = pytz.utc.localize(check_in_max, is_dst=False).astimezone(local_tz)\n\n query_attendance = [\n ('check_in', '>=', check_in_min.astimezone(pytz.utc).strftime('%Y-%m-%d')),\n ('check_in', '<=', check_in_max.astimezone(pytz.utc).strftime('%Y-%m-%d')),\n ('check_out', '!=', False),\n ('employee_id.user_id.id', '=', self.env.user.id)]\n\n attendance_check_in = 
self.env['hr.attendance'].search(query_attendance, order='check_in asc', limit=1)\n attendance_check_out = self.env['hr.attendance'].search(query_attendance, order='check_out desc', limit=1)\n\n if len(attendance_check_in) == 0:\n check_in = dt.datetime.now(tz=local_tz).replace(hour=0, minute=0, second=0, microsecond=0)\n remark = ''\n else:\n check_in = fields.Datetime.from_string(attendance_check_in.check_in)\n check_in = pytz.utc.localize(check_in, is_dst=False).astimezone(local_tz)\n remark = '' if not attendance_check_in.remark else attendance_check_in.remark\n\n if len(attendance_check_out) == 0:\n check_out = dt.datetime.now(tz=local_tz).replace(hour=0, minute=0, second=0, microsecond=0)\n else:\n check_out = fields.Datetime.from_string(attendance_check_out.check_out)\n check_out = pytz.utc.localize(check_out, is_dst=False).astimezone(local_tz)\n\n query_leave = [\n ('date_from', '<=', day_date.strftime('%Y-%m-%d')),\n ('date_to', '>=', day_date.strftime('%Y-%m-%d')),\n ('employee_id.user_id.id', '=', self.env.user.id)]\n\n leave = self.env['hr.holidays'].search(query_leave)\n\n leave_types = {'Annual Leaves': 'AL',\n 'Sick Leaves': 'SL'}\n\n if not leave and len(attendance_check_in) == 0 and len(attendance_check_out) == 0 and \\\n day_date.isoweekday() not in [5, 6]:\n leave = 'NA'\n\n elif not leave:\n leave = ''\n\n else:\n leave = leave_types[leave.holiday_status_id.name]\n\n ws.cell(row=row + i, column=19).value = day_date\n ws.cell(row=row + i, column=20).value = check_in.time()\n ws.cell(row=row + i, column=21).value = check_out.time()\n ws.cell(row=row + i, column=22).value = leave\n ws.cell(row=row + i, column=23).value = remark\n\n return wb\n\n @api.model\n def _save_temp(self, workbook):\n temp_dir = tempfile.mkdtemp()\n temp_file = os.path.join(temp_dir, 'timesheet.xlsx')\n workbook.save(temp_file)\n return temp_file\n\n @api.model\n def _attach(self, full_path, res_id):\n # Attach generated document to filestore\n ir_attach = self.env['ir.attachment']\n\n with open(full_path, 'r') as fp:\n data = fp.read().encode('base64')\n filename = os.path.split(full_path)[1]\n values = dict(\n name=filename,\n datas_fname=filename,\n res_id=res_id,\n res_model=self._name,\n type='binary',\n datas=data,\n )\n ir_attach.create(values)\n\n @api.model\n @api.returns('self', lambda value: value.id)\n def create(self, vals):\n res = super(TimesheetWizard, self).create(vals)\n template = self._get_timesheet_template(res.outsource)\n workbook = self._get_workbook(template)\n workbook = self._generate_timesheet(workbook, res)\n workbook_path = self._save_temp(workbook)\n self._attach(workbook_path, res.id)\n os.remove(workbook_path)\n return res\n\n @api.model\n def _transient_vacuum(self, force=False):\n attachments = self.env['ir.attachment'].search([('res_model', '=', self._name)])\n for attachment in attachments:\n attachment.unlink()\n\n return super(TimesheetWizard, self)._transient_vacuum(force)\n","sub_path":"wizards/timesheet_wizard.py","file_name":"timesheet_wizard.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"654395120","text":"from django.shortcuts import render_to_response\nfrom empresa.models import Cliente\nfrom empresa.models import Veiculo\nfrom empresa.models import Mercadoria\nfrom rotas.forms import CriarRotaForm\nfrom rotas.models import Rotas\nfrom rotas.models import ClienteRotas\nfrom rotas.models import VeiculoRotas\nfrom rotas.models import MatrizDistancia\nfrom 
rotas.models import MatrizTempo\nfrom rotas.models import MatrizPoupanca\nfrom rotas.models import MatrizOrdenada\nfrom rotas.models import Algoritmo\nfrom rotas.models import AtribuirPosicaoCliente\nfrom rotas.models import Trajetos\nfrom rotas.models import TrajetosClientes\nimport json\nimport urllib.request\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.views.decorators.csrf import csrf_exempt\n\ndef rotas(request):\n\n context = RequestContext(request)\n registered = False\n if request.method == 'POST':\n criarRotas_form = CriarRotaForm(data=request.POST)\n if criarRotas_form.is_valid():\n criarRotas = criarRotas_form.save()\n criarRotas.empresa_id = request.session['idEmpresa']\n criarRotas.save()\n registered = True\n\n return HttpResponseRedirect('/empresa/rotas/')\n else:\n print(criarRotas_form.errors)\n else:\n criarRotas_form = CriarRotaForm()\n\n rotas = Rotas.objects.filter(empresa_id=request.session['idEmpresa'])\n return render_to_response('rotas.html',{'criarRotas_form': criarRotas_form,'registered': registered, 'rotas': rotas},context)\n\ndef criarRota(request, idRota):\n context = RequestContext(request)\n idEmpresa = request.session['idEmpresa']\n id = idRota\n #Carrega a rota selecionada\n rotas = Rotas.objects.filter(empresa_id=request.session['idEmpresa'],id=idRota)\n\n #carrega o objeto de todos os ClientesRotas, idRota, cliente_id, mercadoria_id\n clientesRotas = ClienteRotas.objects.filter(idRotas=idRota)\n i=0\n\n dadosCliente = []\n for cliente in clientesRotas:\n if True:\n x = Cliente.objects.get(id=cliente.cliente_id, empresa_id = idEmpresa)\n dadosCliente.append(x)\n\n for t in dadosCliente:\n print(t.nome)\n\n dadosMercadoria = []\n for mercadoria in clientesRotas:\n if True:\n m = Mercadoria.objects.get(id=mercadoria.mercadoria_id, empresa_id = idEmpresa)\n dadosMercadoria.append(m)\n\n for t in dadosMercadoria:\n print(t.quantidade)\n\n\n veiculoRotas = VeiculoRotas.objects.filter(idRotas=idRota)\n\n dadosVeiculo = []\n\n for veiculo in veiculoRotas:\n if True:\n v = Veiculo.objects.get(id=veiculo.veiculo_id)\n dadosVeiculo.append(v)\n\n for t in dadosVeiculo:\n print(\"matricula\"+t.matricula)\n\n listaClientes = Cliente.objects.all().filter(empresa_id = idEmpresa)\n listaMercadorias = Mercadoria.objects.all().filter(empresa_id = idEmpresa)\n clientesMercadorias = []\n for c in listaClientes:\n for m in listaMercadorias:\n if c.bi == m.cliente and m.estado == 0:\n clientesMercadorias.append(c)\n\n\n return render_to_response('criarRota.html',{'dadosCliente' : dadosCliente, 'dadosVeiculo' : dadosVeiculo, 'dadosMercadoria': dadosMercadoria,'id': id, 'clientesMercadorias': clientesMercadorias} ,context)\n\ndef adicionarCliente(request, idRota):\n id = idRota\n if request.method == 'POST':\n idEmpresa = request.session['idEmpresa']\n for c in request.POST.getlist('clienteMercadoria'):\n b = Cliente.objects.get(id=c, empresa_id = idEmpresa)\n x = Mercadoria.objects.get(cliente=b.bi, empresa_id = idEmpresa)\n x.estado = 1\n r = ClienteRotas(idRotas=id, cliente_id=c, mercadoria_id=x.id, empresa_id = idEmpresa)\n x.save()\n r.save()\n return HttpResponseRedirect('/rotas/criarRota/'+id+'/')\n else:\n return HttpResponseRedirect('/rotas/criarRota/'+id+'/')\n\ndef gerarRota(request, idRota):\n\n clientesRotas = ClienteRotas.objects.filter(idRotas=idRota)\n\n dadosCliente = []\n dadosMercadoria = []\n\n nClientes=1\n for cliente in 
clientesRotas:\n c = Cliente.objects.get(id=cliente.cliente_id)\n dadosCliente.append(c)\n q = Mercadoria.objects.get(id=cliente.mercadoria_id)\n dadosMercadoria.append(q)\n apc = AtribuirPosicaoCliente(idRotas=idRota, idCliente=c.id, posicao=nClientes, corX=c.corX, corY=c.corY)\n apc.save()\n nClientes += 1\n\n quantidadeMaxima = 100\n tempoMaximo = 1800000\n\n\n for i in range(0, nClientes):\n for j in range(0, nClientes):\n if j != i and i=1 and algoritmo1.ligacaoRaiz>=1 and quantidadeOcupada <= quantidadeMaxima and novoTempo <= tempoMaximo:\n print(\"Matriz I[\"+str(matrizOrdenada.i)+\"] - J[\"+str(matrizOrdenada.j)+\"]\")\n print(\"Rotulo : \"+str(algoritmo.rotulo))\n print(\"Rotulo1 : \"+str(algoritmo1.rotulo))\n print(\"Carga : \"+str(algoritmo.carga))\n print(\"Carga1 : \"+str(algoritmo1.carga))\n print(quantidadeOcupada)\n count += 1\n algoritmo.ligacaoRaiz -= 1\n algoritmo1.ligacaoRaiz -= 1\n\n\n if algoritmo.caminho == 0:\n algoritmo.caminho = caminho\n caminho += 1\n if algoritmo1.caminho == 0:\n algoritmo1.caminho = caminho\n caminho += 1\n\n if algoritmo.rotulo <= algoritmo1.rotulo:\n aux = algoritmo1.rotulo\n algoritmo1.rotulo = algoritmo.rotulo\n aux2 = algoritmo.rotulo\n else:\n aux = algoritmo.rotulo\n algoritmo.rotulo = algoritmo1.rotulo\n aux2 = algoritmo.rotulo\n\n algoritmo.i = matrizOrdenada.i\n algoritmo.j = matrizOrdenada.j\n algoritmo1.i = matrizOrdenada.i\n algoritmo1.j = matrizOrdenada.j\n algoritmo.save()\n algoritmo1.save()\n '''count = 1\n d = Algoritmo.objects.all().filter(idRotas=idRota)\n for algoritmo3 in d:\n if algoritmo3.rotulo == aux:\n if algoritmo.posicao == 1 and algoritmo1.posicao == 4:\n print(count)\n count += 1\n algoritmo3.rotulo = aux\n algoritmo3.save()'''\n\n\n matrizOrdenada.rotulo = aux2\n print(\"editar Matriz\")\n matrizOrdenada.save()\n\n c = Algoritmo.objects.all().filter(idRotas=idRota)\n for algoritmo2 in c:\n if algoritmo2.rotulo == algoritmo.rotulo or algoritmo2.rotulo == algoritmo1.rotulo or algoritmo2.rotulo == aux:\n if algoritmo2.rotulo == aux:\n algoritmo2.rotulo = aux2\n algoritmo2.i = matrizOrdenada.i\n algoritmo2.j = matrizOrdenada.j\n algoritmo2.carga = quantidadeOcupada\n if algoritmo.caminho == 0:\n algoritmo.caminho = caminho\n caminho += 1\n #print(\"Algoritmo 2 carga:\"+str(algoritmo2.carga)+\" Posicao :\"+str(algoritmo2.))\n algoritmo2.tempo = novoTempo\n algoritmo2.save()\n\n\n carrega = MatrizOrdenada.objects.all().filter(idRotas=idRota)\n for z in carrega:\n if z.rotulo == aux:\n z.rotulo = aux2\n print(\"carrega\")\n z.save()\n\n\n #FIM ALGORITMO\n\n\n\n for r in Algoritmo.objects.all().filter(idRotas=idRota):\n if r.ligacaoRaiz == 1:\n #c = AtribuirPosicaoCliente.objects.get(idRotas=idRota, posicao = r.posicao)\n t = Trajetos(idRotas=idRota, rotulo=r.rotulo, i = 0, j = r.posicao )\n t.save()\n if r.ligacaoRaiz == 2:\n t = Trajetos(idRotas=idRota, rotulo=r.rotulo, i = 0, j = r.posicao )\n t.save()\n f = Trajetos(idRotas=idRota, rotulo=r.rotulo, i = r.posicao, j = 0 )\n f.save()\n\n for z in MatrizOrdenada.objects.all().filter(idRotas=idRota):\n if z.rotulo != 0:\n #c = AtribuirPosicaoCliente.objects.get(idRotas=idRota, posicao = z.i)\n t = Trajetos(idRotas=idRota, rotulo=z.rotulo, i = z.i, j = z.j )\n t.save()\n\n for y in Trajetos.objects.all().filter(idRotas=idRota):\n if y.i == 0 and y.j != 0:\n d = AtribuirPosicaoCliente.objects.get(idRotas=idRota, posicao = y.j)\n y.iCliente = 0\n y.jCliente = d.idCliente\n if y.j == 0 and y.i != 0:\n c = AtribuirPosicaoCliente.objects.get(idRotas=idRota, posicao = y.i)\n 
y.iCliente = c.idCliente\n y.jCliente = 0\n if y.i != 0 and y.j != 0:\n c = AtribuirPosicaoCliente.objects.get(idRotas=idRota, posicao = y.i)\n d = AtribuirPosicaoCliente.objects.get(idRotas=idRota, posicao = y.j)\n y.iCliente = c.idCliente\n y.jCliente = d.idCliente\n y.save()\n\n rotulo = 1\n jaEntrou = 0\n anterior = 0\n sai = 1\n volta = 0\n primeiro = 0\n count = 0\n valorPrimeiraVez = 0\n count2 = 0\n trajeto = []\n contaVolta = []\n for i in range(1, nClientes):\n for a in Trajetos.objects.all().filter(idRotas=idRota, rotulo=rotulo):\n if a.i != 0 and a.j != 0:\n count += 1\n\n count2 += 1\n\n print(\"Contador = \"+str(count2))\n\n while sai != 0 and count2 > 0:\n for s in Trajetos.objects.filter(idRotas=idRota, rotulo=rotulo):\n if s.rotulo == rotulo:\n if s.i == 0 and primeiro == 0 and jaEntrou == 0 and sai == 1 and valorPrimeiraVez != s.j:\n volta += 1\n trajeto.append(rotulo)\n contaVolta.append(volta)\n print(\"CAMINHO: \"+str(volta))\n print(\"Sai da origem ate a posicção J[ \"+str(s.j)+\"]\")\n print(\"J : \"+str(s.j))\n primeiro = 1\n jaEntrou = 1\n anterior = s.j\n valorPrimeiraVez = s.j\n if s.i == anterior and anterior != 0 and jaEntrou == 0 and sai == 1 and count != 0:\n print(\"Sai da posição i[\"+str(anterior)+\"] ate a posicaçao J[\"+str(s.j)+\"]\")\n jaEntrou = 1\n anterior = s.j\n count -= 1\n if s.j == anterior and anterior != 0 and jaEntrou == 0 and sai == 1 and count != 0:\n print(\"Sai da posição i[\"+str(anterior)+\"] ate a posicaçao J[\"+str(s.i)+\"]\")\n jaEntrou = 1\n anterior = s.i\n count -= 1\n if s.i == 0 and primeiro == 1 and jaEntrou == 0 and count == 0 and sai == 1:\n jaEntrou = 1\n print(\"Sai da posição J[\"+str(anterior)+\"] ate à origem\")\n sai = 0\n jaEntrou = 0\n count2 = 0\n sai = 1\n jaEntrou = 0\n primeiro = 0\n rotulo += 1\n\n #https://maps.googleapis.com/maps/api/directions/json?origin=40.213453,-8.451979&destination=40.213453,-8.451979&waypoints=40.213453,-8.451979\n\n #rota = Rotas.objects.get(id=idRota)\n #rota.estado = 1\n #rota.save()\n valorZipado = zip(trajeto, contaVolta)\n\n return render_to_response('gerarRota.html',{ 'valorZipado' : valorZipado, 'idRota':idRota})\n\n@csrf_exempt\ndef verMapaRota(request, idRota):\n r = request.POST['mapa']\n rotulo = int(r)\n\n #rotulo = 1\n jaEntrou = 0\n anterior = 0\n sai = 1\n volta = 0\n primeiro = 0\n count = 0\n valorPrimeiraVez = 0\n count2 = 0\n trajeto = []\n contaVolta = []\n\n for a in Trajetos.objects.all().filter(idRotas=idRota, rotulo=rotulo):\n if a.i != 0 and a.j != 0:\n count += 1\n\n count2 += 1\n\n waypoints = []\n print(\"Contador = \"+str(count2))\n while sai != 0 and count2 > 0:\n for s in Trajetos.objects.filter(idRotas=idRota, rotulo=rotulo):\n if s.rotulo == rotulo:\n if s.i == 0 and primeiro == 0 and jaEntrou == 0 and sai == 1 and valorPrimeiraVez != s.j:\n volta += 1\n trajeto.append(rotulo)\n contaVolta.append(volta)\n print(\"CAMINHO: \"+str(volta))\n print(\"Sai da origem ate a posicção J[ \"+str(s.j)+\"]\")\n print(\"J : \"+str(s.j))\n primeiro = 1\n jaEntrou = 1\n anterior = s.j\n valorPrimeiraVez = s.j\n valorOrigem = '40.213453, -8.451979'\n c = Cliente.objects.get(id=s.jCliente)\n coordenada = str(c.corX)+\",\"+str(c.corY)\n waypoints.append(coordenada)\n if s.i == anterior and anterior != 0 and jaEntrou == 0 and sai == 1 and count != 0:\n print(\"Sai da posição i[\"+str(anterior)+\"] ate a posicaçao J[\"+str(s.j)+\"]\")\n jaEntrou = 1\n anterior = s.j\n count -= 1\n c = Cliente.objects.get(id=s.jCliente)\n coordenada = str(c.corX)+\",\"+str(c.corY)\n 
waypoints.append(coordenada)\n if s.j == anterior and anterior != 0 and jaEntrou == 0 and sai == 1 and count != 0:\n print(\"Sai da posição i[\"+str(anterior)+\"] ate a posicaçao J[\"+str(s.i)+\"]\")\n jaEntrou = 1\n anterior = s.i\n count -= 1\n c = Cliente.objects.get(id=s.iCliente)\n coordenada = str(c.corX)+\",\"+str(c.corY)\n waypoints.append(coordenada)\n if s.i == 0 and primeiro == 1 and jaEntrou == 0 and count == 0 and sai == 1:\n jaEntrou = 1\n print(\"Sai da posição J[\"+str(anterior)+\"] ate à origem\")\n sai = 0\n c = Cliente.objects.get(id=anterior)\n #coordenada = str(c.corX)+\",\"+str(c.corY)\n #waypoints.append(coordenada)\n valorDestinho = '40.213453, -8.451979'\n jaEntrou = 0\n\n\n return render_to_response('verMapaRota.html',{'valorOrigem':valorOrigem, 'valorDestino': valorDestinho, 'waypoints':waypoints})","sub_path":"rotas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418876890","text":"import os\n\nfrom typing import Any, Dict, Optional\nfrom enum import IntEnum\nimport contextlib\nimport random\nimport numpy as np\nimport inspect\n\nfrom fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW\nfrom fastNLP.envs.utils import get_global_seed\nfrom fastNLP.envs import (\n get_global_rank,\n FASTNLP_BACKEND_LAUNCH,\n FASTNLP_GLOBAL_SEED,\n)\nfrom fastNLP.core.samplers import ReproducibleBatchSampler\nfrom fastNLP.core.utils import auto_param_call\nfrom fastNLP.core.log import logger\n\nif _NEED_IMPORT_ONEFLOW:\n import oneflow\n from oneflow.nn import Module\n from oneflow.utils.data import DataLoader\n from oneflow.utils.data import RandomSampler as oneflowRandomSampler\n from oneflow.utils.data import SequentialSampler as oneflowSequentialSampler\n from oneflow.utils.data import BatchSampler as oneflowBatchSampler\nelse:\n from fastNLP.core.utils.dummy_class import DummyClass as Module\n\n\n__all__ = [\n 'oneflow_seed_everything',\n]\n\ndef oneflow_seed_everything(seed: int = None, add_global_rank_to_seed: bool = True) -> int:\n r\"\"\"\n 为 **oneflow**、**numpy**、**python.random** 伪随机数生成器设置种子。\n\n :param seed: 全局随机状态的整数值种子。如果为 ``None`` 则会根据时间戳生成一个种子。\n :param add_global_rank_to_seed: 在分布式训练中,是否在不同 **rank** 中使用不同的随机数。\n 当设置为 ``True`` 时,**fastNLP** 会将种子加上当前的 ``global_rank``。\n \"\"\"\n max_seed_value = np.iinfo(np.uint32).max\n min_seed_value = np.iinfo(np.uint32).min\n\n if seed is None:\n if os.getenv(FASTNLP_BACKEND_LAUNCH) == \"1\":\n seed = 42\n else:\n seed = get_global_seed()\n logger.info(f\"'FASTNLP_GLOBAL_SEED' is set to {seed} automatically.\")\n if not isinstance(seed, int):\n seed = int(seed)\n\n if not (min_seed_value <= seed <= max_seed_value):\n logger.rank_zero_warning(\"Your seed value is too big or too small for numpy, we will choose a random seed for you.\")\n seed %= max_seed_value\n\n os.environ[FASTNLP_GLOBAL_SEED] = f\"{seed}\"\n if add_global_rank_to_seed:\n seed += get_global_rank()\n\n random.seed(seed)\n np.random.seed(seed)\n oneflow.manual_seed(seed)\n oneflow.cuda.manual_seed_all(seed)\n return seed\n\n\nclass ForwardState(IntEnum):\n TRAIN = 0\n VALIDATE = 1\n TEST = 2\n PREDICT = 3\n\n\nclass _DDPWrappingModel(Module):\n \"\"\"\n 该函数用于 DDP 训练时处理用户自己定制的 train_step 等函数;\n 之所以要使用这一额外的包裹模型,是因为在使用 DDP 时,必须使用 DistributedDataParallel 的 forward 函数才能实现正常的运行;\n 另一方面,我们要求用户在使用我们的框架时,需要针对不用的模式实现不同的处理函数,例如 'train_step', 'evaluate_step' 等;\n 然而,当使用 DistributedDataParallel 包裹 model 后,模型看不见其除了 forward 之外的方法;并且当我们尝试在训练过程中主动提取\n 
`model = model.module`,这同样会导致错误,会使得每一个gpu上的模型参数不同;\n\n    因此出于以上考虑,我们实现了这一函数;\n    对于更详细的解释,可以参考 'pytorch_lightning' 的 ddp 的设计;\n    \"\"\"\n\n    def __init__(self, model: Module):\n        super(_DDPWrappingModel, self).__init__()\n        self.model = model\n\n    def forward(self, batch, **kwargs) -> Dict:\n        \"\"\"\n        pytorch lightning 实现了先 unwrapping_model 的操作,但是感觉对于我们来说没有什么必须要,先写个注释放这里,之后有需求了再看;\n        \"\"\"\n        fn = kwargs.pop(\"fastnlp_fn\")\n        signature_fn = kwargs.pop(\"fastnlp_signature_fn\")\n        wo_auto_param_call = kwargs.pop(\"wo_auto_param_call\")\n\n        if isinstance(batch, Dict) and not wo_auto_param_call:\n            return auto_param_call(fn, batch, signature_fn=signature_fn)\n        else:\n            return fn(batch)\n\n\nclass DummyGradScaler:\n\n    def __init__(self, *args, **kwargs):\n        pass\n\n    def get_scale(self):\n        return 1.0\n\n    def is_enabled(self):\n        return False\n\n    def scale(self, outputs):\n        return outputs\n\n    def step(self, optimizer, *args, **kwargs):\n        optimizer.step(*args, **kwargs)\n\n    def update(self, new_scale=None):\n        pass\n\n    def unscale_(self, optimizer):\n        pass\n\n    def load_state_dict(self, state_dict):\n        pass\n\n    def state_dict(self):\n        return {}\n\n\ndef _build_fp16_env(dummy=False):\n    # NOTE (added comment): fp16/amp support is effectively stubbed out here -- the bare\n    # 'return' below short-circuits the function, so the code after it never runs.\n    return\n    if dummy:\n        autocast = contextlib.ExitStack\n        GradScaler = DummyGradScaler\n    else:\n        if not oneflow.cuda.is_available():\n            raise RuntimeError(\"Oneflow is not installed in gpu version, please use device='cpu'.\")\n        if oneflow.cuda.get_device_capability(0)[0] < 7:\n            logger.rank_zero_warning(\n                \"NOTE: your device does NOT support faster training with fp16, \"\n                \"please switch to FP32 which is likely to be faster\"\n            )\n        try:\n            from oneflow.cuda.amp import autocast, GradScaler\n        except ImportError:\n            raise RuntimeError(\"oneflow version too low: amp utilities (autocast/GradScaler) are unavailable\")\n    return autocast, GradScaler\n\n\ndef replace_sampler(dataloader: \"DataLoader\", sampler):\n    r\"\"\"\n    替换 sampler (初始化一个新的 dataloader 的逻辑在于):\n\n    用户可能继承了 dataloader,定制了自己的 dataloader 类,这也是我们为什么先 `inspect.signature(dataloader)` 而不是直接\n    `inspect.signature(DataLoader)` 的原因,因此同时注意到我们在外层重新初始化一个 dataloader 时也是使用的用户传进来的 dataloader\n    的类,而不是直接的 DataLoader;\n\n    如果需要定制自己的 dataloader,保证以下两点:\n\n    1. 在 __init__ 方法中加入 **kwargs,这是为了方便我们将 sampler 插入到具体的 DataLoader 的构造中;\n    2. 
在 __init__ 方法中出现的参数,请务必挂为同样名字的实例属性,例如 self.one_arg_name = one_arg_name,这是因为我们只能通过属性\n 来获取实际的参数的值;\n\n \"\"\"\n\n # 拿到实例属性;\n instance_attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith('_')}\n\n # 'multiprocessing_context' 是 user-defined function;\n if getattr(dataloader, 'multiprocessing_context', None) is not None:\n instance_attrs[\"multiprocessing_context\"] = dataloader.multiprocessing_context\n\n # 拿到 dataloader '__init__' 函数的默认函数签名;\n init_params = dict(inspect.signature(dataloader.__init__).parameters)\n\n # 防止用户的 DataLoader 是继承了 oneflow 的 DataLoader,然后还是使用了 **kwargs 的方式对父类传参数\n has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items())\n if has_variadic_kwargs and isinstance(dataloader, DataLoader):\n # 防止用户写入了 super().__init__(**kwargs)\n for key, value in dict(inspect.signature(DataLoader.__init__).parameters).items():\n if key not in init_params and key != 'self':\n init_params[key] = value\n\n # 如果初始化dataloader所使用的参数不是默认值,那么我们需要将其记录下来用于重新初始化时设置;\n non_default_params = {name for name, p in init_params.items() if\n name in instance_attrs and p.default != instance_attrs[name]}\n # add `dataset` as it might have been replaced with `*args`\n non_default_params.add(\"dataset\")\n\n reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params}\n if isinstance(dataloader, DataLoader):\n reconstruct_args.update({\"sampler\": sampler, \"shuffle\": False, \"batch_sampler\": None})\n\n batch_sampler = getattr(dataloader, \"batch_sampler\")\n if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler):\n raise RuntimeError(\"It should not be running here, please report a bug to us.\")\n\n required_args = {\n p.name\n for p in init_params.values()\n if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)\n and p.default is p.empty\n and p.name not in reconstruct_args\n }\n\n # 在 attribute 中没有找到这些参数,导致了没有办法重新初始化\n if required_args:\n required_args = sorted(required_args)\n dataloader_self_name = dataloader.__class__.__name__\n raise Exception(\n f\"Need to inject arguments {required_args} into the __init__ of `{dataloader_self_name}`. 
\"\n f\"But they are not found in the attribute of `{dataloader_self_name}`, fastNLP cannot determine its \"\n f\"value when try to reinitialize `{dataloader_self_name}`, please add `{required_args}` to be \"\n f\"`{dataloader_self_name}`'s attribute.\"\n )\n\n # 这种错误针对的是传入的 dataloader 不是直接的 DataLoader,而是定制了 DataLoader,但是 __init__ 中没有 **kwargs;\n if not has_variadic_kwargs:\n # the dataloader signature does not allow keyword arguments that need to be passed\n missing_kwargs = reconstruct_args.keys() - init_params.keys()\n if missing_kwargs:\n missing_kwargs = sorted(missing_kwargs)\n dataloader_self_name = dataloader.__class__.__name__\n raise Exception(\n f\"The parameter:{missing_kwargs} needed to reinitialize `{dataloader_self_name}` is not found.\"\n )\n # 如果没有kwargs,则保证一下只传入需要的参数\n if not isinstance(dataloader, DataLoader):\n reconstruct_args = {key:value for key,value in reconstruct_args.items() if key in init_params}\n\n return type(dataloader)(**reconstruct_args)\n\n\ndef replace_batch_sampler(dataloader, new_batch_sampler):\n r\"\"\"\n 替换一个 dataloader 的 batch_sampler;\n \"\"\"\n params_keys = [k for k in dataloader.__dict__.keys() if not k.startswith(\"_\")]\n for k in [\"batch_size\", \"sampler\", \"drop_last\", \"batch_sampler\", \"dataset_kind\"]:\n if k in params_keys:\n params_keys.remove(k)\n params = {k: getattr(dataloader, k) for k in params_keys}\n params[\"batch_sampler\"] = new_batch_sampler\n\n if not isinstance(dataloader, DataLoader):\n init_params = dict(inspect.signature(dataloader.__init__).parameters)\n has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items())\n if not has_variadic_kwargs:\n params = {key:value for key,value in params.items() if key in init_params}\n\n return type(dataloader)(**params)\n\n\ndef optimizer_state_to_device(state, device):\n r\"\"\"\n 将一个 ``optimizer`` 的 ``state_dict`` 迁移到对应的设备。\n\n :param state: :func:`optimzier.state_dict` 获取的 state_dictt\n :param device: 要迁移到的目的设备。\n :return: 迁移后的新的 state_dict。\n \"\"\"\n new_state = {}\n for name, param in state.items():\n if isinstance(param, dict):\n new_state[name] = optimizer_state_to_device(param, device)\n elif isinstance(param, oneflow.Tensor):\n new_state[name] = param.to(device).clone()\n else:\n new_state[name] = param\n return new_state\n\n\ndef _check_dataloader_args_for_distributed(args, controller='Trainer'):\n \"\"\"\n 检查 dataloader 的 sampler 情况,如果用户替换了自己定制的 sampler ,为了防止\n 在分布式训练中出现错误会报错。\n \"\"\"\n error_flag = (type(args.sampler) not in {oneflowRandomSampler, oneflowSequentialSampler})\n if controller == 'Trainer':\n mode = 'training'\n substitution = 'fastNLP.RandomSampler'\n error_flag = (type(args.batch_sampler) != oneflowBatchSampler) or error_flag\n else: # Evaluator\n mode = 'evaluation'\n substitution = 'fastNLP.UnrepeatedSequentialSampler'\n if error_flag:\n raise TypeError(f\"Using customized ``batch_sampler`` or ``sampler`` for distributed {mode} may cause \"\n f\"unpredictable problems, because fastNLP will substitute the dataloader's sampler into \"\n f\"``{substitution}``. 
The customized sampler should set for distributed running \"\n f\"before initializing ``{controller}`` , and then set the \"\n f\"parameter ``use_dist_sampler`` of ``{controller}`` to ``False``.\"\n f\"\\n Current batch_sampler: {type(args.batch_sampler)}\"\n f\"\\n Current sampler: {type(args.sampler)}\")\n","sub_path":"fastNLP/core/drivers/oneflow_driver/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81127077","text":"import matplotlib.pyplot as plt\nimport sklearn.datasets as skdata\nimport numpy as np\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn import metrics\n\nnumeros = skdata.load_digits()\ntarget = numeros['target']\nimagenes = numeros['images']\nn_imagenes = len(target)\n\n\ndata = imagenes.reshape((n_imagenes, -1)) # para volver a tener los datos como imagen basta hacer data.reshape((n_imagenes, 8, 8))\nprint(np.shape(data))\n\nscaler = StandardScaler()\nx_2, x_validation, y_2, y_validation = train_test_split(data, target, train_size=0.8)\nx_train, x_test, y_train, y_test = train_test_split(x_2, y_2, train_size=0.5)\n\nx_train = scaler.fit_transform(x_train)\nx_test = scaler.transform(x_test)\nx_validation = scaler.transform(x_validation)\n\ncov = np.cov(x_train.T)\nvalores, vectores = np.linalg.eig(cov)\nvalores = np.real(valores)\nvectores = np.real(vectores)\nii = np.argsort(-valores)\nvalores = valores[ii]\nvectores = vectores[:,ii]\n\nproyeccion_train = np.matmul(x_train,vectores)[:,:30]\nproyeccion_test = np.matmul(x_test,vectores)[:,:30]\nproyeccion_validation = np.matmul(x_validation,vectores)[:,:30]\n\nhiperparametros = np.logspace(-3,2,20)\nscores = []\n\nfor C in hiperparametros:\n clasificador = SVC(C=C)\n clasificador.fit(proyeccion_train,y_train)\n predicciones = clasificador.predict(proyeccion_test)\n scores.append(metrics.f1_score(y_test,predicciones,average='macro'))\n \n\nplt.figure()\nplt.plot(hiperparametros,scores)\nplt.xscale('log')\nplt.scatter(hiperparametros[np.argmax(scores)],np.amax(scores))\n\nmejor_C = hiperparametros[np.argmax(scores)]\nprint(mejor_C)\n\n\nclasificador = SVC(C=mejor_C)\nclasificador.fit(proyeccion_train,y_train)\npredicciones = clasificador.predict(proyeccion_validation)\n\nmatriz = metrics.confusion_matrix(y_validation,predicciones)\n\n\nplt.figure(figsize=(8,8))\nplt.imshow(matriz)\n\nfor i in range(0,10):\n for j in range(0,10):\n plt.text(i-0.5,j,' {:.2f}'.format(float(matriz[i,j])/np.sum(y_validation==i)),fontsize=10)\n \nplt.title('C = {:.2f}'.format(mejor_C))\nplt.axis('off')\nplt.title('C = {:.2f}'.format(mejor_C))\nplt.axis('off')\nplt.savefig('matriz_de_confusion.png')","sub_path":".ipynb_checkpoints/svm_linear-checkpoint.py","file_name":"svm_linear-checkpoint.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"584098300","text":"'''\r\n% [rot_mat] = rotmax (angle)\r\n% To obtain the rotation matrix around the X axis given the\r\n% rotation angle.\r\n% inputs:\r\n% angle\r\n% rotation angle, in radians.\r\n% outputs:\r\n% rot_mat\r\n% rotation matrix (3, 3)\r\n%\r\n\r\n% Valdemir Carrara, Sep, 1998'''\r\n\r\nimport math\r\nimport numpy as np\r\n\r\ndef rotmax(angle):\r\n\r\n\tcoan = math.cos(angle)\r\n\tsian = math.sin(angle)\r\n\r\n\trot_mat = np.array([ [1, 0, 0],\r\n\t\t\t\t\t\t [0, coan, 
sian],\r\n\t\t\t\t\t\t [0, -sian, coan] ])\r\n\r\n\treturn rot_mat\r\n\r\nif __name__ == \"__main__\":\r\n\tprint(rotmax(math.pi/4))\r\n\tprint(rotmax(math.pi/3))","sub_path":"Propat/rotmax.py","file_name":"rotmax.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369238455","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct  1 23:24:57 2018\n\n@author: kennedy\n\"\"\"\n\n__author__ = \"kennedy Czar\"\n__email__ = \"kennedyczar@outlook.com\"\n__version__ = '1.0'\n\nimport pickle\nfrom DATA_PREPROCESS_MAIN import FETCH_DATA, FEATURE_EXTRACTION\nfrom DATA_PREPROCESS_MAIN import PREPROCESS\nfrom PREDICTIVE_MODEL import Classify\n\n'''main function'''\n\nif __name__ == '__main__':\n    path = 'D:\\\\FREELANCER\\\\CATEGORICAL_URI\\\\DATASET'\n    dataset = '\\\\2013_04_21.csv'\n    data, label = FETCH_DATA(path, dataset).fetch()\n    print('Load processed data to pickle')\n    \n    '''Load processed data to pickle'''\n    \n    with open('data.pkl','wb') as f:\n        pickle.dump(data, f)\n    \n    '''load labels to pickle.'''\n    with open('label.pkl', 'wb') as f:\n        pickle.dump(label, f)\n    \n    print('Done..')\n    print(20*'*')\n    \n    features = FEATURE_EXTRACTION(data).extract()\n    print('Load features to pickle')\n    '''save the feature with pickle in current directory.\n    saves us memory because of the dataset's size.'''\n    \n    with open('features.pkl', 'wb') as f:\n        pickle.dump(features, f) #NOTE that features contains all dataset\n    print('Done')\n    \n    from os import chdir\n    chdir('D:\\\\FREELANCER\\\\CATEGORICAL_URI\\\\SCRIPT')\n    \n    '''Note that this part of the code is to split the data into a fragment I can use.\n    I split the data so I can return only 0.1% of the initial data as my usable dataset.\n    \n    Comment out the line to use the whole dataset. Note that this requires a lot of\n    computational power because the dataset is over 1 million.\n    Also you would most likely get a MemoryError.\n    \n    It would be fine if you have a system configuration with >24GB RAM. \n    Mine is 24GB RAM and perhaps if you have something higher than that, you\n    wouldn't have to bother with MemoryErrors.\n    \n    To proceed if all conditions are met, 
do the following.\n \n comment this..\n feature_train, feature_test, label_train, label_test = train_test_split(features, label, test_size = 0.99)\n \n with open('feature.pkl', 'wb') as f:\n pickle.dump(feature, f)\n \n with open('label.pkl', 'wb') as f:\n pickle.dump(label, f)\n \n \n with open('feature.pkl','rb') as f:\n feature = pickle.load(f)\n \n with open('label_train.pkl','rb') as f:\n label = pickle.load(f)\n \n features_train_transfm = PREPROCESS(feature, label).process()\n \n Return model result\n NB(features_train_transfm, label_train)\n \n '''\n \n \n with open('features.pkl','rb') as f:\n features = pickle.load(f)\n \n with open('label.pkl','rb') as f:\n label = pickle.load(f)\n \n '''\n Feature Vectorization:\n features_train_BNB:\n Scale, Vectorize + SelectPercentile\n features_train_BNB_WTHSelector:\n Scale, Vectorize without SelectPercentile\n features_train_MNB:\n CountVectorizer + TfidfVectorizer\n \n The reason for vectorizing using this different approaches\n is to check which maps the features to the label much better.\n \n The better would give high average precision on test data.\n '''\n features_train = PREPROCESS(features, label).process()\n \n '''Return model result'''\n '''\n Binomial Model Output:\n NB(features_train_BNB, label_train)\n '''\n Classify(features_train, label).Support_Vector()\n Classify(features_train, label).RandForest()\n","sub_path":"SCRIPT/ALL_MAIN.py","file_name":"ALL_MAIN.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562871409","text":"import asyncio\nfrom typing import Any\nfrom loguru import logger\n\nfrom vka.base.longpoll import LongPoll\nfrom vka.base.wrapper import Event\nfrom vka.chatbot.checking import CheckingEventForCommand\nfrom vka.chatbot.context import Context\nimport sys\n\nsys.dont_write_bytecode = True\n\n\nclass ABot(LongPoll):\n\n \"\"\"\n param debug:\n Включение debug режима\n param custom_event_name:\n Чтобы бот мог отловить другое событие\n [\n 'wall_post_new', 'message_edit'\n ]\n param custom_event_func:\n ctx - обязательный параметр\n async def custom_event_func(ctx: Context): ...\n param custom_func:\n async def custom_func(bot: ABot): ...\n\n \"\"\"\n\n def run(\n self, debug: bool = False,\n custom_event_name: list[str] = None,\n custom_event_func=None,\n custom_func=None\n ):\n try:\n asyncio.run(\n self.async_run(\n debug=debug,\n custom_event_name=custom_event_name,\n custom_event_func=custom_event_func,\n custom_func=custom_func,\n )\n )\n except (KeyboardInterrupt, SystemExit):\n return\n\n async def async_run(\n self, debug: bool = False,\n custom_event_name: list[str] = None,\n custom_event_func=None,\n custom_func=None,\n ):\n await self.async_init()\n self.custom_event_name = custom_event_name\n self.custom_event_func = custom_event_func\n if custom_func is not None:\n asyncio.create_task(custom_func(self))\n await self._launching_bot(debug)\n\n async def _launching_bot(\n self, debug: bool,\n ):\n async for event in self.listen():\n if event.updates:\n asyncio.create_task(\n self._wiretapping_type(\n updates=event.updates, debug=debug\n )\n )\n\n async def _wiretapping_type(\n self, updates, debug: bool,\n ):\n for i in updates:\n if debug:\n logger.opt(colors=True).debug(\n f'[vka {self.group_id}] {i}'\n )\n await self._defining_events(update=i)\n\n async def _defining_events(self, update):\n \"\"\"\n Определяем какой событие пришло от сервера\n \"\"\"\n event = Event(update)\n ctx = 
Context(event=event, api=self.api, bot=self)\n check = CheckingEventForCommand(\n ctx=ctx,\n menu_commands=self.__menu_commands__,\n callback_action=self.__callback_action__,\n commands=self.__commands__\n )\n if self.custom_event_name is not None and update.type in self.custom_event_name:\n if self.custom_event_func is not None:\n return await self.custom_event_func(ctx)\n elif update.type == 'message_new':\n logger.opt(colors=True).info(\n f'[vka {self.group_id}] '\n f'type: New message '\n f'peer_id: {ctx.msg.peer_id} '\n f'from_id: '\n f'{ctx.msg.from_id} '\n f'message: {ctx.msg.text}'\n )\n if 'payload' in event.message:\n asyncio.create_task(\n check.shipment_data_message_event(\n obj=event.obj\n )\n )\n return\n await check.search_for_command_message()\n\n elif update.type == 'message_event':\n logger.opt(colors=True).info(\n f'[vka {self.group_id}] '\n f'type: Message event '\n f'peer_id: {ctx.msg.peer_id} '\n f'from_id: '\n f'{ctx.msg.from_id} '\n )\n asyncio.create_task(\n check.shipment_data_message_event(\n obj=event.obj,\n )\n )\n\n def add_command(\n self, *custom_filter,\n commands=(), any_text: bool = False,\n lvl: Any = None, show_snackbar: str = None,\n custom_answer: str = None\n ):\n \"\"\" используется декоратором над функцией \"\"\"\n def wrapper(func):\n self.register_command(\n func=func, *custom_filter, commands=commands,\n any_text=any_text, lvl=lvl, show_snackbar=show_snackbar,\n custom_answer=custom_answer\n )\n return func\n\n return wrapper\n\n def register_command(\n self, func, *custom_filter,\n commands=(), any_text: bool = False,\n lvl: Any = None, show_snackbar: str = None,\n custom_answer: str = None,\n ):\n \"\"\"\n Регистрирует команду в боте\n :param func: сама функция команды\n :param custom_filter: свой фильтр\n :param commands: команды | str / [str, ...]\n :param any_text: любой текст | bool\n :param lvl: дополнительный аргумент\n :param show_snackbar:\n :param custom_answer:\n :return:\n \"\"\"\n self.__commands__.append(\n {\n 'func_obj': func,\n 'custom_filter': custom_filter,\n 'any_text': any_text,\n 'commands': commands,\n 'lvl': lvl,\n 'show_snackbar': show_snackbar,\n 'custom_answer': custom_answer,\n }\n )\n\n def add_click_callback(\n self, *custom_filter,\n callback: bool = False,\n show_snackbar: bool = False,\n ):\n \"\"\"\n используется декоратором над функцией\n\n :param callback:\n :param show_snackbar: исчезающее сообщение на экране\n\n в разработке:\n # :param open_link: открывает ссылку\n # :param open_app: открывает vk mini apps\n \"\"\"\n\n def decorator(func):\n def wrapper():\n self.register_callback(\n func=func, *custom_filter, callback=callback,\n show_snackbar=show_snackbar\n )\n return func\n return wrapper()\n return decorator\n\n def register_callback(\n self,\n func, *custom_filter,\n callback: bool = False,\n show_snackbar: bool | str = False,\n ):\n \"\"\" можно использовать для добавлении кнопок без декораторов \"\"\"\n self.__callback_action__[func.__name__] = {\n 'func_obj': func,\n 'custom_filter': custom_filter,\n 'callback': callback,\n 'show_snackbar': show_snackbar\n }\n\n","sub_path":"vka/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"595737012","text":"import cv2\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nimport math\r\ndef segment_by_angle_kmeans(lines, k=2, **kwargs):\r\n # Define 
criteria = (type, max_iter, epsilon)\r\n default_criteria_type = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER\r\n criteria = kwargs.get('criteria', (default_criteria_type, 10, 1.0))\r\n flags = kwargs.get('flags', cv2.KMEANS_RANDOM_CENTERS)\r\n attempts = kwargs.get('attempts', 10)\r\n angles = np.array([line[0][1] for line in lines])\r\n \r\n # multiply the angles by two and find coordinates of that angle\r\n pts = np.array([[np.cos(2*angle), np.sin(2*angle)]\r\n for angle in angles], dtype=np.float32)\r\n\r\n # run kmeans on the coords\r\n labels, centers = cv2.kmeans(pts, k, None, criteria, attempts, flags)[1:]\r\n labels = labels.reshape(-1) # transpose to row vec\r\n\r\n # segment lines based on their kmeans label\r\n segmented = defaultdict(list)\r\n for i, line in zip(range(len(lines)), lines):\r\n segmented[labels[i]].append(line)\r\n segmented = list(segmented.values())\r\n return segmented\r\n\r\ndef intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]\r\n\r\n\r\ndef segmented_intersections(lines):\r\n intersections = []\r\n for i, group in enumerate(lines[:-1]):\r\n for next_group in lines[i+1:]:\r\n for line1 in group:\r\n for line2 in next_group:\r\n intersections.append(intersection(line1, line2)) \r\n\r\n return intersections\r\n\r\ndef brick_boi(image):\r\n x_offset = 0.045\r\n y_offset = 0.022\r\n Angular_offset = 0\r\n filename = '%s.jpg' %(image)\r\n img = cv2.imread(filename)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n blur = cv2.medianBlur(gray, 5)\r\n adapt_type = cv2.ADAPTIVE_THRESH_GAUSSIAN_C\r\n thresh_type = cv2.THRESH_BINARY_INV\r\n bin_img = cv2.adaptiveThreshold(blur, 255, adapt_type, thresh_type, 11, 2)\r\n rho, theta, thresh = 1, np.pi/180, 350\r\n lines = cv2.HoughLines(bin_img, rho, theta, thresh)\r\n segmented = segment_by_angle_kmeans(lines)\r\n intersections = segmented_intersections(segmented)\r\n for i in range (0,len(intersections)):\r\n array = np.array(intersections[i])\r\n plt.plot(array[0,0], array[0,1], 'bo')\r\n newintersections = []\r\n for i in range (0,len(intersections)):\r\n newintersections.append(intersections[i][0])\r\n kmeans = KMeans(n_clusters=4, random_state=0).fit(newintersections)\r\n centers4=kmeans.cluster_centers_\r\n centers=centers4[0:3]\r\n line1 = [centers[0,0]-centers[1,0], centers[0,1]-centers[1,1]]\r\n line2 = [centers[0,0]-centers[2,0], centers[0,1]-centers[2,1]]\r\n line3 = [centers[1,0]-centers[2,0], centers[1,1]-centers[2,1]]\r\n line1len = math.sqrt((line1[0]**2)+(line1[1]**2))\r\n line2len = math.sqrt((line2[0]**2)+(line2[1]**2))\r\n line3len = math.sqrt((line3[0]**2)+(line3[1]**2))\r\n linelen = [line1len, line2len, line3len]\r\n \r\n values = [0,1,2]\r\n values.remove(linelen.index(max(linelen)))\r\n values.remove(linelen.index(min(linelen)))\r\n if (values[0] == 0):\r\n shortestline = [centers[0,0]-centers[1,0], centers[0,1]-centers[1,1]]\r\n angle = np.arctan2(shortestline[0], shortestline[1])\r\n scalefactor = 0.2/line1len\r\n \r\n if (values[0] == 1):\r\n shortestline = [centers[0,0]-centers[2,0], centers[0,1]-centers[2,1]]\r\n angle = np.arctan2(shortestline[0], shortestline[1])\r\n scalefactor = 0.2/line2len\r\n \r\n if (values[0] == 2):\r\n shortestline = 
[centers[1,0]-centers[2,0], centers[1,1]-centers[2,1]]\r\n        angle = np.arctan2(shortestline[0], shortestline[1])\r\n        scalefactor = 0.2/line3len\r\n        \r\n    if (angle >= math.pi/2):\r\n        angle = angle - math.pi\r\n        \r\n    if (angle <= -math.pi/2):\r\n        angle = angle + math.pi\r\n    averagecenters = [0,0]\r\n    angle = -angle + Angular_offset\r\n    \r\n    averagecenters[0] = (centers4[0,0] + centers4[1,0]+ centers4[2,0]+ centers4[3,0])/4\r\n    averagecenters[1] = (centers4[0,1] + centers4[1,1]+ centers4[2,1]+ centers4[3,1])/4\r\n    \r\n    yerror = y_offset -(scalefactor * (averagecenters[0]- img.shape[1]/2))\r\n    xerror = x_offset + scalefactor * (averagecenters[1]- img.shape[0]/2)\r\n    plt.plot(averagecenters[0],averagecenters[1], 'ro')\r\n    plt.plot(img.shape[1]/2,img.shape[0]/2, 'ro')\r\n    \r\n    for i in range (0, len(centers)):\r\n        plt.plot(centers[i][0],centers[i][1], 'ro')\r\n    plt.imshow(img)\r\n    \r\n    cv2.waitKey(0)\r\n    return(xerror, yerror, angle)\r\n\r\n\r\n","sub_path":"brick_recognition.py","file_name":"brick_recognition.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133426502","text":"from stat_parser import Parser\r\nimport nltk\r\nfrom nltk import word_tokenize, ne_chunk, pos_tag\r\nfrom nltk import CFG\r\nimport nltk,re\r\nfrom nltk.tree import Tree\r\n\r\n\r\nparser = Parser()\r\nsent= \"so through netbanking process his request for 10 sec PL\"\r\ntoken_words = re.sub(\"[^\\w]\", \" \", sent).split()\r\n\r\n\r\n\r\n\r\nfor token_word in token_words:\r\n\tif token_word=='i':\r\n\t    sent=sent.replace('i','I')\r\n\t# if token_word[-3:] == \"ing\":\r\n\t    # print \r\n\t\r\n\t\t\r\nprint(sent)\r\ntree=parser.parse(sent)\r\n\r\n\r\n\r\n\r\n\r\nprint(tree)\r\n\r\n# ROOT = 'ROOT'\r\n# tree = ...\r\n# def getNodes(parent):\r\n    # for node in parent:\r\n        # if type(node) is nltk.Tree:\r\n            # if node.label() == ROOT:\r\n                # print (\"======== Sentence =========\")\r\n                # print (\"Sentence:\", \" \".join(node.leaves()))\r\n            # else:\r\n                # print (\"Label:\", node.label())\r\n                # print (\"Leaves:\", node.leaves())\r\n\r\n            # getNodes(node)\r\n        # else:\r\n            # print (\"Word:\", node)\r\n\r\n# getNodes(tree)\r\n\r\ndef traverse(t):\r\n    try:\r\n        t.label()\r\n    except AttributeError:\r\n        print(t, end=\" \")\r\n    else:\r\n        # Now we know that t.node is defined\r\n        print('(', t.label(), end=\" \")\r\n        for child in t:\r\n            traverse(child)\r\n        print(')', end=\" \")\r\ntraverse(tree)\r\nprint ()\r\nROOT = 'ROOT'\r\nfound = 0\r\n#==============================================================================\r\nparser = Parser()       #defining parser of stat_parser  \r\nROOT = 'ROOT'           #giving ROOT name to the root of the tree\r\nfound = 0    \r\nt=0\r\np=0                     #tag for stopping the upsell in case of instrument words like through, using etc\r\ndef stop_cross_sell_check(parent, products_upsell, stop_cross_sell):\r\n    for node in parent:\r\n        global found\r\n        global t\r\n        global p\r\n        if type(node) is nltk.Tree:\r\n            if node.label() == ROOT:\r\n                print (\"======== Sentence =========\")\r\n                print (\"Sentence:\", \" \".join(node.leaves()))\r\n            else:\r\n                print (\"Label:\", node.label()) \r\n                print (\"Leaves:\", node.leaves())\r\n                print(found)\r\n            stop_cross_sell_check(node,products_upsell, stop_cross_sell)\r\n        else: \r\n            if node==stop_cross_sell:\r\n                p=found  \r\n                print ('found')  \r\n            if node== products_upsell:\r\n                t=found\r\n                print('double found')\r\n            found=found+1\r\n    if (t-p)==1:\r\n        return 1\r\n    else:\r\n        return 0     \r\n# 
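note (added): stop_cross_sell_check walks the parse tree depth-first, numbering each leaf via 'found';\r\n# 'p' marks the position of the stop word ('through') and 't' the product token ('PL'), so (t - p) == 1\r\n# means the product directly follows the instrument word and the function flags the cross-sell to stop.\r\n#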
\r\n#==============================================================================\r\nprint (stop_cross_sell_check(tree, 'PL', 'through')) \r\n","sub_path":"parsing1.py","file_name":"parsing1.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301823038","text":"\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, multiply, concatenate, Activation, Lambda\nfrom keras.layers import PReLU, LSTM\nfrom keras.layers import Conv1D, BatchNormalization, GlobalAveragePooling1D, Permute, Dropout\nfrom keras.layers import MaxPooling1D, Flatten\nfrom keras.optimizers import Adam, SGD\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard, CSVLogger, EarlyStopping\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras import backend as K\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom utils.constants import max_seq_len, nb_classes\nfrom utils.generic_utils import load_dataset_at, calculate_dataset_metrics, cutoff_choice, \\\n cutoff_sequence, plot_dataset\nimport sys\nimport math\nimport numpy as np\nimport os\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n\nTRAINABLE = True\n\ndef slice_seq(x):\n return x[:, :1]\n\ndef slice_dtw(x):\n return x[:, 1:]\n\ndef play_model(nb_cnn, proto_num, max_seq_lenth, nb_class):\n ip = Input(shape=(1+proto_num, max_seq_lenth))\n\n ip1 = Lambda(slice_seq)(ip)\n ip2 = Lambda(slice_dtw)(ip)\n\n x1 = Permute((2, 1))(ip1)\n x2 = Permute((2, 1))(ip2)\n\n for i in range(nb_cnn):\n i_prime = i if i < 3 else 3\n nb_nodes = 64 * 2 ** i_prime\n\n x1 = Conv1D(nb_nodes, 3, padding='same', activation='relu', kernel_initializer='he_uniform')(x1)\n x1 = Conv1D(nb_nodes, 3, padding='same', activation='relu', kernel_initializer='he_uniform')(x1)\n\n x2 = Conv1D(nb_nodes, 3, padding='same', activation='relu', kernel_initializer='he_uniform')(x2)\n x2 = Conv1D(nb_nodes, 3, padding='same', activation='relu', kernel_initializer='he_uniform')(x2)\n if i > 2:\n x1 = Conv1D(nb_nodes, 3, padding='same', activation='relu', kernel_initializer='he_uniform')(x1)\n\n x2 = Conv1D(nb_nodes, 3, padding='same', activation='relu', kernel_initializer='he_uniform')(x2)\n\n x1 = MaxPooling1D(pool_size=2)(x1)\n x2 = MaxPooling1D(pool_size=2)(x2)\n\n\n x = concatenate([x1, x2])\n\n x = Flatten()(x)\n\n x = Dense(4096, activation='relu')(x)\n x = Dropout(0.5)(x)\n\n x = Dense(4096, activation='relu')(x)\n x = Dropout(0.5)(x)\n\n out = Dense(nb_class, activation='softmax')(x)\n\n model = Model(ip, out)\n\n model.summary()\n\n return model\n\ndef train_model(model:Model, dataset_id, method, proto_num, dataset_prefix, nb_iterations=100000, batch_size=128, val_subset=None, cutoff=None, normalize_timeseries=False, learning_rate=1e-3, early_stop=False, balance_classes=True, run_ver=''):\n X_train, y_train, X_test, y_test, is_timeseries = load_dataset_at(dataset_id, method, proto_num, normalize_timeseries=normalize_timeseries)\n\n #calculate num of batches\n nb_epochs = math.ceil(nb_iterations * (batch_size / X_train.shape[0]))\n\n if balance_classes == True:\n classes = np.arange(0, nb_classes(dataset_id)) #np.unique(y_train)\n le = LabelEncoder()\n y_ind = le.fit_transform(y_train.ravel())\n recip_freq = len(y_train) / (len(le.classes_) *\n 
np.bincount(y_ind).astype(np.float64))\n class_weight = recip_freq[le.transform(classes)]\n\n print(\"Class weights : \", class_weight)\n\n y_train = to_categorical(y_train, nb_classes(dataset_id))\n y_test = to_categorical(y_test, nb_classes(dataset_id))\n\n if is_timeseries:\n factor = 1. / np.cbrt(2)\n else:\n factor = 1. / np.sqrt(2)\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', patience=math.ceil(nb_epochs / 20), mode='auto',\n factor=factor, cooldown=0, min_lr=learning_rate/10., verbose=2)\n\n if early_stop:\n early_stopping = EarlyStopping(monitor='loss', patience=500, mode='auto', verbose=2, restore_best_weights=True)\n callback_list = [early_stopping]\n else:\n callback_list = []\n\n optm = Adam(lr=learning_rate)\n #optm = SGD(lr=learning_rate, momentum=0.9, decay=5e-4)\n\n model.compile(optimizer=optm, loss='categorical_crossentropy', metrics=['accuracy'])\n\n if val_subset is not None:\n X_test = X_test[:val_subset]\n y_test = y_test[:val_subset]\n\n\n if balance_classes:\n model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, callbacks=callback_list,\n class_weight=class_weight, verbose=2, validation_data=(X_test, y_test))\n else:\n model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, callbacks=callback_list, verbose=2, validation_data=(X_test, y_test))\n\n\nif __name__ == \"__main__\":\n dataset = sys.argv[1]\n method = sys.argv[2]\n proto_num = int(sys.argv[3])\n\n max_seq_lenth = max_seq_len(dataset)\n nb_class = nb_classes(dataset)\n nb_cnn = int(round(math.log(max_seq_lenth, 2))-3)\n print(\"Number of Pooling Layers: %s\" % str(nb_cnn))\n\n #model = lstm_fcn_model(proto_num, max_seq_lenth, nb_class)\n #model = alstm_fcn_model(proto_num, max_seq_lenth, nb_class)\n\n #model = cnn_raw_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = cnn_dtwfeatures_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = cnn_earlyfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = cnn_midfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = cnn_latefusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n\n #model = vgg_raw_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = vgg_dtwfeatures_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = vgg_earlyfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = vgg_midfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n #model = vgg_latefusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n\n model = play_model(nb_cnn, proto_num, max_seq_lenth, nb_class)\n\n train_model(model, dataset, method, proto_num, dataset_prefix=dataset, nb_iterations=50000, batch_size=50, learning_rate=0.0001, early_stop=False, balance_classes=False, run_ver='vgg_')\n #train_model(model, dataset, method, proto_num, dataset_prefix=dataset, nb_iterations=28000, batch_size=64, learning_rate=0.001, early_stop=True)\n\n acc = evaluate_model(model, dataset, method, proto_num, dataset_prefix=dataset, batch_size=50, checkpoint_prefix=\"vgg_loss\")\n np.savetxt(\"output/vgg/vgg-%s-%s-%s-loss-%s\" % (dataset, method, str(proto_num), str(acc)), [acc])\n\n acc = evaluate_model(model, dataset, method, proto_num, dataset_prefix=dataset, batch_size=50, checkpoint_prefix=\"vgg_val_acc\")\n np.savetxt(\"output/vgg/vgg-%s-%s-%s-vacc-%s\" % (dataset, method, str(proto_num), str(acc)), 
[acc])\n","sub_path":"playground_gpu0.py","file_name":"playground_gpu0.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"330104508","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 13 11:16:09 2017\n\n@author: aguil\n\"\"\"\n\nimport numpy as np\n#import pylab as pl\nimport sympy as sp\nx,t = sp.symbols('x t')\nx1 = 0.9*sp.sin(np.pi*t)\nx2 = 0.3*sp.sin(3*np.pi*t)\n\nx = x1 + x2 + 3*x1 + 3*x2 + 4*x1 + 4*x2\nsp.plot(x)","sub_path":"Python/rect.py","file_name":"rect.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499132062","text":"import cv2\nimport tensorflow as tf\nfrom tensorflow import keras\nIMG_SIZE = (224, 224)\nIMG_SHAPE = IMG_SIZE + (3,)\npath = r'C:\\Users\\a\\Downloads\\FaceMask3'\nmodel = keras.models.load_model(path)\npreprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n keypress = cv2.waitKey(1)\n if keypress & 0xFF == ord('q'):\n break\n\n ret, img = cap.read()\n\n img = cv2.resize(img, IMG_SIZE)\n cv2.imshow('t', img)\n\n im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n im = im.astype('float32')\n\n im = tf.expand_dims(im, 0)\n predictions = model.predict(im)\n score = predictions[0][0]\n percent_mask = (1-score)*100\n print(f'{percent_mask:.2f}% mask')\n\n \n \n\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"564809327","text":"def parserText(msg):\n queryStr = msg.get('Content')\n arr = smart_unicode(queryStr).split(' ')\n commander = arr[0]\n content = arr[1:]\n\n if cmp(commander, u'menu') == 0 or cmp(commander, u'菜单') == 0:\n return menu(msg)\n elif cmp(commander, u'qbcj') == 0 or cmp(commander, u'全部成绩') == 0 :\n return inquiry_score(msg,content)\n else:\n return otherResponse(msg)\n\ndef inquiry_score(msg,content):\n if content==None or len(content) == 0:\n return getReplyXml(msg, SCORE_HINT)\n if len(content) != 2:\n return getReplyXml(msg,u\"输入格式有误,请尝试正确格式~ \\n\"+SCORE_HINT)\n openid = msg.get('FromUserName')\n task_score(openid, content)\n return getReplyXml(msg, TISHI)\n\ndef task_score(openID,content):\n zjh = content[0]\n mm = content[1]\n data = u'openID=%s&zjh=%s&mm=%s' % (openID, zjh, mm)\n queue = TaskQueue('hebeu')\n queue.add(Task(\"/hebeu/task/score/\", data))\n\ndef score_task(request):\n if request.method == 'POST':\n data = request.POST\n openID=data.get(\"openID\",'').encode('GBK')\n zjh = data.get(\"zjh\",'').encode('GBK')\n mm = data.get('mm', '').encode('GBK')\n cache_score(openID, zjh, mm)\n return HttpResponse(openID)\n else:\n return HttpResponse(\"false\")\n\ndef cache_score(openID,zjh,mm):\n mc = memcache.Client()\n result = SDU_Spider().sdu_init(zjh,mm)\n mc.set(openID, result)\n\ndef get_cache(openID):\n mc = memcache.Client()\n value = mc.get(openID)\n return value\n\nclass SDU_Spider:\n # 声明相关的属性\n def __init__(self):\n socket.setdefaulttimeout(10)\n self.loginUrl = 'http://202.206.161.173:9080/loginAction.do' # 登录的url\n self.resultUrl = 'http://202.206.161.173:9080/gradeLnAllAction.do?oper=qbinfo' # 显示成绩的url\n self.topUrl = 'http://202.206.161.173:9080/menu/s_top.jsp' #这里保存的学生姓名\n self.cookieJar = cookielib.CookieJar() # 初始化一个CookieJar来处理Cookie的信息\n self.opener = 
urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookieJar))\n\n def sdu_init(self, zjh, mm):\n # 初始化链接并且获取cookie\n postdata=urllib.urlencode({'zjh':zjh,'mm':mm}) # POST的数据\n myRequest = urllib2.Request(url = self.loginUrl,data = postdata) # 自定义一个请求\n\n\n login = self.opener.open(myRequest) # 访问登录页面,获取到必须的cookie的值\n\n\n if (login.read().decode('GBK').find(u'重新输入') != -1):\n return u'╮(╯_╰)╭\\n请输入正确的学号和密码,么么哒~'\n\n top = self.opener.open(self.topUrl) \n\n\n result = self.opener.open(self.resultUrl, timeout=4) # 访问成绩页面,获得成绩的数据\n\n\n return self.deal_data(top.read().decode('GBK'), result.read().decode('GBK'))\n\n # 将内容从页面代码中抠出来\n def deal_data(self,topPage,myPage):\n # print topPage\n stu_name = re.findall(r' .*?nbsp;(.*?) ',topPage,re.S)\n name = u'【'+stu_name[0]+u'】全部成绩如下:\\n================'\n myItems = re.findall(r'(.*?).*?(.*?).*?(.*?)

    .*?.*?',myPage,re.S) #获取到学分\n grade = []\n grade.append(name)\n scores = []\n for item in myItems:\n #s = '%s %s %s\\n' % (item[0].strip(), item[1].strip(), item[2].strip())\n s = '\\n%s %s' % (item[0].strip(), item[2].strip())\n grade.append(s)\n score = item[2].strip()\n try:\n if cmp(score, u'优秀') == 0:\n scores.append(95)\n elif cmp(score, u'良好') == 0:\n scores.append(85)\n elif cmp(score, u'中等') == 0:\n scores.append(75)\n elif cmp(score, u'及格') == 0:\n scores.append(65)\n elif cmp(score, u'不及格') == 0:\n scores.append(65)\n else:\n scores.append(float(score))\n except:\n pass\n # print grade\n result = ''.join(grade)\n average = sum(scores)/len(scores)\n\n if 90<=average<100:\n result+=u'\\n================\\n\\n【平均分】%s\\n你是不是传说中的学霸?\\n ≥▽≤y' % ('%.2f'% average)\n elif 80<=average<90:\n result+= u'\\n================\\n\\n【平均分】%s\\n成绩这么好,你家里人知道吗?\\n (/≥▽≤/) ' % ('%.2f'% average)\n elif 70<=average<80:\n result+=u'\\n================\\n\\n【平均分】%s\\n考成这样,你家里人知道吗?\\n学渣,快滚去学习吧!\\n ((`□′)) ' % ('%.2f'% average)\n elif 60<=average<70:\n result+=u'\\n================\\n\\n【平均分】%s\\n考成这样,你家里人知道吗?\\n学渣,快滚去学习吧!\\n ((`□′)) ' % ('%.2f'% average)\n else:\n result += u'\\n================\\n\\n【平均分】%s\\n学渣,滚去学习!\\n ( ̄Q ̄)╯ ' % ('%.2f'% average)\n\n return result\n\n def __del__(self):\n self.opener.close()\n","sub_path":"work for 2015-2016/py2/shijian-2016/zhuaqu2/three/jwxt5.py","file_name":"jwxt5.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"107617685","text":"from pylastica import Document\nfrom pylastica.aggregation.iprange import IpRange\nfrom pylastica.doc_type import Mapping\nfrom pylastica.query.query import Query\nfrom tests.base import Base\n\n__author__ = 'Joe Linn'\n\nimport unittest\n\n\nclass IpRangeTest(unittest.TestCase, Base):\n def setUp(self):\n super(IpRangeTest, self).setUp()\n self._index = self._create_index(\"test_aggregation_ip_range\")\n mapping = Mapping()\n mapping.set_properties({\n \"address\": {\"type\": \"ip\"}\n })\n doc_type = self._index.get_doc_type(\"test\")\n doc_type.mapping = mapping\n docs = [\n Document(\"1\", {\"address\": \"192.168.1.100\"}),\n Document(\"2\", {\"address\": \"192.168.1.150\"}),\n Document(\"3\", {\"address\": \"192.168.1.200\"})\n ]\n doc_type.add_documents(docs)\n self._index.refresh()\n\n def tearDown(self):\n super(IpRangeTest, self).tearDown()\n self._index.delete()\n\n def test_ip_range_aggregation(self):\n agg = IpRange(\"ip\", \"address\")\n agg.add_range(from_value=\"192.168.1.101\").add_range(to_value=\"192.168.1.200\").add_mask_range(\"192.168.1.0/24\")\n\n query = Query()\n query.add_aggregation(agg)\n results = self._index.search(query).aggregations['ip']\n\n for bucket in results['buckets']:\n if 'from' in bucket and 'to' in bucket:\n #the CIDR mask\n self.assertEqual(3, bucket['doc_count'])\n else:\n self.assertEqual(2, bucket['doc_count'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/aggregation/test_iprange.py","file_name":"test_iprange.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427474897","text":"import psycopg2\nimport psycopg2.extras\nimport psycopg2.pool\nfrom contextlib import contextmanager\nfrom flask import current_app\nfrom flask import g\n\ndef create_pool():\n if 'pool' not in g:\n g.pool = psycopg2.pool.SimpleConnectionPool(\n 1,\n 20,\n 
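# note (added comment): positional args are (minconn, maxconn) -- the pool opens one\n            # connection up front and hands out at most twenty concurrently\n            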
dsn=current_app.config['DATABASE_URI'],\n connection_factory=psycopg2.extras.RealDictConnection\n )\n\n return g.pool\n\n@contextmanager\ndef get_db_connection():\n try:\n connection = g.pool.getconn()\n yield connection\n finally:\n g.pool.putconn(connection)\n\n@contextmanager\ndef get_db_cursor(commit=True):\n create_pool() # ugly hack to make sure pool is really created\n with get_db_connection() as conn:\n cur = conn.cursor()\n try:\n yield cur\n if commit:\n conn.commit()\n finally:\n cur.close()\n\ndef db_query(query:str, params:dict=None, fetch_all:bool=False):\n \"\"\"Get rows from database.\n\n :param query\n :param params\n :param fetch_all\n \"\"\"\n with get_db_cursor() as cur:\n if (current_app.config['DEBUG'] and not current_app.config['TESTING']):\n current_app.logger.info(cur.mogrify(query, params))\n\n cur.execute(query, params)\n if fetch_all:\n result = cur.fetchall()\n else:\n result = cur.fetchone()\n\n return result\n\n","sub_path":"mmb_db_helpers/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426944900","text":"import os\nimport time\nimport sys\nimport subprocess\nfrom process import Network\n\nclass p4():\n def __init__(self):\n self.run()\n\n def run(self):\n #######INPUTS########\n pid = subprocess.Popen(\"ls patients/ -t1 | head -n 1\",\n shell=True,\n stdout=subprocess.PIPE,\n universal_newlines=True).communicate()[0]\n root_path = os.getcwd() +'/patients/' + pid.strip()\n img_path = root_path + '/Images/dir_4'\n #print('img_path', img_path)\n #####################\n #tmp = time.time()\n Network(0, img_path, root_path)\n #tend = time.time()\n #print(tend-tmp)\n\nif __name__ == \"__main__\":\n p4()\n","sub_path":"process4.py","file_name":"process4.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368771883","text":"import httplib2\nimport json\nimport random\nimport requests\nimport string\n\nfrom database_setup import Base, MenuItem, Restaurant\nfrom flask import Flask, flash, jsonify, make_response, render_template, request, redirect, url_for\nfrom flask import session as login_session\nfrom oauth2client.client import FlowExchangeError, flow_from_clientsecrets\nfrom sqlalchemy import create_engine, asc\nfrom sqlalchemy.orm import sessionmaker\n\nfrom constants import FB_APP_ID, FB_PERMISSION_URL, FB_TOKEN_URL, FB_USER_INFO_URL, FB_USER_PIC_URL\nfrom constants import CLIENT_ID, GOOGLE_API_KEY, LOGIN_OUTPUT, SCRIPT_FOR_RESTAURANT\nfrom constants import MESSAGE_LOGIN, MESSAGE_LOGOUT, MESSAGE_NOT_LOGGED\nfrom helpers import create_user, get_fb_token, get_user_id, get_user_info, request_to_url\n\napp = Flask(__name__)\n\n# Connect to Database and create database session\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n# Login\n@app.route('/login')\ndef show_login():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))\n login_session['state'] = state\n return render_template('login.html', google_api_key=GOOGLE_API_KEY, STATE=state, fb_app_id=FB_APP_ID)\n\n\n# Google POST Request\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter'), 401)\n 
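# note (added comment): the random 'state' nonce minted in show_login must round-trip\n        # unchanged; a mismatch suggests a forged (CSRF) request, so the handshake stops with 401\n        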
response.headers['Content-Type'] = 'application/json'\n        return response\n    code = request.data\n    try:\n        # Upgrade the authorization code into a credentials object\n        oauth_flow = flow_from_clientsecrets('client_secret.json', scope='')\n        oauth_flow.redirect_uri = 'postmessage'\n        credentials = oauth_flow.step2_exchange(code)\n    except FlowExchangeError:\n        response = make_response(json.dumps('Failed to upgrade the authorization code.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Check that the access token is valid.\n    access_token = credentials.access_token\n    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' % access_token)\n    result = request_to_url(url, value=1)\n\n    # If there was an error in the access token info, abort.\n    if result.get('error') is not None:\n        response = make_response(json.dumps(result.get('error')), 500)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is used for the intended user.\n    gplus_id = credentials.id_token['sub']\n    if result['user_id'] != gplus_id:\n        response = make_response(json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is valid for this app.\n    if result['issued_to'] != CLIENT_ID:\n        response = make_response(json.dumps(\"Token's client ID does not match app's.\"), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Check to see if user is already logged in\n    stored_credentials = login_session.get('credentials')\n    stored_gplus_id = login_session.get('gplus_id')\n    if stored_credentials is not None and gplus_id == stored_gplus_id:\n        response = make_response(json.dumps('Current user is already connected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Store the access token in the session for later use.\n    login_session['provider'] = 'google'\n    login_session['credentials'] = credentials.access_token\n    login_session['gplus_id'] = gplus_id\n    response = make_response(json.dumps('Successfully connected user.'), 200)\n\n    # Get user info\n    userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n    params = {'access_token': credentials.access_token, 'alt': 'json'}\n    answer = requests.get(userinfo_url, params=params)\n    data = json.loads(answer.text)\n\n    login_session['username'] = data['name']\n    login_session['picture'] = data['picture']\n    login_session['email'] = data['email']\n\n    # See if user exists, if doesn't make a new one\n    user_id = get_user_id(login_session['email'])\n    if not user_id:\n        user_id = create_user(login_session)\n    login_session['user_id'] = user_id\n\n    output = LOGIN_OUTPUT % (login_session['username'], login_session['picture'])\n    flash(MESSAGE_LOGIN % login_session['username'])\n    return output\n\n\n# FB POST Request\n@app.route('/fbconnect', methods=['POST'])\ndef fbconnect():\n    if request.args.get('state') != login_session['state']:\n        response = make_response(json.dumps('Invalid state parameter.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    access_token = request.data\n\n    # Exchanges client token for long-lived server-side token with /GET /oauth/\n    # access_token?grant_type=fb_exchange_token&client_id={app-id}&client_secret={app-secret}&fb_exchange_token=\n    # {short-lived-token}\n    app_id = json.loads(open('fb_client_secrets.json', 'r').read())['web']['app_id']\n    app_secret = json.loads(open('fb_client_secrets.json', 
'r').read())['web']['app_secret']\n    url = FB_TOKEN_URL % (app_id, app_secret, access_token)\n    result = request_to_url(url, value=1, to_json=False)\n\n    # Use token to get user info from API and Strip Expire Tag from Access Token\n    token = get_fb_token(result)\n    userinfo_url = FB_USER_INFO_URL % token\n    data = request_to_url(userinfo_url, value=1)\n\n    login_session['provider'] = 'facebook'\n    login_session['username'] = data[\"name\"]\n    login_session['email'] = data[\"email\"]\n    login_session['facebook_id'] = data[\"id\"]\n\n    # Get user picture\n    url = FB_USER_PIC_URL % token\n    data = request_to_url(url, value=1)\n    login_session['picture'] = data['data']['url']\n\n    # See if user exists\n    user_id = get_user_id(login_session['email'])\n    if not user_id:\n        user_id = create_user(login_session)\n    login_session['user_id'] = user_id\n\n    output = LOGIN_OUTPUT % (login_session['username'], login_session['picture'])\n    flash(MESSAGE_LOGIN % login_session['username'])\n    return output\n\n\n# DISCONNECT - Method to logout specific provider\n@app.route('/disconnect')\ndef disconnect():\n    if 'provider' in login_session:\n        if login_session['provider'] == 'google':\n            gdisconnect()\n            del login_session['gplus_id']\n            del login_session['credentials']\n        if login_session['provider'] == 'facebook':\n            fbdisconnect()\n            del login_session['facebook_id']\n\n        del login_session['username']\n        del login_session['email']\n        del login_session['picture']\n        del login_session['user_id']\n        del login_session['provider']\n        flash(MESSAGE_LOGOUT)\n        return redirect(url_for('show_restaurants'))\n    else:\n        flash(MESSAGE_NOT_LOGGED)\n        return redirect(url_for('show_restaurants'))\n\n\n# DISCONNECT - Revoke a current user's token and reset their login_session.\n@app.route('/gdisconnect')\ndef gdisconnect():\n    # Only disconnect a connected user.\n    credentials = login_session.get('credentials')\n    if credentials is None:\n        response = make_response(json.dumps('Current user not connected.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Execute HTTP GET request to revoke current token.\n    access_token = credentials\n    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[0]\n\n    # httplib2 reports the HTTP status as a string, e.g. '200'\n    if result['status'] == '200':\n        # Reset the user's session\n        response = make_response(json.dumps('Successfully disconnected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    else:\n        # For whatever reason, the given token was invalid.\n        response = make_response(json.dumps('Failed to revoke token for given user.'), 400)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n\n# DISCONNECT - Revoke the current user's FB token and reset their login_session\n@app.route('/fbdisconnect')\ndef fbdisconnect():\n    facebook_id = login_session['facebook_id']\n    url = FB_PERMISSION_URL % facebook_id\n    result = request_to_url(url, 'DELETE', 1)\n\n\n# JSON APIs to view Restaurant Information\n@app.route('/restaurant/<int:restaurant_id>/menu/JSON')\ndef restaurantMenuJSON(restaurant_id):\n    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n    items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()\n    return jsonify(MenuItems=[i.serialize for i in items])\n\n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')\ndef menuItemJSON(restaurant_id, menu_id):\n    Menu_Item = session.query(MenuItem).filter_by(id=menu_id).one()\n    return jsonify(Menu_Item=Menu_Item.serialize)\n\n\n@app.route('/restaurant/JSON')\ndef restaurantsJSON():\n    
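# note (added comment): e.g. GET /restaurant/JSON -> {'restaurants': [...]}; each entry's\n    # fields come from the Restaurant model's 'serialize' property\n    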
restaurants = session.query(Restaurant).all()\n    return jsonify(restaurants=[r.serialize for r in restaurants])\n\n\n# Show all restaurants\n@app.route('/')\n@app.route('/restaurant/')\ndef show_restaurants():\n    restaurants = session.query(Restaurant).order_by(asc(Restaurant.name))\n    if 'username' not in login_session:\n        return render_template('publicrestaurants.html', restaurants=restaurants)\n    return render_template('restaurants.html', restaurants=restaurants)\n\n\n# Create a new restaurant\n@app.route('/restaurant/new/', methods=['GET', 'POST'])\ndef new_restaurant():\n    if 'username' not in login_session:\n        return redirect('/login')\n    if request.method == 'POST':\n        restaurant = Restaurant(name=request.form['name'], user_id=login_session['user_id'])\n        session.add(restaurant)\n        flash('New Restaurant %s Successfully Created' % restaurant.name)\n        session.commit()\n        return redirect(url_for('show_restaurants'))\n    else:\n        return render_template('newRestaurant.html')\n\n\n# Edit a restaurant\n@app.route('/restaurant/<int:restaurant_id>/edit/', methods=['GET', 'POST'])\ndef edit_restaurant(restaurant_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    editedRestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n    if request.method == 'POST':\n        if request.form['name']:\n            editedRestaurant.name = request.form['name']\n            flash('Restaurant Successfully Edited %s' % editedRestaurant.name)\n            return redirect(url_for('show_restaurants'))\n    else:\n        return render_template('editRestaurant.html', restaurant=editedRestaurant)\n\n\n# Delete a restaurant\n@app.route('/restaurant/<int:restaurant_id>/delete/', methods=['GET', 'POST'])\ndef delete_restaurant(restaurant_id):\n    restaurant_to_delete = session.query(Restaurant).filter_by(id=restaurant_id).one()\n    if 'username' not in login_session:\n        return redirect('/login')\n    if restaurant_to_delete.user_id != login_session['user_id']:\n        return SCRIPT_FOR_RESTAURANT\n    if request.method == 'POST':\n        session.delete(restaurant_to_delete)\n        flash('%s Successfully Deleted' % restaurant_to_delete.name)\n        session.commit()\n        return redirect(url_for('show_restaurants', restaurant_id=restaurant_id))\n    else:\n        return render_template('deleteRestaurant.html', restaurant=restaurant_to_delete)\n\n\n# Show a restaurant menu\n@app.route('/restaurant/<int:restaurant_id>/')\n@app.route('/restaurant/<int:restaurant_id>/menu/')\ndef show_menu(restaurant_id):\n    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n    creator = get_user_info(restaurant.user_id)\n    items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()\n    can_edit = True\n    if creator is not None and 'username' in login_session:\n        can_edit = creator.id != login_session['user_id']\n    if 'username' not in login_session or can_edit:\n        return render_template('publicmenu.html', items=items, restaurant=restaurant, creator=creator)\n    else:\n        return render_template('menu.html', items=items, restaurant=restaurant, creator=creator)\n\n\n# Create a new menu item\n@app.route('/restaurant/<int:restaurant_id>/menu/new/', methods=['GET', 'POST'])\ndef new_menu_item(restaurant_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n    if request.method == 'POST':\n        new_item = MenuItem(\n            name=request.form['name'],\n            description=request.form['description'],\n            price=request.form['price'],\n            course=request.form['course'],\n            restaurant_id=restaurant_id,\n            user_id=restaurant.user_id\n        )\n        session.add(new_item)\n        session.commit()\n        flash('New Menu %s Item Successfully Created' % 
new_item.name)\n        return redirect(url_for('show_menu', restaurant_id=restaurant_id))\n    else:\n        return render_template('newmenuitem.html', restaurant_id=restaurant_id)\n\n\n# Edit a menu item\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit', methods=['GET', 'POST'])\ndef edit_menu_item(restaurant_id, menu_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    editedItem = session.query(MenuItem).filter_by(id=menu_id).one()\n    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n    if request.method == 'POST':\n        if request.form['name']:\n            editedItem.name = request.form['name']\n        if request.form['description']:\n            editedItem.description = request.form['description']\n        if request.form['price']:\n            editedItem.price = request.form['price']\n        if request.form['course']:\n            editedItem.course = request.form['course']\n        session.add(editedItem)\n        session.commit()\n        flash('Menu Item Successfully Edited')\n        return redirect(url_for('show_menu', restaurant_id=restaurant_id))\n    else:\n        return render_template('editmenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=editedItem)\n\n\n# Delete a menu item\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete', methods=['GET', 'POST'])\ndef delete_menu_item(restaurant_id, menu_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n    itemToDelete = session.query(MenuItem).filter_by(id=menu_id).one()\n    if request.method == 'POST':\n        session.delete(itemToDelete)\n        session.commit()\n        flash('Menu Item Successfully Deleted')\n        return redirect(url_for('show_menu', restaurant_id=restaurant_id))\n    else:\n        return render_template('deletemenuitem.html', item=itemToDelete)\n\n\nif __name__ == '__main__':\n    app.secret_key = 'super_secret_key'\n    app.debug = True\n    app.run(host='0.0.0.0', port=5040)\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":15072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336305160","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nfrom ITJUZI.itjz import crawler\r\nfrom ITJUZI.items import ItjuziItem\r\nclass ITjuziSpider(scrapy.Spider):\r\n    name = \"it\"\r\n    ha = {\r\n        \"Connection\": \"keep-alive\",\r\n        \"Referer\": \"http://radar.itjuzi.com/investevent\",\r\n        \"Accept-Language\": \"zh-CN,zh;q=0.9\",\r\n        \"Host\": \"www.itjuzi.com\",\r\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\r\n    }\r\n    cookie = crawler('17611225911@163.com', '!QAZ2wsx')[1]\r\n    def start_requests(self):\r\n        for i in crawler('17611225911@163.com', '!QAZ2wsx')[0]:\r\n            yield scrapy.Request(i,headers=self.ha,cookies=self.cookie,dont_filter=True)\r\n    def parse(self, response):\r\n        item=ItjuziItem()\r\n        #公司名称\r\n        try:\r\n            item['company_name']=response.xpath('//*/div[@id=\"home\"]//h1[@class=\"seo-important-title\"]//text()').extract()[0].strip()\r\n        except:\r\n            
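# note (added comment): the xpath matched nothing; fall back to '' so the item still yields\r\n            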
max_length=15,\n        choices=POS_NEG,\n        help_text='',\n    )\n\n    couples_testing = models.CharField(\n        verbose_name=_(\"Did testing and counseling occur through couples testing today?\"),\n        max_length=15,\n        choices=YES_NO,\n        help_text='',\n    )\n    # We need clarification on the type of ID used here. Is it Omang?\n    partner_id = models.CharField(\n        verbose_name=_(\"What is the unique identification number for the other member of the couple?\"),\n        max_length=25,\n        null=True,\n        blank=True,\n        help_text='',\n    )\n\n    symptoms = models.CharField(\n        verbose_name=_(\"Does the client currently have any of the following symptoms?\"),\n        max_length=75,\n        choices=SYMPTOMS,\n        help_text='',\n    )\n\n    family_tb = models.CharField(\n        verbose_name=_(\"Have any of the client\'s family members been diagnosed with tuberculosis?\"),\n        max_length=15,\n        choices=YES_NO_DONT_KNOW,\n        help_text='',\n    )\n\n    history = AuditTrail()\n\n    class Meta:\n        app_label = 'bcpp_htc_subject'\n        verbose_name = \"HIV test result\"\n        verbose_name_plural = \"HIV test results\"\n","sub_path":"bhp066/apps/bcpp_htc_subject/models/htc_hiv_result.py","file_name":"htc_hiv_result.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336305160","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nfrom ITJUZI.itjz import crawler\r\nfrom ITJUZI.items import ItjuziItem\r\nclass ITjuziSpider(scrapy.Spider):\r\n    name = \"it\"\r\n    ha = {\r\n        \"Connection\": \"keep-alive\",\r\n        \"Referer\": \"http://radar.itjuzi.com/investevent\",\r\n        \"Accept-Language\": \"zh-CN,zh;q=0.9\",\r\n        \"Host\": \"www.itjuzi.com\",\r\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\r\n    }\r\n    cookie = crawler('17611225911@163.com', '!QAZ2wsx')[1]\r\n    def start_requests(self):\r\n        for i in crawler('17611225911@163.com', '!QAZ2wsx')[0]:\r\n            yield scrapy.Request(i,headers=self.ha,cookies=self.cookie,dont_filter=True)\r\n    def parse(self, response):\r\n        item=ItjuziItem()\r\n        # Company name\r\n        try:\r\n            item['company_name']=response.xpath('//*/div[@id=\"home\"]//h1[@class=\"seo-important-title\"]//text()').extract()[0].strip()\r\n        except:\r\n            item['company_name'] =''\r\n        # Company profile\r\n        try:\r\n            item['company_intro']=''.join([one.strip() for one in response.xpath('//*/div[@id=\"home\"]//div[@class=\"info-line\"]//text()').extract()])\r\n        except:\r\n            item['company_intro'] = ''\r\n        # Basic details\r\n        try:\r\n            item['seo_slogan']=response.xpath('//*/div[@class=\"info-line\"]/h2/text()').extract()[0].strip()\r\n        except:\r\n            item['seo_slogan'] =''\r\n        # More details\r\n        try:\r\n            des_name=''.join([one.strip() for one in response.xpath('//*/div[@class=\"des-more\"]/h2/text()').extract()])\r\n        except:\r\n            des_name= ''\r\n        try:\r\n            des_time=''.join([one.strip() for one in response.xpath('//*/div[@class=\"des-more\"]/h3[1]/span/text()').extract()])\r\n        except:\r\n            des_time=''\r\n        try:\r\n            des_count=''.join([one.strip() for one in response.xpath('//*/div[@class=\"des-more\"]/h3[2]/span/text()').extract()])\r\n        except:\r\n            des_count=''\r\n        try:\r\n            des_state=''.join([one.strip() for one in response.xpath('//*/div[@class=\"des-more\"]/span/text()').extract()])\r\n        except:\r\n            des_state=''\r\n        item['des_more']=des_name+'|'+des_time+'|'+des_count+'|'+des_state\r\n        try:\r\n            rz_time=response.xpath('//*/div[@id=\"invest-portfolio\"]//table//td/span[@class=\"date \"]//text()').extract()[0].replace('\\n','').replace('\\t','').replace('\\r','')\r\n        except:\r\n            
rz_time=''\r\n        try:\r\n            rz_genre=response.xpath('//*/div[@id=\"invest-portfolio\"]//table//td/span[@class=\"round\"]//text()').extract()[0]\r\n        except:\r\n            rz_genre=''\r\n        try:\r\n            rz_money=response.xpath('//*/div[@id=\"invest-portfolio\"]//table//td/span[@class=\"finades\"]//text()').extract()[0]\r\n        except:\r\n            rz_money=''\r\n        try:\r\n            rz_company=''.join([one.strip() for one in response.xpath('//*/div[@id=\"invest-portfolio\"]//table//td/a/text()').extract()]).replace('详情','').replace('反馈','')\r\n        except:\r\n            rz_company =''\r\n        item['rz']=rz_time+'|'+rz_genre+'|'+rz_money+'|'+rz_company\r\n        try:\r\n            person_name_list = response.xpath('//*/a[@class=\"person-name\"]/text()').extract()\r\n            per_position_list = response.xpath('//*/div[@class=\"per-position\"]/text()').extract()\r\n            per_des_list = [one.strip() for one in response.xpath('//*/div[@class=\"per-des\"]/div/text()').extract()]\r\n            # Team info\r\n            per=[]\r\n            for person in zip(person_name_list, per_position_list, per_des_list):\r\n                per.append(','.join([one.strip().replace('\\n','').replace('\\r','').replace('\\t','') for one in list(person)]))\r\n            item['person']=''.join(per)\r\n        except Exception as e:\r\n            item['person'] = ''\r\n        mask_xpath=response.xpath('//*/div[@class=\"mask-panel\"]/ul/li')\r\n        # Product info\r\n        try:\r\n            mask = {}\r\n            for m in mask_xpath:\r\n                mask[m.xpath('./a[@class=\"product-name\"]/text()').extract()[0].strip()]=''.join(m.xpath('./div[@class=\"product-des line2\"]/text()').extract())\r\n            item['mask']=mask\r\n        except:\r\n            item['mask'] =''\r\n        compete_xpath=response.xpath('//*/div[@class=\"tab-content\"]//ul/li')\r\n        # Competing products\r\n        try:\r\n            compete={}\r\n            for c in compete_xpath:\r\n                c_l = []\r\n                c_l.append(c.xpath('./p//span/text()').extract()[1].strip())\r\n                c_l.append(c.xpath('./p//span/text()').extract()[-1].strip())\r\n                c_l.append(c.xpath('./div/text()').extract()[0].strip())\r\n                c_l.append(c.xpath('./i/span/text()').extract()[0].strip())\r\n                c_l.append(c.xpath('./i/span/text()').extract()[-1].strip())\r\n                compete[c.xpath('./p//span/text()').extract()[0]]=','.join(c_l)\r\n            item['compete']=compete\r\n        except:\r\n            item['compete']=''\r\n        # Business registration info\r\n        try:\r\n            mc = ''.join([one.strip() for one in response.xpath('//*/div[@class=\"icp-bussiness\"]//table/thead/tr/th/text()').extract()])\r\n        except:\r\n            mc=''\r\n        try:\r\n            zb=''.join([one.strip() for one in response.xpath('//*/div[@class=\"icp-bussiness\"]//table/tbody/tr[1]/td[1]/span//text()').extract()]).split(':')[-1]\r\n        except:\r\n            zb=''\r\n        try:\r\n            clsj=''.join([one.strip() for one in response.xpath('//*/div[@class=\"icp-bussiness\"]//table/tbody/tr[1]/td[2]/span//text()').extract()]).split(':')[-1]\r\n        except:\r\n            clsj=''\r\n        try:\r\n            frdb = ''.join([one.strip() for one in response.xpath('//*/div[@class=\"icp-bussiness\"]//table/tbody/tr[2]/td[1]/span//text()').extract()]).split(':')[-1]\r\n        except:\r\n            frdb=''\r\n        try:\r\n            gslx=''.join([one.strip() for one in response.xpath('//*/div[@class=\"icp-bussiness\"]//table/tbody/tr[2]/td[2]/span//text()').extract()]).split(':')[-1]\r\n        except:\r\n            gslx=''\r\n        try:\r\n            dz=''.join([one.strip() for one in response.xpath('//*/div[@class=\"icp-bussiness\"]//table/tbody/tr[3]/td/span//text()').extract()]).split(':')[-1]\r\n        except:\r\n            dz=''\r\n        item['busi'] =mc+'|'+zb+'|'+clsj+'|'+frdb+'|'+gslx+'|'+dz\r\n        # Trademark info\r\n        try:\r\n            item['brand']=','.join(response.xpath('//*/div[@class=\"brand-info\"]/p/text()').extract())\r\n        except:\r\n            item['brand'] = ''\r\n        # Contact info\r\n        try:\r\n            phone=''.join([one.strip() for one in 
response.xpath('//*/ul[@class=\"list-block aboutus\"]/li/i[@class=\"fa icon icon-phone-o\"]/following-sibling::*/text()').extract()])\r\n        except:\r\n            phone = ''\r\n        try:\r\n            email = ''.join([one.strip() for one in response.xpath('//*/ul[@class=\"list-block aboutus\"]/li/i[@class=\"fa icon icon-email-o\"]/following-sibling::*/text()').extract()])\r\n        except:\r\n            email = ''\r\n        try:\r\n            address = ''.join([one.strip() for one in response.xpath('//*/ul[@class=\"list-block aboutus\"]/li/i[@class=\"fa icon icon-address-o\"]/following-sibling::*/text()').extract()])\r\n        except:\r\n            address = ''\r\n        item['touch']=phone+'|'+email+'|'+address\r\n        # Original URL; used later for MD5-based deduplication\r\n        item['md5_url']=response.url\r\n        yield item","sub_path":"ITJUZI/spiders/investevent.py","file_name":"investevent.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227872383","text":"from sklearn.linear_model import LinearRegression\nx = [[12,2],[16,1],[20,0],[28,2],[36,0]]\ny = [[700],[900],[1300],[1750],[1800]]\n\nmodel = LinearRegression()\nmodel.fit(x,y)\n\nx_test = [[16,2],[18,0],[22,2],[32,2],[24,0]]\ny_test = [[1100],[850],[1500],[1800],[1100]]\n\nprices = model.predict(x_test)\nfor i, price in enumerate(prices):\n    print('Predicted:%s, Target:%s' % (price, y_test[i]))\n\nscore = model.score(x_test, y_test)\nprint('r-squared:', score)","sub_path":"aiAcademy/kaiki.py","file_name":"kaiki.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"100989068","text":"import os\nfrom subprocess import Popen, STDOUT, PIPE\nimport argparse\nimport re\nimport logging\nfrom gitmodules_parser import GitModulesParser as SubParser\n\n_logger = logging.getLogger(__name__)\n\nsubmodule_parser = SubParser()\n\nBASE_DIR = os.getcwd()\nDESC = \"\"\"\nSubmodule updater\n\"\"\"\n\n\n\nclass Updater():\n    \n    parser = argparse.ArgumentParser(description=DESC)\n    repo = \"\"\n    submodule = \"\"\n    main_branch = \"\"\n    submodule_branch = \"\"\n    auto_commit = False\n\n    \n    def __init__(self):\n        self.parser.add_argument(\n            'repo',\n            help=\"Main repository in which to update the submodule\"\n        )\n        self.parser.add_argument(\n            '-b',\n            '--branch',\n            metavar=\"<branch>\",\n            help=\"Branch of the main repository in which to update the submodule (defaults to master)\"\n        )\n        self.parser.add_argument(\n            'submodule',\n            help=\"the submodule to update\"\n        )\n        self.parser.add_argument(\n            '-sb',\n            '--submod-branch',\n            metavar=\"<branch>\",\n            help=\"the branch of the submodule to use for the update\"\n        )\n        self.parser.add_argument(\n            '-a',\n            '--auto-commit',\n            action=\"store_true\",\n            help=\"set to auto commit changes\"\n        )\n\n    def _parse(self):\n        args = self.parser.parse_args()\n        print(args)\n        self.repo = args.repo\n        self.submodule = args.submodule\n        self.main_branch = args.branch or 'master'\n        self.submodule_branch = args.submod_branch\n        self.auto_commit = args.auto_commit\n\n    def _get_repo_name(self):\n        name_match = re.search(r'\\/([A-Z]|[a-z]|[0-9]|[-,_])*', self.repo)\n        return name_match and name_match[0][1:]\n\n    def _exist_repo(self):\n        repo_folder_name = self._get_repo_name()\n        if not repo_folder_name:\n            raise Exception('%s is not a valid repository name' % self.repo)\n        return os.path.exists(os.path.join(BASE_DIR, repo_folder_name))\n\n    def run(self):\n        self._parse()\n        if self._exist_repo():\n            process = Popen(['rm','-rf', self._get_repo_name()], stdout=PIPE, stderr=PIPE)\n            _, error 
= process.communicate()\n            if error:\n                raise OSError('Error occurred erasing the following directory: %s' % error)\n            print('Directory %s erased successfully' % os.path.join(BASE_DIR, self._get_repo_name()))\n        # git clone\n        command_list = (\"git clone --branch %s %s\" % (self.main_branch, self.repo)).split(' ')\n        process = Popen(command_list)\n        process.communicate()\n        if process.returncode:\n            exit(process.returncode)\n        print(\"\\nCloned successfully from %s\\n\" % (self.repo))\n\n        # move into the cloned repository\n        os.chdir(os.path.join(BASE_DIR, self._get_repo_name()))\n        working_dir = os.getcwd()\n\n        # check for .gitmodules\n        modules_file_dir = os.path.join(working_dir, \".gitmodules\")\n        if not os.path.isfile(modules_file_dir):\n            raise Exception(\"No file named .gitmodules in %s\" % working_dir)\n        \n        if self.submodule_branch:\n            # substitute the submodule's branch in .gitmodules\n            with open(os.path.join(working_dir, '.gitmodules'), 'r+') as file:\n                submodule_parser.parse(file)\n                submodule_parser.submodules[self.submodule].branch = self.submodule_branch\n                file.seek(0)\n                file.truncate()\n                content = submodule_parser.serialize()\n                file.write(content)\n\n        \n        # update\n        update_command_list = (\"git submodule update --init --remote --recursive %s\" % self.submodule).split(' ')\n        upd_process = Popen(update_command_list)\n        upd_process.communicate()\n\n        if upd_process.returncode:\n            exit(upd_process.returncode)\n        print(\"\\nUpdated successfully\\n\")\n\n\n\n\n\nif __name__ == \"__main__\":\n    up = Updater()\n    up.run() ","sub_path":"submodule_updater/submodule_updater.py","file_name":"submodule_updater.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14583297","text":"# Module 4: Machine Learning using TensorFlow\n# Simple TF Model - Linear Regression Challenge\n\nimport tensorflow as tf\n\n# Step 1 Initial Setup\nx = tf.placeholder(tf.float32)\nW1 = tf.Variable([0.1],dtype=tf.float32)\nW2 = tf.Variable([0.1],dtype=tf.float32)\nb = tf.Variable([0.1],dtype=tf.float32)\ny = tf.placeholder(tf.float32)\n\nimport numpy as np\nX_train = np.linspace(-10.0,10.0,20)\ny_train = 2*X_train*X_train - 1 + 0.0*np.random.randn(len(X_train))\n\n# import matplotlib.pyplot as plt\n# plt.scatter(X_train,y_train)\n# plt.show()\n#\n# Step 2 Model\n\nyhat = W1*x*x+W2*x+b\n\n# Step 3 Loss Function\n\nloss = tf.reduce_sum(tf.square(yhat-y))\n\n# Step 4 Optimizer\n\noptimizer = tf.train.GradientDescentOptimizer(0.000001)\ntrain = optimizer.minimize(loss)\n\n# Step 5 Training Loop\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(5000):\n    sess.run(train,feed_dict={x:X_train,y:y_train})\n\n# Step 6 Results\n\nprint(sess.run(W1))\nprint(sess.run(W2))\nprint(sess.run(b))\n\nimport matplotlib.pyplot as plt\nplt.plot(X_train,y_train,'o')\nyhat = sess.run(W1*X_train*X_train+W2*X_train+b)\nplt.plot(X_train,yhat,'r')\nplt.show()\n","sub_path":"exercises/module4_1_regression_challenge.py","file_name":"module4_1_regression_challenge.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224315969","text":"# sentence.py\r\n\r\nprint(\"This program will accept a sentence of any length and return some general information about it.\")\r\n\r\ndef main():\r\n    sentence = input(\"Please enter your sentence here: \").lower()  # Gets a sentence as input from the user.\r\n    \r\n    num = len(sentence)  # Returns the number of characters in the sentence.\r\n    print(\"Number of 
characters: \", num)\r\n \r\n wordCount=len(sentence.split()) #Returns the number of words in the sentence.\r\n print(\"Number of words: \", wordCount)\r\n \r\n average = int(num/wordCount) #Calculates the average word length within the\r\n print(\"Average word length: \", average) #sentence, then returns that value.\r\n \r\n new = input(\"Would you like to analyze another sentence? y/n: \") #Checks if the user would like to analyze a new\r\n #sentence.\r\n if new == 'y':\r\n print(\"Follow the instructions to analyze a new sentence.\")\r\n main()\r\n else:\r\n print(\"Goodbye.\")\r\nmain()\r\n","sub_path":"sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312467405","text":"from typing import Tuple\n\n\nclass Solution:\n def solve(self, components: [Tuple[int, int]], player: [int, int]) -> str:\n def get_xdir(diff): return 'W' if diff < 0 else 'E' if diff > 0 else None\n def get_ydir(diff): return 'S' if diff < 0 else 'N' if diff > 0 else None\n\n moves = []\n for cx, cy in components:\n xdiff = cx - player[0]\n ydiff = cy - player[1]\n xdir = get_xdir(xdiff)\n ydir = get_ydir(ydiff)\n if xdir: moves.append((abs(xdiff), xdir))\n if ydir: moves.append((abs(ydiff), ydir))\n moves.append((1, 'P')) # initiate a pickup\n player[0] = cx\n player[1] = cy\n\n return ''.join(list(map(str, [count*val for count, val in moves])))\n\n\n\"\"\"\n Use this input:\n2\n2\n1 1\n0 0\n1 0\n The output should be: NPWSP\n\"\"\"\nif __name__ == \"__main__\":\n grid_size = int(input())\n num_of_components = int(input())\n components = []\n for i in range(num_of_components):\n cx, cy = input().rstrip().split()\n components.append((int(cx), int(cy)))\n player = list(map(int, input().rstrip().split()))\n\n print(Solution().solve(components, player))\n","sub_path":"tasks/coin_collector/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231100510","text":"import threading\n\nimport grpc\n\nimport etcd3.etcdrpc as etcdrpc\nimport etcd3.events as events\nimport etcd3.exceptions as exceptions\nimport etcd3.leases as leases\nimport etcd3.locks as locks\nimport etcd3.members\nimport etcd3.transactions as transactions\nimport etcd3.utils as utils\n\n\nclass Transactions(object):\n def __init__(self):\n self.value = transactions.Value\n self.version = transactions.Version\n self.create = transactions.Create\n self.mod = transactions.Mod\n\n self.put = transactions.Put\n self.get = transactions.Get\n self.delete = transactions.Delete\n\n\nclass Etcd3Client(object):\n def __init__(self, host='localhost', port=2379):\n self.channel = grpc.insecure_channel('{host}:{port}'.format(\n host=host, port=port)\n )\n self.kvstub = etcdrpc.KVStub(self.channel)\n self.watchstub = etcdrpc.WatchStub(self.channel)\n self.clusterstub = etcdrpc.ClusterStub(self.channel)\n self.leasestub = etcdrpc.LeaseStub(self.channel)\n self.maintenancestub = etcdrpc.MaintenanceStub(self.channel)\n self.transactions = Transactions()\n\n def _build_get_range_request(self, key,\n range_end=None,\n limit=None,\n revision=None,\n sort_order=None,\n sort_target='key',\n serializable=None,\n keys_only=None,\n count_only=None,\n min_mod_revision=None,\n max_mod_revision=None,\n min_create_revision=None,\n max_create_revision=None):\n range_request = etcdrpc.RangeRequest()\n range_request.key = 
utils.to_bytes(key)\n if range_end is not None:\n range_request.range_end = utils.to_bytes(range_end)\n\n if sort_order is None:\n range_request.sort_order = etcdrpc.RangeRequest.NONE\n elif sort_order == 'ascend':\n range_request.sort_order = etcdrpc.RangeRequest.ASCEND\n elif sort_order == 'descend':\n range_request.sort_order = etcdrpc.RangeRequest.DESCEND\n else:\n raise ValueError('unknown sort order: \"{}\"'.format(sort_order))\n\n if sort_target is None or sort_target == 'key':\n range_request.sort_target = etcdrpc.RangeRequest.KEY\n elif sort_target == 'version':\n range_request.sort_target = etcdrpc.RangeRequest.VERSION\n elif sort_target == 'create':\n range_request.sort_target = etcdrpc.RangeRequest.CREATE\n elif sort_target == 'mod':\n range_request.sort_target = etcdrpc.RangeRequest.MOD\n elif sort_target == 'value':\n range_request.sort_target = etcdrpc.RangeRequest.VALUE\n else:\n raise ValueError('sort_target must be one of \"key\", '\n '\"version\", \"create\", \"mod\" or \"value\"')\n\n return range_request\n\n def get(self, key):\n \"\"\"\n Get the value of a key from etcd.\n\n :param key: key in etcd to get\n :returns: value of key\n :rtype: bytes\n \"\"\"\n range_request = self._build_get_range_request(key)\n range_response = self.kvstub.Range(range_request)\n\n if range_response.count < 1:\n raise exceptions.KeyNotFoundError(\n 'the key \"{}\" was not found'.format(key))\n else:\n # smells funny - there must be a cleaner way to get the value?\n return range_response.kvs.pop().value\n\n def get_prefix(self, key_prefix, sort_order=None, sort_target='key'):\n \"\"\"\n Get a range of keys with a prefix.\n\n :param key_prefix: first key in range\n\n :returns: sequence of (key, value) tuples\n \"\"\"\n range_request = self._build_get_range_request(\n key=key_prefix,\n range_end=utils.increment_last_byte(utils.to_bytes(key_prefix)),\n sort_order=sort_order,\n )\n\n range_response = self.kvstub.Range(range_request)\n\n if range_response.count < 1:\n raise exceptions.KeyNotFoundError('no keys found')\n else:\n for kv in range_response.kvs:\n yield (kv.key, kv.value)\n\n def get_all(self, sort_order=None, sort_target='key'):\n \"\"\"\n Get all keys currently stored in etcd.\n\n :returns: sequence of (key, value) tuples\n \"\"\"\n range_request = self._build_get_range_request(\n key=b'\\0',\n range_end=b'\\0',\n sort_order=sort_order,\n sort_target=sort_target,\n )\n\n range_response = self.kvstub.Range(range_request)\n\n if range_response.count < 1:\n raise exceptions.KeyNotFoundError('no keys')\n else:\n for kv in range_response.kvs:\n yield (kv.key, kv.value)\n\n def _build_put_request(self, key, value, lease=None):\n put_request = etcdrpc.PutRequest()\n put_request.key = utils.to_bytes(key)\n put_request.value = utils.to_bytes(value)\n put_request.lease = utils.lease_to_id(lease)\n return put_request\n\n def put(self, key, value, lease=None):\n \"\"\"\n Save a value to etcd.\n\n :param key: key in etcd to set\n :param value: value to set key to\n :type value: bytes\n :param lease: Lease to associate with this key.\n :type lease: either :class:`.Lease`, or int (ID of lease)\n \"\"\"\n put_request = self._build_put_request(key, value, lease=lease)\n self.kvstub.Put(put_request)\n\n def replace(self, key, initial_value, new_value):\n \"\"\"\n Atomically replace the value of a key with a new value.\n\n This compares the current value of a key, then replaces it with a new\n value if it is equal to a specified value. 
This operation takes place\n in a transaction.\n\n :param key: key in etcd to replace\n :param initial_value: old value to replace\n :type initial_value: bytes\n :param new_value: new value of the key\n :type new_value: bytes\n :returns: status of transaction, ``True`` if the replace was\n successful, ``False`` otherwise\n :rtype: bool\n \"\"\"\n status, _ = self.transaction(\n compare=[self.transactions.value(key) == initial_value],\n success=[self.transactions.put(key, new_value)],\n failure=[],\n )\n\n return status\n\n def _build_delete_request(self, key,\n range_end=None,\n prev_kv=None):\n delete_request = etcdrpc.DeleteRangeRequest()\n delete_request.key = utils.to_bytes(key)\n\n if range_end is not None:\n delete_request.range_end = utils.to_bytes(range_end)\n\n if prev_kv is not None:\n delete_request.prev_kv = prev_kv\n\n return delete_request\n\n def delete(self, key):\n \"\"\"\n Delete a single key in etcd.\n\n :param key: key in etcd to delete\n \"\"\"\n delete_request = self._build_delete_request(key)\n self.kvstub.DeleteRange(delete_request)\n\n def delete_prefix(self, prefix):\n \"\"\"Delete a range of keys with a prefix in etcd.\"\"\"\n delete_request = self._build_delete_request(\n prefix,\n range_end=utils.increment_last_byte(utils.to_bytes(prefix))\n )\n return self.kvstub.DeleteRange(delete_request)\n\n def _build_watch_request(self, cv, key,\n range_end=None,\n start_revision=None,\n progress_notify=False,\n filters=None,\n prev_kv=False):\n cv.acquire()\n create_watch = etcdrpc.WatchCreateRequest()\n create_watch.key = utils.to_bytes(key)\n if range_end is not None:\n create_watch.range_end = utils.to_bytes(range_end)\n if start_revision is not None:\n create_watch.start_revision = start_revision\n if progress_notify:\n create_watch.progress_notify = progress_notify\n if filters is not None:\n create_watch.filters = filters\n if prev_kv:\n create_watch.prev_kv = prev_kv\n create_watch.progress_notify = True\n watch_requests = etcdrpc.WatchRequest(create_request=create_watch)\n yield watch_requests\n cv.wait()\n cv.release()\n\n def _build_watch_iterator(self, key_prefix,\n range_end=None,\n start_revision=None,\n progress_notify=False,\n filters=None,\n prev_kv=False):\n cv = threading.Condition()\n\n def cancel_watch():\n cv.acquire()\n cv.notify()\n cv.release()\n\n request = self._build_watch_request(\n cv, key_prefix,\n range_end=range_end,\n start_revision=start_revision,\n progress_notify=progress_notify,\n filters=filters, prev_kv=prev_kv)\n watcher = self.watchstub.Watch(request)\n for event in watcher:\n for e in event.events:\n event_obj = events.PutEvent(e.kv.key, e.kv.value, e.kv.version)\n yield (event_obj, cancel_watch)\n\n def watch(self, key,\n start_revision=None,\n progress_notify=False,\n filters=None,\n prev_kv=False):\n \"\"\"\n Watch a key.\n\n Example usage:\n\n .. code-block:: python\n\n for (event, cancel) in etcd.watch('/doot/key'):\n print(event)\n\n :param key: key to watch\n\n :returns: Iterator of ``(event, cancel)`` tuples.\n Use ``event`` to get the events of key changes and ``cancel``\n to cancel the watch request\n \"\"\"\n return self._build_watch_iterator(key,\n start_revision=start_revision,\n progress_notify=progress_notify,\n filters=filters,\n prev_kv=prev_kv)\n\n def watch_prefix(self, key_prefix,\n start_revision=None,\n progress_notify=False,\n filters=None,\n prev_kv=False):\n \"\"\"\n Watch a range of keys with a prefix.\n\n Example usage:\n\n .. 
code-block:: python\n\n for (event, cancel) in etcd.watch_prefix('/doot/keys'):\n print(event)\n\n :param key_prefix: key prefix to watch\n\n :returns: Iterator of ``(event, cancel)`` tuples.\n Use ``event`` to get the events of key changes and ``cancel``\n to cancel the watch request\n \"\"\"\n range_end = utils.increment_last_byte(utils.to_bytes(key_prefix))\n return self._build_watch_iterator(key_prefix,\n range_end=range_end,\n start_revision=start_revision,\n progress_notify=progress_notify,\n filters=filters,\n prev_kv=prev_kv)\n\n def add_watch_callback(self, key, callback,\n start_revision=None,\n progress_notify=False,\n filters=None,\n prev_kv=False):\n class Watcher(threading.Thread):\n def __init__(self, iterator, callback):\n super(Watcher, self).__init__()\n self.iterator = iterator\n self.callback = callback\n\n def run(self):\n for (event, cancel) in self.iterator:\n self._cancel = cancel\n self.callback(event)\n\n def cancel(self):\n self._cancel()\n\n iterator = self._build_watch_iterator(\n key,\n start_revision=start_revision,\n progress_notify=progress_notify,\n filters=filters,\n prev_kv=prev_kv)\n\n thread = Watcher(iterator, callback)\n thread.start()\n return thread\n\n def _ops_to_requests(self, ops):\n \"\"\"\n Return a list of grpc requests.\n\n Returns list from an input list of etcd3.transactions.{Put, Get,\n Delete} objects.\n \"\"\"\n request_ops = []\n for op in ops:\n if isinstance(op, transactions.Put):\n request = self._build_put_request(op.key, op.value, op.lease)\n request_op = etcdrpc.RequestOp(request_put=request)\n request_ops.append(request_op)\n\n elif isinstance(op, transactions.Get):\n request = self._build_get_range_request(op.key)\n request_op = etcdrpc.RequestOp(request_range=request)\n request_ops.append(request_op)\n\n elif isinstance(op, transactions.Delete):\n request = self._build_delete_request(op.key)\n request_op = etcdrpc.RequestOp(request_delete_range=request)\n request_ops.append(request_op)\n\n else:\n raise Exception(\n 'Unknown request class {}'.format(op.__class__))\n return request_ops\n\n def transaction(self, compare, success=None, failure=None):\n \"\"\"\n Perform a transaction.\n\n Example usage:\n\n .. 
code-block:: python\n\n etcd.transaction(\n compare=[\n etcd.transactions.value('/doot/testing') == 'doot',\n etcd.transactions.version('/doot/testing') > 0,\n ],\n success=[\n etcd.transactions.put('/doot/testing', 'success'),\n ],\n failure=[\n etcd.transactions.put('/doot/testing', 'failure'),\n ]\n )\n\n :param compare: A list of comparisons to make\n :param success: A list of operations to perform if all the comparisons\n are true\n :param failure: A list of operations to perform if any of the\n comparisons are false\n \"\"\"\n compare = [c.build_message() for c in compare]\n\n success_ops = self._ops_to_requests(success)\n failure_ops = self._ops_to_requests(failure)\n\n transaction_request = etcdrpc.TxnRequest(compare=compare,\n success=success_ops,\n failure=failure_ops)\n txn_response = self.kvstub.Txn(transaction_request)\n\n responses = []\n for response in txn_response.responses:\n response_type = response.WhichOneof('response')\n if response_type == 'response_put':\n responses.append(None)\n\n elif response_type == 'response_range':\n range_kvs = []\n for kv in response.response_range.kvs:\n range_kvs.append((kv.key, kv.value))\n\n responses.append(range_kvs)\n\n return txn_response.succeeded, responses\n\n def lease(self, ttl, lease_id=None):\n \"\"\"\n Create a new lease.\n\n All keys attached to this lease will be expired and deleted if the\n lease expires. A lease can be sent keep alive messages to refresh the\n ttl.\n\n :param ttl: Requested time to live\n :param lease_id: Requested ID for the lease\n\n :returns: new lease\n :rtype: :class:`.Lease`\n \"\"\"\n lease_grant_request = etcdrpc.LeaseGrantRequest(TTL=ttl, ID=lease_id)\n lease_grant_response = self.leasestub.LeaseGrant(lease_grant_request)\n return leases.Lease(lease_id=lease_grant_response.ID,\n ttl=lease_grant_response.TTL,\n etcd_client=self)\n\n def revoke_lease(self, lease_id):\n \"\"\"\n Revoke a lease.\n\n :param lease_id: ID of the lease to revoke.\n \"\"\"\n lease_revoke_request = etcdrpc.LeaseRevokeRequest(ID=lease_id)\n self.leasestub.LeaseRevoke(lease_revoke_request)\n\n def refresh_lease(self, lease_id):\n keep_alive_request = etcdrpc.LeaseKeepAliveRequest(ID=lease_id)\n request_stream = [keep_alive_request]\n for response in self.leasestub.LeaseKeepAlive(request_stream):\n yield response\n\n def get_lease_info(self, lease_id):\n # only available in etcd v3.1.0 and later\n ttl_request = etcdrpc.LeaseTimeToLiveRequest(ID=lease_id,\n keys=True)\n return self.leasestub.LeaseTimeToLive(ttl_request)\n\n def lock(self, name, ttl=60):\n \"\"\"\n Create a new lock.\n\n :param name: name of the lock\n :type name: string or bytes\n :param ttl: length of time for the lock to live for in seconds. 
The\n lock will be released after this time elapses, unless\n refreshed\n :type ttl: int\n :returns: new lock\n :rtype: :class:`.Lock`\n \"\"\"\n return locks.Lock(name, ttl=ttl, etcd_client=self)\n\n def add_member(self, urls):\n \"\"\"\n Add a member into the cluster.\n\n :returns: new member\n :rtype: :class:`.Member`\n \"\"\"\n member_add_request = etcdrpc.MemberAddRequest(peerURLs=urls)\n\n member_add_response = self.clusterstub.MemberAdd(member_add_request)\n member = member_add_response.member\n return etcd3.members.Member(member.ID,\n member.name,\n member.peerURLs,\n member.clientURLs,\n etcd_client=self)\n\n def remove_member(self, member_id):\n \"\"\"\n Remove an existing member from the cluster.\n\n :param member_id: ID of the member to remove\n \"\"\"\n member_rm_request = etcdrpc.MemberRemoveRequest(ID=member_id)\n self.clusterstub.MemberRemove(member_rm_request)\n\n def update_member(self, member_id, peer_urls):\n \"\"\"\n Update the configuration of an existing member in the cluster.\n\n :param member_id: ID of the member to update\n :param peer_urls: new list of peer urls the member will use to\n communicate with the cluster\n \"\"\"\n member_update_request = etcdrpc.MemberUpdateRequest(ID=member_id,\n peerURLs=peer_urls)\n self.clusterstub.MemberUpdate(member_update_request)\n\n @property\n def members(self):\n \"\"\"\n List of all members associated with the cluster.\n\n :type: sequence of :class:`.Member`\n\n \"\"\"\n member_list_request = etcdrpc.MemberListRequest()\n member_list_response = self.clusterstub.MemberList(member_list_request)\n\n for member in member_list_response.members:\n yield etcd3.members.Member(member.ID,\n member.name,\n member.peerURLs,\n member.clientURLs,\n etcd_client=self)\n\n def compact(self, revision, physical=False):\n \"\"\"\n Compact the event history in etcd up to a given revision.\n\n All superseded keys with a revision less than the compaction revision\n will be removed.\n\n :param revision: revision for the compaction operation\n :param physical: if set to True, the request will wait until the\n compaction is physically applied to the local database\n such that compacted entries are totally removed from\n the backend database\n \"\"\"\n compact_request = etcdrpc.CompactionRequest(revision=revision,\n physical=physical)\n self.kvstub.Compact(compact_request)\n\n def defragment(self):\n \"\"\"Defragment a member's backend database to recover storage space.\"\"\"\n defrag_request = etcdrpc.DefragmentRequest()\n self.maintenancestub.Defragment(defrag_request)\n\n\ndef client(host='localhost', port=2379):\n \"\"\"Return an instance of an Etcd3Client.\"\"\"\n return Etcd3Client(host=host, port=port)\n","sub_path":"etcd3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":20719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541915997","text":"# Authors: Michael Greer, Matthew Shepherd\n\n# IMPORTANT FOR CIRCLE DEFINITION:\n# We defined our unit circle as a mirror image of the standard unit circle.\n# Circle starts at left and goes clockwise\n# Code reflects this\n\n###############################################################################################################\n\n# -*- coding: utf-8 -*-\n\n# Import statements\n\nimport pygame \t\t\t# Graphics and Drawing Module\nimport serial\t\t\t# Serial Library\nimport time\t\t\t\t# For delays\nimport math\t\t\t\t# sin, cos, etc\nimport struct\t\t\t# For converting byte to float\n\nimport os\n\n# Is this a test or 
not\ntest = True\n\n# Initialize serial\nif (not test): \n\tser = serial.Serial('/dev/ttyUSB0',9600)\n\n# Draws pointer on dials\ndef draw_indicator(angle,length,center_x,center_y):\n\n\tx_len = math.cos(math.radians(angle))*float(length) # Finds the x and y components of the length\n\ty_len = math.sin(math.radians(angle))*float(length) \n\n\tx_pos = center_x - x_len # Finds the x and y coordinates of the pointer tip\n\ty_pos = center_y - y_len\n\t\n\tinner_x_pos = int(center_x-(.6*x_len))\t\t\t\t\t\t\t\t\t\t# x coordinate of inside point\n\tinner_y_pos = int(center_y-(.6*y_len))\n\n\tpygame.draw.line(screen,red,(inner_x_pos,inner_y_pos),(x_pos,y_pos),10)\n\n# Draws tick marks along the outside of circles\ndef draw_tick_marks(startAngle,stopAngle,numMarks,center_x,center_y,radius):\n\n\tangle_diff = stopAngle-startAngle\t\t\t\t\t\t\t\t\t\t\t\t# Value of the difference between the start and stop angles\n\tspacing = float(angle_diff)/float(numMarks-1)\t\t\t\t\t\t\t\t\t# Angle spacing between each mark\n\n\tfor mark in range(numMarks): \t\t\t\t\t\t\t\t\t\t\t\t\t# Loops through each tick mark\n\t\tcurrent_angle=startAngle+(spacing*float(mark))\t\t\t\t\t\t\t\t# Current angle for this tick mark\n\t\ty_len = math.sin(math.radians(current_angle))*radius\t\t\t\t\t\t# y component of length\n\t\tx_len = math.cos(math.radians(current_angle))*radius\t\t\t\t\t\t# x component of length\n\n\t\tx_pos = int(center_x - x_len)\t\t\t\t\t\t\t\t\t\t\t\t# x coordinate of outside point\n\t\ty_pos = int(center_y - y_len)\t\t\t\t\t\t\t\t\t\t\t\t# y coordinate of outside point\n\n\t\tinner_x_pos = int(center_x-(.9*x_len))\t\t\t\t\t\t\t\t\t\t# x coordinate of inside point\n\t\tinner_y_pos = int(center_y-(.9*y_len))\t\t\t\t\t\t\t\t\t\t# y coordinate of inside point\n\n\t\tnum_x_pos = int(center_x-(.8*x_len))\n\t\tnum_y_pos = int(center_y-(.8*y_len))\n\n\t\t#print x_pos, y_pos, inner_x_pos, inner_y_pos\t\t\t\t\t\t\t\t# debug\n\n\t\tpygame.draw.line(screen,white,(x_pos,y_pos),(inner_x_pos,inner_y_pos),6)\t# draws tick mark\n\n\t\tnum = font.render(str(mark),1,white)\n\n\t\t(num_width,num_height) = font.size(str(num))\n\n\t\tscreen.blit(num,(num_x_pos-5,num_y_pos-(num_height/2)))\n\n\n# Draws redline on outside of circle\ndef draw_redline_arc(startAngle,stopAngle,center_x,center_y,radius):\n\n\trect = (center_x-radius,center_y-radius,2*radius,2*radius)\t\t# Defines the rectangle to draw arc in\n\n\tstart_radians = math.radians((-stopAngle)+180)\t\t\t\t\t# Converts between our \"unit circle\" and the standard unit circle\n\tstop_radians = math.radians((-startAngle)+180)\n\n\tpygame.draw.arc(screen,red,rect,start_radians,stop_radians,10)\t# Draws Arc\n\n# Logo Sprite BUG: Refuses to load an image as a sprite\n# logo = pygame.sprite.Sprite()\n# logo.image = pygame.Surface((100,100))\n# logo.image.fill((255,255,255))\n# logo.image.set_colorkey((0,0,0))\n# logo.rect = (100,100,100,100)\n\n# Draws all parts of display that are not data-dependent\ndef draw_screen():\n\n#\tDraw dial\n\tpygame.draw.circle(screen, lgrey, (160, 240), 210, 0)\n\tpygame.draw.circle(screen, black, (160, 240), 200, 0)\n\tdraw_redline_arc(305,315,160,240,200)\n\tpygame.draw.rect(screen, lgrey, (0,100,20,280))\n\tpygame.draw.ellipse(screen, black, (8, 100, 20, 280), 0)\n\tdraw_tick_marks(45,315,14,160,240,200)\n# \tpygame.draw.rect(screen, green, (80,240,160,80)) RPM Font Box\n#\tscreen.blit(logo.image,logo.rect)\n\t\n#\tDraw rectangles\n\tpygame.draw.rect(screen, lgrey, (440,10,320,210))\n\tpygame.draw.rect(screen, green, 
(450,20,300,190))\n\tpygame.draw.rect(screen, lgrey, (440,260,320,210))\n\tpygame.draw.rect(screen, green, (450,270,300,190))\n\t\n#\tDraw logo\n# \tscreen.blit(logo.image,logo.rect)\n\t\n\n# maps a variable from one space to another\ndef linear_transform(input,rangeOneStart,rangeOneEnd,rangeTwoStart,rangeTwoEnd):\n\n\treturn int((input-rangeOneStart)*(float(rangeTwoEnd-rangeTwoStart)/float(rangeOneEnd-rangeOneStart))+rangeTwoStart)\n\n\n# All code taken from Thomas Kelly's implementation of readData() in serial_thread.py\ndef readData():\n\tglobal ser\n\tglobal rpm, engineLoad, throttle, temp, oxygen, speed, gear, volts\n\tser.flush()\n\tif (ser.inWaiting() > 0):\n\t\tdata = ser.read()\n\t\tif (data == bytes(b'!')):\n\t\t\tdata = ser.read()\n\n\t\t\t# Packet Headers:\n\t\t\t# 0x30 : RPMs\n\t\t\t# 0x31 : Engine Load\n\t\t\t# 0x32 : throttle\n\t\t\t# 0x33 : Coolant Temp (F)\n\t\t\t# 0x34 : O2 level\n\t\t\t# 0x35 : Vehicle Speed (The shitty one from the ECU anyway)\n\t\t\t# 0x36 : Gear (Again, shitty ECU version)\n\t\t\t# 0x37 : Battery Voltage\n\n\t\t\tif (data == bytes(b'0')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = struct.unpack('>f',ser.read(4))[0]\n\t\t\t\t#print(payload)\n\t\t\t\trpm = payload\n\n\t\t\telif (data == bytes(b'1')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = struct.unpack('>f',ser.read(4))[0]\n\t\t\t\tengineLoad = payload\n\n\t\t\telif (data == bytes(b'2')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = struct.unpack('>f',ser.read(4))[0]\n\t\t\t\tthrottle = payload\n\n\t\t\telif (data == bytes(b'3')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = struct.unpack('>f',ser.read(4))[0]\n\t\t\t\ttemp = payload\n\n\t\t\telif (data == bytes(b'4')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = struct.unpack('>f',ser.read(4))[0]\n\t\t\t\toxygen = payload\n\n\t\t\telif (data == bytes(b'5')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = struct.unpack('>f',ser.read(4))[0]\n\t\t\t\tspeed = payload\n\n\t\t\telif (data == bytes(b'6')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = int(list(ser.read())[0])\n\t\t\t\t#print(payload)\n\t\t\t\tgear = payload\n\n\t\t\telif (data == bytes(b'7')):\n\t\t\t\ttimestamp = struct.unpack('>I',ser.read(4))[0]\n\t\t\t\tpayload = struct.unpack('>f',ser.read(4))[0]\n\t\t\t\tvolts = payload\n\n\t\t\telse:\n\t\t\t\tprint(\"ERROR: Corrupted Data\")\n\t\telse:\n\t\t\tpass\n\telse:\n\t\tpass\n\n# Smooths rpm readout\ndef smooth_rpm():\n\tglobal rpm, display_rpm\n\n\tdisplay_rpm += (rpm-display_rpm)/2\n\n\n############# Color Definitions\nred = \t(255,0,0)\nblack = (0,0,0)\ngrey = \t(100,100,100)\nlgrey=\t(150,150,150)\ngreen = (0,120,0)\nwhite = (255,255,255)\n\ndef rpmColor(n):\n\tinpt = linear_transform(n,0,13000,0,255)\n\tif (inpt < 100):\n\t\treturn (\t\t250,\t\t\t\t\t250,\t\t\t\t\t250)\n\telif (inpt < 150):\n\t\treturn (\t\t250-3*(inpt-100),\t\t250-(inpt-100),\t\t250-5*(inpt-100))\n\telif (inpt < 200):\n\t\treturn (\t\t100+2*(inpt-150),\t\t200-(inpt-150),\t\t\t0)\n\telif (inpt < 250):\n\t\treturn (\t\t200+(inpt-200),\t\t150-3*(inpt-200),\t\t0)\n\telse:\n\t\treturn (\t\t250,\t\t\t\t\t0,\t\t\t\t\t\t0)\n\t\n###############################\n\npygame.init()\n\ndisplay_size=width, height=800,480 # Size of the Adafruit screen\n\nscreen = pygame.display.set_mode(display_size)\n\n#pygame.display.toggle_fullscreen() # Sets display mode to full screen\n\n# 
Display Logo\n\n#img = pygame.image.load(\"WURacing-Logo-Big.png\")# \n# \n#img = pygame.transform.scale(img, (600,480))\n\nscreen.fill(green)\n\n# screen.blit(img, (100,0))\n\npygame.display.flip()\n\ntime.sleep(5)\n\nfont = pygame.font.Font(\"fonts/monaco.ttf\", 24)\n\nscreen.fill(grey)\n\npygame.draw.circle(screen, black, (160, 240), 200, 0)\n\ndisplay_font = pygame.font.Font(\"fonts/monaco.ttf\", 120)\n\nrpm_font = pygame.font.Font(\"fonts/monaco.ttf\", 40)\n\ndraw_tick_marks(45,315,14,160,240,200)\n\n# Overarching state variables\nrpm = 0.0\ndisplay_rpm = 0.0\nengineLoad = 0.0\nthrottle = 0.0\ntemp = 0.0\noxygen = 0.0\nspeed = 0.0\ngear = 0\nvolts = 0.0\n\n\n\n\n\n# Test code\nif (test):\n\twhile 1:\n\t\tfor i in range(0,13000,50):\n\t\t\tinpt = linear_transform(i,0,13000,45,315)\n\t\t\t\n\t\t\tdraw_screen()\n\n\t\t\tdraw_indicator(inpt,190,160,240)\n\n\t\t\tangle = display_font.render(str(inpt)+u'\\N{DEGREE SIGN}',1,white)\n\t\t\ttxtrpm = rpm_font.render(str(i),1,rpmColor(i))\n\n\t\t\tscreen.blit(angle,(470,40))\n\t\t\tscreen.blit(txtrpm,(100,260))\n\n\t\t\tpygame.display.update()\n\n# Gets serial values and animates the dashboard\nif (not test):\n\t\n\twhile (True):\n\n\t\t# Animate using new data\n\t\tdraw_screen()\n\n\t\tsmooth_rpm()\n\t\t\n\t\tdraw_indicator(linear_transform(display_rpm,0,13000,45,315),190,160,240)\n\n\t\ttext = display_font.render(str(temp) + u'\\N{DEGREE SIGN}',1,white)\n\n\t\ttxtrpm = rpm_font.render(str(int(rpm)),1,rpmColor(rpm))\n\n\t\tscreen.blit(text,(470,40))\n\t\tscreen.blit(txtrpm,(100,100))\n\n\t\tpygame.display.update()\n\n\t\treadData()\n\n\t\t#print (\"end of while loop\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"2017_Dashboard/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594185212","text":"\"\"\"\nThis script processes the \"genomic\" output from populations,\nturning the genotype encoding into a proportion of homozygosity\nand removing SNPs that are homozygous or missing in mothers\nfrom their respective offspring. Since \"genomic\" output files from\npopulations are typically huge, the script takes quite a long time\nto run, although it is parallelized and will automatically use all\ncores available on the host. It takes 2 arguments: the path to the\nfolder containing populations files, used as input, and the folder where\nthe output will be written.\n\"\"\"\n# Cyril Matthey-Doret\n# 11.08.2017\n\nfrom os import path, walk\nimport re\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom multiprocessing import Pool  # Parallel computing support\nfrom functools import partial  # \"freeze\" arguments when mapping function\n\n\n# ========== PARSING COMMAND LINE ARGUMENTS ==========#\n\nparser = argparse.ArgumentParser(description=\"This script processes the \\\n                                 'genomic' output from STACKS's populations \\\n                                 module to compute the proportion of \\\n                                 homozygous individuals at each genomic \\\n                                 position.\")\nparser.add_argument('pop_files', type=str,\n                    help='Path to the folder containing \\\n                    populations output files. 
Used as input')\nparser.add_argument('out', type=str,\n                    help='Folder where output will be written')\nparser.add_argument('--keep_all', action='store_true',\n                    help='Keep all SNPs, even if missing or\\\n                    homozygous in the mother.')\nparser.add_argument('--pool_output', action='store_true',\n                    help='Pool output instead of computing \\\n                    proportions per family')\nparser.add_argument('--sample_list', type=str,\n                    help='File containing the list of samples with family, \\\n                    name, sex and generation information. Defaults to \\\n                    \"data/individuals.tsv\".', default='data/individuals.tsv')\n\nargs = parser.parse_args()\n\n# ========== DEFINING FUNCTIONS ==========#\n\n\ndef unify_genomic(pop_path, pop):\n    \"\"\"\n    Reads populations \"genomic\" output files from each family's folder and\n    gathers them into a single pandas DataFrame object.\n    :param pop_path: Path containing the family subfolders containing\n    populations output files.\n    :param pop: dataframe containing names of all individuals in correct final\n    order.\n    :returns: A DataFrame containing all sites in all individuals.\n    \"\"\"\n\n    # pop['real_idx'] = pop.index\n    # Adding trailing slash if not provided\n    if pop_path[-1] != '/':\n        pop_path += '/'\n    first_fam = True\n    # Iterating over family subfolders\n    for subdir, dirs, files in walk(pop_path):\n        if path.basename(subdir):\n            # Read file from subfolders\n            sum_file = path.join(subdir, \"batch_1.sumstats.tsv\")\n            with open(sum_file, 'r') as f:\n                # Count population rows starting at -1 to exclude header line\n                pops = -1\n                for line in f:\n                    if re.search('^#', line):\n                        pops += 1\n            # Reading only population rows\n            fam_samples = pd.read_csv(sum_file, sep='\\t',\n                                      nrows=pops, header=None)\n            # Get individual names in original order\n            f_names_nest = [fam_samples.iloc[:, 1][n].split(',') for n in\n                            range(pops)]\n            # Flattening nested lists while maintaining order\n            fam_names = [item for nest in f_names_nest for item in nest]\n            fam_names = pd.DataFrame({'Name': fam_names})\n            # Get final index of names\n            tmp_pop = fam_names.merge(pop, on='Name', how='inner')\n            # Incrementing index by 3 since there are 3 columns before indv\n            # tmp_idx = tmp_idx.real_idx + 3\n            tmp = pd.read_csv(path.join(subdir, \"batch_1.genomic.tsv\"),\n                              sep='\\t', header=None, skiprows=1)\n            # Rename sample columns with final indices\n            tmp.rename(columns={x: tmp_pop.Name[x-3] for x in\n                                range(3, tmp.shape[1])}, inplace=True)\n            if first_fam:\n                # Assign dataframe on first iteration only\n                united = tmp\n                first_fam = False\n            else:\n                # Graft each family's samples onto final df as new columns\n                # Using outer merge on chromosome, bp and Locus ID\n                united = united.merge(tmp, how='outer', on=range(3))\n    united = united.fillna(0)  # Change all NAs to the \"missing\" code\n    return united\n\n\ndef gen_decode(encoded):\n    \"\"\"\n    This function decodes numeric genotypes and\n    replaces them with E (heterozygous), O (homozygous)\n    or M (missing).\n    :returns: A dataframe containing the genotype letters.\n    \"\"\"\n\n    genodict = {}\n    for code in range(11):\n        # Genotype encoding in genomic output from populations:\n        # Missing bases are encoded as 0\n        # Homozygous genotypes are: 1,5,8,10\n        # Heterozygous genotypes are all others (except 0)\n        # Building dictionary for numeric genotype translation\n        if code in [1, 5, 8, 10]:  # Homozygous codes\n            genodict[code] = ''.join(['O',str(code)])\n        elif code == 0:  # Missing\n            genodict[code] = 'M'\n        else:\n            genodict[code] = 'E'  # All others are heterozygous\n    # Rarely, rows are filled with this value. 
I assume this is a STACKS issue.\n    decoded = encoded.applymap(lambda r: genodict.get(r, 'M'))\n    return decoded\n\n\ndef mother_hom(geno, pop):\n    \"\"\"\n    This function runs on a numpy array that has already been\n    transformed with gen_decode and sets sites that are homozygous/missing in\n    mothers to missing in their whole family. If the mother is not available,\n    sites that are homozygous or missing in all offspring in the family are\n    used instead as a proxy.\n    :param pop: a dataframe containing individual names and their respective\n    families. The names need to be in the same order as the columns in geno.\n    :param geno: a numpy array that will be processed\n    \"\"\"\n\n    for f in np.unique(pop.Family):  # Iterate over families\n        fam = pop.loc[pop.Family == f, :]  # Subsetting samples from family\n        # Get the mother's name, if present\n        mother_name = fam.Name[fam.Generation == 'F3'].tolist()\n        # hom/missing mother sites\n        fam_SNP = np.where(geno.loc[:, mother_name] != 'E')[0]\n        if not mother_name:  # If the mother is not available\n            # Use sites where no individual in the fam is heterozygous instead\n            fam_df = pd.DataFrame(geno[fam.Name.tolist()].T)\n            # Count number of diff genotypes among offspring (E, M, O1, O2)\n            fam_SNP = fam_df.apply(lambda x: len(x.unique()))\n            # If at least one offspring is het: SNP used automatically\n            fam_SNP += (fam_df == \"E\").sum() * 3\n            # If missing among genotypes, decrease number by one for the SNP\n            fam_SNP -= (fam_df == 'M').any()\n\n            # Include SNP if number of genotypes > 1 (het = auto include)\n            fam_SNP = fam_SNP[fam_SNP < 2].index.tolist()\n\n        # Change those sites to M in all indv with same family\n        geno.loc[fam_SNP, fam.Name] = 'M'\n        # If using pandas <0.19, the above line will fail. The loop below can\n        # be used instead, but is much slower\n        # for snp in fam_SNP:\n        #     for family in fam.Name:\n        #         geno.set_value(snp, family, 'M')\n    return geno\n\n\ndef prop_hom(pop, geno):\n    \"\"\"\n    This function computes the proportion of homozygous individuals (cols) at\n    each SNP (row) in a numpy array containing decoded allelic state (O,E,M).\n    It computes this proportion both by sex, and in all individuals.\n    :param geno: Pandas DataFrame with sites as rows and individuals as cols.\n    :param pop: Dataframe containing the sex of each individual and its name.\n    :returns: a Pandas DataFrame object with the proportion of homozygous\n    females, males and all individuals at each site and the number of\n    individuals where it was present.\n    \"\"\"\n    # Suppresses warning when numpy divides by 0\n    np.seterr(divide='ignore', invalid='ignore')\n    # Number of males and females\n    # N = {sex:pop.Sex[pop.Sex == sex].shape[0] for sex in ['M','F']}\n    N = {'M': pop.Sex[pop.Sex == 'M'].shape[0],\n         'F': pop.Sex[pop.Sex == 'F'].shape[0]}\n    # Get sample names by sex\n    sex_id = {'M': pop.Name[pop.Sex == 'M'],\n              'F': pop.Name[pop.Sex == 'F']}\n    \n    # Collapse specific homozygous genotypes into a single 'O' state (to account for recombination)\n    geno = geno.replace(['O1','O5','O8', 'O10'], \"O\")\n    # Counting how many individuals are used to compute proportion at each SNP\n    sample_size = {}  # Number of individuals in which each site is found\n    hom = {}  # proportion of individuals in which each site is homozygous\n    for sex in N:\n        # Looping over sexes\n        dff = {}\n        for t in ['O', 'M', 'E']:\n            dff[t] = (geno.loc[:, sex_id[sex].values] == t).T.sum().astype(float)\n        sample_size[sex] = dff['E']+dff['O']\n        hom[sex] = np.divide(dff['O'], (dff['O'] + dff['E']))\n\n    # Building output dataframe with all relevant stats\n    out_df = pd.DataFrame({\n        
\"N.Samples\": sample_size['F'] + sample_size['M'],\n \"Prop.Hom\": ((sample_size['M'] * hom['M'].fillna(0) +\n sample_size['F'] * hom['F'].fillna(0)) /\n (sample_size['F'] + sample_size['M'])).round(3),\n \"N.Males\": sample_size['M'],\n \"N.Females\": sample_size['F'],\n \"Prop.Hom.F\": hom['F'].round(3),\n \"Prop.Hom.M\": hom['M'].round(3)\n })\n return out_df\n\n\ndef split_fam_prop(df, pop, parallel=True):\n \"\"\"\n This function is intended as a wrapper for prop_hom, so that it will only\n compute stats per family and return family info associated with every stat.\n :param df: Pandas DataFrame with sites as rows and individuals as columns.\n :param pop: Dataframe containing the sex, name and Family of each\n individual in the same order as the df individuals columns.\n :param parallel: Boolean value. Should the script exploit multiple cores if\n available ?\n :returns: a Pandas DataFrame object with an added family column and at each\n site for each family, the proportion of homozygous females, males and all\n individuals and the number of individuals where it was present.\n \"\"\"\n\n df_list = [] # List to contain each family's df\n for fam in pop.Family.unique():\n # Iterating over families and subsetting df for each\n fam_id = pop.Name[pop.Family == fam]\n genofam = df.loc[:, fam_id]\n popfam = pop.loc[pop.Name.isin(fam_id), :]\n if parallel:\n fam_df = parallel_func(prop_hom, genofam, f_args=(popfam,))\n else:\n fam_df = prop_hom(popfam, genofam)\n fam_df[\"Family\"] = fam\n df_list.append(fam_df)\n\n return pd.concat(df_list)\n\n\ndef parallel_func(f, df, f_args=[], chunk_size=1000):\n \"\"\"\n Parallelizes a function that runs on a dataframe by splitting the dataframe\n into small chunks by rows and distributing chunks across several processes.\n :param f: Target function that will be parallelized\n :param df: pandas dataframe to be used as input\n :param f_args: optional arguments for the function to be parallelized. Need\n to be an iterable (list or tuple).\n :param chunk_size: size of the chunks in which df is split. Default=1000\n :returns: the processed dataframe reconstructed by combining output from\n all processes\n \"\"\"\n\n # Create pool of processes, size depends on number of core available\n pool = Pool(processes=4)\n tot_rows = df.shape[0]\n chunks = range(0, tot_rows, chunk_size) # Start positions of chunks\n # Split df into chunks\n chunked_df = [df.iloc[c: (c+min(chunk_size, tot_rows)), ] for c in chunks]\n func = partial(f, *f_args) # Unpacking optional fixed arguments.\n result = pool.map(func, chunked_df) # Mapping function to chunks.\n # Concatenating into single df. 
Order is preserved\n pool.terminate()\n\n return pd.concat(result)\n\n# ========== LOADING AND PROCESSING DATA ==========#\n# Path to STACKS populations folder and output file\n\n\nin_path = args.pop_files\nout_prefix = 'grouped_'\nif args.pool_output:\n out_prefix += \"outpool_\"\nif args.keep_all:\n out_prefix += \"keep_\"\nout_path = path.join(args.out, (out_prefix + \"prophom.tsv\"))\nindv_path = args.sample_list # family and sex information\nindv = pd.read_csv(indv_path, sep='\\t') # Family and sex info\n# Preparing data structure to match sample names and families with columns\n\n\ntry:\n # Names in correct order\n samples = pd.read_csv(path.join(in_path, \"batch_1.sumstats.tsv\"),\n sep='\\t', nrows=2, header=None)\n\n # Concatenating populations\n names = samples.iloc[:, 1][0].split(',') + \\\n samples.iloc[:, 1][1].split(',')\nexcept pd.errors.ParserError:\n # In case only 1 sex is present\n samples = pd.read_csv(path.join(in_path, \"batch_1.sumstats.tsv\"),\n sep='\\t', nrows=1, header=None)\n\n # Concatenating populations\n names = samples.iloc[:, 1][0].split(',')\n\nnames = pd.DataFrame({'Name': names})\n# Adding family and sex, keeping order\npop = names.merge(indv, on='Name', how='left')\ngenomic = pd.read_csv(path.join(in_path, \"batch_1.genomic.tsv\"),\n sep='\\t', header=None, skiprows=1)\n# select only samples cols and reindex from 0\ngen_indv = genomic.iloc[:, 3:].T.reset_index(drop=True).T\n# Replacing numeric header with corresponding sample name\ngen_indv.rename(columns=lambda x: pop.Name[x], inplace=True)\nprint(\"Processing {0} sites across {1} samples.\".format(gen_indv.shape[0],\n gen_indv.shape[1]))\nprint(\"files loaded\")\n\n# ========== RUNNING CODE ==========#\n# Decoding numeric genotypes into states (het, hom, missing)\nstate = parallel_func(gen_decode, gen_indv)\nprint(\"genotypes decoded\")\n# Will run unless user explicitly set the --keep_all parameter\nif not args.keep_all:\n # Remove SNPs that are hom./missing in mothers from their family\n state = mother_hom(state, pop)\n print(\"Mother homozygous and missing sites removed\")\n# Computing proportion of homozygous indv at each site\nif args.pool_output:\n prop = prop_hom(pop, state)\nelse:\n prop = split_fam_prop(state, pop, parallel=False)\nprint(\"homozygosity stats calculated\")\n\n# ========== SAVING OUTPUT ==========#\n# Merging Chromosomal positions with proportion of homozygosity into 1 df\nprop = genomic.iloc[:, 0: 3].merge(prop, left_index=True, right_index=True)\nstate = genomic.iloc[:, 0: 3].merge(state, left_index=True, right_index=True)\nprop.rename(columns={0: \"Locus.ID\", 1: \"Chr\", 2: \"BP\"}, inplace=True)\nstate.rename(columns={0: \"Locus.ID\", 1: \"Chr\", 2: \"BP\"}, inplace=True)\nstate_path = path.join(args.out,\n (out_prefix.replace(\"outpool_\", \"\") + \"geno_EOM.tsv\"))\nstate = state.replace(['O1','O5','O8', 'O10'], \"O\")\nstate.to_csv(state_path, sep='\\t', index=False, na_rep='NA')\nprop.to_csv(out_path, sep='\\t', index=False, na_rep='NA')\nprint(\"Output saved to {0}\".format(out_path))\n","sub_path":"src/assoc_mapping/process_genomic.py","file_name":"process_genomic.py","file_ext":"py","file_size_in_byte":15500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4998530","text":"#!/usr/bin/python3\n\n####################################################################################################\n# Orion\n# \n# Copyright (C) 2020, Roshan J. 
Samuel\n#\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n####################################################################################################\n\n# Import all necessary modules\nfrom orion import meshData as grid\nfrom orion import globalVars as gv\nimport numpy as np\n\n############################### GLOBAL VARIABLES ################################\n\n# Array of grid sizes as tuples corresponding to each level of the V-Cycle\nN = [(grid.sLst[x[0]], grid.sLst[x[2]]) for x in [gv.sInd - y for y in range(gv.VDepth + 1)]]\n\n# Define array of grid spacings along X\nhx = [1.0/(x[0]-1) for x in N]\n\n# Define array of grid spacings along Z\nhz = [1.0/(x[1]-1) for x in N]\n\n# Square of hx, used in finite difference formulae\nhx2 = [x*x for x in hx]\n\n# Square of hz, used in finite difference formulae\nhz2 = [x*x for x in hz]\n\n# Product of hx2 and hz2, used in finite difference formulae\nhzhx = [hx2[i]*hz2[i] for i in range(gv.VDepth + 1)]\n\n# Maximum number of iterations while solving at coarsest level\nmaxCount = 10*N[-1][0]*N[-1][1]\n\n# Integer specifying the level of V-cycle at any point while solving\nvLev = 0\n\n# Flag to determine whether homogeneous (zero) BC has to be applied or not\nzeroBC = False\n\n############################## MULTI-GRID SOLVER ###############################\n\ndef multigrid(P, H):\n    global N\n    global pData, rData, sData\n\n    chMat = np.zeros(N[0])\n    for i in range(gv.VDepth):\n        pData[i].fill(0.0)\n        rData[i].fill(0.0)\n        sData[i].fill(0.0)\n\n    pData[0][1:-1, 1:-1] = P[1:-1, 1:-1]\n    rData[0] = H[1:-1, 1:-1]\n\n    for i in range(gv.vcCnt):\n        v_cycle()\n\n        if gv.testPoisson:\n            chMat = laplace(pData[0])\n\n            resVal = np.amax(np.abs(H[1:-1, 1:-1] - chMat))\n            print(\"Residual after V-Cycle {0:2d} is {1:.4e}\".format(i+1, resVal))\n\n            errVal = np.amax(np.abs(pAnlt[1:-1, 1:-1] - pData[0][1:-1, 1:-1]))\n            print(\"Error after V-Cycle {0:2d} is {1:.4e}\\n\".format(i+1, errVal))\n\n    P[1:-1, 1:-1] = pData[0][1:-1, 1:-1]\n\n\n# Multigrid V-cycle without the use of recursion\ndef v_cycle():\n    global vLev, zeroBC\n\n    vLev = 
0\n zeroBC = False\n\n # Pre-smoothing\n smooth(gv.preSm)\n\n zeroBC = True\n for i in range(gv.VDepth):\n # Compute residual\n calcResidual()\n\n # Copy smoothed pressure for later use\n sData[vLev] = np.copy(pData[vLev])\n\n # Restrict to coarser level\n restrict()\n\n # Reinitialize pressure at coarser level to 0 - this is critical!\n pData[vLev].fill(0.0)\n\n # If the coarsest level is reached, solve. Otherwise, keep smoothing!\n if vLev == gv.VDepth:\n if gv.solveSol:\n solve()\n else:\n smooth(gv.preSm + gv.pstSm)\n else:\n smooth(gv.preSm)\n\n # Prolongation operations\n for i in range(gv.VDepth):\n # Prolong pressure to next finer level\n prolong()\n\n # Add previously stored smoothed data\n pData[vLev] += sData[vLev]\n\n # Apply homogenous BC so long as we are not at finest mesh (at which vLev = 0)\n if vLev:\n zeroBC = True\n else:\n zeroBC = False\n\n # Post-smoothing\n smooth(gv.pstSm)\n\ndef vectorJacobi2D2RedUpdate(p, r, ic1, ic2, ia1, ia2):\n global hx2, hz2, hzhx\n global xixx, xix2, ztzz, ztz2\n \n jFactor2D = 2*((xix2[vLev][ia1, ia1]/hx2[vLev]) + (ztz2[vLev][ia1, ia1]/hz2[vLev]))\n p[ic1, ic1] = (-r[ia1, ia1] + \\\n (p[ia1, ic1][1:, :] + p[ia1, ic1][:-1, :])*(xix2[vLev][ia1, ia1]/hx2[vLev]) + \\\n (p[ia1, ic1][1:, :] - p[ia1, ic1][:-1, :])*(xixx[vLev][ia1, ia1]/(2*hx[vLev])) + \\\n (p[ic1, ia1][:, 1:] + p[ic1, ia1][:, :-1])*(ztz2[vLev][ia1, ia1]/hz2[vLev]) + \\\n (p[ic1, ia1][:, 1:] - p[ic1, ia1][:, :-1])*(ztzz[vLev][ia1, ia1]/(2*hz[vLev])))* \\\n (1/jFactor2D)\n \n jFactor2D = 2*((xix2[vLev][ia2, ia2]/hx2[vLev]) + (ztz2[vLev][ia2, ia2]/hz2[vLev]))\n p[ic2, ic2] = (-r[ia2, ia2] + \\\n (p[ia2, ic2][1:, :] + p[ia2, ic2][:-1, :])*(xix2[vLev][ia2, ia2]/hx2[vLev]) + \\\n (p[ia2, ic2][1:, :] - p[ia2, ic2][:-1, :])*(xixx[vLev][ia2, ia2]/(2*hx[vLev])) + \\\n (p[ic2, ia2][:, 1:] + p[ic2, ia2][:, :-1])*(ztz2[vLev][ia2, ia2]/hz2[vLev]) + \\\n (p[ic2, ia2][:, 1:] - p[ic2, ia2][:, :-1])*(ztzz[vLev][ia2, ia2]/(2*hz[vLev])))* \\\n (1/jFactor2D)\n return p\n\ndef vectorJacobi2D2BlackUpdate(p, r, ic1, ic2, ia1, ia2):\n global hx2, hz2, hzhx\n global xixx, xix2, ztzz, ztz2\n \n jFactor2D = 2*((xix2[vLev][ia2, ia1]/hx2[vLev]) + (ztz2[vLev][ia2, ia1]/hz2[vLev]))\n p[ic2, ic1] = (-r[ia2, ia1] + \\\n (p[ia2, ic1][1:, :] + p[ia2, ic1][:-1, :])*(xix2[vLev][ia2, ia1]/hx2[vLev]) + \\\n (p[ia2, ic1][1:, :] - p[ia2, ic1][:-1, :])*(xixx[vLev][ia2, ia1]/(2*hx[vLev])) + \\\n (p[ic2, ia1][:, 1:] + p[ic2, ia1][:, :-1])*(ztz2[vLev][ia2, ia1]/hz2[vLev]) + \\\n (p[ic2, ia1][:, 1:] - p[ic2, ia1][:, :-1])*(ztzz[vLev][ia2, ia1]/(2*hz[vLev])))* \\\n (1/jFactor2D)\n \n jFactor2D = 2*((xix2[vLev][ia1, ia2]/hx2[vLev]) + (ztz2[vLev][ia1, ia2]/hz2[vLev]))\n p[ic1, ic2] = (-r[ia1, ia2] + \\\n (p[ia1, ic2][1:, :] + p[ia1, ic2][:-1, :])*(xix2[vLev][ia1, ia2]/hx2[vLev]) + \\\n (p[ia1, ic2][1:, :] - p[ia1, ic2][:-1, :])*(xixx[vLev][ia1, ia2]/(2*hx[vLev])) + \\\n (p[ic1, ia2][:, 1:] + p[ic1, ia2][:, :-1])*(ztz2[vLev][ia1, ia2]/hz2[vLev]) + \\\n (p[ic1, ia2][:, 1:] - p[ic1, ia2][:, :-1])*(ztzz[vLev][ia1, ia2]/(2*hz[vLev])))* \\\n (1/jFactor2D)\n\n return p\n\ndef vectorJacobi2D_RBGS(p, r):\n #Performs 1 iteration of the Jacobi method\n #Vectorized Form\n global hx2, hz2, hzhx\n global xixx, xix2, ztzz, ztz2\n # p is n x n\n # r is n x n\n #Assumes p and rhs have boundaries \n \n ic1 = slice(1, -1, 2)\n ic2 = slice(2, -2, 2)\n ia1 = slice(0, None, 2) \n ia2 = slice(1, -1, 2)\n \n \n #Red\n p = vectorJacobi2D2RedUpdate(p, r, ic1, ic2, ia1, ia2)\n #Black\n p = vectorJacobi2D2BlackUpdate(p, r, ic1, ic2, ia1, ia2)\n \n return 
p\n\n# Smooths the solution sCount times using the Gauss-Seidel or red-black Gauss-Seidel smoother\ndef smooth(sCount):\n global N\n global vLev\n global rData, pData\n global hx2, hz2, hzhx\n global xixx, xix2, ztzz, ztz2\n \n n = N[vLev]\n for iCnt in range(sCount):\n imposeBC(pData[vLev])\n \n if gv.solveMethod == \"MG-GS\":\n # Gauss-Seidel smoothing\n for i in range(1, n[0]+1):\n for j in range(1, n[1]+1):\n pData[vLev][i, j] = (\n hz2[vLev]*xix2[vLev][i-1]*(pData[vLev][i+1, j] + pData[vLev][i-1, j])*2.0 +\n hz2[vLev]*xixx[vLev][i-1]*(pData[vLev][i+1, j] - pData[vLev][i-1, j])*hx[vLev] +\n hx2[vLev]*ztz2[vLev][j-1]*(pData[vLev][i, j+1] + pData[vLev][i, j-1])*2.0 +\n hx2[vLev]*ztzz[vLev][j-1]*(pData[vLev][i, j+1] - pData[vLev][i, j-1])*hz[vLev] -\n 2.0*hzhx[vLev]*rData[vLev][i-1, j-1]) / \\\n (4.0*(hz2[vLev]*xix2[vLev][i-1] + hx2[vLev]*ztz2[vLev][j-1]))\n \n if gv.solveMethod == \"MG-RBGS\":\n pData[vLev] = vectorJacobi2D_RBGS(pData[vLev],rData[vLev])\n \n imposeBC(pData[vLev])\n\n\n# Compute the residual and store it into iTemp array\ndef calcResidual():\n global vLev\n global iTemp, rData, pData\n\n iTemp[vLev].fill(0.0)\n iTemp[vLev][1:-1, 1:-1] = rData[vLev] - laplace(pData[vLev])\n\n\n# Reduces the size of the array to a lower level, 2^(n - 1) + 1\ndef restrict():\n global N\n global vLev\n global iTemp, rData\n\n pLev = vLev\n vLev += 1\n\n n = N[vLev]\n for i in range(1, n[0] + 1):\n i2 = i*2\n for k in range(1, n[1] + 1):\n k2 = k*2\n rData[vLev][i-1, k-1] = 0.25*(iTemp[pLev][i2 - 1, k2 - 1]) + \\\n 0.125*(iTemp[pLev][i2 - 2, k2 - 1] + iTemp[pLev][i2, k2 - 1] + iTemp[pLev][i2 - 1, k2 - 2] + iTemp[pLev][i2 - 1, k2]) + \\\n 0.0625*(iTemp[pLev][i2 - 2, k2 - 2] + iTemp[pLev][i2, k2 - 2] + iTemp[pLev][i2 - 2, k2] + iTemp[pLev][i2, k2])\n\n\n# Solves at coarsest level using an iterative solver\ndef solve():\n global N, vLev\n global maxCount\n global pData, rData\n global hx2, hz2, hzhx\n global xixx, xix2, ztzz, ztz2\n\n n = N[vLev]\n solLap = np.zeros(n)\n\n jCnt = 0\n while True:\n imposeBC(pData[vLev])\n\n # Gauss-Seidel iterative solver\n for i in range(1, n[0]+1):\n for j in range(1, n[1]+1):\n pData[vLev][i, j] = (\n hz2[vLev]*xix2[vLev][i-1]*(pData[vLev][i+1, j] + pData[vLev][i-1, j])*2.0 +\n hz2[vLev]*xixx[vLev][i-1]*(pData[vLev][i+1, j] - pData[vLev][i-1, j])*hx[vLev] +\n hx2[vLev]*ztz2[vLev][j-1]*(pData[vLev][i, j+1] + pData[vLev][i, j-1])*2.0 +\n hx2[vLev]*ztzz[vLev][j-1]*(pData[vLev][i, j+1] - pData[vLev][i, j-1])*hz[vLev] -\n 2.0*hzhx[vLev]*rData[vLev][i-1, j-1]) / \\\n (4.0*(hz2[vLev]*xix2[vLev][i-1] + hx2[vLev]*ztz2[vLev][j-1]))\n\n maxErr = np.amax(np.abs(rData[vLev] - laplace(pData[vLev])))\n if maxErr < gv.tolerance:\n break\n\n jCnt += 1\n if jCnt > maxCount:\n print(\"ERROR: Gauss-Seidel solver not converging. 
Aborting\")\n quit()\n\n imposeBC(pData[vLev])\n\n\n# Increases the size of the array to a higher level, 2^(n + 1) + 1\ndef prolong():\n global N\n global vLev\n global pData\n\n pLev = vLev\n vLev -= 1\n\n pData[vLev].fill(0.0)\n\n n = N[vLev]\n for i in range(1, n[0] + 1):\n i2 = int(i/2) + 1\n if i % 2:\n for k in range(1, n[1] + 1):\n k2 = int(k/2) + 1\n if k % 2:\n pData[vLev][i, k] = pData[pLev][i2, k2]\n else:\n pData[vLev][i, k] = (pData[pLev][i2, k2] + pData[pLev][i2, k2 - 1])*0.5\n else:\n for k in range(1, n[1] + 1):\n k2 = int(k/2) + 1\n if k % 2:\n pData[vLev][i, k] = (pData[pLev][i2, k2] + pData[pLev][i2 - 1, k2])*0.5\n else:\n pData[vLev][i, k] = (pData[pLev][i2, k2] + pData[pLev][i2, k2 - 1] + pData[pLev][i2 - 1, k2] + pData[pLev][i2 - 1, k2 - 1])*0.25\n\n\n# Computes the 2D laplacian of function\ndef laplace(function):\n global N, vLev\n global hx2, hz2\n\n n = N[vLev]\n\n laplacian = xix2[vLev]*(function[2:, 1:-1] - 2.0*function[1:n[0]+1, 1:-1] + function[:n[0], 1:-1]) / hx2[vLev] + \\\n xixx[vLev]*(function[2:, 1:-1] - function[:n[0], 1:-1]) / (2.0*hx[vLev]) + \\\n ztz2[vLev]*(function[1:-1, 2:] - 2.0*function[1:-1, 1:n[1]+1] + function[1:-1, :n[1]]) / hz2[vLev] + \\\n ztzz[vLev]*(function[1:-1, 2:] - function[1:-1, :n[1]]) / (2.0*hz[vLev])\n\n return laplacian\n\n\n# Initialize the arrays used in MG algorithm\ndef initVariables():\n global N\n global pData, rData, sData, iTemp\n\n nList = np.array(N)\n\n rData = [np.zeros(tuple(x)) for x in nList]\n pData = [np.zeros(tuple(x)) for x in nList + 2]\n\n sData = [np.zeros_like(x) for x in pData]\n iTemp = [np.zeros_like(x) for x in pData]\n\n initGrid()\n\n\n# Initialize the grid metric terms at each V-level\ndef initGrid():\n global N\n global xixx, xix2\n global ztzz, ztz2\n\n # Uniform grid default values\n xPts = [np.linspace(0.0, 1.0, n[0]) for n in N]\n zPts = [np.linspace(0.0, 1.0, n[1]) for n in N]\n\n xix2 = [np.ones_like(i) for i in xPts]\n xixx = [np.zeros_like(i) for i in xPts]\n\n ztz2 = [np.ones_like(i) for i in zPts]\n ztzz = [np.zeros_like(i) for i in zPts]\n\n # Copy the values for finest grid from meshData\n xPts[0] = np.copy(grid.xStag)\n xixx[0] = np.copy(grid.xixxStag)\n xix2[0] = np.copy(grid.xix2Stag)\n\n zPts[0] = np.copy(grid.zStag)\n ztzz[0] = np.copy(grid.ztzzStag)\n ztz2[0] = np.copy(grid.ztz2Stag)\n\n # For coarser grids, simply use the values at every even index of the finer grid array.\n for i in range(1, gv.VDepth+1):\n xPts[i] = xPts[i-1][::2]\n xixx[i] = xixx[i-1][::2]\n xix2[i] = xix2[i-1][::2]\n\n zPts[i] = zPts[i-1][::2]\n ztzz[i] = ztzz[i-1][::2]\n ztz2[i] = ztz2[i-1][::2]\n\n # Reshape arrays to make it easier to multiply with 3D arrays\n xixx = [x[:, np.newaxis] for x in xixx]\n xix2 = [x[:, np.newaxis] for x in xix2]\n \n ztzz = [x[:, np.newaxis] for x in ztzz]\n ztz2 = [x[:, np.newaxis] for x in ztz2]\n \n if gv.solveMethod == \"MG-RBGS\":\n #Reshaping arrays for RBGS\n for i in range(len(rData)):\n xixx[i] = (xixx[i]*np.ones(rData[i].shape))\n xix2[i] = (xix2[i]*np.ones(rData[i].shape))\n \n ztzz[i] = (ztzz[i]*np.ones(rData[i].shape)).T\n ztz2[i] = (ztz2[i]*np.ones(rData[i].shape)).T\n \n############################## BOUNDARY CONDITION ###############################\n\n\n# The name of this function is self-explanatory. 
It imposes BC on P\ndef imposeBC(P):\n global zeroBC\n global pWallX, pWallZ\n\n if gv.testPoisson:\n # Dirichlet BC\n if zeroBC:\n # Homogenous BC\n # Left Wall\n P[0, :] = -P[2, :]\n\n # Right Wall\n P[-1, :] = -P[-3, :]\n\n # Bottom wall\n P[:, 0] = -P[:, 2]\n\n # Top wall\n P[:, -1] = -P[:, -3]\n\n else:\n # Non-homogenous BC\n # Left Wall\n P[0, :] = 2.0*pWallX - P[2, :]\n\n # Right Wall\n P[-1, :] = 2.0*pWallX - P[-3, :]\n\n # Bottom wall\n P[:, 0] = 2.0*pWallZ - P[:, 2]\n\n # Top wall\n P[:, -1] = 2.0*pWallZ - P[:, -3]\n\n else:\n # Periodic BCs along X and Y directions\n if gv.xyPeriodic:\n # Left wall\n P[0, :] = P[-3, :]\n\n # Right wall\n P[-1, :] = P[2, :]\n\n # Neumann boundary condition on pressure\n else:\n # Left wall\n P[0, :] = P[2, :]\n\n # Right wall\n P[-1, :] = P[-3, :]\n\n # Bottom wall\n P[:, 0] = P[:, 2]\n\n # Top wall\n P[:, -1] = P[:, -3]\n\n","sub_path":"orion/solvers/poissonSolverNU_d2.py","file_name":"poissonSolverNU_d2.py","file_ext":"py","file_size_in_byte":16215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573164114","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2019 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Unit tests for nbhood.ApplyNeighbourhoodProcessingWithAMask.\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport iris\nimport numpy as np\nfrom iris.coords import DimCoord\nfrom iris.tests import IrisTest\n\nfrom improver.nbhood.use_nbhood import ApplyNeighbourhoodProcessingWithAMask\nfrom improver.tests.nbhood.nbhood.test_BaseNeighbourhoodProcessing import (\n set_up_cube)\n\n\ndef add_dimensions_to_cube(cube, new_dims):\n \"\"\"\n Add additional dimensions to a cube by adding new axes to the input cube\n and concatenating them.\n\n Args:\n cube (iris.cube.Cube):\n The cube we want to add dimensions to.\n new_dims (dict):\n A dictionary containing the names of the dimensions you want to\n add and the number of points you want in that dimension.\n e.g {\"threshold\": 3, \"realization\": 4}\n Points in the additional dimension will be integers\n counting up from 0.\n The data will all be copies of the input cube's data.\n Returns:\n cube (iris.cube.Cube):\n The iris cube with the additional dimensions added.\n \"\"\"\n for dim_name, dim_size in new_dims.items():\n cubes = iris.cube.CubeList()\n for i in range(dim_size):\n threshold_coord = DimCoord([i], long_name=dim_name)\n threshold_cube = iris.util.new_axis(cube.copy())\n threshold_cube.add_dim_coord(threshold_coord, 0)\n cubes.append(threshold_cube)\n cube = cubes.concatenate_cube()\n return cube\n\n\ndef set_up_topographic_zone_cube(\n mask_data, topographic_zone_point, topographic_zone_bounds,\n num_time_points=1, num_grid_points=16, num_realization_points=1):\n \"\"\"Function to generate a cube with a topographic_zone coordinate. 
This\n uses the existing functionality from the set_up_cube function.\"\"\"\n mask_cube = set_up_cube(\n zero_point_indices=((0, 0, 0, 0),),\n num_time_points=num_time_points, num_grid_points=num_grid_points,\n num_realization_points=num_realization_points)\n mask_cube = iris.util.squeeze(mask_cube)\n mask_cube.data = mask_data\n mask_cube.long_name = 'Topography mask'\n coord_name = 'topographic_zone'\n threshold_coord = iris.coords.AuxCoord(\n topographic_zone_point, bounds=topographic_zone_bounds,\n long_name=coord_name)\n mask_cube.add_aux_coord(threshold_coord)\n mask_cube.attributes['Topographical Type'] = \"Land\"\n return mask_cube\n\n\nclass Test__init__(IrisTest):\n\n \"\"\"Test the __init__ method of ApplyNeighbourhoodProcessingWithAMask.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that the __init__ method returns the expected string.\"\"\"\n coord_for_masking = \"topographic_zone\"\n radii = 2000\n result = ApplyNeighbourhoodProcessingWithAMask(\n coord_for_masking, radii)\n msg = (\"\")\n self.assertEqual(str(result), msg)\n\n\nclass Test__repr__(IrisTest):\n\n \"\"\"Test the __repr__ method of ApplyNeighbourhoodProcessingWithAMask.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that the __repr__ method returns the expected string.\"\"\"\n coord_for_masking = \"topographic_zone\"\n radii = 2000\n result = str(ApplyNeighbourhoodProcessingWithAMask(\n coord_for_masking, radii))\n msg = (\"\")\n self.assertEqual(result, msg)\n\n\nclass Test_process(IrisTest):\n\n \"\"\"Test the process method of ApplyNeighbourhoodProcessingWithAMask.\"\"\"\n\n def setUp(self):\n \"\"\"Set up a cube.\"\"\"\n self.cube = set_up_cube(\n zero_point_indices=((0, 0, 2, 2),), num_grid_points=5)\n # The neighbourhood code adds bounds to the coordinates if they are\n # not present so add them now to make it easier to compare input and\n # output from the plugin.\n self.cube.coord(\"projection_x_coordinate\").guess_bounds()\n self.cube.coord(\"projection_y_coordinate\").guess_bounds()\n self.cube = iris.util.squeeze(self.cube)\n mask_data = np.array([[[1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0],\n [1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]],\n [[0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]],\n [[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1],\n [0, 0, 0, 1, 1]]])\n topographic_zone_points = [50, 150, 250]\n topographic_zone_bounds = [[0, 100], [100, 200], [200, 300]]\n\n mask_cubes = iris.cube.CubeList([])\n for data, point, bounds in zip(mask_data, topographic_zone_points,\n topographic_zone_bounds):\n mask_cubes.append(\n set_up_topographic_zone_cube(\n data, point, bounds, num_grid_points=5))\n self.mask_cube = mask_cubes.merge_cube()\n\n def test_basic(self):\n \"\"\"Test that the expected result is returned, when the\n topographic_zone coordinate is iterated over.\"\"\"\n expected = np.array(\n [[[1.00, 1.00, 1.00, np.nan, np.nan],\n [1.00, 1.00, 1.00, np.nan, np.nan],\n [1.00, 1.00, 1.00, np.nan, np.nan],\n [1.00, 1.00, 1.00, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan]],\n [[np.nan, 1.00, 1.00, 1.00, 1.00],\n [np.nan, 0.50, 0.75, 0.75, 1.00],\n [np.nan, 0.50, 0.75, 0.75, 1.00],\n [np.nan, 0.00, 0.50, 0.50, 1.00],\n [np.nan, np.nan, np.nan, np.nan, np.nan]],\n [[np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, 1.00, 1.00, 1.00],\n [np.nan, np.nan, 1.00, 1.00, 1.00],\n [np.nan, np.nan, 1.00, 1.00, 1.00]]])\n coord_for_masking = \"topographic_zone\"\n radii = 
2000\n num_zones = len(self.mask_cube.coord(coord_for_masking).points)\n expected_shape = tuple(\n [num_zones] + list(self.cube.data.shape))\n result = ApplyNeighbourhoodProcessingWithAMask(\n coord_for_masking, radii).process(self.cube, self.mask_cube)\n self.assertEqual(result.data.shape, expected_shape)\n self.assertArrayAlmostEqual(result.data, expected)\n\n def test_preserve_dimensions_input(self):\n \"\"\"Test that the dimensions on the output cube are the same as the\n input cube, apart from the additional topographic zone coordinate.\n \"\"\"\n self.cube.remove_coord(\"realization\")\n cube = add_dimensions_to_cube(\n self.cube, OrderedDict([(\"threshold\", 3), (\"realization\", 4)]))\n coord_for_masking = \"topographic_zone\"\n radii = 2000\n result = ApplyNeighbourhoodProcessingWithAMask(\n coord_for_masking, radii).process(cube, self.mask_cube)\n expected_dims = list(cube.dim_coords)\n expected_dims.insert(2, self.mask_cube.coord(\"topographic_zone\"))\n self.assertEqual(result.dim_coords, tuple(expected_dims))\n self.assertEqual(result.coord_dims(\"realization\"), (0,))\n self.assertEqual(result.coord_dims(\"threshold\"), (1,))\n self.assertEqual(result.coord_dims(\"topographic_zone\"), (2,))\n self.assertEqual(result.coord_dims(\"projection_y_coordinate\"), (3,))\n self.assertEqual(result.coord_dims(\"projection_x_coordinate\"), (4,))\n\n def test_preserve_dimensions_with_single_point(self):\n \"\"\"Test that the dimensions on the output cube are the same as the\n input cube, apart from the collapsed dimension.\n Check that a dimension coordinate with a single point is preserved\n and not demoted to a scalar coordinate.\"\"\"\n self.cube.remove_coord(\"realization\")\n cube = add_dimensions_to_cube(self.cube,\n {\"threshold\": 4, \"realization\": 1})\n coord_for_masking = \"topographic_zone\"\n radii = 2000\n result = ApplyNeighbourhoodProcessingWithAMask(\n coord_for_masking, radii).process(cube, self.mask_cube)\n expected_dims = list(cube.dim_coords)\n expected_dims.insert(2, self.mask_cube.coord(\"topographic_zone\"))\n\n self.assertEqual(result.dim_coords, tuple(expected_dims))\n self.assertEqual(result.coord_dims(\"realization\"), (0,))\n self.assertEqual(result.coord_dims(\"threshold\"), (1,))\n self.assertEqual(result.coord_dims(\"topographic_zone\"), (2,))\n self.assertEqual(result.coord_dims(\"projection_y_coordinate\"), (3,))\n self.assertEqual(result.coord_dims(\"projection_x_coordinate\"), (4,))\n\n def test_identical_slices(self):\n \"\"\"Test that identical successive slices of the cube produce\n identical results.\"\"\"\n expected = np.array(\n [[[1.00, 1.00, 1.00, np.nan, np.nan],\n [1.00, 1.00, 1.00, np.nan, np.nan],\n [1.00, 1.00, 1.00, np.nan, np.nan],\n [1.00, 1.00, 1.00, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan]],\n [[np.nan, 1.00, 1.00, 1.00, 1.00],\n [np.nan, 0.50, 0.75, 0.75, 1.00],\n [np.nan, 0.50, 0.75, 0.75, 1.00],\n [np.nan, 0.00, 0.50, 0.50, 1.00],\n [np.nan, np.nan, np.nan, np.nan, np.nan]],\n [[np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, 1.00, 1.00, 1.00],\n [np.nan, np.nan, 1.00, 1.00, 1.00],\n [np.nan, np.nan, 1.00, 1.00, 1.00]]])\n cube = set_up_cube(\n zero_point_indices=((0, 0, 2, 2), (1, 0, 2, 2)), num_grid_points=5,\n num_realization_points=2)\n # The neighbourhood code adds bounds to the coordinates if they are\n # not present so add them now to make it easier to compare input and\n # output from the plugin.\n 
cube.coord(\"projection_x_coordinate\").guess_bounds()\n cube.coord(\"projection_y_coordinate\").guess_bounds()\n cube = iris.util.squeeze(cube)\n coord_for_masking = \"topographic_zone\"\n radii = 2000\n num_zones = len(self.mask_cube.coord(coord_for_masking).points)\n expected_shape = tuple(\n [cube.data.shape[0], num_zones] + list(cube.data.shape[1:])\n )\n result = ApplyNeighbourhoodProcessingWithAMask(\n coord_for_masking, radii).process(cube, self.mask_cube)\n self.assertEqual(result.data.shape, expected_shape)\n for realization_slice in result.slices_over(\"realization\"):\n self.assertArrayAlmostEqual(realization_slice.data, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"lib/improver/tests/nbhood/use_nbhood/test_ApplyNeighbourhoodProcessingWithAMask.py","file_name":"test_ApplyNeighbourhoodProcessingWithAMask.py","file_ext":"py","file_size_in_byte":13175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"622241789","text":"\"\"\"\n\nHigher-level client for both submitter interaction and launching ability\n\n\"\"\"\n\nimport os\n\nfrom .api.client import SubmitterClient\n\nfrom .launcher.occopus import OccopusLauncher\nfrom .launcher.openstack import OpenStackLauncher\n\nfrom .models.application import Applications\nfrom .models.master import MicadoMaster\n\nfrom .exceptions import MicadoException\n\nLAUNCHERS = {\n \"occopus\": OccopusLauncher,\n \"openstack\": OpenStackLauncher,\n}\n\n\nclass MicadoClient:\n \"\"\"The MiCADO Client\n\n Builds and communicates with a MiCADO Master node\n\n Usage with a launcher:\n\n a)\n\n >>> from micado import MicadoClient\n >>> client = MicadoClient(launcher=\"openstack\")\n >>> client.master.create(\n ... auth_url='yourendpoint',\n ... project_id='project_id',\n ... image='image_name or image_id',\n ... flavor='flavor_name or flavor_id',\n ... network='network_name or network_id',\n ... keypair='keypair_name or keypair_id',\n ... security_group='security_group_name or security_group_id'\n ... )\n >>> client.applications.list()\n >>> client.master.destroy()\n\n b)\n\n >>> from micado import MicadoClient\n >>> client = MicadoClient(launcher=\"openstack\")\n >>> master_id = client.master.create(\n ... auth_url='yourendpoint',\n ... project_id='project_id',\n ... image='image_name or image_id',\n ... flavor='flavor_name or flavor_id',\n ... network='network_name or network_id',\n ... keypair='keypair_name or keypair_id',\n ... security_group='security_group_name or security_group_id'\n ... )\n >>> client.applications.list()\n >>> << store your master_id >>\n >>> << exit >>\n >>> -------------------------------------------------------------\n >>> << start >>\n >>> ...\n >>> master_id = << retrieve master_id >>\n >>> client = MicadoClient(launcher=\"openstack\")\n >>> client.master.attach(master_id = master_id)\n >>> client.applications.list()\n >>> client.master.destroy()\n\n Usage without a launcher i.e. MiCADO master is already created independently from the client library.\n\n >>> from micado import MicadoClient\n >>> client = MicadoClient(\n ... endpoint=\"https://micado/toscasubmitter/\",\n ... version=\"v2.0\",\n ... verify=False,\n ... auth=(\"ssl_user\", \"ssl_pass\")\n ... 
)\n >>> client.applications.list()\n\n Args:\n auth_url (string): Authentication URL for the NOVA\n resource.\n image (string): Name or ID of the image resource.\n flavor (string): Name or ID of the flavor resource.\n network (string): Name or ID of the network resource.\n keypair (string): Name or ID of the keypair resource.\n security_group (string, optional): name or ID of the\n security_group resource. Defaults to 'all'.\n region (string, optional): Name of the region resource.\n Defaults to None.\n user_domain_name (string, optional): Define the user_domain_name.\n Defaults to 'Default'\n project_id (string, optional): ID of the project resource.\n Defaults to None.\n micado_user (string, optional): MiCADO username.\n Defaults to admin.\n micado_password (string, optional): MiCADO password.\n Defaults to admin.\n endpoint (string): Full URL to API endpoint (omit version).\n Required.\n version (string, optional): MiCADO API Version (minimum v2.0).\n Defaults to 'v2.0'.\n verify (bool, optional): Verify certificate on the client-side.\n OR (str): Path to cert bundle (.pem) to verify against.\n Defaults to True.\n auth (tuple, optional): Basic auth credentials (user, pass).\n Defaults to None.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n launcher = kwargs.pop(\"launcher\", \"\").lower()\n if launcher:\n self.api = None\n try:\n self.launcher = LAUNCHERS[launcher]()\n except KeyError:\n raise MicadoException(f\"Unknown launcher: {launcher}\")\n else:\n self.api = SubmitterClient(*args, **kwargs)\n\n @classmethod\n def from_master(cls):\n \"\"\"Usage:\n Ensure MICADO_API_ENDPOINT and MICADO_API_VERSION\n environment variables are set, then:\n\n >>> from micado import MicadoClient\n >>> client = MicadoClient.from_master()\n \"\"\"\n try:\n submitter_endpoint = os.environ[\"MICADO_API_ENDPOINT\"]\n submitter_version = os.environ[\"MICADO_API_VERSION\"]\n except KeyError as err:\n raise MicadoException(f\"Environment variable {err} not defined!\")\n\n return cls(\n endpoint=submitter_endpoint,\n version=submitter_version,\n verify=False,\n )\n\n @property\n def applications(self):\n return Applications(client=self)\n\n @property\n def master(self):\n if not self.launcher:\n raise MicadoException(\"No launcher defined\")\n return MicadoMaster(client=self)\n","sub_path":"micado/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"463590180","text":"from tkinter import *\nfrom tkinter import ttk\nimport os\nimport threading\nimport wave\nimport pyaudio\nimport sys\n\n# Thread function for playback\ndef play_audio():\n chunk = 1024\n wf = wave.open('wav.wav', 'rb')\n p = pyaudio.PyAudio()\n\n stream = p.open(\n format = p.get_format_from_width(wf.getsampwidth()),\n channels = wf.getnchannels(),\n rate = wf.getframerate(),\n output = True)\n\n data = wf.readframes(chunk)\n\n print(\"playing\")\n while data != b'' and is_playing: #to stop playing; readframes returns bytes, so compare with b''\n stream.write(data)\n data = wf.readframes(chunk)\n #is_playing = False\n\n print(\"finished playing\")\n stream.stop_stream()\n stream.close()\n p.terminate()\n print(\"closing play thread\")\n\n# Thread function for recording\ndef record_audio():\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n RATE = 44100\n p = pyaudio.PyAudio()\n \n stream = p.open(format = FORMAT,\n channels = CHANNELS,\n rate = RATE,\n input = True,\n frames_per_buffer = 
CHUNK)\n\n frames = []\n print (\"starting recording...\")\n for i in range(0, int(RATE / CHUNK * 2)):\n if is_recording: #to stop recording\n data = stream.read(CHUNK)\n frames.append(data)\n print (\"finished recording\")\n\n stream.stop_stream()\n stream.close()\n\n wf = wave.open('wav.wav', 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n p.terminate()\n\ndef playButton():\n try:\n global is_playing\n global my_thread\n\n #if not is_playing:\n is_playing = True\n my_thread = threading.Thread(target=play_audio)\n my_thread.start()\n except ValueError:\n pass\n\ndef stopPlayButton():\n try:\n global is_playing\n\n if is_playing:\n is_playing = False\n my_thread.join() \n except ValueError:\n pass\n\ndef recordButton():\n try:\n global is_recording\n global record_thread\n\n if not is_recording:\n is_recording = True\n record_thread = threading.Thread(target=record_audio)\n record_thread.start()\n except ValueError:\n pass\n\ndef stopRecordButton():\n try:\n global is_recording\n\n if is_recording:\n is_recording = False\n record_thread.join() \n except ValueError:\n pass\n\n\n#### MAIN PROGRAM ####\n\nis_playing = False\nis_recording = False\nmy_thread = None\n \nroot = Tk()\nroot.title(\"Collaborative Audio System\")\n\n#root.geometry(\"400x300\")\n# Setup Tk mainframe\nmainframe = ttk.Frame(root, padding=\"4 4 12 12\")\nmainframe.grid(column=0, row=0, sticky=(N, W, E, S))\nmainframe.columnconfigure(0, weight=1)\nmainframe.rowconfigure(0, weight=1)\n\n\n# Treeview for showing takes\ntree = ttk.Treeview(mainframe)\ntree[\"columns\"]=(\"one\",\"two\")\ntree['show'] = 'headings' #hides first column\ntree.column(\"one\", width=100 )\ntree.column(\"two\", width=100)\ntree.heading(\"one\", text=\"Take\")\ntree.heading(\"two\", text=\"Duration\")\n#tree.heading(\"three\", text=\"column C\")\n\n# Populate Treeview with takes\ntree.insert(\"\" , 0, text=\"Line 1\", values=(\"1A\",\"1b\"))\n\nid2 = tree.insert(\"\", 1, \"dir2\", text=\"Dir 2\")\ntree.insert(id2, \"end\", \"dir 2\", text=\"sub dir 2\", values=(\"2A\",\"2B\"))\n\n# Setup buttons\n#ttk.Treeview(mainframe).grid(column=0, row=0, sticky=E)\ntree.grid(column=3, row=0, sticky=E)\nttk.Button(mainframe, text=\"Play\", command=playButton).grid(column=3, row=2, sticky=E)\nttk.Button(mainframe, text=\"Stop Playing\", command=stopPlayButton).grid(column=3, row=3, sticky=E)\nttk.Button(mainframe, text=\"Record\", command=recordButton).grid(column=0, row=2, sticky=W)\nttk.Button(mainframe, text=\"Stop Recording\", command=stopRecordButton).grid(column=0, row=3, sticky=E)\n\n\nfor child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)\n\n#root.bind('', calculate)\nroot.mainloop()\n","sub_path":"Code/FinalDev/cas.py","file_name":"cas.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"283959197","text":"import math\nfrom Functions import NumberOfCatigories, Gain, EntropyAllData, Node, isParent, CategoryEntropy, educationNumCategory, \\\n fnlwgtCategory, AgeCategory, HoursCategory, median, LossCategory, GainCategory\nimport csv\n\n# return True if i is an element of array\ndef belongTo( i, array):\n for j in array :\n if(i==j) :\n return True\n return False\n\n# check if it's one class\ndef OneClass ( filename, Node, category):\n\n AllParents = Node.parents\n RowList= attributes[Node.name][category]\n\n\n if len(AllParents)==1:\n 
RowList= attributes[Node.name][category]\n    else :\n        for name in AllParents:\n            if AllParents[name] != 'None':\n                RowList = [val for val in RowList if val in attributes[name][AllParents[name]]]\n\n\n    with open(filename, 'rb') as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=',')\n        high=0\n        less=0\n        nbRow=0\n        for row in csvreader:\n            if belongTo(nbRow,RowList)==True and (row[14].strip()== \"<=50K\"):\n                high=high+1\n            if belongTo(nbRow,RowList)==True and (row[14].strip()== \">50K\"):\n                less=less+1\n            nbRow=nbRow+1\n    if high==0 and less==0:\n        # no matching rows, so an empty selection cannot be a single class\n        return False\n    if (high !=0) and (less !=0):\n        return False\n    if high==0 :\n        Class=\">50K\"\n    if less==0 :\n        Class=\"<=50K\"\n\n    return True,Class\n\n\n\n#Build the decision Tree\ndef Main (filename, ParentNode, MapParentNode) :\n    # repeat for all categories\n    for category in MapParentNode :\n        #check that the category is one class\n\n        if OneClass(filename, ParentNode, category)==False:\n            #compute the entropy\n            categoryEntropy=CategoryEntropy(filename,ParentNode.name,category)\n            # for all attributes not already in the tree\n            gains= dict()\n            for attribute in attributes:\n                if isParent(ParentNode,attribute)== False :\n                    tmpMap={}\n                    for key in (attributes[attribute]):\n                        tmpMap[key]=[0,0]\n                    # for each row of the file\n                    with open(filename, 'rb') as csvfile:\n                        csvreader = csv.reader(csvfile, delimiter=',')\n                        for row in csvreader:\n                            if row[ParentNode.name]== category :\n                                for key in tmpMap :\n                                    if row[attribute] == key :\n                                        if row[14] == \">50K\":\n                                            tmpMap[key][0]=tmpMap[key][0]+1\n                                        else :\n                                            tmpMap[key][1]=tmpMap[key][1]+1\n                    # Gain\n                    Entropy=0\n                    for key in attributes[attribute] :\n                        nb=tmpMap[key][0]+tmpMap[key][1]\n                        if tmpMap[key][0] !=0 and tmpMap[key][1] !=0:\n                            Entropy=Entropy+((tmpMap[key][0]/float(nb))*math.log(tmpMap[key][0]/float(nb),2) + (tmpMap[key][1]/float(nb))*math.log(tmpMap[key][1]/float(nb),2))*float(nb)/len(attributes[ParentNode.name][category])\n                        else :\n                            Entropy =0\n                    gain= -categoryEntropy + Entropy\n                    gains[attribute]=gain\n\n\n            if(len(gains)!=0) :\n                # obtain the highest gain\n                att=-1\n                max_val = max(gains.itervalues())\n                for k, v in gains.iteritems() :\n                    if v == max_val :\n                        att=k\n\n                # Create the Node\n                NewNode= Node()\n                NewNode.name= att\n                # add parents\n                NewNode.parents= ParentNode.parents.copy()\n                NewNode.parents[ParentNode.name]= category\n                NewNode.parents[NewNode.name]='None'\n\n                # Add the node as a child of parent Node\n                ParentNode.children[category]=NewNode\n\n                # repeat for children\n                Main(filename,NewNode,attributes.get(NewNode.name))\n            else :\n                #No more attributes left, but still not a single class\n                NewNode = Node()\n                NewNode.name= -1\n                NewNode.parents= ParentNode.parents.copy()\n                NewNode.parents[ParentNode.name]= category\n                NewNode.parents[NewNode.name]='None'\n                # Add the node as a child of parent Node\n                ParentNode.children[category]=NewNode\n\n        else :\n            # the category is a class\n            NewNode = Node()\n            NewNode.name= -1\n            bool,NewNode.Class=OneClass(filename, ParentNode, category)\n            NewNode.parents= ParentNode.parents.copy()\n            NewNode.parents[ParentNode.name]= category\n            NewNode.parents[NewNode.name]='None'\n            # Add the node as a child of parent Node\n            ParentNode.children[category]=NewNode\n\n\n\n# find the final Node\ndef lookup(outfile, array, Node,missingValue):\n    if Node.name==-1 or missingValue==True :\n        if missingValue==True :\n            result= 'Unknown'\n        else :\n            result=Node.Class\n        outfile.writerow([array[0],array[1],array[2],array[3],array[4],array[5],array[6],array[7],array[8],array[9],array[10],array[11],array[12],array[13],result])\n    else :\n        for category in Node.children:\n            if array[Node.name].strip()==category.strip() :\n                lookup(outfile,array,Node.children[category],missingValue)\n\n\n\n\n# Apply the decision Tree\ndef DecisionTree(infilename, outfilename, Node) :\n    with open(infilename, 'rb') as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=',')\n        outfile = csv.writer(open(outfilename, \"wb\"))\n        i=0\n        for row in csvreader:\n            array =[]\n            missingValue=False\n            for i in range(14) :\n                array.append(row[i])\n                if row[i].strip()=='?':\n                    missingValue=True\n            lookup(outfile, array,Node,missingValue)\n            i=i+1\n\n# delete lines containing missing values\ndef missingValues (filenameIn,filnameOut):\n    with open(filnameOut, 'w') as fout :\n        with open(filenameIn) as fin :\n            for line in fin:\n                if '?' not in line:\n                    fout.write(line)\n\n# return the number of rows in a file\ndef nbRows(filename) :\n    with open(filename, 'rb') as f:\n        reader = csv.reader(f, delimiter=';')\n        for row in reader:\n            a=row[0]\n    return reader.line_num\n\n\n# Create final output file\ndef output( filename,outfilename):\n    with open(filename, 'rb') as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=',')\n        outfile = csv.writer(open(outfilename, \"wb\"))\n        i=1\n        for row in csvreader:\n            array=[]\n            array.append(i)\n            array.append(row[14])\n            outfile.writerow([array[0],array[1]])\n            i=i+1\n\n\n# missing values and continuous categories\nmissingValues ('adult.data','1.output')\neducationNumCategory('1.output','2.output')\nfnlwgtCategory('2.output','3.output')\nLossCategory('3.output','4.output',median ('3.output', 11))\nGainCategory('4.output','5.output',median ('4.output', 10))\nHoursCategory('5.output','6.output')\nAgeCategory('6.output', 'adult1.data')\n\n\n# Attributes\nworkClass = {'Private': [], 'Self-emp-not-inc': [], 'Local-gov' : [], 'Self-emp-inc' : [], 'Federal-gov' :[], 'State-gov' :[] , 'Without-pay' :[], 'Never-worked' : []}\neducation = {'Bachelors' :[], 'Some-college' :[], '11th':[], 'HS-grad' :[], 'Prof-school' :[], 'Assoc-acdm' :[], 'Assoc-voc' :[], '9th' :[], '7th-8th' :[], '12th':[], 'Masters':[], '1st-4th':[], '10th':[], 'Doctorate':[], '5th-6th':[], 'Preschool':[]}\nmaritalStatus = {'Married-civ-spouse': [], 'Divorced': [], 'Never-married': [], 'Separated': [], 'Widowed':[], 'Married-spouse-absent':[], 'Married-AF-spouse':[]}\noccupation = {'Armed-Forces' :[],'Transport-moving': [], 'Priv-house-serv': [], 'Protective-serv': [],'Machine-op-inspct': [], 'Adm-clerical': [], 'Farming-fishing': [],'Tech-support': [], 'Craft-repair': [], 'Other-service': [], 'Sales': [], 'Exec-managerial':[], 'Prof-specialty':[], 'Handlers-cleaners':[] }\nrelationship = {'Wife': [], 'Own-child': [], 'Husband': [], 'Not-in-family': [], 'Other-relative':[], 'Unmarried':[]}\nrace = {'White': [], 'Asian-Pac-Islander': [], 'Amer-Indian-Eskimo': [], 'Other': [], 'Black': []}\nsex= {'Female' :[], 'Male': []}\nnativeCountry = {'Holand-Netherlands' : [],'Scotland' : [],'Thailand' : [],'Yugoslavia' : [],'El-Salvador' : [],'Trinadad&Tobago' : [],'Peru' : [],'Hong' : [],'Ecuador' :[],'Taiwan' : [],'Haiti' : [],'Columbia' : [],'Hungary' : [],'Guatemala' : [],'Nicaragua' : [],'Italy' : [],'Poland' : [], 'Jamaica' : [], 'Vietnam' : [], 'Mexico' : [], 'Portugal' : [], 'Ireland' : [], 'France' : [], 'Dominican-Republic' : [],'Laos' : [],'Germany' : [],'South': [], 'China': [], 'Cuba': [], 'Iran': [], 'Honduras': [],'Philippines': [], 'Outlying-US(Guam-USVI-etc)': [], 'India': [], 'Japan': [], 'Greece':[],'United-States': [], 'Cambodia': [], 'England': [], 'Puerto-Rico': [], 'Canada':[]}\nage ={'1': [],'2': [],'3': [],'4': 
[]}\nhours ={'1': [],'2': [],'3': [],'4': []}\ncapitalLoss ={'1': [],'2': [],'3': []}\ncapitalGain ={'1': [],'2': [],'3': []}\nfnlwgt={'0': []}\neducationNum ={'0': []}\n\n# Categories\nNumberOfCatigories(\"adult1.data\", age,0)\nNumberOfCatigories(\"adult1.data\",workClass, 1)\nNumberOfCatigories(\"adult1.data\", fnlwgt,2)\nNumberOfCatigories(\"adult1.data\",education, 3)\nNumberOfCatigories(\"adult1.data\", educationNum,4)\nNumberOfCatigories(\"adult1.data\",maritalStatus, 5)\nNumberOfCatigories(\"adult1.data\",occupation, 6)\nNumberOfCatigories(\"adult1.data\",relationship, 7)\nNumberOfCatigories(\"adult1.data\",race, 8)\nNumberOfCatigories(\"adult1.data\",sex, 9)\nNumberOfCatigories(\"adult1.data\", capitalGain,10)\nNumberOfCatigories(\"adult1.data\", capitalLoss,11)\nNumberOfCatigories(\"adult1.data\", hours,12)\nNumberOfCatigories(\"adult1.data\",nativeCountry, 13)\n\n\n# Map of All Attributes\nattributes={0:age, 1: workClass, 2: fnlwgt, 3:education, 4:educationNum, 5:maritalStatus, 6:occupation, 7:relationship, 8:race, 9:sex, 10:capitalGain, 11:capitalLoss, 12:hours, 13: nativeCountry}\n\n# Gain\n\ngain = []\ngain.append(Gain(\"adult1.data\", age,0,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", workClass,1,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\n# ignore fnwgt attribute\ngain.append(0)\ngain.append(Gain(\"adult1.data\", education,3,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\n# ignore Education Number attribute\ngain.append(0)\ngain.append(Gain(\"adult1.data\", maritalStatus,5,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", occupation,6,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", relationship,7,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", race,8,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", sex,9,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", capitalGain,10,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", capitalLoss,11,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", hours,12,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\ngain.append(Gain(\"adult1.data\", nativeCountry,13,nbRows(\"adult1.data\"), EntropyAllData(\"adult1.data\",nbRows(\"adult1.data\"))))\n\n# Initialisation\nTree=Node()\nTree.name=gain.index(max(gain))\nTree.parents[Tree.name]='None'\n\n# Build decision Tree\nMain (\"adult1.data\",Tree,attributes[Tree.name])\n\n\n#Apply Decision Tree\nDecisionTree(\"Test\", \"Test1\", Tree)\n\n#output\noutput( \"Test1\",\"Test2\")\n\n\n","sub_path":"code/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":12450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377012959","text":"# -*- coding: utf-8 -*-\n# @Author: Safer\n# @Date: 2016-08-19 00:55:40\n# @Last Modified by: Safer\n# @Last Modified time: 2016-08-22 23:52:24\n\nimport sys\nfrom PyQt5.QtCore import Qt\n# from PyQt5.QtWidgets import QApplication, QMessageBox\nfrom 
PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtSql import QSqlDatabase, QSqlQuery, QSqlQueryModel, QSqlRecord\n\n\nclass DB(QSqlDatabase):\n\n    def __init__(self, db_name='client.db'):\n        super(DB, self).__init__()\n        self.db = self.addDatabase('QSQLITE')\n        # self.db.setDatabaseName(':memory:')\n        self.db_name = db_name\n        self.db.setDatabaseName(self.db_name)\n        if not self.db.open():\n            QMessageBox.critical(None, \"Cannot open database\",\n                                 \"Unable to establish a database connection.\\n\"\n                                 \"Click Cancel to exit.\",\n                                 QMessageBox.Cancel)\n            return  # __init__ must return None, so just bail out on failure\n        self.from_str = ''\n        self.columns_str = ''\n        self.where_str = ''\n        self.query_str = ''\n\n    def from_(self, from_str=''):\n        self.from_str = from_str\n        return self\n\n    def columns_(self, columns_str='*'):\n        self.columns_str = self._getTableColumns() if columns_str == '*' else columns_str\n        return self\n\n    def where_(self, where_str=''):\n        self.where_str = self._getTablePrimaryKey(\n        ) + ' > 0' if where_str == '' else where_str\n        return self\n\n    def find_(self):\n        columns = self.columns_str.split(',')\n        select_str = ','.join(columns)\n        sql = 'SELECT %s FROM %s WHERE %s' % (\n            select_str, self.from_str, self.where_str)\n        self._executeSql(sql)\n        results = []\n        while self._q.isValid():\n            record = [self._q.value(i) for i in range(len(columns))]\n            results.append(record)\n            self._q.next()\n        return results\n\n    def create_(self, data):\n        keys = ','.join([\"`\" + k + \"`\" for k in data.keys()])\n        values = ','.join([\"'\" + v + \"'\" for v in data.values()])\n        sql = 'INSERT INTO %s (%s) VALUES (%s)' % (\n            self.from_str, keys, values)\n        self._executeSql(sql)\n        return True\n\n    def update_(self, data):\n        lists = data.items()\n        length = len(lists)\n        sql = ''\n        string = ''\n        num = 1\n        for i in lists:\n            string += \"='\".join(list(i)) + \"'\"\n            if num < length:\n                string += \", \"\n                num += 1\n        sql = 'UPDATE %s SET %s WHERE %s' % (\n            self.from_str, string, self.where_str)\n        self._executeSql(sql)\n        return True\n\n    def delete_(self):\n        sql = 'DELETE FROM %s WHERE %s' % (self.from_str, self.where_str)\n        self._executeSql(sql)\n        return True\n\n    # Execute a raw SQL statement directly\n    def executeSql_(self, sql=''):\n        self._executeSql(sql)\n        return True\n\n    def _getTablePrimaryKey(self):\n        sql = \"PRAGMA table_info([%s])\" % (self.from_str)\n        self._executeSql(sql)\n        primary_key = ''\n        if self._q.isValid():\n            # fall back to the first column when no explicit pk flag is found\n            primary_key = self._q.value(1)\n        # PRAGMA table_info yields (cid, name, type, notnull, dflt_value, pk);\n        # prefer the column whose pk flag is actually set\n        while self._q.isValid():\n            if self._q.value(5):\n                primary_key = self._q.value(1)\n                break\n            self._q.next()\n        print(primary_key)\n        return primary_key\n\n    def _getTableColumns(self):\n        sql = \"PRAGMA table_info([%s])\" % (self.from_str)\n        self._executeSql(sql)\n        columns = []\n        while self._q.isValid():\n            columns.append(self._q.value(1))\n            self._q.next()\n        columns = ','.join(columns)\n        return columns\n\n    def _getAllTablesName(self):\n        sql = \"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\"\n        self._executeSql(sql)\n        tables = []\n        while self._q.isValid():\n            tables.append(self._q.value(0))\n            self._q.next()\n        return ','.join(tables)\n\n    def _executeSql(self, sql=''):\n        self._q = QSqlQuery()\n        self._q.prepare(sql)\n        self._q.exec_()\n        self._q.first()\n\n# if __name__ == '__main__':\n\n#     app = QApplication(sys.argv)\n#     db = DB()\n#     db.from_('test')\n\n#     ######## select ########\n\n#     db.where_('id > 0')\n#     db.columns_('*')\n#     results = db.find_()\n#     print(results)\n\n#     ######## create ########\n\n#     # db.create_({'id': '3', 'name': 'safer'})\n\n#     ######## update ########\n\n#     # db.where_('id = 2')\n#     # db.update_({'name': 'saferssssssss'})\n\n#     ######## delete ########\n\n#     # db.where_('id = 2')\n#     # 
db.delete_()\n","sub_path":"back/python/databases/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"615922538","text":"import urllib.request\nimport json\nimport time\n#test\n\n\n\n\n\n\ndef fetch(url):\n    #return urllib.urlopen(url).read()\n    return urllib.request.urlopen(url).read().decode('utf8')\n\n\n\n#temp = fetch('http://finance.google.com/finance/info?client=ig&q=TAIEX:2002&f=n,v')\ntemp = fetch('http://www.google.com/finance/getprices?q=2002&x=TPE&p=4Y&f=d,c,h,l,o,v')\n\nstack = str(temp).split('\\n')\nfor i in range(7):\n    stack.pop(0)\nstockdata = []\nwhile (len(stack)) > 1:\n    entry = stack.pop(0)\n    entry = str(entry).split(',')\n    if entry[0][0:1] == 'a':\n        basetime = int(entry[0][1:])\n        temptime = time.localtime(basetime)\n        nowtime = time.asctime(temptime)\n    else:\n        temptime = time.localtime(basetime + int(entry[0]) * 86400)\n        nowtime = time.asctime(temptime)\n    entry.pop(0)\n    entry.insert(0, nowtime)\n    year = entry[0][-4:]\n    month = entry[0][4:7]\n    day = entry[0][8:10]\n    #print(year + month + day)\n    stockdata.append(entry)\n    \nprint(stockdata)\n\n","sub_path":"StockPriceLoader.py","file_name":"StockPriceLoader.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"255087942","text":"import math\nfrom simpleai.search import astar, SearchProblem, depth_first, breadth_first, iterative_limited_depth_first, idastar\n\n# Class containing the methods to solve the maze\nclass MazeSolver(SearchProblem):\n    # Initialize the class \n    def __init__(self, board):\n        self.board = board\n        self.goal = (0, 0)\n\n        for y in range(len(self.board)):\n            for x in range(len(self.board[y])):\n                if self.board[y][x].lower() == \"o\":\n                    self.initial = (x, y)\n                elif self.board[y][x].lower() == \"x\":\n                    self.goal = (x, y)\n\n        super(MazeSolver, self).__init__(initial_state=self.initial)\n\n    # Define the method that takes actions\n    # to arrive at the solution (includes the handling that keeps an action from stepping into a wall)\n    def actions(self, state):\n        actions = []\n        for action in COSTS.keys():\n            newx, newy = self.result(state, action)\n            if self.board[newy][newx] != \"#\":\n                actions.append(action)\n\n        return actions\n\n    # Update the state based on the action (result takes a state and an action and produces the new state)\n    def result(self, state, action):\n        x, y = state\n\n        if action.count(\"up\"):\n            y -= 1\n        if action.count(\"down\"):\n            y += 1\n        if action.count(\"left\"):\n            x -= 1\n        if action.count(\"right\"):\n            x += 1\n\n        new_state = (x, y)\n\n        return new_state\n\n    # Check if we have reached the goal (this part checks whether the goal point was reached)\n    def is_goal(self, state):\n        return state == self.goal\n\n    #Compute the cost of taking an action (for our assignment the cost does not matter at all)\n    def cost(self, state, action, state2):\n        return COSTS[action]\n\n\n    # Heuristic that we use to arrive at the solution (measures the straight-line distance via the Pythagorean theorem)\n    def heuristic(self, state):\n        x, y = state\n        gx, gy = self.goal\n\n        return math.sqrt((x - gx) ** 2 + (y - gy) ** 2)\n\n\n#From here on the actual code runs!!! The map was drawn by hand, and by passing o and x to the problem class as arguments\n#the problem class reads them in and figures out the map.\n\nif __name__ == \"__main__\":\n    # Define the map\n    MAP = \"\"\"\n    ##############################\n    # # # #\n    # #### ######## # #\n    # o # # # #\n    # ### ##### ###### #\n    # # ### # #\n    # # # # # # ###\n    # ##### # # # x #\n    # # # #\n    ##############################\n    \"\"\"\n\n    # Convert map to a list (print(MAP) shows the map once first)\n    print(MAP)\n    MAP = [list(x) for x in MAP.split(\"\\n\") if x]\n\n\n    # Define cost of moving around the map (the cost has nothing to do with our assignment, so don't worry about it!!)\n    cost_regular = 1.0\n    cost_diagonal = 1.7\n\n    # Create the cost dictionary\n    COSTS = {\n        \"up\": cost_regular,\n        \"down\": cost_regular,\n        \"left\": cost_regular,\n        \"right\": cost_regular,\n        \"up left\": cost_diagonal,\n        \"up right\": cost_diagonal,\n        \"down left\": cost_diagonal,\n        \"down right\": cost_diagonal,\n    }\n\n    # Create maze solver object (the map is passed to the MazeSolver class as a parameter; the problem now exists)\n    problem = MazeSolver(MAP)\n\n    # Run the solver (comment the runs below in and out one by one, try them all, and print the corresponding results)\n    # Import everything. The functions are implemented inside simpleai-search-traditional.\n    #If you look there, the code is not implemented on the spot; it just calls what is already implemented (search).\n    #By adjusting the parameters of the search function you can get the values the assignment asks for.\n    #For example, breadth first uses a FIFO list, also passes the problem as an argument, plus graph_search, viewer, etc. (no detailed explanation of the last two)\n    #depth first, in contrast, uses a last-in first-out list,\n    #Implement ida* here. Write the name, write the return value; not sure where the handling part goes, but adding just that should be enough~ (it feels like it is the search at the very bottom...)\n    #There is a set called memory. What is the set for? It stores the nodes that have been explored. Every time a node is explored it is added to memory. So, printing the length of memory tells you how many nodes were explored!\n    #Printing the length of memory right before the end shows there are 7 expanded nodes. Not sure where it goes, but try placing str(len(memory)) carefully.\n    #Also, the generated nodes can be counted with the list called fringe. fringe holds the nodes still to be explored. Just before termination only the not-yet-explored nodes remain.\n    #ida-star is simply a-star combined with iterative limited ~.\n    #The f limit will have to be implemented by hand~~ computing the evaluation function value is all shown there too, look carefully.\n    #The whole project file plus the hangul (Korean) write-up.\n\n\n    #result = astar(problem, graph_search=True)\n    #result = depth_first(problem, graph_search=True)\n    #result = breadth_first(problem, graph_search=True)\n    result = iterative_limited_depth_first(problem, graph_search=True)\n    #result = idastar(problem, graph_search=True)\n    # Extract the path\n    path = [x[1] for x in result.path()]\n    print(\"solution length\", path)\n    print(len(path))\n\n    # Print the result\n    print()\n    for y in range(len(MAP)):\n        for x in range(len(MAP[y])):\n            if (x, y) == problem.initial:\n                print('o', end='')\n            elif (x, y) == problem.goal:\n                print('x', end='')\n            elif (x, y) in path:\n                print('·', end='')\n            else:\n                print(MAP[y][x], end='')\n\n        print()\n\n","sub_path":"ai파이썬과제3/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"373119093","text":"from cursesmenu import *\nfrom cursesmenu.items import *\nfrom piston.steem import Steem\nimport time\nimport os\nimport sys\n\nsteem = Steem(wif=\"YOUR WIF HERE\")\nmenu = CursesMenu(\"Screem Stream\", \"main feed for screem\")\ndef main():\n    post_status = FunctionItem(\"Update Status\", status, [])\n    posts = []\n    for post in steem.get_posts(category=\"tweeter\", limit=33, sort=\"trending\"):\n        submenu_2 = CursesMenu(\"Actions Menu\", \"Post Options\")\n        function_item_2 = FunctionItem(\"Up Vote\", post.upvote, [])\n        item2 = FunctionItem(\"Down Vote\", post.downvote, [])\n        submenu_2.append_item(function_item_2)\n        submenu_2.append_item(item2)\n        posts.append(SubmenuItem(str(\"@\" + post.author + \": \" + post.title), submenu=submenu_2))\n        posts[len(posts)-1].set_menu(menu)\n    
menu.append_item(post_status)\n for post in posts:\n menu.append_item(post)\n menu.show()\n\ndef status():\n update = input(\"Status: \")\n try:\n a = steem.post(update, update, category=\"tweeter\")\n #print(a)\n steem.vote(\"@\" + a[\"operations\"][0][1]['author'] + \"/\" + a[\"operations\"][0][1]['permlink'], 100.0)\n os.execl(sys.executable, sys.executable, *sys.argv)\n except Exception as e:\n menu.pause()\n #cursesmenu.clear_terminal()\n print(\"An error occured while trying to post status!\")\n print(\"Error Log:\")\n print(e)\n time.sleep(3)\n menu.draw()\n menu.resume()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"screemit.py","file_name":"screemit.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"288342266","text":"\"\"\"\nClone of 2048 game. Run on Codeskulptor http://www.codeskulptor.org/#user44_nCXSPCKWdo_15.py\n\"\"\"\n\nimport poc_2048_gui\n# import poc_simpletest\nimport random\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\n# Offsets for computing tile indices in each direction.\n# DO NOT MODIFY this dictionary.\nOFFSETS = {UP: (1, 0),\n DOWN: (-1, 0),\n LEFT: (0, 1),\n RIGHT: (0, -1)}\n\ndef merge(line):\n \"\"\"\n Function that merges a single row or column in 2048.\n \"\"\"\n temp_line = list(line)\n line_copy = list(line)\n needs_sort = True\n summed = False\n\n # Shift non-zero numbers to start of the list\n while needs_sort:\n needs_sort = False\n for dummy_i in xrange(1, len(line)):\n if temp_line[dummy_i - 1] == 0 and temp_line[dummy_i] != 0:\n needs_sort = True\n line_copy[dummy_i - 1] = line_copy[dummy_i]\n line_copy[dummy_i] = 0\n temp_line = list(line_copy)\n\n # Add adjacent identical numbers together, only loops once\n if needs_sort == False and not summed:\n just_summed = False\n for dummy_i in xrange(1, len(line)):\n if temp_line[dummy_i - 1] == temp_line[dummy_i] and temp_line[dummy_i] != 0 and not just_summed:\n line_copy[dummy_i - 1] *= 2\n line_copy[dummy_i] = 0\n just_summed = True\n else:\n just_summed = False\n needs_sort = True\n summed = True\n temp_line = list(line_copy)\n\n return line_copy\n\nclass TwentyFortyEight:\n \"\"\"\n Class to run the game logic.\n \"\"\"\n\n def __init__(self, grid_height, grid_width):\n self._grid_height = grid_height\n self._grid_width = grid_width\n self.reset()\n\n # Calculate initial tiles\n self._initial = {}\n self._initial[UP] = [(0, dummy_x) for dummy_x in xrange(grid_width)]\n self._initial[DOWN] = [(grid_height - 1, dummy_x) for dummy_x in xrange(grid_width)]\n self._initial[LEFT] = [(dummy_x, 0) for dummy_x in xrange(grid_height)]\n self._initial[RIGHT] = [(dummy_x, grid_width - 1) for dummy_x in xrange(grid_height)]\n # print self\n\n def reset(self):\n \"\"\"\n Reset the game so the grid is empty except for two\n initial tiles.\n \"\"\"\n self._grid = [[0 for dummy_i in xrange(self.get_grid_width())]\n for dummy_j in xrange(self.get_grid_height())]\n self.new_tile()\n self.new_tile()\n\n def __str__(self):\n \"\"\"\n Return a string representation of the grid for debugging.\n \"\"\"\n string = ''\n for row in xrange(self._grid_height):\n for col in xrange(self._grid_width):\n string += str(self._grid[row][col]) + ' '\n string += '\\n'\n return string\n\n def get_grid_height(self):\n \"\"\"\n Get the height of the board.\n \"\"\"\n return self._grid_height\n\n def get_grid_width(self):\n \"\"\"\n Get the width of the board.\n \"\"\"\n return self._grid_width\n\n def 
move(self, direction):\n \"\"\"\n Move all tiles in the given direction and add\n a new tile if any tiles moved.\n \"\"\"\n tiles_moved = False\n for initial_tile in self._initial[direction]:\n temp_list = []\n if direction == 1 or direction == 2:\n step_range = self._grid_height\n elif direction == 3 or direction == 4:\n step_range = self._grid_width\n for num in xrange(step_range):\n temp_list.append(self.get_tile(initial_tile[0] + OFFSETS[direction][0] * num,\n initial_tile[1] + OFFSETS[direction][1] * num))\n temp_list = merge(temp_list)\n for num in xrange(step_range):\n if self.get_tile(initial_tile[0] + OFFSETS[direction][0] * num, initial_tile[1] + OFFSETS[direction][1] * num) != temp_list[num]:\n self.set_tile(initial_tile[0] + OFFSETS[direction][0] * num, initial_tile[1] + OFFSETS[direction][1] * num, temp_list[num])\n tiles_moved = True\n if tiles_moved:\n self.new_tile()\n\n # print self\n\n\n def new_tile(self):\n \"\"\"\n Create a new tile in a randomly selected empty\n square. The tile should be 2 90% of the time and\n 4 10% of the time.\n \"\"\"\n # sort out empty squares\n grid_copy = []\n for row in xrange(self._grid_height):\n for col in xrange(self._grid_width):\n if self.get_tile(row, col) == 0:\n grid_copy.append((row, col))\n\n # randomly choose an empty square and assign a value\n lucky_square = grid_copy[random.randrange(len(grid_copy))]\n probability = random.randint(1, 10)\n if probability > 1:\n self.set_tile(lucky_square[0], lucky_square[1], 2)\n else:\n self.set_tile(lucky_square[0], lucky_square[1], 4)\n\n def set_tile(self, row, col, value):\n \"\"\"\n Set the tile at position row, col to have the given value.\n \"\"\"\n self._grid[row][col] = value\n\n def get_tile(self, row, col):\n \"\"\"\n Return the value of the tile at position row, col.\n \"\"\"\n # if self._grid[row][col]:\n return self._grid[row][col]\n\n#def test_2048():\n# \"\"\"\n# Debugger fuction to test code.\n# \"\"\"\n# a = TwentyFortyEight(4,5)\n# print 'a.get_grid_width()', a.get_grid_width()\n# print 'a.get_grid_height()', a.get_grid_height()\n# suite = poc_simpletest.TestSuite()\n# suite.run_test(a.get_grid_width(), 5, 'Method get_grid_width() test')\n# suite.run_test(a.get_tile(1, 1), 0, 'Method get_tile() test')\n# suite.report_results()\n# a.set_tile(0,4,2)\n# print a\n# a.move(LEFT)\n# print a\n#\npoc_2048_gui.run_gui(TwentyFortyEight(4, 4))\n#test_2048()\n","sub_path":"part 3 - principles of computing 1/week_2_2048.py","file_name":"week_2_2048.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"182545387","text":"import py.test\n\nfrom radosgw_agent import client\n\nREGION_MAP = {\n \"regions\": [\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://vit:8001/\"\n ],\n \"log_data\": \"true\",\n \"log_meta\": \"true\",\n \"name\": \"skinny-1\"\n },\n {\n \"endpoints\": [\n \"http://vit:8002/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"skinny-2\"\n }\n ],\n \"name\": \"skinny\",\n \"default_placement\": \"\",\n \"master_zone\": \"skinny-1\",\n \"api_name\": \"slim\",\n \"placement_targets\": [],\n \"is_master\": \"true\",\n \"endpoints\": [\n \"http://skinny:80/\"\n ]\n },\n \"key\": \"skinny\"\n },\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://vit:8003/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"swab-2\"\n },\n {\n \"endpoints\": [\n \"http://vit:8004/\"\n ],\n \"log_data\": \"false\",\n 
\"log_meta\": \"false\",\n \"name\": \"swab-3\"\n },\n {\n \"endpoints\": [\n \"http://vit:8000/\"\n ],\n \"log_data\": \"true\",\n \"log_meta\": \"true\",\n \"name\": \"swab-1\"\n }\n ],\n \"name\": \"swab\",\n \"default_placement\": \"\",\n \"master_zone\": \"swab-1\",\n \"api_name\": \"shady\",\n \"placement_targets\": [],\n \"is_master\": \"false\",\n \"endpoints\": [\n \"http://vit:8000/\"\n ]\n },\n \"key\": \"swab\"\n },\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://ro:80/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"ro-1\"\n },\n {\n \"endpoints\": [\n \"http://ro:8080/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"ro-2\"\n },\n ],\n \"name\": \"readonly\",\n \"default_placement\": \"\",\n \"master_zone\": \"ro-1\",\n \"api_name\": \"readonly\",\n \"placement_targets\": [],\n \"is_master\": \"false\",\n \"endpoints\": [\n \"http://ro:80/\",\n \"http://ro:8080/\"\n ]\n },\n \"key\": \"readonly\"\n },\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://meta:80/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"true\",\n \"name\": \"meta-1\"\n },\n {\n \"endpoints\": [\n \"http://meta:8080/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"meta-2\"\n },\n ],\n \"name\": \"metaonly\",\n \"default_placement\": \"\",\n \"master_zone\": \"meta-1\",\n \"api_name\": \"metaonly\",\n \"placement_targets\": [],\n \"is_master\": \"false\",\n \"endpoints\": [\n \"http://meta:80/\",\n \"http://meta:8080/\"\n ]\n },\n \"key\": \"metaonly\"\n }\n ],\n \"master_region\": \"skinny\"\n }\n\ndef test_endpoint_default_port():\n endpoint = client.Endpoint('example.org', None, True)\n assert endpoint.port == 443\n endpoint = client.Endpoint('example.org', None, False)\n assert endpoint.port == 80\n\ndef test_endpoint_port_specified():\n endpoint = client.Endpoint('example.org', 80, True)\n assert endpoint.port == 80\n endpoint = client.Endpoint('example.org', 443, True)\n assert endpoint.port == 443\n\ndef test_endpoint_equality():\n default_port = client.Endpoint('a.org', None, True)\n secure = client.Endpoint('a.org', 443, True)\n insecure = client.Endpoint('a.org', 80, False)\n assert default_port == secure\n assert secure == insecure\n assert insecure == default_port\n\ndef test_endpoint_inequality():\n base = client.Endpoint('a.org', 80, True)\n diff_host = client.Endpoint('b.org', 80, True)\n diff_port = client.Endpoint('a.org', 81, True)\n insecure = client.Endpoint('a.org', 8080, False)\n assert base != diff_host\n assert base != diff_port\n assert base != insecure\n\ndef test_parse_endpoint():\n endpoints = {\n 'http://example.org': ('example.org', 80, False),\n 'https://example.org': ('example.org', 443, True),\n 'https://example.org:8080': ('example.org', 8080, True),\n 'https://example.org:8080/': ('example.org', 8080, True),\n 'http://example.org:81/a/b/c?b#d': ('example.org', 81, False),\n }\n for url, (host, port, secure) in endpoints.iteritems():\n endpoint = client.parse_endpoint(url)\n assert endpoint.port == port\n assert endpoint.host == host\n assert endpoint.secure == secure\n\ndef test_parse_endpoint_bad_input():\n with py.test.raises(client.InvalidProtocol):\n client.parse_endpoint('ftp://example.com')\n with py.test.raises(client.InvalidHost):\n client.parse_endpoint('http://:80/')\n\ndef _test_configure_endpoints(dest_url, dest_region, dest_zone,\n expected_src_url, expected_src_region,\n expected_src_zone, specified_src_url=None,\n meta_only=False):\n dest = 
client.parse_endpoint(dest_url)\n if specified_src_url is not None:\n src = client.parse_endpoint(specified_src_url)\n else:\n src = client.Endpoint(None, None, None)\n region_map = client.RegionMap(REGION_MAP)\n client.configure_endpoints(region_map, dest, src, meta_only)\n assert dest.region.name == dest_region\n assert dest.zone.name == dest_zone\n assert src == client.parse_endpoint(expected_src_url)\n assert src.region.name == expected_src_region\n assert src.zone.name == expected_src_zone\n\ndef test_configure_endpoints_2nd_region_master_zone_meta():\n _test_configure_endpoints('http://vit:8000', 'swab', 'swab-1',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=True)\n\ndef test_configure_endpoints_2nd_region_master_zone_data():\n with py.test.raises(client.InvalidZone):\n _test_configure_endpoints('http://vit:8000', 'swab', 'swab-1',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=False)\n\ndef test_configure_endpoints_master_region_2nd_zone():\n _test_configure_endpoints('http://vit:8002', 'skinny', 'skinny-2',\n 'http://vit:8001', 'skinny', 'skinny-1')\n\ndef test_configure_endpoints_2nd_region_2nd_zone():\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8000', 'swab', 'swab-1')\n\ndef test_configure_endpoints_2nd_region_readonly_meta():\n _test_configure_endpoints('http://ro:8080', 'readonly', 'ro-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=True)\n\ndef test_configure_endpoints_2nd_region_readonly_data():\n with py.test.raises(client.InvalidZone):\n _test_configure_endpoints('http://ro:8080', 'readonly', 'ro-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=False)\n\ndef test_configure_endpoints_2nd_region_metaonly_meta():\n _test_configure_endpoints('http://meta:8080', 'metaonly', 'meta-2',\n 'http://meta:80', 'metaonly', 'meta-1',\n meta_only=True)\n\ndef test_configure_endpoints_2nd_region_metaonly_data():\n with py.test.raises(client.InvalidZone):\n _test_configure_endpoints('http://meta:8080', 'metaonly', 'meta-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=False)\n\ndef test_configure_endpoints_master_region_master_zone():\n with py.test.raises(client.InvalidZone):\n _test_configure_endpoints('http://vit:8001', 'skinny', 'skinny-1',\n 'http://vit:8001', 'skinny', 'skinny-1')\n\ndef test_configure_endpoints_specified_src_same_region():\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8000', 'swab', 'swab-1',\n 'http://vit:8000')\n\ndef test_configure_endpoints_specified_src_master_region_meta():\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n 'http://vit:8001', meta_only=True)\n\ndef test_configure_endpoints_specified_src_master_region_data():\n with py.test.raises(client.InvalidZone):\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n 'http://vit:8001', meta_only=False)\n\ndef test_configure_endpoints_bad_src_same_region():\n with py.test.raises(client.InvalidZone):\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8004', 'swab', 'swab-3',\n 'http://vit:8004')\n\ndef test_configure_endpoints_bad_src_master_region():\n with py.test.raises(client.InvalidZone):\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8002', 'skinny', 'skinny-2',\n 'http://vit:8002')\n\ndef test_configure_endpoints_bad_src_same_zone():\n with py.test.raises(client.InvalidZone):\n 
_test_configure_endpoints('http://vit:8000', 'swab', 'swab-1',\n                                  'http://vit:8000', 'swab', 'swab-1',\n                                  'http://vit:8000')\n\ndef test_configure_endpoints_specified_nonexistent_src():\n    with py.test.raises(client.ZoneNotFound):\n        _test_configure_endpoints('http://vit:8005', 'skinny', 'skinny-1',\n                                  'http://vit:8001', 'skinny', 'skinny-1',\n                                  'http://vit:80')\n\ndef test_configure_endpoints_unknown_zone():\n    with py.test.raises(client.ZoneNotFound):\n        _test_configure_endpoints('http://vit:8005', 'skinny', 'skinny-1',\n                                  'http://vit:8001', 'skinny', 'skinny-1')\n","sub_path":"radosgw_agent/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488979469","text":"from unittest import TestCase\nfrom ..project.two_d_point import TwoDPoint\n\n\nclass TestTwoDPoint(TestCase):\n    def test_from_coordinates(self):\n        self.assertEqual(\n            [TwoDPoint(0, 0), TwoDPoint(1.2, 3.2), TwoDPoint(-5, 3)],\n            TwoDPoint.from_coordinates([0, 0, 1.2, 3.2, -5, 3]),\n        )\n        self.assertNotEqual(\n            [TwoDPoint(0, 0), TwoDPoint(1.2, 3.2), TwoDPoint(-5, 3)],\n            TwoDPoint.from_coordinates([0, 0, 1.2, 3.2, -4, 3]),\n        )\n\n    def test___eq__(self):\n        point_1 = TwoDPoint(2, 2)\n        point_2 = TwoDPoint(2, 2)\n        self.assertEqual(point_1, point_2)\n        point_3 = TwoDPoint(2, 2.1)\n        self.assertNotEqual(point_1, point_3)\n\n    def test___add__(self):\n        point_1 = TwoDPoint(2, 2)\n        point_2 = TwoDPoint(2, 2)\n        point_3 = TwoDPoint(4, 4)\n        self.assertEqual(point_1 + point_2, point_3)\n        self.assertNotEqual(point_1 + point_3, point_2)\n\n    def test___sub__(self):\n        point_1 = TwoDPoint(2, 2)\n        point_2 = TwoDPoint(2, 2)\n        point_3 = TwoDPoint(0, 0)\n        self.assertEqual(point_1 - point_2, point_3)\n        self.assertNotEqual(point_1 - point_2, point_1)\n\n    def test___str__(self):\n        self.assertEqual(\"(2.5, 2.5)\", str(TwoDPoint(2.5, 2.5)))\n\n    def test___neq__(self):\n        point_1 = TwoDPoint(2, 2)\n        point_2 = TwoDPoint(2, 2.1)\n        self.assertNotEqual(point_1, point_2)\n\n    def test___init__(self):\n        p = TwoDPoint(100, -100.2)\n        self.assertEqual(100, p.x)\n        self.assertEqual(-100.2, p.y)\n","sub_path":"hw3/package/test/test_twoDPoint.py","file_name":"test_twoDPoint.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354053501","text":"#desafio017\n\nfrom math import sqrt, pow, trunc, hypot\n\ncateto_oposto = float(input('Enter the value of the opposite leg: '))\ncateto_adjacente = float(input('Enter the value of the adjacent leg: '))\n#hipotenusa = (pow(cateto_oposto, 2) + pow(cateto_adjacente, 2))\n#print('The length of the hypotenuse is {:.2f}.'.format(sqrt(hipotenusa)))\n\nprint('The length of the hypotenuse is {:.2f}.'.format(hypot(cateto_oposto, cateto_adjacente)))\n","sub_path":"Mundo1/ex019.py","file_name":"ex019.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311196849","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 16 22:26:45 2020\n\n@author: aims\n\"\"\"\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nimport numpy\nfrom torch.utils.data import Dataset\nimport glob\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\n\n#function to count number 
of parameters\ndef get_n_params(model):\n np=0\n for p in list(model.parameters()):\n np += p.nelement()\n return np\n\nimport dataset\nimport model\naccuracy_list = []\n\ndef train(epoch, model):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n \n #print(data[0].shape)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 100 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n \ndef test(model, perm=torch.arange(0, 224*224*3).long()):\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n \n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss \n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability \n correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()\n\n test_loss /= len(test_loader.dataset)\n accuracy = 100. * correct / len(test_loader.dataset)\n accuracy_list.append(accuracy)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n accuracy))\n\ninput_size = 224*224*3 # images are 224*224 pixels and has 3 channels because of RGB color\noutput_size = 2 # there are 2 classes---Cat and dog\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 60\n\n\n# define training and test data directories\ndata_dir = './Data/'\ntrain_dir = os.path.join(data_dir, 'train/')\ntest_dir = os.path.join(data_dir, 'test/')\n\nimage_size = (224, 224)\nimage_row_size = image_size[0] * image_size[1]\n\n#create transformers\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\ntrain_transform = transforms.Compose([\n transforms.Resize(image_size), \n transforms.ToTensor(), \n transforms.Normalize(mean, std)])\ntest_transforms = transforms.Compose([\n transforms.Resize(image_size), \n transforms.ToTensor(), \n transforms.Normalize(mean, std)])\n \ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \nif __name__ == '__main__':\n train_dataset = dataset.datasetloader(train_dir, transform=train_transform)\n test_dataset = dataset.datasetloader(test_dir, transform=test_transforms)\n \n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,\n num_workers=num_workers, shuffle=True)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, \n num_workers=num_workers)\n\n def imshow(source):\n plt.figure(figsize=(10,10))\n imt = (source.view(-1, image_size[0], image_size[0]))\n imt = imt.numpy().transpose([1,2,0])\n imt = (std * imt + mean).clip(0,1)\n plt.subplot(1,2,2)\n plt.imshow(imt)\n \n imshow(train_dataset[0][0])\n imshow(test_dataset[2][0])\n test_dataset[2][0].shape\n\n n_features = 2 # hyperparameter\n\n model_cnn = model.CNN(input_size, n_features, output_size)\n optimizer = optim.SGD(model_cnn.parameters(), lr=0.01, momentum=0.5)\n print('Number of parameters: {}'.format(get_n_params(model_cnn)))\n \n for epoch in range(0, 1):\n train(epoch, model_cnn)\n test(model_cnn)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"587235116","text":"\nfrom tkinter import *\n\nfrom 
.ui_tab_in_notebook import *\nfrom ..model.organizer import *\nfrom tkinter import filedialog  # filedialog is a submodule, not pulled in by the star import\n\nclass UISettings(UITabInNB):\n    def __init__(self, parent, tab_name):\n        super().__init__(parent, tab_name)\n\n    def refresh(self):\n        self.cleanup()\n\n        button_1 = Button(self, text='Save & Exit', \n            command=self.controller.save_exit)\n        button_2 = Button(self, text='Start Fresh',\n            command=self.controller.start_fresh)\n        button_3 = Button(self, text='Load',\n            command=self.load_onclick)\n        button_4 = Button(self, text='Save As',\n            command=self.save_as_onclick)\n\n        table = [button_1, button_2, button_3, button_4]\n        for i in range(len(table)):\n            table[i].grid(row=i, column=0, sticky=W+E, padx=2, pady=5)\n\n    def load_onclick(self):\n        file_path = filedialog.askopenfilename(\n            initialdir = Organizer.app_dir,\n            title = 'Select a file!',\n            filetypes = (('Want To List files', '*.wtl'), ('all files', '*.*')))\n\n        if file_path != '': # File is selected\n            self.controller.load(file_path)\n\n    def save_as_onclick(self):\n        file_path = filedialog.asksaveasfilename(\n            initialdir = Organizer.app_dir,\n            title = 'Save as?',\n            defaultextension = '.wtl')\n\n        if file_path != '':\n            self.controller.save_as(file_path)\n\n\n","sub_path":"want_to_list/view/ui_settings.py","file_name":"ui_settings.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560856499","text":"# -*- coding:utf-8 -*-\nfrom tkinter import *\nimport time\nimport platform\nimport globe\nimport scene.scene_game as game_scene\nimport scene.scene_reg as reg_scene\nimport scene.scene_emergency as emergency_scene\n\nispause = False\nextdata = None\n\n\nclass Scene:\n    def __init__(self, thewindow, external_data=None, arg_1=None):\n        global extdata\n        self.scene = thewindow\n        self.width = 1280\n        self.height = 720\n        self.arg1 = arg_1\n        self.size = [self.width, self.height]\n        self.image_bg = PhotoImage(file=\"./asset/bg/stats_window_bg.gif\")\n        self.canvas = Canvas(\n            self.scene, width=self.size[0],\n            height=self.size[1], bd=0, highlightthickness=0)\n        self.bg = self.canvas.create_image(\n            self.width / 2, self.height / 2, image=self.image_bg)\n        self.canvas.place(x=0, y=0)\n        self.initime = time.time()\n        self.canvas.bind_all('<Key>', self.jump)\n        self.external_data = external_data\n        extdata = external_data\n        # STAT DATA\n        self.totalscore = 0\n        self.totaltime = 0\n        self.maxbonus = 0\n        self.points = 0\n        self.rank = \"Easy   \"\n        self.level = \"Stage 1\"\n        self.stats_process()\n        if platform.system() == 'Darwin':\n            self.totaltime_onscreen = self.canvas.create_text(450, 269, fill=\"red\", font=\"Times 49 bold italic\", text=str(self.totaltime).zfill(7))\n            self.maxbonus_onscreen = self.canvas.create_text(523, 339, fill=\"red\", font=\"Times 49 bold italic\", text=str(self.maxbonus).zfill(1))\n            self.points_onscreen = self.canvas.create_text(450, 407, fill=\"red\", font=\"Times 49 bold italic\", text=str(self.points).zfill(7))\n            self.rank_onscreen = self.canvas.create_text(420, 476, fill=\"red\", font=\"Times 49 bold italic\", text=self.rank)\n            self.totalscore_onscreen = self.canvas.create_text(280, 628, fill=\"purple\", font=\"Times 89 bold italic\", text=str(self.totalscore).zfill(7))\n            self.indication = self.canvas.create_text(270, 690, fill=\"black\", font=\"Times 30 bold \", text=\"[Press any key]\")\n        else:\n            self.totaltime_onscreen = self.canvas.create_text(450, 274, fill=\"red\", font=\"Times 39 bold italic\", text=str(self.totaltime).zfill(7))\n            self.maxbonus_onscreen = self.canvas.create_text(523, 344, 
fill=\"red\", font=\"Times 39 bold italic\", text=str(self.maxbonus).zfill(1))\n self.points_onscreen = self.canvas.create_text(450, 412, fill=\"red\", font=\"Times 39 bold italic\", text=str(self.points).zfill(7))\n self.rank_onscreen = self.canvas.create_text(420, 481, fill=\"red\", font=\"Times 39 bold italic\", text=self.rank)\n self.totalscore_onscreen = self.canvas.create_text(280, 638, fill=\"purple\", font=\"Times 79 bold italic\", text=str(self.totalscore).zfill(7))\n self.indication = self.canvas.create_text(270, 690, fill=\"black\", font=\"Times 20 bold \", text=\"[Press any key]\")\n\n def update(self):\n self.stats_update()\n self.canvas.update()\n\n # scoring mechanism:\n # finalscore = (score + bonus_score + 500*game_life +\n # 325*game_bomb + 120*max_bonus + time_score) * delta_difficulty\n # delta_difficulty: easy = 0.5 normal = 1.0 hard = 1.2 lunatic = 1.5\n # time_score: if less than 90s: +1200\n # if in range (90, 180): +900 if in range(180+): no reward\n # [rank, level, game_total_time,\n # game_life, game_bomb, score, bonus_score, max_bonus]\n def stats_process(self):\n delta = 1\n time_score = 0\n print(self.external_data)\n if 0 <= self.external_data[2] <= 90:\n time_score = 1200\n elif 90 < self.external_data[2] <= 180:\n time_score = 900\n elif self.external_data[2] > 180:\n time_score = 0\n if self.external_data[1] == \"Easy   \":\n delta = 0.5\n elif self.external_data[1] == \"Normal \":\n delta = 1.0\n elif self.external_data[1] == \"Hard   \":\n delta = 1.2\n elif self.external_data[1] == \"Lunatic\":\n delta = 1.5\n self.totalscore = delta * (self.external_data[5] + self.external_data[6] + 500 * self.external_data[3] +\n 325 * self.external_data[4] + 120 * self.external_data[7] + time_score)\n self.totaltime = self.external_data[2]*10\n self.maxbonus = self.external_data[7]\n self.points = self.external_data[5]\n self.rank = self.external_data[0]\n self.level = self.external_data[1]\n\n def stats_update(self):\n pass\n\n @staticmethod\n def extpause(arg):\n global ispause\n if arg == 0:\n ispause = True\n elif arg == 1:\n ispause = False\n\n def jump(self, event):\n if self.external_data[1] == \"Stage 1\":\n # globe.window.switch(stats_scene, [rank, totalscore])\n print(\"[console]now switch to stage 2\")\n globe.window.switch(game_scene, [0, self.totalscore, 254, 1])\n elif self.external_data[1] == \"Stage 2\":\n print(\"[console]now switch to stage 3\")\n globe.window.switch(game_scene, [0, self.totalscore, 253, 1])\n elif self.external_data[1] == \"Stage 3\" or (self.external_data[1] != \"Stage 3\" and self.external_data[8] == 1):\n print(\"[console]switch to hiscore for reg.\")\n globe.window.switch(reg_scene, [self.totalscore, self.rank, self.level, 1])\n elif event.keysym == 'b':\n globe.window.switch(emergency_scene, 3, extdata)\n","sub_path":"Game-1280*720(no line length limit)/scene/scene_stats.py","file_name":"scene_stats.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185631455","text":"import numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\n\r\ng = 9.81\r\ndt = 0.01\r\n\r\ndef CalcRange(velo,anglep,elev):\r\n anginr = (np.pi/180)*anglep\r\n a = -g/2\r\n b = velo*np.sin(anginr)\r\n c = elev\r\n ux = velo*np.cos(anginr)\r\n t = (-b-math.sqrt(b**2-4*a*c)) / (2 * a)\r\n sx= ux*t\r\n\r\ndef CalcMaxHeight(velo,anglep,elev):\r\n anginr = (np.pi/180)*anglep\r\n uy = 
velo*np.sin(anginr)\r\n    e = elev\r\n    hmax = ((uy**2/(2*g))) + e\r\n    DisplayAns(hmax)\r\n\r\ndef CalcTimeTaken(velo,anglep,elev):\r\n    anginr = (np.pi/180)*anglep\r\n    a = -g/2\r\n    b = velo*np.sin(anginr)\r\n    c = elev\r\n    time = (-b-math.sqrt(b**2-4*a*c)) / (2 * a)\r\n    DisplayAns(time)\r\n\r\n\r\ndef CalcXandYPosition(velo,anglep,elev,time):\r\n    anginr = (np.pi/180)*anglep\r\n    xpos = velo*np.cos(anginr) * time \r\n    ypos = (velo*np.sin(anginr) * time) + (0.5 * -g * time**2)\r\n    roundedxpos = round(xpos, 2)\r\n    roundedypos = round(ypos, 2)\r\n    print(f\"The x position is: {roundedxpos}m\")\r\n    print(f\"The y position is: {roundedypos}m\")\r\n\r\ndef DisplayAns(ans):\r\n    roundedans = round(ans, 2)\r\n    print(roundedans)\r\n\r\n'''\r\ndef EulerMethod(velo,anglep,elev):\r\n    anginr = (np.pi/180)*anglep\r\n    y = elev\r\n    vy = velo*np.sin(anginr)\r\n    t = 0\r\n    while y > 0:\r\n        y += vy * dt\r\n        vy += -g * dt \r\n        t += dt\r\n        print(y)\r\n'''\r\n\r\n#g = float(input(\"Enter the gravitational field strength: \"))\r\nv = int(input(\"Enter the velocity: \"))\r\na = int(input(\"Enter the angle of projection: \"))\r\ne = int(input(\"Enter the elevation: \"))\r\nt = int(input(\"Enter the time: \"))\r\nprint()\r\n\r\nCalcRange(v,a,e)\r\nCalcMaxHeight(v,a,e)\r\nCalcTimeTaken(v,a,e)\r\nCalcXandYPosition(v,a,e,t)\r\n#EulerMethod(v,a,e)\r\n\r\n","sub_path":"initialcalcswithNONARRAY_DISPLAYANS.py","file_name":"initialcalcswithNONARRAY_DISPLAYANS.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"621455406","text":"from infixToPostfix import infix_to_postfix\nfrom cStack import Stack\nfrom MixedFraction import MixedFraction\n\n__author__ = 'Brad Miller and David Ranum\\nModified by Hunt Blanchat'\n\n\ndef postfix_eval(postfix_expr):\n    \"\"\" Takes a post fix expression and evaluates it using a helper function\n\n    :param postfix_expr: postfix expression in form str\n    :return: either an int or a float depending on inputs\n    \"\"\"\n    operand_stack = Stack()\n    token_list = postfix_expr.split()\n\n    for token in token_list:\n        try:\n            float(token)\n        except ValueError:\n            operand2 = operand_stack.pop()\n            operand1 = operand_stack.pop()\n            result = do_math(token, operand1, operand2)\n            operand_stack.push(result)\n\n        else:\n            try:\n                int(token)\n            except ValueError:\n                operand_stack.push(float(token))\n            else:\n                operand_stack.push(int(token))\n    return operand_stack.pop()\n\n\ndef do_math(op, op1, op2):\n    \"\"\" Helper function for postfix that\n    handles different operators and two ints/floats\n\n    :param op: operator to be applied to op1 and op2\n    :param op1: int or float\n    :param op2: int or float\n    :return: int or float\n    \"\"\"\n    if op == \"^\":\n        return op1 ** op2\n    elif op == \"*\":\n        return op1 * op2\n    elif op == \"/\":\n        return op1 / op2\n    elif op == \"+\":\n        return op1 + op2\n    else:\n        return op1 - op2\n\n\ndef eval_expression_file(file_name):\n    \"\"\" Reads through a file of either infix or postfix expressions, determines\n    which expression type is on a line and then evaluates the expression accordingly\n\n    :param file_name: name of file being operated on\n    :return:\n    \"\"\"\n    print(\"\\nEvaluation of {:}:\".format(file_name))\n    if file_name == 'fractionExpressions.txt':\n        for line in open(file_name, 'r'):\n            if line.strip() != '' and line.strip()[-1].isalnum():\n                express = infix_to_postfix(line.strip())\n                print('{:} = {:} = {:}'.format(line.strip(), MixedFraction(postfix_eval(express)), postfix_eval(express)))\n            elif len(line.strip()) != 0:\n                print('{:} = {:} = 
{:}'.format(line.strip(), MixedFraction(postfix_eval(line.strip())), postfix_eval(line.strip())))\n    else:\n        for line in open(file_name, 'r'):\n            if line.strip() != '' and line.strip()[-1].isalnum():\n                express = infix_to_postfix(line.strip())\n                print('{:} = {:}'.format(line.strip(), postfix_eval(express)))\n            elif len(line.strip()) != 0:\n                print('{:} = {:}'.format(line.strip(), postfix_eval(line.strip())))\n\n\ndef main():\n    postfix_expressions = ['4.4 4.6 + 2 1 3 + / ^', '2 20 ^ 2 1 3 + / ^', '2 20 + 2 1 3 + + *', '2 -1 3 + -']\n    infix_expressions = [\"7 + 9 * 8 - 4 ^ 2\", \"7 + 9 * 8 / 4 ^ 2\", \"( 17 + 9 ) * 3 / ( 5 - 3 ) ^ 4\",\n                         \"7.5 + 9 - 1.8 / 4 ^ 2.5\"]\n    print(\"\\nEvaluation of postfix_expressions[]:\")\n    for item in postfix_expressions:\n        print('{:} = {:,}'.format(item, postfix_eval(item)))\n    print(\"\\nEvaluation of infix_expressions[]:\")\n    for item in infix_expressions:\n        express = infix_to_postfix(item)\n        print('{:} = {:,}'.format(item, postfix_eval(express)))\n\n    #print(postfix_eval(\"2 5 * 3 5 * 2 3 + / +\"))\n    #print(postfix_eval(\"2 5 * 3 5 * + 2 3 + /\"))\n\n    eval_expression_file('expressions.txt')\n    eval_expression_file('fractionExpressions.txt')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"evaluateExpressions.py","file_name":"evaluateExpressions.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227676382","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as pl\n\n#import pylab as pl\nimport healpy as hp\n\nfrom cmb_footprint import footprint\nimport astropy.table as t\n\nif __name__ == '__main__':\n\n\n    #fp = footprint.SurveyStack('PLANCK-DUSTPOL', projection='mollweide', coord_plot='C', rot=[0,0],\n    #                           config='footprint_hsc.cfg')\n    fp = footprint.SurveyStack('PLANCK-DUSTPOL', projection='mollweide', coord_plot='C', rot=[-90,0],\n                               config='footprint_hsc.cfg')\n\n    cmass = hp.read_map('/astro/astronfs01/workarea/msyriac/maps/archived/DR12N1024_nocfhtwts_spec_hp_ct_noMagCut_zFrom0.43To0.7.fits')\n    fp.superimpose_hpxmap(cmass,label='BOSS',color='deeppink', coord_in='G')\n    \n    d56 = hp.read_map('/gpfs01/astro/www/msyriac/d56_healpix.fits')\n    fp.superimpose_hpxmap(d56,label='ACT-D56',color='green')\n    \n\n    d5 = hp.read_map('/gpfs01/astro/www/msyriac/d5_healpix.fits')\n    fp.superimpose_hpxmap(d5,label='ACT-D5',color='cyan')\n    \n    d6 = hp.read_map('/gpfs01/astro/www/msyriac/d6_healpix.fits')\n    fp.superimpose_hpxmap(d6,label='ACT-D6',color='orange')\n    \n    bossn = hp.read_map('/gpfs01/astro/www/msyriac/bossn_healpix.fits')\n    fp.superimpose_hpxmap(bossn,label='ACT-BOSSN',color='mediumpurple')\n    FDFC = hp.read_map('/astro/astronfs01/workarea/msyriac/hscxact/s16A/S16A_wide2_fdfc_i_limitmag_hp.fits')\n    fp.superimpose_hpxmap(FDFC,label='HSC-FDFC',color='red')\n    fp.superimpose_survey_outline('HSC',color='lightsalmon',label='HSC-planned')\n    fp.superimpose_survey_outline('POLARBEAR',color='blue',label='PB')\n    \n    pl.savefig('act_hsc_pbear_footprint.pdf')\n    #pl.show()\n","sub_path":"plot_hsc+planckCl.py","file_name":"plot_hsc+planckCl.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504339911","text":"from onesie.also import AlsoEnsemble\nfrom sklearn.ensemble import RandomForestRegressor\n\nimport numpy as np\n\nimport pytest\n\nfrom sklearn.metrics import roc_auc_score\nfrom lightgbm import LGBMRegressor\n\nfrom onesie.datasets import load_data\n# df, y = 
load_data('synthetic')\n\n@pytest.fixture()\ndef numeric_data():\n df, y = load_data('forestcover')\n df = df.drop(['Wilderness_Area', 'Soil_Type'], axis=1)\n df = df.sample(1000)\n y_sub = y[df.index]\n y_sub = y_sub.map({'nominal': 0, 'anomaly': 1})\n return df.values.astype('float'), y_sub\n\n\ndef test_also_numeric(numeric_data):\n df, y = numeric_data\n # df = StandardScaler().fit_transform(df)\n ae = AlsoEnsemble(LGBMRegressor)\n ae.fit(df)\n\n aerf = AlsoEnsemble(RandomForestRegressor, n_estimators=100)\n aerf.fit(df)\n\n assert roc_auc_score(y.values, ae.oof_scores) > .50\n\n\ndef test_also_numeric_with_noise(numeric_data):\n df, y = numeric_data\n df = np.append(df, np.random.rand(1000).reshape(-1, 1), 1)\n df = np.append(df, np.random.rand(1000).reshape(-1, 1), 1)\n df = np.append(df, np.random.rand(1000).reshape(-1, 1), 1)\n df = np.append(df, np.random.normal(3, 4, 1000).reshape(-1, 1), 1)\n ae = AlsoEnsemble(LGBMRegressor)\n ae.fit(df)\n assert all(ae.w[-4:] == 0)\n\n\ndef test_also_numeric_with_unchanging_feature(numeric_data):\n df, y = numeric_data\n df = np.append(df, np.full(df.shape[0], np.random.normal(1000)).reshape(-1, 1), 1)\n ae = AlsoEnsemble(LGBMRegressor)\n ae.fit(df)\n assert ae.w[-1] == 0\n\n\n# Test Linear Combos\ndef test_also_numeric_with_linear_combos():\n raise NotImplementedError\n\n# Test Highly Correlated Noise Vars\n\n# X = df.values\n# t1 = time()\naerf = AlsoEnsemble(RandomForestRegressor, n_estimators=100)\n# aerf.fit(X)\n# print(time()-t1)\n#\n# t1 = time()\n# aerf = AlsoEnsemble(RandomForestRegressor)\n# aerf.fit(X)\n# print(time()-t1)\n#\n# t1 = time()\n# aebst = AlsoEnsemble(LGBMRegressor)\n# aebst.fit(X)\n# print(time()-t1)\n#\n# y_sub = y[df.index]\n# y_sub = y_sub.map({'nominal': 0, 'anomaly': 1})\n# roc_auc_score(y_sub.values, aebst.oof_scores)\n# roc_auc_score(y_sub.values, aerf.oof_scores)\n# roc_auc_score(y_sub.values, ae.oof_scores)\n#\n#\n#\n# isofor = IsolationForest(behaviour='new')\n# isofor.fit(X)\n# iscores = -1*isofor.decision_function(X)\n# im = iscores-min(iscores)\n# im = im/max(im)\n# roc_auc_score(y_sub.values, im)\n#\n# def test_also_ensemble_with_rf():\n# assert True False\n","sub_path":"outlier_tools/test_also.py","file_name":"test_also.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"521367612","text":"import datetime, os, random, sys\nimport pdb\nroot = '/home/bilalabbasi/projects/pytorch-semantic-segmentation/'\nsys.path.insert(0, root) # compute canada root\n\nimport torch\nimport torchvision.transforms as standard_transforms\nimport torchvision.utils as vutils\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.backends import cudnn\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nimport torch.cuda as cuda\n\nimport utils.transforms as extended_transforms\nfrom datasets import voc\n# from models import *\nfrom models import fcn8s, fcn16s, fcn32s, deeplab_resnet, MBO\nfrom utils import check_mkdir, evaluate, AverageMeter, CrossEntropyLoss2d\n\ncudnn.benchmark = True\n\nckpt_path = os.path.join(root,'logs','ckpt')\nexp_name = 'voc-fcn8s'\n\nargs = {\n 'epoch_num': 300,\n 'lr': 1e-4,\n 'weight_decay': 1e-4,\n 'momentum': 0.95,\n 'lr_patience': 100, # large patience denotes fixed lr\n 'snapshot': '', # empty string denotes learning from scratch\n 'print_freq': 20,\n 'val_save_to_img_file': False,\n 'val_img_sample_rate': 0.1 # randomly sample some 
validation results to display\n}\n\nmodel = 'fcn8s'\niter_freq = 50\nepoch_freq = 20 # Frequency to save parameter states\nbsz = 10\ndef main(train_args):\n if cuda.is_available():\n net = fcn8s.FCN8s(num_classes=voc.num_classes, pretrained=False).cuda() \n #net = MBO.MBO().cuda()\n #net = deeplab_resnet.Res_Deeplab().cuda()\n else:\n print('cuda is not available')\n net = fcn8s.FCN8s(num_classes=voc.num_classes,pretrained=True)\n\n net.train()\n\n mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n\n input_transform = standard_transforms.Compose([\n standard_transforms.ToTensor(),\n standard_transforms.Normalize(*mean_std)\n ])\n target_transform = extended_transforms.MaskToTensor()\n restore_transform = standard_transforms.Compose([\n extended_transforms.DeNormalize(*mean_std),\n standard_transforms.ToPILImage(),\n ]) \n visualize = standard_transforms.Compose([\n standard_transforms.Scale(400),\n standard_transforms.CenterCrop(400),\n standard_transforms.ToTensor()\n ])\n \n train_set = voc.VOC('train',set='benchmark', transform=input_transform, target_transform=target_transform)\n train_loader = DataLoader(train_set, batch_size=bsz, num_workers=8, shuffle=True)\n \n val_set = voc.VOC('val',set='voc', transform=input_transform, target_transform=target_transform)\n val_loader = DataLoader(val_set, batch_size=1, num_workers=4, shuffle=False)\n\n criterion = CrossEntropyLoss2d(size_average=False, ignore_index=voc.ignore_label).cuda()\n optimizer = optim.Adam([\n {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],\n 'lr': train_args['lr']},\n {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],\n 'lr': train_args['lr']}],\n betas=(train_args['momentum'], 0.999))\n scheduler = ReduceLROnPlateau(optimizer, 'min', patience=2, min_lr=1e-10, verbose=True)\n\n lr0 = 1e-7\n max_epoch = 50\n max_iter = max_epoch * len(train_loader)\n #optimizer = optim.SGD(net.parameters(),lr = lr0, momentum = 0.9, weight_decay = 0.0005)\n scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=2,gamma=0.5)\n\n log_dir = os.path.join(root,'logs','voc-fcn')\n time = datetime.datetime.now().strftime('%d-%m-%H-%M')\n train_file = 'train_log' + time + '.txt'\n val_file = 'val_log' + time + '.txt'\n #os.makedirs(log_dir,exist_ok=True) \n \n training_log = open(os.path.join(log_dir,train_file),'w') \n val_log = open(os.path.join(log_dir,val_file),'w')\n\n curr_epoch = 1\n for epoch in range(curr_epoch, train_args['epoch_num'] + 1):\n train(train_loader, net, criterion, optimizer, epoch, train_args,training_log, max_iter,lr0) \n val_loss = validate(val_loader, net, criterion, optimizer, epoch, train_args, restore_transform, visualize,val_log)\n\n scheduler.step(val_loss) \n \n lr_tmp = 0.0\n k = 0\n for param_group in optimizer.param_groups:\n lr_tmp += param_group['lr']\n k+=1\n val_log.write('learning rate = {}'.format(str(lr_tmp/k)) + '\\n')\n #scheduler.step()\n\ndef train(train_loader, net, criterion, optimizer, epoch, train_args,training_log,max_iter,lr0):\n train_loss = AverageMeter()\n curr_iter = (epoch - 1) * len(train_loader)\n for i, data in enumerate(train_loader):\n inputs, labels = data\n bsz = len(inputs)\n #pdb.set_trace()\n if cuda.is_available():\n inputs = Variable(inputs).cuda()\n labels = Variable(labels).cuda()\n else:\n inputs = Variable(inputs)\n labels = Variable(labels)\n\n optimizer.zero_grad()\n outputs = net(inputs)\n\n loss = criterion(outputs, labels) / bsz\n loss.backward()\n optimizer.step()\n\n curr_iter += 1\n 
#poly_lr_step(optimizer,lr0,curr_iter,max_iter,power=0.9)\n        train_loss.update(loss.data[0], bsz)\n        \n        training_log.write(str(curr_iter) + ' ' + str(train_loss.avg) +'\\n')\n        if curr_iter%iter_freq==0:\n            print('epoch={}, it={} '.format(epoch,curr_iter),str(train_loss.avg))\n\n\ndef validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize,val_log):\n    net.eval()\n\n    val_loss = AverageMeter()\n    inputs_all, gts_all, predictions_all = [], [], []\n\n    for vi, data in enumerate(val_loader):\n        inputs, gts = data\n        N = inputs.size(0)\n        \n        if cuda.is_available():\n            inputs = Variable(inputs, volatile=True).cuda()\n            gts = Variable(gts, volatile=True).cuda()\n        else:\n            inputs = Variable(inputs,volatile=True)\n            gts = Variable(gts, volatile=True)\n\n        outputs = net(inputs)\n        \n        loss = criterion(outputs,gts)/N\n        val_loss.update(loss.data[0], N)\n        \n        #val_log.write(str(epoch) + ' ' + str(val_loss.avg) + '\\n')\n\n        inputs_all.append(inputs.data.squeeze_(0).cpu())\n        gts_all.append(gts.data.squeeze_(0).cpu().numpy())\n        \n        predictions = outputs.data.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()\n        predictions_all.append(predictions)\n    \n    acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, voc.num_classes)\n    print('Mean IoU for epoch {} is {}'.format(epoch,mean_iu))\n    val_log.write('epoch {}, average val loss = {}'.format(epoch,val_loss.avg))\n    val_log.write('Mean IoU for epoch {} is {}'.format(epoch,mean_iu) + '\\n')\n    root = '/home/bilalabbasi/projects/pytorch-semantic-segmentation/logs/pths'\n    if epoch%20 == 0:\n        torch.save(net.state_dict(), os.path.join(root,model + '_epoch_' +str(epoch)+ '_iou_' + str(mean_iu)+ '.pth'))\n    \n    net.train()\n    return val_loss.avg\n\ndef poly_lr_step(optimizer,lr0,iter,max_iter,power=0.9):\n    lr = lr0 * (1-float(iter)/max_iter)**power\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\nif __name__ == '__main__':\n    main(args)\n","sub_path":"train/voc-fcn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258249318","text":"import pygame\nfrom pygame.locals import *\n\nfrom life import GameOfLife\nfrom ui import UI\n\n\nclass GUI(UI):\n\n    def __init__(self, life: GameOfLife, cell_size: int = 20, speed: int = 10) -> None:\n        super().__init__(life)\n        self.cell_size = cell_size\n        self.speed = speed\n        self.screen_size = life.cols * self.cell_size, life.rows * self.cell_size\n        self.screen = pygame.display.set_mode(self.screen_size)\n\n    def draw_lines(self) -> None:\n        \"\"\" Draw the grid lines \"\"\"\n        width, height = self.screen_size\n\n        for x in range(0, width, self.cell_size):\n            pygame.draw.line(self.screen, pygame.Color('black'),\n                             (x, 0), (x, height))\n        for y in range(0, height, self.cell_size):\n            pygame.draw.line(self.screen, pygame.Color('black'),\n                             (0, y), (width, y))\n\n    def draw_grid(self) -> None:\n        \"\"\"\n        Draw the list of cells, filling them in with the appropriate colors.\n        \"\"\"\n        for i in range(self.life.rows):\n            for j in range(self.life.cols):\n                cur_color = pygame.Color('white')\n                if self.life.curr_generation[i][j]:\n                    cur_color = pygame.Color('green')\n                pygame.draw.rect(self.screen, cur_color,\n                                 (j * self.cell_size, i * self.cell_size, self.cell_size, self.cell_size))\n\n    def run(self) -> None:\n        \"\"\" Run the game \"\"\"\n        pygame.init()\n        clock = pygame.time.Clock()\n        pygame.display.set_caption('Game of Life')\n        self.screen.fill(pygame.Color('white'))\n\n        running = True\n        pause = 
False\n        while running and not self.life.is_max_generations_exceeded:\n            for event in pygame.event.get():\n                if event.type == QUIT:\n                    running = False\n                elif event.type == KEYUP and event.key == K_SPACE:\n                    pause = not pause\n                elif event.type == MOUSEBUTTONDOWN and pause:\n                    self.mouse_fill_cell()\n\n            self.draw_grid()\n            self.draw_lines()\n            if not pause:\n                self.life.step()\n\n            pygame.display.flip()\n            clock.tick(self.speed)\n        pygame.quit()\n\n    def mouse_fill_cell(self) -> None:\n        x, y = pygame.mouse.get_pos()\n        col = x // self.cell_size\n        row = y // self.cell_size\n        self.life.curr_generation[row][col] = (self.life.curr_generation[row][col] + 1) % 2\n\n\nif __name__ == '__main__':\n    life = GameOfLife((24, 24), max_generations=50)\n    gui = GUI(life)\n    gui.run()\n","sub_path":"homework03/life-gui.py","file_name":"life-gui.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354951157","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n\n\"\"\"\nnetwork.py\n------------------\n\nA module to implement the SGD learning algorithm for a feedforward NN. Gradients\nare calculated using backpropagation.\n\nNote the code is simple, easily readable, and easily modifiable. It is not\noptimized, and omits many desirable features.\n\"\"\"\n\nimport random\nimport numpy as np\n\n\ndef sigmoid(z):\n    #note that the input z is a vector or np.array, np automatically\n    #applies the function sigmoid elementwise in vectorized form\n    return 1.0 / (1.0+np.exp(-z))\n\ndef sigmoid_prime(z):\n    \"\"\"Derivative of the sigmoid function\"\"\"\n    return sigmoid(z) * (1-sigmoid(z))\n\n\n\nclass Network(object):\n\n    def __init__(self, sizes):\n        \"\"\"\n        The list `sizes` (the number of neurons per layer) contains the number of neurons in the respective\n        layers of the network. i.e. if the list was [2, 3, 1] then it would be a 3\n        layer network. The biases and weights for the network are initialized randomly,\n        using a Gaussian distribution with mean 0, and variance 1. NOTE that the\n        first layer is assumed to be an input layer, and by convention we won't set\n        any biases for those neurons, since biases are only ever used in computing\n        the output from later layers.\n        \"\"\"\n        self.num_layers = len(sizes)\n        self.sizes = sizes\n        #random.randn(y, 1) draws samples from a normal distribution (mean 0, variance 1)\n        #sizes = [2,3,1]\n        #test = [np.random.randn(y, 1) for y in sizes[1:]]\n        #>>> test\n        #[array([[ 1.94011169],\n        #        [ 0.80066664],\n        #        [-1.41180998]]), array([[-0.93776222]])]\n        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n        #net.weights[1] stores the weights connecting the second and third layers (python stores from index 0)\n        #>>> sizes\n        #[1, 2, 3, 4, 5, 6]\n        #>>> for x, y in zip(sizes[:-1], sizes[1:]):\n        #        print(x, y)\n        #    1 2\n        #    2 3\n        #    3 4\n        #    4 5\n        #    5 6\n        self.weights = [np.random.randn(y, x)\n                        for x, y in zip(sizes[:-1], sizes[1:])]\n\n\n    def feedforward(self, a):\n        \"\"\"\n        Return the output of the network if `a` is input.\n        \"\"\"\n        for b, w in zip(self.biases, self.weights):\n            a = sigmoid(np.dot(w, a) + b)\n        return a\n\n\n    def SGD(self, training_data, epochs, mini_batch_size, eta,\n            test_data=None):\n        \"\"\"\n        Train the NN using mini-batch stochastic gradient descent.\n        The `training_data` is a list of tuples `(x, y)` representing the\n        training inputs and the desired outputs. The other non-optional\n        parameters are self-explanatory. If `test_data` is provided then the\n        network will be evaluated against the test data after each epoch,\n        and partial progress printed out. 
This is useful for tracking progress,\n        but slows things down substantially.\n        :param eta: the learning rate.\n\n        :training_data: a list containing many tuples; each tuple holds an x and a y,\n                        where x is of type numpy.array and y is the label.\n        :epochs: set according to our particular data; enough for the results to converge.\n        :mini_batch_size: the number of examples in each mini batch.\n        :eta: the learning rate.\n        :test_data:\n        \"\"\"\n        if test_data: n_test = len(test_data)\n        n = len(training_data)\n        for j in xrange(epochs):\n            #shuffle accepts a list and randomly rearranges the elements of that list\n            random.shuffle(training_data)\n            #slice out the data. If mini_batch_size is 100, pick from [0, n) stepping\n            #by mini_batch_size: [0,99), [100,199) ... forming several mini_batches\n            mini_batches = [training_data[k:k+mini_batch_size]\n                            for k in xrange(0, n, mini_batch_size)]\n            for mini_batch in mini_batches:\n                #update_mini_batch is the most important step: it updates weight and bias\n                self.update_mini_batch(mini_batch, eta)\n            if test_data:\n                #after each epoch, check the accuracy.\n                print(\"Epoch {0}: {1} / {2}\".format(\n                    j, self.evaluate(test_data), n_test))\n            else:\n                print(\"Epoch {0} complete\".format(j))\n    \n\n    def update_mini_batch(self, mini_batch, eta):\n        \"\"\"\n        Update the network's weights and biases by applying\n        GD using backpropagation to a single mini batch. The\n        \"mini_batch\" is a list of tuples \"(x, y)\", and \"eta\"\n        is the learning rate.\n        This is the crucial step. Backpropagation is a method for computing\n        the partial derivatives of the cost function with respect to b and w.\n        \"\"\"\n        #nabla is the inverted-triangle (gradient) symbol; these hold the grad\n        nabla_b = [np.zeros(b.shape) for b in self.biases]\n        nabla_w = [np.zeros(w.shape) for w in self.weights]\n        for x, y in mini_batch:\n            #x is an image, y is its label. This line computes the partial derivatives\n            delta_nabla_b, delta_nabla_w = self.backprop(x, y)\n            #if this part is unclear, sketching the matrix shapes makes it obvious\n            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n        #Equation 20 and 21 in the pdf document\n        self.weights = [w - (eta/len(mini_batch))*nw \n                        for w, nw in zip(self.weights, nabla_w)]\n        self.biases = [b - (eta/len(mini_batch))*nb\n                       for b, nb in zip(self.biases, nabla_b)]\n\n    \n    def backprop(self, x, y):\n        \"\"\"\n        Return a tuple `nabla_b, nabla_w` representing the gradient for the \n        cost function C_x. `nabla_b` and `nabla_w` are layer-by-layer lists \n        of numpy arrays, similar to `self.biases` and `self.weights`.\n        \"\"\"\n        nabla_b = [np.zeros(b.shape) for b in self.biases]\n        nabla_w = [np.zeros(w.shape) for w in self.weights]\n        #feedforward\n        activation = x\n        activations = [x] #list to store all the activations, layer by layer\n        zs = [] #list to store all the z vectors, layer by layer\n        for b, w in zip(self.biases, self.weights):\n            z = np.dot(w, activation) + b\n            zs.append(z)\n            activation = sigmoid(z)\n            activations.append(activation)\n        #backward pass\n        #compute the error of the last layer. sigmoid_prime is the derivative of sigmoid.\n        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])\n        nabla_b[-1] = delta\n        nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n\n        # Note that the variable l in the loop below is used a little\n        # differently to the notation in Chapter 2 of the book. Here,\n        # l = 1 means the last layer of neurons, l = 2 is the\n        # second-last layer, and so on. 
It's a renumbering of the\n        # scheme in the book, used here to take advantage of the fact\n        # that Python can use negative indices in lists.\n        for l in xrange(2, self.num_layers):\n            #update backwards, layer by layer\n            z = zs[-l]\n            sp = sigmoid_prime(z)\n            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n            nabla_b[-l] = delta\n            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n\n        return (nabla_b, nabla_w)\n\n\n    def evaluate(self, test_data):\n        \"\"\"\n        Return the number of test inputs for which the neural network \n        outputs the correct result. Note that the NN's output is assumed \n        to be the index of whichever neuron in the final layer has the \n        highest activation.\n        \"\"\"\n        test_results = [(np.argmax(self.feedforward(x)), y)\n                        for (x, y) in test_data]\n        return sum(int(x == y) for (x, y) in test_results)\n\n    \n    def cost_derivative(self, output_activations, y):\n        \"\"\"\n        Return the vector of partial derivatives \\partial C_x / \\partial a\n        for the output activations.\n        \"\"\"\n        return (output_activations - y)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"deeplearning/digit_ocr/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":8205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"522776446","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# Load the data\ndata = np.loadtxt('food-texture.csv', skiprows=1, usecols=[1,2,3,4,5],delimiter=',')\n#print(data)\noil = data[:,0]\ndensity = data[:,1]\ncrispy = data[:,2]\nfracture = data[:,3]\nhardness = data[:,4]\n\nfig , medidas = plt.subplots(5, 1, figsize= (10,10))\n\nmedidas[0].hist(oil)\nmedidas[1].hist(density)\nmedidas[2].hist(crispy)\nmedidas[3].hist(fracture)\nmedidas[4].hist(hardness)\n\nfig.subplots_adjust(hspace=0.5)\nplt.savefig('food.pdf')\nplt.close()\n\ndef acp(data_matrix):\n    '''data_matrix must be the data matrix by COLUMNS i.e. 
a column is a variable and a row is an observation'''\n    data_matrix = data_matrix.T\n    cov_matrix = np.cov(data_matrix)\n    print('The covariance matrix is:')\n    print(cov_matrix)\n    print('')\n    values, vectors = np.linalg.eig(cov_matrix.T)\n    print('The two principal components, in ascending order, are:')\n    print(vectors[:,0], ' with value ', values[0])\n    print(vectors[:,1], ' with value ', values[1])\n    \n    total_values = np.sum(values)\n    \n    print('\\nThe first component explains', values[0]/total_values * 100, '% of the variance')\n    print('The second component explains', values[1]/total_values * 100, '% of the variance')\n    \n    scores = np.dot(data_matrix.T, vectors)\n    return values, vectors, scores\n\ndef center_scale(data):\n    data_scaled = np.zeros_like(data)\n    for i in range(len(data[0])):\n        av_col = np.mean(data[:,i])\n        std_col = np.std(data[:,i])\n        for j in range(len(data)):\n            data_scaled[j,i] = ( data[j,i] - av_col )/ std_col\n    return data_scaled\n\ndata_matrix=center_scale(data)\nvalues, vectors, scores = acp(data_matrix)\n\nfig2 , medidas2 = plt.subplots(5, 1, figsize= (10,10))\n\nmedidas2[0].hist(data_matrix[:,0])\nmedidas2[1].hist(data_matrix[:,1])\nmedidas2[2].hist(data_matrix[:,2])\nmedidas2[3].hist(data_matrix[:,3])\nmedidas2[4].hist(data_matrix[:,4])\n\nfig2.subplots_adjust(hspace=0.5)\nplt.savefig('food2.pdf')\nplt.close()\n\ndef plot_eigen(data, i, j, vectors, labels, name):\n    '''Plots variables i, j of the data together with the two principal components'''\n    plt.scatter(data[:,i], data[:,j])\n    x = np.linspace(min(data[:,i]), max(data[:,i]))\n    plt.plot(x, x*vectors[j,0]/vectors[i,0], linewidth = 1.0, c='r', label = 'First vector')\n    plt.plot(x, x*vectors[j,1]/vectors[i,1], linewidth = 1.0, c='y', label = 'Second vector')\n    plt.title(labels[j]+ ' vs. '+ labels[i])\n    plt.xlabel(labels[i])\n    plt.ylabel(labels[j])\n    plt.ylim(min(data[:,j])-1, max(data[:,j])+1)\n    plt.legend(loc=0)\n    plt.savefig(name)\n    plt.close()\n    \nlabels = ['oil', 'density', 'crispy', 'fracture', 'hardness']\n\n\n\nplot_eigen(data_matrix,0,1,vectors, labels, 'grafica.pdf')\nplot_eigen(data_matrix,0,2,vectors, labels, 'grafica1.pdf')\nplot_eigen(data_matrix,0,3,vectors, labels, 'grafica2.pdf')\nplot_eigen(data_matrix,0,4,vectors, labels, 'grafica3.pdf')\nplot_eigen(data_matrix,0,0,vectors, labels, 'grafica4.pdf')\n","sub_path":"Teaching/201719/MetodosCompu/Semana4/4.1/acp.py","file_name":"acp.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"281499329","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Modbus Helpers functions:\n# - acquire all parameters from specified list featuring tuples with:\n#   (modbus_index_addr, , units, *float(conversion_factor) )\n#\n#\n# F.Thiebolt Jun.17\n#\n\n\n# #############################################################################\n#\n# Import zone\n#\n\nimport time\n\n# Modbus imports\nimport minimalmodbus\n\n\n\n# #############################################################################\n#\n# Global Variables\n#\n\n\n\n# #############################################################################\n#\n# Functions\n#\n\ndef modbusGetAll( instrument, tinput, float_precision=2 ):\n    ''' This function reads all tuples sent as arguments and returns two lists:\n        [ values ], [ units ]\n        ... 
hence for each value in [values] list, there exists a corresponding unit in [units].\n        Example: [ 42, 1206, 0.98 ] [ \"Wh\", \"W\", \"cosPhi\" ] means 42Wh, 1206W, powerFactor=0.98 '''\n\n    _values = list()\n    _units = list()\n\n    # let's parse inputs\n    for _index, _format, _unit, _convFactor in tinput:\n\n        _max_retries=2\n        _retry=0\n        while _retry <= _max_retries:\n            try:\n                # select proper operation according to specified 'format'\n\n                # signed OP ?\n                _signed = True if _format.lower().startswith(\"s\") else False\n\n                # long ?\n                if _format.lower().endswith(\"long\"):\n                    _val = float(instrument.read_long(_index,signed=_signed)) * float(_convFactor)\n                    _values.append(_val)\n                    _units.append(_unit)\n                    break\n\n                # float (2 words IEEE754) ?\n                if _format.lower().endswith(\"float\"):\n                    _val = float(instrument.read_float(_index)) * float(_convFactor)\n                    _values.append(round(_val,float_precision))\n                    _units.append(_unit)\n                    break\n\n                # unknown format\n                print(\"###ERROR: unknown format '%s' ?!?! ... continuing\" % _format)\n\n            # almost all exceptions including IOError, ValueError\n            except Exception as ex:\n                _retry = _retry + 1\n                if _retry <= _max_retries:\n                    print(\"#WARNING: exception raised, retrying to read (%d, %s, %s, %.3f) : \" % (_index,_format,_unit,float(_convFactor)) + str(ex) )\n                    time.sleep(0.1*_retry)\n                    continue # while loop\n                else:\n                    print(\"###ERROR: an Exception occurred while reading (%d, %s, %s, %.3f) : \" % (_index,_format,_unit,float(_convFactor)) + str(ex) )\n                    raise Exception(ex);\n\n    return _values,_units\n\n","sub_path":"tutorial-LoRaWAN/RN2483-powermeter/energy_meters/drivers/modbusHelpers.py","file_name":"modbusHelpers.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52700987","text":"# The sum of the squares of the first ten natural numbers is,\n# 1^2 + 2^2 + ... + 10^2 = 385\n# The square of the sum of the first ten natural numbers is,\n# (1 + 2 + ... 
+ 10)^2 = 55^2 = 3025\n# Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.\n# Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.\n\n# Solution:\n\nn = 101\n\ndef sqr_sum(x):\n    s = 0\n    for i in range(x):\n        s += i*i\n    return s\n\ndef sum_sqr(x):\n    s = 0\n    for i in range(x):\n        s += i\n    return s*s\n\nprint(abs(sqr_sum(n) - (sum_sqr(n))))\n\n# Answer: 25164150","sub_path":"coding_problems/project_euler/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12714832","text":"import numpy as np\r\nfrom sklearn.mixture import gaussian_mixture as gauss\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport cv2\r\nimport os\r\nimport functions as fun\r\nimport time\r\n\r\n\r\n'''This script is the main script of the make and model recognition using unsupervised learning\r\nAll of the functions used are in functions.py file\r\n'''\r\n#Number of SIFT components that we will be keeping after PCA reduction, original number of components is 128\r\npc_comp=100\r\n\r\n#Boolean flag that keeps track of the fisher vector pipeline\r\ncompute_all_steps=True\r\n\r\n#paths that will be used for our proof of concept\r\n#in further stage of the implementation we will have only one folder that we will be working with\r\npaths=[\"aston\",\"bmw\",\"clio\",\"dodge\",\"peugot\"]\r\n\r\n#First we define the PCA REDUCER MATRIX\r\n#This is the matrix that will be used to keep a certain number of SIFT components\r\n#Name of the file that stores the reducer matrix that will be used for the PCA reduction process\r\nid=\"reducer\"\r\n\r\ncovariance_type=\"diag\"\r\n\r\n#Check to see if there is a reducer file, if not create one \r\nif(not(os.path.isfile(id+\".npy\")) or compute_all_steps):\r\n\tprint(\"No reducer file was found\")\r\n\tprint(\"A new reducer file is being generated...\")\r\n\tfun.compute_save_reduce_vector(paths,id,pc_comp=pc_comp)\r\n\tprint(\"The Reducer file has been generated\")\r\n\tprint(\"\\n\")\r\n\r\n#Once the reducer file has been created it is time to load it and use it for PCA Reduction \r\nprint(\"Loading reducer file...\")\r\nreducer=np.load(id+\".npy\")\r\nprint(\"Reducer file loaded\")\r\nprint(\"\\n\")\r\n\r\n\r\n#Creation and storage of Reduced ROOT SIFT VECTORS\r\nif(compute_all_steps):\r\n\tprint(\"No root sift files were found\")\r\n\tprint(\"Generating root sift files...\")\r\n\tfun.compute_save_reduced_root_sift(reducer,paths)\r\n\tprint(\"Reduced root sift files generated and saved\")\r\n\tprint(\"\\n\")\r\n\t\r\n#Load all of the saved ROOT SIFT DESCRIPTORS and then use them to fit a GMM model\r\n\"\"\"Implementation that has to be kept \"\"\"\r\ndescriptors=np.atleast_2d(np.asarray(fun.file_counter(paths,\".npy\",\"reduced_data\",remove=False,loader=True)))\r\nprint(\"the shape of the descriptors using the second function is \", descriptors.shape)\r\n\r\n#Check to see if there are any trained GMM models\r\n#If so load them and use them to create a fisher vector \r\n#We will be using a range \r\nfor gmm_comp in range(50,1000,50):\r\n\tgmm_means_file=\"./GMM/means\"+str(gmm_comp)+\".gmm.npy\"\r\n\tgmm_covariance_file=\"./GMM/covs\"+str(gmm_comp)+\".gmm.npy\"\r\n\tgmm_weight_file=\"./GMM/weights\"+str(gmm_comp)+\".gmm.npy\"\r\n\tif(os.path.isfile(gmm_means_file) and os.path.isfile(gmm_covariance_file) and 
os.path.isfile(gmm_weight_file)):\r\n\t\tprint(\"all the GMM files are in place and now we are going to load them\")\r\n\t\tprint(\"loading files...\")\r\n\t\tgmm_means_file=\"./GMM/means\"+str(gmm_comp)+\".gmm.npy\"\r\n\t\tgmm_covariance_file=\"./GMM/covs\"+str(gmm_comp)+\".gmm.npy\"\r\n\t\tgmm_weight_file=\"./GMM/weights\"+str(gmm_comp)+\".gmm.npy\"\r\n\t\tmeans=np.load(gmm_means_file)\r\n\t\tcovs=np.load(gmm_covariance_file)\r\n\t\tweights=np.load(gmm_weight_file)\r\n\t\tprint(\"GMM \"+str(gmm_comp)+\" loaded\")\r\n\t\tprint(\"\\n\")\r\n\telse: \r\n\t\t# print(\"we did not find all of our files\")\r\n\t\t# print(\"we train a GMM Model\")\r\n\t\t# print(\"gathering ROOT SIFT descriptors...\")\r\n\t\t# descriptors=fun.compute_save_reduce_vector(paths,id,pc_comp=pc_comp,reduced=True).T\r\n\t\tdescriptors=np.atleast_2d(np.asarray(fun.file_counter(paths,\".npy\",\"reduced_data\",remove=False,loader=True)))\r\n\t\t# print(\"descriptors gathered\")\r\n\t\tprint(\"training GMM %d...\"%(gmm_comp))\r\n\t\t\r\n\t\t#GMM MODEL\r\n\t\tGMM=gauss.GaussianMixture(n_components=gmm_comp,covariance_type=covariance_type,max_iter=100000,n_init=1,init_params=\"kmeans\")\r\n\t\tGMM.fit(descriptors)\r\n\t\t# print(np.sum(GMM.predict_proba(descriptors[0:20]),axis=1))\r\n\t\tprint(\"trained GMM %d...\"%(gmm_comp))\r\n\t\tprint(\"saving the GMM model\")\r\n\t\tmeans=GMM.means_\r\n\t\tcovs=GMM.covariances_\r\n\t\tweights=GMM.weights_\r\n\t\t\r\n\t\tgmm_means_file=\"./GMM/means\"+str(gmm_comp)+\".gmm.npy\"\r\n\t\tgmm_covariance_file=\"./GMM/covs\"+str(gmm_comp)+\".gmm.npy\"\r\n\t\tgmm_weight_file=\"./GMM/weights\"+str(gmm_comp)+\".gmm.npy\"\r\n\t\t\r\n\t\tnp.save(gmm_means_file,means)\r\n\t\tnp.save(gmm_covariance_file,covs)\r\n\t\tnp.save(gmm_weight_file,weights)\r\n\t\t# print(\"GMM model has been saved\")\r\n\t\tprint(\"\\n\")\r\n\r\n\t# now we check to see if there is any fisher vector\r\n\tnum_fis=fun.file_counter(paths,\".npy\",\"fisher_vectors\",remove=False)\r\n\tif(compute_all_steps):\r\n\t\t# print(\"No fisher vector files were found\")\r\n\t\t# print(\"generating them...\")\r\n\t\tprint(\"Generate and Save fisher files for GMM %d...\"%(gmm_comp))\r\n\t\tfun.generate_fisher_vectors(paths,means,covs,weights,\"_\"+str(gmm_comp))\r\n\t\tprint(\"Fisher files saved\")\r\n\t\tprint(\"\\n\")\r\n\t\t# print(\"loading our fisher files...\")\r\n\t\t# fisher_vectors=np.atleast_2d(fun.file_counter(paths,\".npy\",\"fisher_vectors\",remove=False,loader=True))\r\n\t\t# print(\"fisher files have been generated\")\r\n\t\t# print(fisher_vectors.shape)\r\n\telse:\r\n\t\tprint(\"we found our fisher files\")\r\n\t\tprint(\"loading our fisher files...\")\r\n\t\t# fisher_vectors=np.atleast_2d(fun.file_counter(paths,\".npy\",\"fisher_vectors\",remove=False,loader=True))\r\n\t\t# print(fisher_vectors.shape)'''\r\n\t\t\r\n######################################\r\n# FINAL STAGE OF PROOF OF CONCEPT #\r\n######################################\r\n\r\n\r\nevaluation=\"data.txt\"\r\nfor case in paths :\r\n\tmax_value=0\r\n\tmax_comp=0\r\n\tmin_value=1000\r\n\tbest_gmms=5\r\n\r\n\tdata=[]\r\n\tfor gmm_comp in range(50,1000,50):\r\n\t\ttarget=open(evaluation,\"a\")\r\n\t\tevaluation_limit=10\r\n\t\tprint(\"-------------------------------------------------\")\r\n\t\tprint(\"evaluation of GMM %d for %s\"%(gmm_comp,case))\r\n\t\t# 
paths=[\"aston\",\"bmw\",\"clio\",\"dodge\",\"peugot\"]\r\n\t\tfisher_vectors=np.atleast_2d(fun.file_counter(paths,\"_\"+str(gmm_comp)+\".npy\",\"fisher_vectors\",remove=False,loader=True,Fisher=True))\r\n\t\tcosine_metric=cosine_similarity(fisher_vectors)\r\n\t\tall_bmw=0\t\t\r\n\t\tall_clio=0\t\t\t\r\n\t\tall_dodge=0\t\t\t\r\n\t\tall_peugot=0\t\t\t\t\r\n\t\tall_aston=0\t\r\n\r\n\r\n\t\tif(case==\"aston\"):\r\n\t\t\tunder=5\r\n\t\t\tupper=15\r\n\t\tif(case==\"bmw\"):\r\n\t\t\tunder=25\r\n\t\t\tupper=35\r\n\t\tif(case==\"clio\"):\r\n\t\t\tunder=45\r\n\t\t\tupper=55\r\n\t\tif(case==\"dodge\"):\r\n\t\t\tunder=65\r\n\t\t\tupper=75\r\n\t\tif(case==\"peugot\"):\r\n\t\t\tunder=85\r\n\t\t\tupper=95\r\n\t\t\r\n\t\tfor ind in range(under,upper):\r\n\t\t\tif(ind<20):\r\n\t\t\t\tcurrent=all_aston\r\n\t\t\t\tcase=\"aston\"\r\n\t\t\tif(ind>19 and ind<40):\r\n\t\t\t\tcurrent=all_bmw\r\n\t\t\t\tcase=\"bmw\"\r\n\t\t\tif(ind>39 and ind<60):\r\n\t\t\t\tcurrent=all_clio\r\n\t\t\t\tcase=\"clio\"\r\n\t\t\tif(ind>59 and ind<80):\r\n\t\t\t\tcurrent=all_dodge\r\n\t\t\t\tcase=\"dodge\"\r\n\t\t\tif(ind>79 and ind<100):\r\n\t\t\t\tcurrent=all_peugot\r\n\t\t\t\tcase=\"peugot\"\r\n\t\t\tindices=np.flip(np.argsort(cosine_metric[ind]),axis=0)\r\n\t\t\taston=0\r\n\t\t\tbmw=0\r\n\t\t\tdodge=0\r\n\t\t\tclio=0\r\n\t\t\tpeugot=0\r\n\t\t\tclio_translate=0\r\n\t\t\tfor sim in range(1,(evaluation_limit+1)):\r\n\t\t\t\r\n\t\t\t\tif(indices[sim]<20):\r\n\t\t\t\t\taston=aston+1\r\n\t\t\t\tif(indices[sim]>19 and indices[sim]<40):\r\n\t\t\t\t\tbmw=bmw+1\r\n\t\t\t\tif(indices[sim]>39 and indices[sim]<60):\r\n\t\t\t\t\tclio=clio+1\r\n\t\t\t\tif(indices[sim]>59 and indices[sim]<80):\r\n\t\t\t\t\tdodge=dodge+1\r\n\t\t\t\tif(indices[sim]>79 and indices[sim]<100):\r\n\t\t\t\t\tpeugot=peugot+1\r\n\t\t\t# print(\"there are %d ASTON vehicles in the first %d images\"%(aston,evaluation_limit))\t\t\r\n\t\t\t# print(\"there are %d BMW vehicles in the first %d images\"%(bmw,evaluation_limit))\t\t\r\n\t\t\t# print(\"there are %d CLIO vehicles in the first %d images\"%(clio,evaluation_limit))\t\t\r\n\t\t\t# print(\"there are %d DODGE vehicles in the first %d images\"%(dodge,evaluation_limit))\t\t\r\n\t\t\t# print(\"there are %d PEUGOT vehicles in the first %d images\"%(peugot,evaluation_limit))\t\t\r\n\t\t\t# print(\"\\n\")\r\n\t\t\t\"\"\"for sim in range(5):\r\n\t\t\t\r\n\t\t\t\tif(indices[sim]<20):\r\n\t\t\t\t\t# print(\"./buildings/%03d.png\"%(indices[sim]+1))\r\n\t\t\t\t\tif (indices[sim]==0):\r\n\t\t\t\t\t\timage=cv2.imread(\"./aston/%03d.png\"%(indices[sim]+1))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\timage=cv2.imread(\"./aston/%03d.png\"%(indices[sim]))\r\n\t\t\t\t\theight, width = image.shape[:2]\r\n\t\t\t\t\timage = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)\r\n\t\t\t\t\tif(sim==0):\r\n\t\t\t\t\t\tcv2.imshow(\"original\",image)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcv2.imshow(\"similar %d\"%(sim),image)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\tif(indices[sim]>19 and indices[sim]<40):\r\n\t\t\t\t\t# print(\"./buildings/%03d.png\"%(indices[sim]+1))\r\n\t\t\t\t\timage=cv2.imread(\"./bmw/%03d.png\"%(indices[sim]+1-20))\r\n\t\t\t\t\theight, width = image.shape[:2]\r\n\t\t\t\t\timage = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)\r\n\t\t\t\t\tif(sim==0):\r\n\t\t\t\t\t\tcv2.imshow(\"original\",image)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcv2.imshow(\"similar %d\"%(sim),image)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\tif(indices[sim]>39 and indices[sim]<60):\r\n\t\t\t\t\t# 
print(\"./buildings/%03d.png\"%(indices[sim]+1))\r\n\t\t\t\t\timage=cv2.imread(\"./clio/%03d.png\"%(indices[sim]+1-40))\r\n\t\t\t\t\theight, width = image.shape[:2]\r\n\t\t\t\t\timage = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)\r\n\t\t\t\t\tif(sim==0):\r\n\t\t\t\t\t\tcv2.imshow(\"original\",image)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcv2.imshow(\"similar %d\"%(sim),image)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\tif(indices[sim]>59 and indices[sim]<80):\r\n\t\t\t\t\t# print(\"./buildings/%03d.png\"%(indices[sim]+1))\r\n\t\t\t\t\timage=cv2.imread(\"./dodge/%03d.png\"%(indices[sim]+1-60))\r\n\t\t\t\t\theight, width = image.shape[:2]\r\n\t\t\t\t\timage = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)\r\n\t\t\t\t\tif(sim==0):\r\n\t\t\t\t\t\tcv2.imshow(\"original\",image)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcv2.imshow(\"similar %d\"%(sim),image)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\tif(indices[sim]>79 and indices[sim]<90):\r\n\t\t\t\t\timage=cv2.imread(\"./peugot/%03d.png\"%(indices[sim]+1-80))\r\n\t\t\t\t\theight, width = image.shape[:2]\r\n\t\t\t\t\timage = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)\r\n\t\t\t\t\tif(sim==0):\r\n\t\t\t\t\t\tcv2.imshow(\"original\",image)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcv2.imshow(\"similar %d\"%(sim),image)\r\n\t\t\t\t\r\n\t\t\t\tif(indices[sim]>99):\r\n\t\t\t\t\timage=cv2.imread(\"./cliotranslate/%03d.png\"%(indices[sim]+1-100))\r\n\t\t\t\t\theight, width = image.shape[:2]\r\n\t\t\t\t\timage = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)\r\n\t\t\t\t\tif(sim==0):\r\n\t\t\t\t\t\tcv2.imshow(\"original\",image)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcv2.imshow(\"similar %d\"%(sim),image)\"\"\"\r\n\t\t\t\t\t\t\t\t\r\n\t\t\tall_bmw+=bmw\t\t\t\r\n\t\t\tall_clio+=clio\t\t\t\r\n\t\t\tall_dodge+=dodge\t\t\t\r\n\t\t\tall_peugot+=peugot\t\t\t\r\n\t\t\tall_aston+=aston\t\r\n\t\t\t# all_clio_translate+=clio_translate\r\n\t\t\t# cv2.imshow(\"image\",np.zeros((100,1000,3)))\r\n\t\t\t# cv2.waitKey(0)\r\n\t\t# print(\"Overall GMM with %d components gives us : \"%gmm_comp)\r\n\t\t# print(\"BMW: %d\"%all_bmw)\r\n\t\t# print(\"CLIO: %d\"%all_clio)\r\n\t\t# print(\"DODGE: %d\"%all_dodge)\r\n\t\t# print(\"PEUGOT: %d\"%all_peugot)\r\n\t\t# print(\"ASTON: %d\"%all_aston)\r\n\t\t# print(\"Number of vehicles : %d\"%(all_bmw+all_clio+all_dodge+all_peugot+all_aston))\r\n\t\tmax_comp,max_value=(gmm_comp,current) if max_value<current else (max_comp,max_value)\r\n\t\tmin_comp,min_value=(gmm_comp,current) if min_value>current else (min_comp,min_value)\r\n\t\tdata.append((current,gmm_comp))\r\n\t\t# print(\"\\n\")\r\n\t\tdata.sort(key=lambda tup: tup[0])\r\n\t\tdata.reverse()\r\n\t\t# print(\"best comp is %d yielding \" %(max_comp))\r\n\t\t# cv2.imshow(\"image\",np.zeros((100,1000,3)))\r\n\t\t# cv2.waitKey(0)\r\n\t\tprint(\"-------------------------------------------------\")\r\n\tmean_recall=0\r\n\tmean_gmm=0\r\n\tfor i in range(best_gmms):\r\n\t\tmean_recall+=data[i][0]\r\n\t\tmean_gmm+=data[i][1]\r\n\tmean_recall=mean_recall/best_gmms\r\n\tmean_gmm=mean_gmm/best_gmms\r\n\t\r\n\ttarget.write(\"Evaluation of model %s yields \"%(case))\r\n\ttarget.write(\"\\n\")\r\n\ttarget.write(\"[\")\r\n\tfor d in data:\r\n\t\ttarget.write(\"(\"+str(d[0])+\",\"+str(d[1])+\")\")\r\n\ttarget.write(\"]\")\r\n\ttarget.write(\"\\n\")\r\n\ttarget.write(\"best mean GMM is %s and best mean recall is %s\"%(str(mean_gmm),str(mean_recall)))\r\n\ttarget.write(\"\\n\"*2)\r\n\t\r\n\tprint(\"best comp is %d yielding %d \" %(max_comp,max_value))\r\n\tprint(\"worst comp is %d yielding %d \" 
%(min_comp,min_value))\r\n\tprint(data)\r\n\t# break\ntarget.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"mmr.py","file_name":"mmr.py","file_ext":"py","file_size_in_byte":11660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"113324591","text":"#!/usr/bin/env python3\r\n# coding: UTF-8\r\n# Display Temperature\r\n# sudo python3 temperature_disp.py --led-rows=16 --led-brightness=40\r\nfrom matrix import Matrix\r\nfrom rgbmatrix import graphics\r\nfrom datetime import datetime\r\nimport time\r\nfrom logging import getLogger, StreamHandler, DEBUG\r\nlogger = getLogger(__name__)\r\nhandler = StreamHandler()\r\nhandler.setLevel(DEBUG)\r\nlogger.setLevel(DEBUG)\r\nlogger.addHandler(handler)\r\nfrom disp_abc import DispAbc\r\nfrom collections import namedtuple\r\nfrom temperature_mod import TemperatureSensor\r\n\r\nclass TemperatureDisp(DispAbc):\r\n    u\"\"\"Temperature display class\r\n    \"\"\"\r\n    def __init__(self, matrix: Matrix):\r\n        self.matrix = matrix\r\n        self.accepted_stop = False\r\n\r\n    def execute(self):\r\n        u\"\"\"Start the temperature display.\r\n        Starting the thread implicitly invokes __call__.\r\n        \"\"\"\r\n        logger.debug(\"Temperature Disp Start\")\r\n        \r\n        sensor = TemperatureSensor() \r\n\r\n        offscreen_canvas = self.matrix.matrix.CreateFrameCanvas()\r\n        font = graphics.Font()\r\n        font.LoadFont(\"fonts/5x7.bdf\")\r\n\r\n        while not self.accepted_stop:\r\n            value = sensor.get()\r\n            textColor1 = graphics.Color(255, 100, 100)\r\n            textColor2 = graphics.Color(100, 255, 100)\r\n            textColor3 = graphics.Color(100, 100, 255)\r\n            textColor4 = graphics.Color(100, 100, 100)\r\n            \r\n            disp_text1 = str(value.temp)[0:4] + \" C\"\r\n            disp_text2 = str(value.hum)[0:4] + \" %\"\r\n            disp_text3 = str(value.hpa)[0:4]\r\n            disp_text4 = \"hPa\"\r\n            \r\n            offscreen_canvas.Clear()\r\n            graphics.DrawText(offscreen_canvas, font, 2, 7, textColor1, disp_text1)\r\n            graphics.DrawText(offscreen_canvas, font, 2, 15, textColor2, disp_text2)\r\n            offscreen_canvas = self.matrix.matrix.SwapOnVSync(offscreen_canvas)\r\n\r\n            time.sleep(3)\r\n\r\n            offscreen_canvas.Clear()\r\n            graphics.DrawText(offscreen_canvas, font, 2, 7, textColor3, disp_text3)\r\n            graphics.DrawText(offscreen_canvas, font, 16, 15, textColor4, disp_text4)\r\n            offscreen_canvas = self.matrix.matrix.SwapOnVSync(offscreen_canvas)\r\n            \r\n            time.sleep(3)\r\n\r\n    def stop(self):\r\n        self.accepted_stop = True\r\n        \r\n# Main function\r\nif __name__ == \"__main__\":\r\n    matrix = Matrix()\r\n    # the process method has to be called once\r\n    if (not matrix.process()):\r\n        matrix.print_help()\r\n    disp_obj = TemperatureDisp(matrix)\r\n    disp_obj.execute()\r\n\r\n","sub_path":"temperature_disp.py","file_name":"temperature_disp.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"139901297","text":"from flask import Flask, render_template, url_for, redirect\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\n\n\napp = Flask(__name__)\n\n\n#fake database of restaurants\n\nrestaurants = [\n    {\n        'name':'Pib Burgers',\n        'id':1\n    },\n    {\n        'name':'McShavom',\n        'id':2\n    },\n    {\n        'name':'Good Burger',\n        'id':3\n    },\n    {\n        'name': 'Pats Cheesesteak',\n        'id':4\n    }\n]\n#\n# restaurants = [{'name':'KCreeppers','id':1}, {'name':'Mc Shrimpss','id':2}]\n#\n# items = [ {'name':'Cheese Pizza', 'description':'made with fresh 
cheese', 'price':'$5.99','course' :'Entree', 'id':'1'}, {'name':'Chocolate Cake','description':'made with Dutch Chocolate', 'price':'$3.99', 'course':'Dessert','id':'2'},{'name':'Caesar Salad', 'description':'with fresh organic vegetables','price':'$5.99', 'course':'Entree','id':'3'},{'name':'Iced Tea', 'description':'with lemon','price':'$.99', 'course':'Beverage','id':'4'},{'name':'Spinach Dip', 'description':'creamy dip with fresh spinach','price':'$1.99', 'course':'Appetizer','id':'5'} ]\n# item = {'name':'Cheese Pizza','description':'made with fresh cheese','price':'$5.99','course' :'Entree'}\n\n\n\n# TESTING\n\n\n#Initial\n#the argument restaurant_id = restaurant_id allows me to pass the value\n#of restaurant_id to the template restaurants.html\n@app.route('/')\n@app.route('/restaurant/')\ndef showRestaurants():\n    user = {'username':'Kennedy'}\n    # passing variables to template\n    return render_template('restaurants.html',restaurants = restaurants, user = user)\n\n# #Create a new restaurant\n# #int will turn the matched URL value into an integer\n# @app.route('/restaurant/new')\n# @app.route('/restaurant/<int:restaurant_id>/menu/new/')\n# def newRestaurant():\n#     return render_template('newRestaurant.html',restaurants=restaurants)\n#\n# #Edit a restaurant & menu\n# @app.route('/restaurant/<int:restaurant_id>/edit/')\n# @app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit/')\n# def editRestaurant(restaurant_id,menu_id):\n#     restaurant_id = restaurant\n#     menu_id = item\n#     return render_template('editRestaurant.html',restaurant_id = restaurant_id,menu_id = menu_id)\n#\n# #Delete a restaurant\n# @app.route('/restaurant/<int:restaurant_id>/delete')\n# def deleteRestaurant(restaurant_id):\n#     restaurant_id = restaurant\n#     # restaurant_id = \"This page will be for deleting restaurant\"\n#     return render_template('deleteRestaurant.html',restaurant_id = restaurant_id)\n#\n# #Show menu\n# @app.route('/restaurant/<int:restaurant_id>/')\n# @app.route('/restaurant/<int:restaurant_id>/menu')\n# def showMenu(restaurant_id):\n#     # restaurant_id = \"This page is to show menu for restaurant\"\n#     restaurant_id = restaurant\n#     return render_template('menu.html',restaurant_id = restaurant_id)\n#\n# #New Menu Item\n# @app.route('/restaurant/<int:restaurant_id>/menu/new')\n# def newMenuItem(restaurant_id):\n#     # restaurant_id = \"This page is for making a new menu\"\n#     restaurant_id = restaurant\n#     return render_template('newMenuItem.html',restaurant_id = restaurant_id)\n#\n# #Edit Menu Item\n# @app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit')\n# def editMenuItem(restaurant_id,menu_id):\n#     menu_id = item\n#     restaurant_id = restaurant\n#     return render_template('editMenuItem.html', restaurant_id = restaurant_id, menu_id = menu_id)\n#\n#\n# #Delete Menu Item\n# @app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete')\n# def deleteMenuItem(restaurant_id,menu_id):\n#     restaurant_id = restaurant\n#     menu_id = item\n#     # return \"This page is to delete the menu\"\n#     return render_template('deleteMenuItem.html', restaurant_id = restaurant_id, menu_id = menu_id)\n\n\nif __name__ == '__main__':\n    app.debug = True\n    app.run(host = '0.0.0.0', port=5000)\n","sub_path":"finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"383955212","text":"from torchvision import datasets\nimport numpy as np\n\n# mnist = datasets.MNIST('../storage/mnist/', download=True)\ntrain_set = datasets.MNIST('../storage/mnist/', train=True)\ntest_set = datasets.MNIST('../storage/mnist/', train=False)\n\n\ndef relu(X):\n    matrix_shape = X.shape\n    F_matrix = np.zeros(matrix_shape)\n    if 
len(matrix_shape) == 2:\n for i in range(matrix_shape[0]):\n for j in range(matrix_shape[1]):\n if X[i][j] > 0:\n F_matrix[i][j] = 1\n else:\n for i in range(matrix_shape[0]):\n for j in range(matrix_shape[1]):\n for k in range(matrix_shape[2]):\n if X[i][j][k] > 0:\n F_matrix[i][j][k] = 1\n output = np.multiply(F_matrix, X)\n return output, F_matrix\n\n\ndef Z(K, V, i, j, k, stride):\n k_shape = K.shape\n # kernel_out_channel = k_shape[0]\n kernel_channel = k_shape[1]\n kernel_row = k_shape[2]\n kernel_column = k_shape[3]\n z = 0\n for l in range(kernel_channel):\n for m in range(kernel_row):\n for n in range(kernel_column):\n z += V[l][j*stride+m][k*stride+n] * K[i][l][m][n]\n return z\n\n\ndef conv(V, K, stride=1):\n print('Convolution layer:')\n print('[-]K shape:', K.shape)\n print('[-]V shape:', V.shape)\n output_channel = K.shape[0]\n kernel_channel = K.shape[1]\n kernel_row = K.shape[2]\n kernel_col = K.shape[3]\n\n input_channel = V.shape[0]\n input_row = V.shape[1]\n input_col = V.shape[2]\n assert (input_channel == kernel_channel), \"channel doesn't match!\"\n # stride = 1\n output_row = int((input_row - kernel_row) / stride + 1)\n output_col = int((input_col - kernel_col) / stride + 1)\n output = np.zeros(shape=(output_channel, output_row, output_col))\n # print(output.shape)\n print('[-]output shape:', output.shape)\n for i in range(output_channel):\n for j in range(output_row):\n for k in range(output_col):\n output[i][j][k] = Z(K, V, i, j, k, stride=stride)\n return output\n\n\ndef Pool(V, i, j, k, kernel):\n # print('[--]pooling:')\n values = []\n locations = []\n for m in range(kernel):\n for n in range(kernel):\n row = j * kernel + m\n col = k * kernel + n\n values.append(V[i][row][col])\n locations.append((i, row, col))\n # print('[---]values:', values)\n # print('[---]locations:', locations)\n max_value = max(values)\n ind = values.index(max_value)\n location = locations[ind]\n # print('[---]max:', max_value)\n # print('[---]index:', ind)\n # print('[---]location:', location)\n return max_value, location\n\n\ndef max_pooling(V, kernel=2):\n print('Max pooling layer:')\n print('[-]V shape:', V.shape)\n input_channel = V.shape[0]\n input_row = V.shape[1]\n input_col = V.shape[2]\n\n output_channel = input_channel\n output_row = int(input_row / kernel)\n output_col = int(input_col / kernel)\n\n output = np.zeros(shape=(output_channel, output_row, output_col))\n print('[-]output shape:', output.shape)\n pool_matrix = np.zeros(shape=(input_channel, input_row, input_col))\n print('[-]pool matrix shape:', pool_matrix.shape)\n for i in range(output_channel):\n for j in range(output_row):\n for k in range(output_col):\n output[i][j][k], loc = Pool(V, i, j, k, kernel=kernel)\n pool_matrix[loc[0]][loc[1]][loc[2]] = 1\n return output, pool_matrix\n\n\ndef normalize_image(image_in):\n output_image = np.array(image_in)\n output_image = np.array([output_image])\n output_image = output_image/255\n return output_image\n\n\nK1 = np.random.random(size=(5, 1, 5, 5))\nK2 = np.random.random(size=(10, 5, 5, 5))\n\n\nfor image, label in train_set:\n Image = normalize_image(image)\n O = conv(Image, K1) # U1\n O, mf1 = relu(O) # C1\n\n O = conv(O, K2) # U2\n O, mf2 = relu(O) # C2\n\n O, mp1 = max_pooling(O) # P1\n print(O.shape)\n break","sub_path":"convolutional_layer_implementation/playground_v0001/loader_to_cnn.py","file_name":"loader_to_cnn.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
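+# Note on loader_to_cnn.py above: conv() computes a plain strided cross-correlation
+# (no kernel flip) with explicit triple loops. A minimal sanity-check sketch,
+# assuming scipy is available (an assumption; the record itself does not use scipy):
+import numpy as np
+from scipy.signal import correlate
+
+V = np.random.rand(3, 8, 8)       # in_channels x rows x cols
+K = np.random.rand(2, 3, 3, 3)    # out_channels x in_channels x kh x kw
+# Per output channel, an N-d 'valid' cross-correlation over all input channels
+# collapses the channel axis, giving a reference result of shape (2, 6, 6).
+ref = np.stack([correlate(V, K[i], mode='valid')[0] for i in range(2)])
+# conv(V, K) from the record (stride=1) should match ref up to floating-point rounding.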
+{"seq_id":"641730794","text":"\"\"\"\n-------------------------------\nIMPORTS\n-------------------------------\n\"\"\"\nimport signal, json, time, os\nimport tkinter as tk\nimport multiprocessing as mp\n\nfrom Engine import chessboard\nfrom Engine import gameState\n\nfrom Engine.GUI import gui_widgets as widgets\nfrom Engine.lichess import lichessInterface_new as interface\n\n\n\"\"\"\n-------------------------------\nVARIABLES\n-------------------------------\n\"\"\"\neventQueue = mp.Queue()\ngameQueue = mp.Queue()\n\neventstream = None\ngamestream = None\n\nterminated = False\n\n\n\"\"\"\n-------------------------------\nPAGE CLASSES\n-------------------------------\n\"\"\"\n\nclass StartupPage(tk.Frame):\n def __init__(self, master, controller):\n tk.Frame.__init__(self, master)\n header = widgets.createLabel(self, text=\"MagiChess\", font=\"times\", fontsize=25, fontweight=\"bold\")\n header.pack(padx=10, pady=10)\n\n #startup buttons\n signinButton = widgets.createButton(self, function=lambda: controller.show_frame(SigninPage),\n text=\"Sign in to LiChess.org\", bgcolor=\"sky blue\")\n signinButton.pack(pady=10)\n\n exitButton = widgets.createButton(self, function=quit_program,\n text=\"Exit\", bgcolor=\"seashell3\") \n exitButton.pack()\n\n\n\nclass SigninPage(tk.Frame):\n def __init__(self, master, controller):\n tk.Frame.__init__(self, master)\n header = widgets.createLabel(self, text=\"Sign in to LiChess\", font=\"times\", fontsize=14, fontweight=\"bold\")\n header.pack(padx=10, pady=10)\n\n \"\"\" username/password entries\n usernameLabel = widgets.createLabel(self, text=\"Username\", font=\"times\", fontsize=11, fontweight=\"normal\")\n usernameLabel.pack()\n usernameEntry = widgets.createEntry(self, bgcolor=\"beige\")\n usernameEntry.pack()\n\n passwordLabel = widgets.createLabel(self, text=\"Password\", font=\"times\", fontsize=11, fontweight=\"normal\")\n passwordLabel.pack()\n passwordEntry = widgets.createEntry(self, bgcolor=\"beige\", show=\"*\")\n passwordEntry.pack()\n \"\"\"\n\n\n \"\"\" buttons \"\"\"\n loginButton = widgets.createButton(self, function=lambda: self.submit(controller=controller, username=\"degugBot\"),\n text=\"Login as degugBot\", bgcolor=\"seashell3\")\n loginButton.pack(pady=4)\n\n returnButton = widgets.createButton(self, function=lambda: controller.show_frame(StartupPage),\n text=\"Return\", bgcolor=\"seashell3\")\n returnButton.pack(pady=7)\n\n \"\"\" submit username/password for validation \"\"\"\n def submit(self, controller, username, password=None):\n valid = 1\n\n # login as degugBot\n if valid:\n controller.show_frame(MainMenuPage, user=username)\n\n # create and start an event stream process\n global eventstream\n eventstream = mp.Process(target = event_stream, args = (eventQueue,))\n eventstream.start()\n print(\"EVENT STREAM PID: \", eventstream.pid)\n\n else:\n print(\"User not found. 
Invalid username/password\")\n\n return\n\n\n\n\nclass MainMenuPage(tk.Frame):\n def __init__(self, master, controller):\n tk.Frame.__init__(self, master)\n \n def welcomeHeader(self, username):\n header = widgets.createLabel(self, text=\"Welcome to MagiChess, \" + username, font=\"times\", fontsize=14, fontweight=\"bold\")\n header.pack(padx=10, pady=10)\n \n def menuButtons(self, controller):\n \"\"\" main menu options \"\"\"\n playbotButton = widgets.createButton(self, function=lambda: controller.show_frame(PlayBotPage),\n text=\"Play Bot\", bgcolor=\"sky blue\")\n playbotButton.pack(pady=5)\n \n playrandButton = widgets.createButton(self, function=lambda: controller.show_frame(PlayRandomPage),\n text=\"Seek an Opponent\", bgcolor=\"sky blue\")\n playrandButton.pack(pady=5)\n \n playfriendButton = widgets.createButton(self, function=lambda: controller.show_frame(ChallengePage),\n text=\"Challenge a Friend\", bgcolor=\"sky blue\")\n playfriendButton.pack(pady=5)\n \n exitButton = widgets.createButton(self, function=quit_program,\n text=\"Exit MagiChess\", bgcolor=\"seashell3\")\n exitButton.pack(pady=5)\n \n \n\n\n\"\"\" main menu pages \"\"\"\nclass PlayBotPage(tk.Frame):\n def __init__(self, master, controller):\n tk.Frame.__init__(self, master)\n header = widgets.createLabel(self, text=\"Play a Bot\", font=\"times\", fontsize=14, fontweight=\"bold\")\n header.pack(padx=10, pady=10)\n \n #return to main menu\n returnButton = widgets.createButton(self, function=lambda: controller.show_frame(MainMenuPage),\n text=\"Return to Main Menu\", bgcolor=\"sky blue\")\n returnButton.pack()\n \nclass PlayRandomPage(tk.Frame):\n def __init__(self, master, controller):\n tk.Frame.__init__(self, master)\n header = widgets.createLabel(self, text=\"Seeking Opponent...\", font=\"times\", fontsize=14, fontweight=\"bold\")\n header.pack(padx=10, pady=10)\n \n #return to main menu\n returnButton = widgets.createButton(self, function=lambda: controller.show_frame(MainMenuPage),\n text=\"Return to Main Menu\", bgcolor=\"sky blue\")\n returnButton.pack()\n \n def seekOpponent(self):\n \"\"\"\n send request to LiChess server to seek an opponent\n \"\"\"\n \n return\n \nclass ChallengePage(tk.Frame):\n def __init__(self, master, controller):\n tk.Frame.__init__(self, master)\n header = widgets.createLabel(self, text=\"Search Opponent Name\", font=\"times\", fontsize=14, fontweight=\"bold\")\n header.pack(padx=10, pady=10)\n\n # name input and search button\n usernameEntry = widgets.createEntry(self, bgcolor=\"beige\") \n usernameEntry.pack(pady=10)\n challengeButton = widgets.createButton(self, function=lambda: self.challenge(controller, usernameEntry.get()),\n text=\"Challenge\", bgcolor=\"sky blue\")\n challengeButton.pack(pady=10)\n\n\n # return to main menu\n returnButton = widgets.createButton(self, function=lambda: controller.show_frame(MainMenuPage),\n text=\"Return to Main Menu\", bgcolor=\"sky blue\")\n returnButton.pack(pady=10)\n\n\n def challenge(self, controller, username=\"\"):\n\n if username == \"\":\n print(\"User not found\")\n else:\n\n # challenge user and set gameid\n gameid = interface.challenge_user(username)\n print(gameid)\n\n if not gameid:\n print(\"Unable to complete challenge\")\n else:\n\n interface.change_gameid(gameid)\n\n # wait until challenger accepts or declines challenge\n accepted = False\n while not accepted:\n try:\n event = eventQueue.get_nowait()\n if event[\"type\"] == \"gameStart\":\n if event[\"game\"][\"id\"] == gameid:\n print(\"game accepted\")\n accepted = 
True\n\n                        if event[\"type\"] == \"challengeDeclined\":\n                            print(\"Challenge declined by: \", username)\n                            break\n                    except:\n                        pass\n\n\n                if accepted:\n                    ingame(username, controller)\n\n                \n                # initialGameStreamProcess = mp.Process(target=initialgame_stream)\n                # initialGameStreamProcess.start()\n                \n                \n\n        return\n\n\n\"\"\"\n-------------------------------\nFUNCTIONS\n-------------------------------\n\"\"\"\n\n\"\"\" ingame: runs while user is currently in a game\n\tparams:\n\t\tchallengerName: name of player that user is playing\n\treturn:\n\"\"\"\ndef ingame(challengerName, controller):\n\n    # create a game stream\n    global gamestream\n    gamestream = mp.Process(target=game_stream, args=(gameQueue,))\n    gamestream.start()\n    print(\"GAME STREAM PID: \", gamestream.pid)\n\n    # create a game state\n    gamestate = gameState.GameState(gameQueue=gameQueue)\n\n    # start chessboard game window and wait until chessboard window is closed\n    chessboard.init_chessboard(challengerName, gamestate)\n\n\n\n\n\"\"\" event_stream: separate process for event stream\n\tparams:\n\t\teventQueue: responses from LiChess event stream will be placed in queue\n\treturn:\n\"\"\"\ndef event_stream(eventQueue):\n    \n    iterator = 0\n\n    # run in background\n    while not terminated:\n        try:\n            time.sleep(3)\n            # api call to start an event stream\n            response = interface.create_eventstream()\n            lines = response.iter_lines()\n\n            # iterate through the response message\n            for line in lines:\n                # place response events in control queue\n                if line:\n                    event = json.loads(line.decode('utf-8'))\n                    eventQueue.put_nowait(event)\n                else:\n                    eventQueue.put_nowait({\"type\": \"ping\"})\n            \n\n        except:\n            pass\n    return\n\n\n\n\n\"\"\" game_stream: separate process for game stream\n    params:\n    return:\n\"\"\"\ndef game_stream(gameQueue):\n\n    # save initial state\n    response = interface.create_gamestream()\n    lines = response.iter_lines()\n    initialState = json.loads(next(lines).decode('utf-8'))\n\n    # run in background\n    while not terminated:\n        time.sleep(3)\n        response = interface.create_gamestream()\n        lines = response.iter_lines()\n        \n        #iterate through the response message\n        for line in lines:\n\n            if line:\n                event = json.loads(line.decode('utf-8'))\n                gameQueue.put_nowait(event)\n        \n    return\n\n\n\n\"\"\" quit_program: terminates all processes and closes window\n\tparams:\n\treturn:\n\"\"\"\ndef quit_program():\n    global terminated\n\n    terminated = True\n    if eventstream != None:\n        terminate_eventstream()\n        print(\"TERMINATED EVENT STREAM\")\n    if gamestream != None:\n        terminate_gamestream()\n        print(\"TERMINATED GAME STREAM\")\n\n    print(\"Quit Program\")\n    exit()\n\ndef terminate_gamestream():\n    global gamestream\n    gamestream.terminate()\n    gamestream.join()\n\ndef terminate_eventstream():\n    global eventstream\n    eventstream.terminate()\n    eventstream.join()\n","sub_path":"Engine/gui_pages.py","file_name":"gui_pages.py","file_ext":"py","file_size_in_byte":10717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599507736","text":"import folium\nimport pandas\n\n# Check headers =>\n# data.columns\ndata = pandas.read_csv(\"Volcanoes.txt\")\n\nlat = list(data[\"LAT\"])\nlon = list(data[\"LON\"])\nelev = list(data[\"ELEV\"])\nname = list(data[\"NAME\"])\n\nhtml = \"\"\"\nVolcano name:<br>
\n%s<br>
\nHeight: %s m\n\"\"\"\n\ndef color_producer(el):\n    if el < 1500:\n        return \"green\"\n    elif 1500 <= el < 3500:\n        return \"orange\"\n    else:\n        return \"red\"\n\nmap = folium.Map(\n    location=[38.2, -99.1], \n    zoom_start=5,\n    tiles=\"Stamen Terrain\",\n)\n\nfg_v = folium.FeatureGroup(name=\"Volcanos\")\n\nfor lt, ln, el, nm in zip(lat, lon, elev, name):\n    iframe = folium.IFrame(html=html % (nm, el), width=200, height=100)\n    fg_v.add_child(\n        folium.CircleMarker(\n            location=[lt,ln],\n            radius=3, \n            popup=folium.Popup(iframe),\n            color=color_producer(el),\n            fill=True,\n            fill_color=color_producer(el),\n        )\n    )\n\nfg_p = folium.FeatureGroup(name=\"Population\")\nfg_p.add_child(\n    folium.GeoJson(\n        data = open('world.json', \"r\", encoding=\"utf-8-sig\").read(),\n        style_function=lambda x: { \"fillColor\":\"green\" if x[\"properties\"][\"POP2005\"] < 10000000 \n                                else \"orange\" if 10000000 <= x[\"properties\"][\"POP2005\"] < 20000000 \n                                else \"red\"\n        }\n    )\n)\n\nmap.add_child(fg_v)\nmap.add_child(fg_p)\nmap.add_child(folium.LayerControl())\n\nmap.save(\"map.html\")","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460541002","text":"from keras.models import Sequential\nfrom keras.models import model_from_json\nfrom keras.layers import Dense, Activation, Reshape\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import UpSampling2D\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers import Flatten, Dropout\nimport math\nimport numpy as np\nimport os\nfrom keras.datasets import mnist\nfrom keras.optimizers import Adam\nimport FriendsLoader\nimport cv2\nimport dill\n\nfrom setting import IMG_SIZE\nfrom setting import BATCH_SIZE\nfrom setting import NUM_EPOCH\nfrom setting import GENERATED_IMAGE_PATH\n\nfrom setting import G_LR\nfrom setting import G_BETA\nfrom setting import D_LR\nfrom setting import D_BETA\n\ndef generator_model():\n    layerSize = int(IMG_SIZE/16)\n    model = Sequential()\n#    model.add(Dense(1024, input_shape=(100,)))\n#    model.add(BatchNormalization())\n#    model.add(Activation(\"relu\"))\n    model.add(Dense(layerSize*layerSize*512, input_shape=(100,)))\n    model.add(BatchNormalization())\n    model.add(Activation(\"relu\"))\n    model.add(Reshape((layerSize, layerSize, 512)))\n    model.add(UpSampling2D((2, 2)))\n    model.add(Conv2D(256, (5, 5), padding=\"same\"))\n    model.add(BatchNormalization())\n    model.add(Activation(\"relu\"))\n    model.add(UpSampling2D((2, 2)))\n    model.add(Conv2D(128, (5, 5), padding=\"same\"))\n    model.add(BatchNormalization())\n    model.add(Activation(\"relu\"))\n    model.add(UpSampling2D((2, 2)))\n    model.add(Conv2D( 64, (5, 5), padding=\"same\"))\n    model.add(BatchNormalization())\n    model.add(Activation(\"relu\"))\n    model.add(UpSampling2D((2, 2)))\n    model.add(Conv2D(3, (5, 5), padding=\"same\"))\n    model.add(Activation(\"tanh\"))\n    return model\n\ndef discriminator_model():\n    model = Sequential()\n    model.add(Conv2D(64, (5, 5), strides=(2, 2),\n                     input_shape=(IMG_SIZE, IMG_SIZE, 3)))  # note: channels-last input shape here\n    model.add(LeakyReLU(0.2))\n    model.add(Conv2D(128, (5, 5), strides=(2, 2)))\n    model.add(LeakyReLU(0.2))\n    model.add(Conv2D(256, (5, 5), strides=(2, 2)))\n    model.add(LeakyReLU(0.2))\n    model.add(Conv2D(512, (5, 5), strides=(2, 2)))\n    model.add(LeakyReLU(0.2))\n    model.add(Flatten())\n    model.add(Dense(100))\n    model.add(LeakyReLU(0.2))\n    
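# Dropout regularizes the flattened features before the final real/fake sigmoid.\n    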
model.add(Dropout(0.5))\n    model.add(Dense(1))\n    model.add(Activation(\"sigmoid\"))\n    return model\n\ndef combine_images(generated_images):\n\n    # note: at this point generated_images.shape should be (32, 28, 28, 1)\n\n    total = generated_images.shape[0]\n    cols = int(math.sqrt(total))\n    rows = math.ceil(float(total)/cols)\n    width, height = generated_images.shape[1:3]\n    combined_image = np.zeros((height*rows, width*cols, 3),dtype=generated_images.dtype)\n\n    for index, image in enumerate(generated_images):\n        i = int(index/cols)\n        j = index % cols\n        for k in range(3):\n            combined_image[width*i:width*(i+1), height*j:height*(j+1), k] = image[:, :, k]\n    return combined_image\n\ndef train():\n    # (X_train, y_train), (_, _) = mnist.load_data()\n    (X_train, y_train), (_, _) = FriendsLoader.load_data()\n    X_train = (X_train.astype(np.float32) - 127.5)/127.5\n    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 3)\n\n    # note: X_train.shape = (60000, 28, 28, 1)\n\n    if os.path.exists(\"discriminator.json\"):\n        with open(\"discriminator.json\", \"r\", encoding=\"utf-8\") as f:\n            discriminator = model_from_json(f.read())\n    else:\n        discriminator = discriminator_model()\n    d_opt = Adam(lr=D_LR, beta_1=D_BETA)\n    if os.path.exists(\"discriminator.h5\"):\n        discriminator.load_weights(\"discriminator.h5\", by_name=False)\n    discriminator.compile(loss=\"binary_crossentropy\", optimizer=d_opt)\n    with open(\"discriminator.json\", \"w\", encoding=\"utf-8\") as f:\n        f.write(discriminator.to_json())\n    discriminator.summary()\n\n    # generator+discriminator (the discriminator weights are frozen here)\n    discriminator.trainable = False\n    if os.path.exists(\"generator.json\"):\n        with open(\"generator.json\", \"r\", encoding=\"utf-8\") as f:\n            generator = model_from_json(f.read())\n    else:\n        generator = generator_model()\n    dcgan = Sequential([generator, discriminator])\n    g_opt = Adam(lr=G_LR, beta_1=G_BETA)\n    if os.path.exists(\"generator.h5\"):\n        generator.load_weights(\"generator.h5\", by_name=False)\n    dcgan.compile(loss=\"binary_crossentropy\", optimizer=g_opt)\n    with open(\"generator.json\", \"w\", encoding=\"utf-8\") as f:\n        f.write(generator.to_json())\n    num_batches = int(X_train.shape[0] / BATCH_SIZE)\n    print(\"Number of batches:\", num_batches)\n\n    # Noise for the output images: always reuse the same fixed noise so the\n    # growth of the generated images can be followed across epochs\n    if os.path.exists(\"noize.dill\"):\n        with open(\"noize.dill\", \"rb\") as f:\n            fixed_noise = dill.load(f)\n    else:\n        fixed_noise = np.array([np.random.uniform(-1, 1, 100) for _ in range(BATCH_SIZE)])\n        with open(\"noize.dill\", \"wb\") as f:\n            dill.dump(fixed_noise, f)\n    for epoch in range(NUM_EPOCH):\n\n        for index in range(num_batches):\n            # fresh noise for every training batch; the fixed noise above is only for image dumps\n            noise = np.array([np.random.uniform(-1, 1, 100) for _ in range(BATCH_SIZE)])\n            \n            # note: noise.shape = (32 (the batch size), 100)\n\n            image_batch = X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]\n            generated_images = generator.predict(noise, verbose=0)\n\n            # dump generated images, always from the same fixed noise\n            if index % 700 == 0:\n                image = combine_images(generator.predict(fixed_noise, verbose=0))\n                image = image*127.5 + 127.5\n                if not os.path.exists(GENERATED_IMAGE_PATH):\n                    os.mkdir(GENERATED_IMAGE_PATH)\n                cv2.imwrite(GENERATED_IMAGE_PATH+\"%04d_%04d.png\" % (epoch, index), image.astype(np.uint8))\n\n            # update the discriminator\n            X = np.concatenate((image_batch, generated_images))\n            y = [1]*BATCH_SIZE + [0]*BATCH_SIZE\n            d_loss = discriminator.train_on_batch(X, y)\n\n            # update the generator with another fresh noise batch\n            noise = np.array([np.random.uniform(-1, 1, 100) for _ in range(BATCH_SIZE)])\n            g_loss = dcgan.train_on_batch(noise, [1]*BATCH_SIZE)\n            print(\"epoch: %d, batch: %d, g_loss: %f, d_loss: %f\" % (epoch, index, g_loss, 
d_loss))\n\n generator.save_weights(\"generator.h5\")\n discriminator.save_weights(\"discriminator.h5\")\n\nif __name__ == \"__main__\":\n train()\n","sub_path":"python/reading/servalGenerator/gan.py","file_name":"gan.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"429531782","text":"from __future__ import division\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom utils import ensure_shared_grads\nfrom model import agentNET\nfrom torch.autograd import Variable\nfrom shared_optim import SharedRMSprop, SharedAdam\nimport torch.nn as nn\nimport time\nimport random\nimport numpy as np\nimport torch.nn as nn\nimport copy\nfrom utils import setup_logger\nfrom Quoridor import Quoridor\nimport logging\n\ndef train(rank, args, shared_model, optimizer):\n torch.manual_seed(args.seed + rank)\n\n env = Quoridor(rank)\n model = agentNET(1, 129)\n\n model.train()\n criterion = nn.CrossEntropyLoss()\n\n done = True\n episode_length = 0\n uploadtime = 0\n before = 0\n\n while True:\n model.load_state_dict(shared_model.state_dict())\n\n state, _, opp_state, opp_action, _ = env.reset()\n state = torch.from_numpy(np.array([state, ])).float()\n\n values = []\n log_probs = []\n rewards = []\n entropies = []\n\n opp_data = []\n if(opp_action != -1):\n opp_data.append(copy.deepcopy([opp_state, opp_action]))\n\n for step in range(args.num_steps):\n value, logit = model((Variable(state.unsqueeze(0))))\n prob = F.softmax(logit)\n log_prob = F.log_softmax(logit)\n entropy = -(log_prob * prob).sum(1)\n entropies.append(entropy)\n\n action = prob.multinomial().data\n action.view(-1, 1)\n log_prob = log_prob.gather(1, Variable(action))\n # print(action.numpy().tolist()[0])\n \n state, result, opp_state, opp_action = env.action(action.numpy().tolist()[0][0])\n state = torch.from_numpy(np.array([state, ])).float()\n\n if(opp_action != -1):\n opp_data.append(copy.deepcopy([opp_state, opp_action]))\n\n if result == 0:\n reward = 5\n done = True\n elif result == 2:\n dis0, _ = env.findPath(0)\n dis1, _ = env.findPath(1)\n reward = 0\n #if (action.numpy().tolist()[0][0] < 128):\n # ans = float(dis1 - dis0 - before) / 5\n # if(ans > 0):\n # reward += ans\n #before = dis1 - dis0\n done = False\n elif result == 1:\n reward = -5\n done = True\n else:\n reward = -10\n done = True\n\n values.append(value)\n log_probs.append(log_prob)\n rewards.append(reward)\n\n if done:\n before = 0\n state, _, _, _, _ = env.reset()\n state = torch.from_numpy(np.array([state, ])).float()\n\n R = torch.zeros(1, 1)\n if not done:\n value, _ = model((Variable(state.unsqueeze(0))))\n R = value.data\n\n \n values.append(Variable(R))\n R = Variable(R)\n\n policy_loss = 0\n value_loss = 0\n gae = torch.zeros(1, 1)\n\n for i in reversed(range(len(rewards))):\n R = args.gamma * R + rewards[i]\n advantage = R - values[i]\n value_loss = value_loss + 0.5 * advantage.pow(2)\n delta_t = rewards[i] + args.gamma * values[i + 1].data - values[i].data\n gae = gae * args.gamma * args.tau + delta_t\n policy_loss = policy_loss - log_probs[i] * Variable(gae) - 0.01 * entropies[i]\n\n\n optimizer.zero_grad()\n (policy_loss + 0.5 * value_loss).backward()\n\n torch.nn.utils.clip_grad_norm(model.parameters(), 40)\n ensure_shared_grads(model, shared_model)\n optimizer.step()\n\n count = 0\n if count == 5:\n inputs = []\n labels = []\n optimizer.zero_grad()\n # print(opp_data)\n for i in range(10):\n tmp = random.randint(0, len(opp_data) - 
1)\n inputs.append([copy.deepcopy(opp_data[tmp][0])])\n # print(inputs)\n # print(opp_data[tmp][0])\n # time.sleep(20)\n labels.append(copy.deepcopy(opp_data[tmp][1]))\n # print(len(inputs))\n # print(len(inputs[0]))\n # print(len(inputs[0][0]))\n # print(len(inputs[0][0][0]))\n # print(len(labels))\n # print(labels)\n inputs, labels = Variable(torch.FloatTensor(inputs)), Variable(torch.LongTensor(labels))\n _, outputs = model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"386871306","text":"__author__ = 'chance'\n\nimport urllib.request\nimport requests\nimport json\n\ngeourl = \"http://statsapi.mlb.com/api/v1/teams\"\n#response = urllib.request.urlopen(geourl)\n\nresponse = urllib.request.urlopen(geourl)\n\ncontent = response.read()\ndata = json.loads(content.decode(\"utf8\"))\nteams = data['teams']\n\nteamlinks = []\nplayerlinks = []\n\nfor team in teams:\n league = team['league']\n if (league.get('name')):\n league_name = league['name']\n if( league_name == 'American League' or league_name == 'National League'):\n #print(team['id'])\n #print(team['name'])\n #print(team['link'])\n #print(league_name)\n roster_link = str(\"http://statsapi.mlb.com\" + team['link'] + \\\n '/roster/fullRoster?season=2019')\n teamlinks.append(roster_link)\n #print(roster_link)\n #print(\"\")\n\nfor teamlink in teamlinks:\n #print(teamlink)\n response = urllib.request.urlopen(teamlink)\n content = response.read()\n data = json.loads(content.decode(\"utf8\"))\n roster = data['roster']\n\n for person in roster:\n print(person)","sub_path":"site/push_notification/statcast_rosters.py","file_name":"statcast_rosters.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160886500","text":"from snmp_helper import snmp_get_oid_v3,snmp_extract\nfrom email_helper import send_mail\nfrom datetime import datetime\nimport os\nimport json\nimport sys\n\npynetrtr1 = ('184.105.247.70', 161)\npynetrtr2 = ('184.105.247.71', 161)\ndevices = [pynetrtr1, pynetrtr2]\n\na_user = 'pysnmp'\nauth_key = 'galileo1'\nencrypt_key = 'galileo1'\nsnmp_user = (a_user, auth_key, encrypt_key)\n\n# Uptime when running config last changed\nccmHistoryRunningLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0' \n\n# Uptime when running config last saved\n# note any 'write' constitutes a save \nccmHistoryRunningLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0' \n\n# Uptime when startup config last saved \nccmHistoryStartupLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'\n\n#sys_descr = '1.3.6.1.2.1.1.1.0'\n#snmp_data = snmp_get_oid_v3(pynetrtr1, snmp_user, oid=sys_descr)\n#output = snmp_extract(snmp_data)\n#print(output)\nif not os.path.isfile('data.json'):\n with open('data.json', 'w') as f:\n f.write('{\"lastchange_rtr1\": \"0\", \"lastchange_rtr2\": \"0\"}')\nwith open('data.json', 'r') as f:\n old_values = json.load(f)\n\nlastchange_rtr1 = snmp_get_oid_v3(pynetrtr1, snmp_user, oid=ccmHistoryRunningLastChanged)\nlastchange_rtr2 = snmp_get_oid_v3(pynetrtr2, snmp_user, oid=ccmHistoryRunningLastChanged)\nlastchange_rtr1 = snmp_extract(lastchange_rtr1)\nlastchange_rtr2 = snmp_extract(lastchange_rtr2)\nnew_values = {\n 'lastchange_rtr1':lastchange_rtr1,\n 'lastchange_rtr2':lastchange_rtr2,\n}\n\nprint('Old value of rtr1 = ' + 
old_values['lastchange_rtr1'])\nprint('New value of rtr1 = ' + new_values['lastchange_rtr1'])\nprint()\nprint('Old value of rtr2 = ' + old_values['lastchange_rtr2'])\nprint('New value of rtr2 = ' + new_values['lastchange_rtr2'])\n\nwith open('data.json', 'w') as f:\n json.dump(new_values, f)\n\nrecipient = 'd.stromblad@outlook.com'\nsubject = 'Test message'\nmessage = 'This is a test message'\nsender = 'python-class@gmail.com'\n\nif new_values['lastchange_rtr1'] != old_values['lastchange_rtr1']:\n subject = 'Configuration of rtr1 has changed'\n message = '''Last configuration change has changed from %s to %s \nThe change was detected as %s \n ''' % (old_values['lastchange_rtr1'], new_values['lastchange_rtr1'], datetime.now())\n send_mail(recipient, subject, message, sender)\n\n \nif new_values['lastchange_rtr2'] != old_values['lastchange_rtr2']:\n subject = 'Configuration of rtr2 has changed'\n message = '''Last configuration change has changed from %s to %s \nThe change was detected as %s \n ''' % (old_values['lastchange_rtr2'], new_values['lastchange_rtr2'], datetime.now())\n send_mail(recipient, subject, message, sender)\n","sub_path":"class3/routerconf.py","file_name":"routerconf.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"256450664","text":"\"\"\"\r\n@author: Maziar Raissi\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport pickle\r\nimport pandas as pd\r\n\r\nimport interiorburgerslambda\r\nfrom plotting import newfig\r\n\r\nsys.path.insert(0, '../../Utilities/')\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.io\r\nfrom scipy.interpolate import griddata\r\nfrom pyDOE import lhs\r\nimport plotting\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport time\r\nimport matplotlib.gridspec as gridspec\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\n\r\n#np.random.seed(1234)\r\n#tf.set_random_seed(1234)\r\n\r\n\r\nclass PhysicsInformedNN:\r\n # Initialize the class\r\n def __init__(self, lam, X_u, u, X_f, layers, lb, ub, nu, X_star, N_u, N_f, N_u2, N_f2, m, typen):\r\n\r\n self.lb = lb\r\n self.ub = ub\r\n\r\n self.x_u = X_u[:, 0:1]\r\n self.t_u = X_u[:, 1:2]\r\n\r\n self.x_f = X_f[:, 0:1]\r\n self.t_f = X_f[:, 1:2]\r\n\r\n self.u = u\r\n\r\n self.lam = lam\r\n\r\n self.layers = layers\r\n self.nu = nu\r\n self.X_star = X_star\r\n\r\n # Initialize NNs\r\n self.weights, self.biases = self.initialize_NN(layers)\r\n\r\n log_device_placement = False # very loud\r\n # tf placeholders and graph\r\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\r\n log_device_placement=log_device_placement))\r\n\r\n self.x_u_tf = tf.placeholder(tf.float32, shape=[None, self.x_u.shape[1]])\r\n self.t_u_tf = tf.placeholder(tf.float32, shape=[None, self.t_u.shape[1]])\r\n self.u_tf = tf.placeholder(tf.float32, shape=[None, self.u.shape[1]])\r\n\r\n self.x_f_tf = tf.placeholder(tf.float32, shape=[None, self.x_f.shape[1]])\r\n self.t_f_tf = tf.placeholder(tf.float32, shape=[None, self.t_f.shape[1]])\r\n\r\n self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf)\r\n self.f_pred = self.net_f(self.x_f_tf, self.t_f_tf)\r\n\r\n #self.regularizer = self.extract_weights(self.weights)\r\n self.varis = tf.trainable_variables()\r\n self.lossl2 = tf.add_n([ tf.nn.l2_loss(v) for v in self.varis]) * 0.00001\r\n\r\n self.loss = tf.reduce_mean(tf.square(self.u_tf - self.u_pred)) + \\\r\n self.lam * tf.reduce_mean(tf.square(self.f_pred)) + 
\\\r\n tf.reduce_mean(self.lossl2)\r\n\r\n self.loss_a = tf.reduce_mean(tf.square(self.u_tf - self.u_pred))\r\n self.loss_b = tf.reduce_mean(tf.square(self.f_pred))\r\n\r\n self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,\r\n method='L-BFGS-B',\r\n options={'maxiter': 50000,\r\n 'maxfun': 50000,\r\n 'maxcor': 50,\r\n 'maxls': 50,\r\n 'ftol': 1.0 * np.finfo(float).eps})\r\n\r\n self.optimizer_Adam = tf.train.AdamOptimizer()\r\n self.train_op_Adam = self.optimizer_Adam.minimize(self.loss) # what is adaomoptimizer?\r\n\r\n log_device_placement = False # used to be true, makes noise\r\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\r\n log_device_placement=log_device_placement))\r\n\r\n init = tf.global_variables_initializer()\r\n self.sess.run(init)\r\n\r\n def initialize_NN(self, layers):\r\n weights = []\r\n biases = []\r\n num_layers = len(layers)\r\n for l in range(0, num_layers - 1):\r\n W = self.xavier_init(size=[layers[l], layers[l + 1]])\r\n b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32)\r\n weights.append(W)\r\n biases.append(b)\r\n return weights, biases\r\n\r\n def extract_weights(self, w):\r\n weight_list = []\r\n for i, w in enumerate(self.weights):\r\n weight_list.append(w.eval(self.sess))\r\n print(weight_list)\r\n\r\n\r\n def xavier_init(self, size):\r\n in_dim = size[0]\r\n out_dim = size[1]\r\n xavier_stddev = np.sqrt(2 / (in_dim + out_dim))\r\n return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)\r\n\r\n def neural_net(self, X, weights, biases):\r\n num_layers = len(weights) + 1\r\n\r\n H = 2.0 * (X - self.lb) / (self.ub - self.lb) - 1.0\r\n for l in range(0, num_layers - 2):\r\n W = weights[l]\r\n b = biases[l]\r\n H = tf.tanh(tf.add(tf.matmul(H, W), b))\r\n W = weights[-1]\r\n b = biases[-1]\r\n Y = tf.add(tf.matmul(H, W), b)\r\n return Y\r\n\r\n def net_u(self, x, t):\r\n u = self.neural_net(tf.concat([x, t], 1), self.weights, self.biases)\r\n return u\r\n\r\n def net_f(self, x, t):\r\n u = self.net_u(x, t)\r\n u_t = tf.gradients(u, t)[0]\r\n u_x = tf.gradients(u, x)[0]\r\n u_xx = tf.gradients(u_x, x)[0]\r\n f = u_t + u * u_x - self.nu * u_xx\r\n\r\n return f\r\n\r\n def callback(self, loss):\r\n print('Loss:', loss)\r\n\r\n def train(self, nIter, burgers_data_loc,N_u, N_f, N_u2, N_f2, m, typen, base_plt_dir):\r\n\r\n tf_dict = {self.x_u_tf: self.x_u, self.t_u_tf: self.t_u, self.u_tf: self.u,\r\n self.x_f_tf: self.x_f, self.t_f_tf: self.t_f}\r\n\r\n start_time = time.time()\r\n losses = {}\r\n for it in range(nIter):\r\n self.sess.run(self.train_op_Adam, tf_dict)\r\n\r\n # Print\r\n if it % 10 == 0:\r\n #print('L2 LOSS:')\r\n #weights_value = self.sess.run(self.weights)\r\n #print(weights_value)\r\n elapsed = time.time() - start_time\r\n loss_value = self.sess.run(self.loss, tf_dict)\r\n losses[it] = loss_value\r\n print('It: %d, Loss: %.3e, Time: %.2f' %\r\n (it, loss_value, elapsed))\r\n start_time = time.time()\r\n lossy_a, lossy_b = self.sess.run([self.loss_a, self.loss_b], feed_dict = tf_dict) #add self.loss_a/b\r\n #with open('epochs_v_error_hp.p', 'rb') as fp:\r\n # d = pickle.load(fp)\r\n inputs = interiorburgerslambda.prepare_nn_inputs_burgers(burgers_data_loc, N_u, N_f, N_u2, N_f2, m, typen, debugging=False)\r\n #data = scipy.io.loadmat('burgers_shock.mat')\r\n #u = np.real(data['usol']).T.flatten()[:,None]\r\n #u_pred, f_pred = self.predict(self.X_star)\r\n #error = np.linalg.norm(u-u_pred, 2)/25000\r\n #d[it] = {'MSE' : lossy_a, 'PDE' : 
lossy_b, 'error' : error}\r\n #with open('epochs_v_error_hp.p', 'wb') as fp:\r\n # pickle.dump(d, fp, protocol=2)\r\n #weights = {}\r\n #biases={}\r\n #for i, w in enumerate(self.weights):\r\n # weights[i] = w.eval(self.sess)\r\n #for i, b in enumerate(self.biases):\r\n # biases[i] = b.eval(self.sess)\r\n #with open('wab_dict_hp.p', 'rb') as fp:\r\n # wabdict = pickle.load(fp)\r\n #with open('b_dict_hp.p', 'rb') as fp:\r\n # bdict = pickle.load(fp)\r\n #wabdict[it] = weights\r\n #bdict[it] = biases\r\n #with open('wab_dict_hp.p', 'wb') as fp:\r\n # pickle.dump(wabdict, fp, protocol=2)\r\n #with open('b_dict_hp.p', 'wb') as fp:\r\n # pickle.dump(bdict, fp, protocol=2)\r\n\r\n\r\n if (it & (it - 1)) == 0:\r\n inputs = interiorburgerslambda.prepare_nn_inputs_burgers(burgers_data_loc, N_u, N_f, N_u2, N_f2, m, typen, debugging=False)\r\n u_pred, f_pred = self.predict(self.X_star)\r\n\r\n plotting.plotting(inputs, u_pred, base_plt_dir, \"{}\".format(it))\r\n\r\n self.optimizer.minimize(self.sess,\r\n feed_dict=tf_dict,\r\n fetches=[self.loss],\r\n loss_callback=self.callback)\r\n\r\n return losses\r\n\r\n def save_weights_and_biases(self, path):\r\n buffers = {}\r\n for i, w in enumerate(self.weights):\r\n buffers['weight{}'.format(i)] = (w.eval(self.sess))\r\n for i, w in enumerate(self.biases):\r\n buffers['bias{}'.format(i)] = (w.eval(self.sess))\r\n np.savez_compressed(path, **buffers)\r\n\r\n def load_weights_and_biases(self, path):\r\n bucket_o_wts = np.load(path)\r\n for k, numpy_arr in bucket_o_wts.items():\r\n if k.startswith('bias'):\r\n k = int(k[4:])\r\n tensor = self.biases[k]\r\n elif k.startswith('weight'):\r\n k = int(k[6:])\r\n tensor = self.weights[k]\r\n else:\r\n raise ValueError(\"unexpected in {!r}: {!r}\".format(path, k))\r\n assign_op = tensor.assign(numpy_arr)\r\n self.sess.run(assign_op)\r\n\r\n def predict(self, X_star):\r\n\r\n u_star = self.sess.run(self.u_pred, {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]})\r\n f_star = self.sess.run(self.f_pred, {self.x_f_tf: X_star[:, 0:1], self.t_f_tf: X_star[:, 1:2]})\r\n\r\n return u_star, f_star\r\n\r\n\r\nif __name__ == \"__main__\":\r\n nIter = 10\r\n\r\n # nu = 0.01 / np.pi\r\n # noise = 0.0\r\n # N_u = 100\r\n # N_f = 10000\r\n layers = [2, 20, 20, 20, 20, 20, 20, 20, 20, 1]\r\n #\r\n # data = scipy.io.loadmat('/Users/danamendelson/Desktop/burgers_shock.mat')\r\n #\r\n # t = data['t'].flatten()[:, None]\r\n # x = data['x'].flatten()[:, None]\r\n # Exact = np.real(data['usol']).T\r\n #\r\n # X, T = np.meshgrid(x, t)\r\n #\r\n # X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))\r\n # u_star = Exact.flatten()[:, None]\r\n #\r\n # # Domain bounds\r\n # lb = X_star.min(0)\r\n # ub = X_star.max(0)\r\n #\r\n # xx1 = np.hstack((X[0:1, :].T, T[0:1, :].T))\r\n # uu1 = Exact[0:1, :].T\r\n # xx2 = np.hstack((X[:, 0:1], T[:, 0:1]))\r\n # uu2 = Exact[:, 0:1]\r\n # xx3 = np.hstack((X[:, -1:], T[:, -1:]))\r\n # uu3 = Exact[:, -1:]\r\n #\r\n # X_u_train = np.vstack([xx1, xx2, xx3])\r\n # X_f_train = lb + (ub - lb) * lhs(2, N_f)\r\n # X_f_train = np.vstack((X_f_train, X_u_train))\r\n # u_train = np.vstack([uu1, uu2, uu3])\r\n #\r\n # idx = np.random.choice(X_u_train.shape[0], N_u, replace=False)\r\n # X_u_train = X_u_train[idx, :]\r\n # u_train = u_train[idx, :]\r\n\r\n #burgers_data_loc = '~/data/burgers_shock.mat'\r\n #inputs = interior_burgers.prepare_nn_inputs_burgers(burgers_data_loc, N_u, N_f, random_seed=1234, debugging=False)\r\n\r\n #model = PhysicsInformedNN(inputs.X_u_train, inputs.u_train, 
inputs.X_f_train, layers, inputs.lb, inputs.ub, inputs.nu, inputs.X_star, N_u, N_f)\r\n\r\n    #start_time = time.time()\r\n    #losses = model.train(nIter, '~/data/burgers_shock.mat', '~/plots')\r\n    #elapsed = time.time() - start_time\r\n    #print('Training time: %.4f' % (elapsed))\r\n\r\n    #u_pred, f_pred = model.predict(inputs.X_star)\r\n    #u_star = inputs.exact.flatten()[:, None]\r\n\r\n    #error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2)\r\n    #print('Error u: %e' % (error_u))\r\n\r\n    #t = inputs.t\r\n    #x = inputs.x\r\n    #X, T = np.meshgrid(x, t)\r\n\r\n    #U_pred = griddata(inputs.X_star, u_pred.flatten(), (X, T), method='cubic')\r\n    #Error = np.abs(inputs.exact - U_pred)\r\n\r\n    #plt.close()\r\n    #fig, ax = plt.subplots(1, 1, figsize=(10, 10))\r\n    #pd.Series(losses).plot(logy=True, ax=ax)\r\n    #lp_loc = '/tmp/loss_plot.eps'\r\n    #plt.savefig(lp_loc)\r\n    #print(\"saved loss plot to {}\".format(lp_loc))\r\n\r\n    #save_base_dir = '~/junk/eg_model'\r\n    #model.save_weights_and_biases(os.path.join(save_base_dir, 'weights_and_biases_2.npz'))\r\n\r\n    #u_pred, f_pred = model.predict(inputs.X_star) # X_star = tf.convert_to_tensor(X_star) ?\r\n    #plotting.plotting(inputs, u_pred, '~/plots')\r\n","sub_path":"parameter_search/skoptmethod/burgersraissilambda.py","file_name":"burgersraissilambda.py","file_ext":"py","file_size_in_byte":12189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562800202","text":"# -*- coding: utf-8 -*-\n#\n# get samples with percent tumor info\n#\n# @author \n# -------------------------------------\n\nfrom unittest import TestCase\nfrom patholagnome.tools import data\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom os.path import join\nfrom sys import exit\nfrom pandas import DataFrame, set_option\nfrom json import loads, dumps\nfrom re import sub, findall\nset_option('max_colwidth',80)\nset_option('display.max_columns', 500)\nset_option('display.width', 175)\nimport re\nfrom patholagnome.tools import tcga_data\nfrom patholagnome.analysis import isolate_tissue\n\ntcga_obj = tcga_data.TCGA()\n# get lung cancer project\nproject_id ='TCGA-LUAD'\ncase_id = '3434b91a-c05f-460f-a078-7b1bb6e7085d'\nlung_info = tcga_obj.get_project(project_id=project_id)\n# get 540 case ids from the lung cancer project\ncase_list = [case['case_id'] for case in tcga_obj.get_cases_in_project(project_id=project_id, n_cases=540)]\n# make sure case id in project\ncase_id = [cid for cid in case_list if cid == case_id][0]\n# get case info\ncase_info = tcga_obj.get_case(project_id=project_id, case_id=case_id)\ncase_submitter_slide_id = [case for case in case_info['submitter_slide_ids'] if re.search('DX[0-9]{1}$', case)][0]\n# get diagnosis information\ncase_diagnosis_info = tcga_obj.get_diagnosis_info(project_id='TCGA-LUAD', case_id=case_id)\n\n# get cases with a documented therapeutic agent\ngood_cases = []\nfor case in case_list:\n\ttmp_info = tcga_obj.get_diagnosis_info(project_id='TCGA-LUAD', case_id=case)\n\tif 'diagnoses' in tmp_info.keys():\n\t\ttherapeutic_agent = tmp_info['diagnoses'][0]['treatments'][0]['therapeutic_agents']\n\t\tprimary_diagnosis = tmp_info['diagnoses'][0]['primary_diagnosis']\n\t\tif therapeutic_agent:\n\t\t\tprint(\"therapeutic_agent: {}\".format(therapeutic_agent))\n\t\t\tgood_cases.append(tmp_info)\n\n\n# get slide metadata\nfeature_list = ['percent_neutrophil_infiltration', 'percent_monocyte_infiltration', 'percent_normal_cells',\n'percent_eosinophil_infiltration', 'percent_tumor_nuclei', 'percent_lymphocyte_infiltration',\n'percent_granulocyte_infiltration', 'percent_necrosis', 'number_proliferating_cells', 'percent_stromal_cells',\n'percent_inflam_infiltration', 'percent_tumor_cells']
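\n# The fields above are slide-level properties from the GDC data model\n# (cases.samples.portions.slides.*); get_slide_info is this project's own helper\n# and is assumed to return {case_id: {slide_id: {field: value}}}, which is how\n# the loops below index its result.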
\nlymph_slide_stats = tcga_obj.get_slide_info(project_id, case_list, feature_list)\n\n# tumor and normal high samples\ntumorHigh = []\nnormHigh = []\nfor case_id in lymph_slide_stats.keys():\n\tfor slide_id in lymph_slide_stats[case_id]:\n\t\tif 'percent_tumor_cells' in lymph_slide_stats[case_id][slide_id].keys() and lymph_slide_stats[case_id][slide_id]['percent_tumor_cells'] == 100:\n\t\t\ttumorHigh.append(slide_id)\n\t\tif 'percent_normal_cells' in lymph_slide_stats[case_id][slide_id].keys() and lymph_slide_stats[case_id][slide_id]['percent_normal_cells'] > 50:\n\t\t\tprint(\"case id: {}\\tslide id: {}\".format(case_id, slide_id))\n\t\t\tnormHigh.append(slide_id)\n\n\ntumor_out_path = \"data/TCGA-LUAD/slides/tumorHigh\"\nnorm_out_path = \"data/TCGA-LUAD/slides/normalHigh\"\n\ncase_id = '96dfa373-4597-49a5-942e-d9b4dc5d28fc'\nslide_id = 'dd8e00b5-89c3-45a0-98f3-7ccd2c99e430'\ntcga_obj.get_slide_file(slide_id, norm_out_path)\n\nn_slides = 100\ntcga_obj = tcga_data.TCGA()\nfilters = { \"op\": \"and\", \n\t\"content\":[\n\t\t{\"op\":\"in\", \"content\":{\"field\":'data_format', \"value\":['SVS']}},\n\t\t{\"op\":\"in\", \"content\":{\"field\":'cases.case_id', \"value\":[case_id]}}\n\t]\n}\nfields = \"cases.samples.portions.slides.slide_id,file_id\"\nparams = {'filters': dumps(filters), 'format': 'JSON', 'size':str(n_slides), 'fields':fields}\nresponse = get(join(tcga_obj.url, 'files'), params = params)\nresponse_json = loads(response.text)\nfor res in response_json['data']['hits']:\n\tif res['cases'][0]['samples'][0]['portions'][0]['slides'][0]['slide_id'] == slide_id:\n\t\tfile_id = res['file_id']\n\t\tprint(\"file id: {}\".format(file_id))\n\ndata_endpt = \"https://api.gdc.cancer.gov/data/{}\".format(file_id)\nresponse_slide = get(data_endpt, headers = {\"Content-Type\": \"application/json\"})\nresponse_head_cd = response_slide.headers[\"Content-Disposition\"]\nfile_name = findall(\"filename=(.+)\", response_head_cd)[0]\n\nfile_name = join(norm_out_path, file_name)\n\nwith open(file_name, \"wb\") as output_file:\n    output_file.write(response_slide.content)\n\n\n\n\n","sub_path":"bin/interactive_session_workflow.py","file_name":"interactive_session_workflow.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"281060296","text":"\"\"\"\"\"\"\nfrom kaplot.quick import *\nimport kaplot\nimport kaplot.objects\n#import kaplot.astro\nfrom kaplot.quick import _checkPage, _checkContainer\n\n# from plotobjects\ndef barchart(datalist, autoscale=True, contexts=None, barwidth=0.8, groupspacing=0.1, sortbars=False, container=None, **kwargs):\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.BarChart(container, datalist, autoscale=autoscale, contexts=contexts, barwidth=barwidth, groupspacing=groupspacing, sortbars=sortbars, **kwargs)\n\treturn current.object\n\ndef contour(data2d, levels, matrix=None, container=None, **kwargs):\n\t\"\"\"HIDE\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Contour(container, data2d, levels, matrix=matrix, 
 **kwargs)\n\treturn current.object\n\ndef contourfill(data2d, level1, level2, container=None, **kwargs):\n\t\"\"\"HIDE\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.ContourFill(container, data2d, level1, level2, **kwargs)\n\treturn current.object\n\ndef errorbars(x, y, size='3mm', xerr=None, yerr=None, xpos=None, xneg=None, ypos=None, yneg=None, container=None, **kwargs):\n\t\"\"\"Draws errorbars at (x[n],y[n]) locations\n\t\n\tArguments:\n\t * xerr -- error in x direction, both positive and negative\n\t * xpos -- positive error in x, overriding xerr\n\t * xneg, ypos, yerr -- ...\n\t\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.ErrorBars(container, x, y, size=size, xerr=xerr, yerr=yerr, xpos=xpos, xneg=xneg, ypos=ypos, yneg=yneg, **kwargs)\n\treturn current.object\n\ndef errorrange(x, y, err, pos=None, neg=None, fill=True, caps=False, container=None, **kwargs):\n\t\"\"\"Fills between the errors instead of drawing error bars (see errorbars)\n\t\n\tArguments:\n\t * x, y -- arrays or sequences containing the locations\n\t * err -- the error in y, both positive and negative\n\t * pos, neg -- the positive and negative error in y, overriding err\n\t * fill -- if False, doesn't fill, but draws a line at the edges\n\t * caps -- if fill is False, and caps is False, the 'caps' at the left and right end are not drawn\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.ErrorRange(container, x, y, err, pos=pos, neg=neg, fill=fill, caps=caps, **kwargs)\n\treturn current.object\n\ndef fillrange(x, y, level=0, container=None, **kwargs):\n\t\"\"\"Draws a polygon between the y values and level\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.FillRange(container, x, y, level=level, **kwargs)\n\treturn current.object\n\ndef grid(subgrid=False, xinterval=None, xinteger=False, xstart=None, xsubticks=4, xlogarithmic=False, yinterval=None, yinteger=False, ystart=None, ysubticks=4, ylogarithmic=False, container=None, **kwargs):\n\t\"\"\"Draws a grid on the whole of the container\n\n\tArguments:\n\t * subgrid -- if True, only draw the gridlines of the minor ticks\n\t * xinterval -- if specified, the major tick separation for the x axis\n\t * xinteger -- if True, x will always be an integer, so no floating point strings in your plot\n\t * xstart -- if specified, the start value for the major ticks\n\t * xsubticks -- the number of subticks (minor ticks) between major ticks\n\t * xlogarithmic -- if True, the minor subticks will be separated as on logarithmic paper.\n\t \t\tNote that you have to take the logarithm of your data yourself. Also, labels will\n\t \t\tbe drawn as the base (currently only 10 is supported) with the x-value as superscript\n\t * yinterval and the rest -- same, but now for y\n\t \t\t\n\t TODO: example and explain how to do custom labeling and tick locations
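\n\t \n\t Example (a minimal sketch; assumes a current container with a world already set, e.g. via box):\n\t * {{{#!python\n    grid(xinterval=1.0, yinterval=1.0, xsubticks=2, ysubticks=2)}}}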
\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Grid(container, subgrid=subgrid, xinterval=xinterval, xinteger=xinteger, xstart=xstart, xsubticks=xsubticks, xlogarithmic=xlogarithmic, yinterval=yinterval, yinteger=yinteger, ystart=ystart, ysubticks=ysubticks, ylogarithmic=ylogarithmic, **kwargs)\n\treturn current.object\n\ndef histogramline(bins, data, binned=True, bincount=10, fill=True, drawverticals=True, drawsides=True, container=None, **kwargs):\n\t\"\"\"Draws a histogram line\n\t\n\tArguments:\n\t * bins -- values of the bins, the first bin will be drawn between bins[0] and bins[1]\n\t * data -- the binned or unbinned data. If unbinned, the height of the bin, otherwise the raw data\n\t * binned -- if False, data will be binned according to bincount\n\t * fill -- boolean, fill or just draw the outline\n\t * drawverticals -- if fill is False, this determines if the vertical lines between bins are also drawn\n\t * drawsides -- if fill is False, this determines if the outer left and right line are drawn\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.HistogramLine(container, bins, data, binned=binned, bincount=bincount, fill=fill, drawverticals=drawverticals, drawsides=drawsides, **kwargs)\n\treturn current.object\n\ndef indexedimage(data2d, matrix=None, mask2d=None, colormap='rainbow', datamin=None, datamax=None, function='linear', resize=None, context=None, container=None, **kwargs):\n\t\"\"\"Draws an intensity image using the colormap to map the intensity to a color\n\t\n\tExample:\n\t * {{{#!python\n    x, y = meshgrid()\n    I = e**-(x**2+y**2)\n    indexedimage(I, colormap='cool')}}}\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.IndexedImage(container, data2d, matrix=matrix, mask2d=mask2d, colormap=colormap, datamin=datamin, datamax=datamax, function=function, resize=resize, context=context, **kwargs)\n\treturn current.object\n\ndef innercolorbar(label=None, image=None, levels=[], direction='up', location='right, top', labelposition=None, size=None, colormap='rainbow', datamin=None, datamax=None, edgespacing='10mm', container=None, **kwargs):\n\t\"\"\"HIDE\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.InnerColorbar(container, label=label, image=image, levels=levels, direction=direction, location=location, labelposition=labelposition, size=size, colormap=colormap, datamin=datamin, datamax=datamax, edgespacing=edgespacing, **kwargs)\n\treturn current.object\n\ndef legend(types, labels, objects, location='right, top', spacing='2mm', edgespacing='10mm', borderspacing='2mm', linelength='1cm', container=None, **kwargs):\n\t\"\"\"Draws a legend, as information for graphs for instance\n\t\n\tArguments:\n\t * types -- a list of strings, specifying the marker that should be drawn\n\t * labels -- a list of strings\n\t * location -- specifies where the legend is drawn.\n\t \t\tFormat is \"<halign>, <valign>\", where <halign> is 'left', 'center' or 'right'\n\t \t\tand <valign> is 'bottom', 'center' or 'top'\n\t * spacing -- the horizontal separation between the symbols and the labels\n\t * edgespacing -- the displacement from the edge, specified by 'location'\n\t * borderspacing -- the separation of the text and symbols, and the border drawn\n\t \t\taround it\n\t * linelength -- if type contains a line, its length will be specified by this\n\t \t\targument. (dotted or dashed lines sometimes need to be longer to be\n\t \t\tclearly visible)
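\n\t \t\n\t Example (a minimal sketch; the type strings and the objects l1, l2 are illustrative -- assumed to be returned by earlier plot calls):\n\t * {{{#!python\n    legend(['line', 'line'], ['model', 'data'], [l1, l2], location='left, top')}}}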
\n\t \t\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Legend(container, types, labels, objects, location=location, spacing=spacing, edgespacing=edgespacing, borderspacing=borderspacing, linelength=linelength, **kwargs)\n\treturn current.object\n\ndef line(x1, y1, x2=1, y2=2, container=None, **kwargs):\n\t\"\"\"Draws a line from x1,y1 to x2,y2\n\tExample:\n\t * line(0, 0, 10, 5)\n\t * line(20, -5, 2, 50)\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Line(container, x1, y1, x2=x2, y2=y2, **kwargs)\n\treturn current.object\n\ndef pointer(x1, y1, x2, y2, text, offset='3mm', halign=None, valign=None, container=None, **kwargs):\n\t\"\"\"Draws a pointer from x1,y1 to x2,y2 with an attached text label (see kaplot.objects.Pointer)\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Pointer(container, x1, y1, x2, y2, text, offset=offset, halign=halign, valign=valign, **kwargs)\n\treturn current.object\n\ndef polyline(x, y, close=False, container=None, **kwargs):\n\t\"\"\"Draws a polyline from (x[0],y[0]) to (x[1],y[1]) ... to (x[n],y[n])\n\t\n\tArguments:\n\t * x -- an array or other sequence\n\t * y -- idem\n\t * close -- if True, the begin and end points will be connected\n\t \n\tExample:\n\t * polyline([0, 1, 2, 3], [0, 1, 4, 9], color='red')\n\t * {{{#!python\n    x = arange(0, 5, 0.1)\n    y = sin(x*3) * x**2\n    polyline(x, y, linestyle='dotdash')\n    }}}\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.PolyLine(container, x, y, close=close, **kwargs)\n\treturn current.object\n\ndef polygon(x, y, close=True, container=None, **kwargs):\n\t\"\"\"Same as polyline but now the interior will be filled.\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Polygon(container, x, y, close=close, **kwargs)\n\treturn current.object\n\ndef rectangle(x1, y1, x2, y2, solid=False, gridsnap=True, container=None, **kwargs):\n\t\"\"\"Draws a rectangle from x1,y1 to x2,y2\n\t\n\tArguments:\n\t * solid -- if True, the rectangle will be filled\n\n\tExample:\n\t * rectangle(0, 0, 10, 5)\n\t * rectangle(20, -5, 2, 50)\n\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Rectangle(container, x1, y1, x2, y2, solid=solid, gridsnap=gridsnap, **kwargs)\n\treturn current.object\n\ndef symbols(x, y, symbolName='x', xscales=None, yscales=None, angles=None, colors=None, colormap='rainbow', datamin=None, datamax=None, container=None, **kwargs):\n\t\"\"\"Draws symbols at locations (x[n], y[n])\n\t\n\tExample:\n\t{{{#!python\n    x = arange(0.001, 10, 0.1)\n    y = sin(x)/x\n    symbols(x, y, symbolName='triangle')\n    }}}\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Symbols(container, x, y, symbolName=symbolName, xscales=xscales, yscales=yscales, angles=angles, colors=colors, colormap=colormap, datamin=datamin, datamax=datamax, **kwargs)\n\treturn current.object\n
\ndef text(text, x=0.5, y=0.5, halign='center', valign='center', textangle=0, container=None, **kwargs):\n\t\"\"\"Draws a text string\n\t\n\tArguments:\n\t * halign -- horizontal placement of text relative to location\n\t * valign -- vertical placement of text relative to location\n\t \n\tExample:\n\t * text(\"Hello\", 0.5, 0.5, \"left\", \"center\")\n\t \tThis will draw the text \"Hello\" to the right of location (0.5, 0.5)\n\t\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Text(container, text, x=x, y=y, halign=halign, valign=valign, textangle=textangle, **kwargs)\n\treturn current.object\n\n# from decorators\ndef axes(viewport=((0, 0), (1, 1)), xinterval=None, xinteger=False, xstart=None, xsubticks=4, xlogarithmic=False, yinterval=None, yinteger=False, ystart=None, ysubticks=4, ylogarithmic=False, ticklength='3mm', labeloffset='1mm', linestyle='normal', linewidth='1px', container=None, **kwargs):\n\t\"\"\"Adds axes around the container\n\n\tArguments:\n\t * xinterval -- if specified, the major tick separation for the x axis\n\t * xinteger -- if True, x will always be an integer, so no floating point strings in your plot\n\t * xstart -- if specified, the start value for the major ticks\n\t * xsubticks -- the number of subticks (minor ticks) between major ticks\n\t * xlogarithmic -- if True, the minor subticks will be separated as on logarithmic paper.\n\t \t\tNote that you have to take the logarithm of your data yourself. Also, labels will\n\t \t\tbe drawn as the base (currently only 10 is supported) with the x-value as superscript\n\t * yinterval and the rest -- same, but now for y\n\t * ticklength -- length of the major ticks, if negative, ticks are drawn to the outside\n\t * labeloffset -- length of the separation between the label and the axis or tickmark (whichever is closer)\n\t * linestyle and linewidth -- are added so that axes always look normal, i.e. even if you change the default\n\t \t\tlinewidth of the page\n\t \t\t\n\t TODO: example and explain how to do custom labeling and tick locations
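\n\t \n\t Example (a minimal sketch; assumes a container created with world coordinates):\n\t * {{{#!python\n    axes(xinterval=2, yinterval=0.5, xsubticks=3)}}}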
\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Axes(container=container, viewport=viewport, xinterval=xinterval, xinteger=xinteger, xstart=xstart, xsubticks=xsubticks, xlogarithmic=xlogarithmic, yinterval=yinterval, yinteger=yinteger, ystart=ystart, ysubticks=ysubticks, ylogarithmic=ylogarithmic, ticklength=ticklength, labeloffset=labeloffset, linestyle=linestyle, linewidth=linewidth, **kwargs)\n\treturn current.object\n\ndef axes2(linestyle='normal', linewidth='1px', container=None, **kwargs):\n\t\"\"\"Adds axes around the container\n\n\tArguments:\n\t * xinterval -- if specified, the major tick separation for the x axis\n\t * xinteger -- if True, x will always be an integer, so no floating point strings in your plot\n\t * xstart -- if specified, the start value for the major ticks\n\t * xsubticks -- the number of subticks (minor ticks) between major ticks\n\t * xlogarithmic -- if True, the minor subticks will be separated as on logarithmic paper.\n\t \t\tNote that you have to take the logarithm of your data yourself. Also, labels will\n\t \t\tbe drawn as the base (currently only 10 is supported) with the x-value as superscript\n\t * yinterval and the rest -- same, but now for y\n\t * ticklength -- length of the major ticks, if negative, ticks are drawn to the outside\n\t * labeloffset -- length of the separation between the label and the axis or tickmark (whichever is closer)\n\t * linestyle and linewidth -- are added so that axes always look normal, i.e. even if you change the default\n\t \t\tlinewidth of the page\n\t \t\t\n\t TODO: example and explain how to do custom labeling and tick locations\n\t\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Axes2(container, linestyle=linestyle, linewidth=linewidth, **kwargs)\n\treturn current.object\n\ndef axis(location='left', interval=None, integer=False, start=None, ticks=4, subticks=4, logarithmic=False, ticklength='3mm', labeloffset='1mm', linestyle='normal', linewidth='1px', intersects=None, halign=None, valign=None, intersection=None, spacing='3mm', container=None, **kwargs):\n\t\"\"\"A decorator 'decorates' a container, like axes add axes to it\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Axis(container, location=location, interval=interval, integer=integer, start=start, ticks=ticks, subticks=subticks, logarithmic=logarithmic, ticklength=ticklength, labeloffset=labeloffset, linestyle=linestyle, linewidth=linewidth, intersects=intersects, halign=halign, valign=valign, intersection=intersection, spacing=spacing, **kwargs)\n\treturn current.object\n\ndef border(container=None, **kwargs):\n\t\"\"\"Adds a solid border around the container\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Border(container, **kwargs)\n\treturn current.object\n\ndef labels(bottom=None, left=None, right=None, top=None, spacing='2mm', container=None, **kwargs):\n\t\"\"\"Adds 4 labels around the container\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Labels(container, bottom=bottom, left=left, right=right, top=top, spacing=spacing, **kwargs)\n\treturn current.object\n\ndef spacer(space='5pt', bottom='0cm', right='0cm', top='0cm', left='0cm', container=None, **kwargs):\n\t\"\"\"Adds some space around the container\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Spacer(container, space=space, bottom=bottom, right=right, top=top, left=left, **kwargs)\n\treturn current.object\n\ndef title(text='', spacing='2mm', container=None, **kwargs):\n\t\"\"\"Adds a title in the top/center location of the container\"\"\"\n\tif container is None:\n\t\t_checkContainer()\n\t\tcontainer = current.container\n\tcurrent.object = kaplot.objects.Title(container, text=text, spacing=spacing, **kwargs)\n\treturn current.object\n\n# from containers\ndef box(viewport=((0, 0), (1, 1)), world=None, title='', bottomlabel=None, leftlabel=None, rightlabel=None, toplabel=None, xinterval=None, xinteger=False, xstart=None, xsubticks=4, xlogarithmic=False, yinterval=None, yinteger=False, ystart=None, ysubticks=4, ylogarithmic=False, ticklength='3mm', labeloffset='1mm', page=None, **kwargs):\n\t\"\"\"Creates a box, which is a container, with axes around it\n\t\n\tArguments:\n\t * viewport -- the viewport for this container, in normalized page coordinates\n\t * world -- the world tuple, specifying range and domain\n\t * title -- title (a string) which will be displayed at the top of the container\n\t * xxx-label -- a string/label that will be placed as indicated by 'xxx'\n\t\n\tFor the rest of the arguments, see 'axes'
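\n\t\n\t Example (a minimal sketch; the world tuple format (xmin, ymin, xmax, ymax) is an assumption):\n\t * {{{#!python\n    box(world=(0, 0, 10, 5), title='demo', bottomlabel='x', leftlabel='y')}}}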
\n\t\n\t\"\"\"\n\tif page is None:\n\t\t_checkPage()\n\t\tpage = current.page\n\tcurrent.container = kaplot.objects.Box(page, viewport=viewport, world=world, title=title, bottomlabel=bottomlabel, leftlabel=leftlabel, rightlabel=rightlabel, toplabel=toplabel, xinterval=xinterval, xinteger=xinteger, xstart=xstart, xsubticks=xsubticks, xlogarithmic=xlogarithmic, yinterval=yinterval, yinteger=yinteger, ystart=ystart, ysubticks=ysubticks, ylogarithmic=ylogarithmic, ticklength=ticklength, labeloffset=labeloffset, **kwargs)\n\treturn current.container\n\ndef box2(viewport=((0, 0), (1, 1)), world=None, title='', bottomlabel=None, leftlabel=None, rightlabel=None, toplabel=None, xinterval=None, xinteger=False, xstart=None, xsubticks=4, xlogarithmic=False, yinterval=None, yinteger=False, ystart=None, ysubticks=4, ylogarithmic=False, ticklength='3mm', labeloffset='1mm', page=None, **kwargs):\n\t\"\"\"Creates a box, which is a container, with axes around it\n\t\n\tArguments:\n\t * viewport -- the viewport for this container, in normalized page coordinates\n\t * world -- the world tuple, specifying range and domain\n\t * title -- title (a string) which will be displayed at the top of the container\n\t * xxx-label -- a string/label that will be placed as indicated by 'xxx'\n\t\n\tFor the rest of the arguments, see 'axes'\n\t\n\t\"\"\"\n\tif page is None:\n\t\t_checkPage()\n\t\tpage = current.page\n\tcurrent.container = kaplot.objects.Box2(page, viewport=viewport, world=world, title=title, bottomlabel=bottomlabel, leftlabel=leftlabel, rightlabel=rightlabel, toplabel=toplabel, xinterval=xinterval, xinteger=xinteger, xstart=xstart, xsubticks=xsubticks, xlogarithmic=xlogarithmic, yinterval=yinterval, yinteger=yinteger, ystart=ystart, ysubticks=ysubticks, ylogarithmic=ylogarithmic, ticklength=ticklength, labeloffset=labeloffset, **kwargs)\n\treturn current.container\n\ndef container(viewport=((0, 0), (1, 1)), world=None, page=None, **kwargs):\n\t\"\"\"Container holds objects which can be drawn on the page.\n\t\n\tIt can also have decorators (they 'decorate' the container), which will\n\tnormally be drawn on the borders of the container, like 'labels' and 'axis' do.\n\tThey shrink the viewport, resulting in a shrunken inner viewport so that your labels\n\tare always visible.
\n\n\tArguments:\n\t * viewport -- the viewport for this container, in normalized page coordinates\n\t * world -- the world tuple, specifying range and domain\n\t\"\"\"\n\tif page is None:\n\t\t_checkPage()\n\t\tpage = current.page\n\tcurrent.container = kaplot.objects.Container(page, viewport=viewport, world=world, **kwargs)\n\treturn current.container\n\n\n","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":19643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"2042082","text":"# Customize a list in python\r\n\r\nclass PyList:\r\n    def __init__(self, contents=[], size=10):\r\n        self.items = [None]*size\r\n        self.numItems = 0\r\n        self.size = size\r\n\r\n        for e in contents:\r\n            print (\"Append = \", e)\r\n            self.append(e)\r\n\r\n    def print(self):\r\n        for i in self.items:\r\n            print(i)\r\n    \r\n    def printVersion2(self):\r\n        idx = 0\r\n        while idx < self.size-1:\r\n            print (\"value: \", self.items[idx], \" at position :\", idx)\r\n            idx = idx + 1\r\n        print()\r\n\r\n    def __getitem__(self, index):\r\n        if index >=0 and index < len(self.items):\r\n            return self.items[index]\r\n        raise IndexError (\"PyList index out of range\")\r\n    \r\n    def __setitem__(self, index,val):\r\n        if index >=0 and index < len(self.items):\r\n            self.items[index] = val\r\n            return\r\n        raise IndexError (\"PyList index out of range\")\r\n\r\n    def __makeroom(self):\r\n        newlen = (self.size // 4) + self.size + 1\r\n        newlst = [None] * newlen\r\n        for i in range(self.numItems):\r\n            newlst[i] = self.items[i]\r\n        self.items = newlst\r\n        self.size = newlen\r\n    \r\n    def append(self,item):\r\n        if self.numItems == self.size:\r\n            self.__makeroom()\r\n        self.items[self.numItems] = item\r\n        self.numItems +=1\r\n    \r\n    def __add__(self, other):\r\n        result = PyList(size=self.numItems+other.numItems)\r\n        for i in range(self.numItems):\r\n            result.append(self.items[i])\r\n        for i in range(other.numItems):\r\n            result.append(other.items[i])\r\n        return result \r\n\r\n    def __len__(self):\r\n        return len(self.items)\r\n\r\nif __name__ == \"__main__\":\r\n    sample = PyList([\"a\", \"b\", \"c\"])\r\n    sample.print()\r\n    sample.printVersion2()\r\n    #print (\"Index value at 8 position: \", sample.__getitem__(11))\r\n    sample.__setitem__(1, \"w\")\r\n    sample.printVersion2()\r\n    sample.append([\"n\",\"o\"])\r\n    sample.printVersion2()\r\n    print (\"Size of List: \",sample.__len__())","sub_path":"source code samples/DataStructures/pylist.py","file_name":"pylist.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"407716268","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 31 13:13:49 2017\n\n@author: Rodrigo.Andrade\n\nNOTE: this training uses the Keras generator; if you train on RGB images the\nnetwork input will be RGB (obvious, right?), but remember that OpenCV works\nwith BGR, so use cv2.cvtColor to convert RGB2BGR.\n\"\"\"\n\nfrom keras.optimizers import Adadelta\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import TensorBoard\nfrom models import Darknet19 as cnnModel\nfrom imutils import paths\nimport os\nimport json\n\n# network settings\nprefix = \"trained_models/weed_cam\" # name used when saving\nepochs = 10000\nbatchSize = 32\nwidth = 256\nheight = 256\ndepth = 3\n\n\n# dataset paths and names
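\n# NOTE: the absolute paths below are machine-specific; point them at your own train/val directories.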
\"/home/rodrigo/Documents/weed-detection-in-soybean-crops/train\"\ndatasetValPath = \"/home/rodrigo/Documents/weed-detection-in-soybean-crops/val\"\n\nclassesTrain = next(os.walk(datasetTrainPath))[1]\nclassesVal = next(os.walk(datasetValPath))[1]\n\nif not classesVal == classesTrain:\n raise Exception(\"As classes de treino são diferentes das classes de validação\")\nelse:\n pastas = classesTrain\n\n\n# config os geradores de dataset do keras\n# caso haja apenas duas classes, a rede será uma rede de classificação binária (um unico neurônio de saída)\nif len(pastas) == 2:\n classes = 1\nelse:\n classes = len(pastas)\n\n# faz a leitura do nome de todos arquivos para ter a contagem de amostras\nimagesTrainPaths = []\nimagesValPaths = []\nfor pasta in pastas:\n imagesTrainPaths += list(paths.list_images(os.path.join(datasetTrainPath, pasta)))\n imagesValPaths += list(paths.list_images(os.path.join(datasetValPath, pasta)))\nprint(len(imagesValPaths),len(imagesTrainPaths))\n\ntrainDatagen = ImageDataGenerator(\n rescale=1. / 255,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n vertical_flip=True)\n\ntrainGenerator = trainDatagen.flow_from_directory(\n datasetTrainPath,\n color_mode=\"grayscale\" if depth == 1 else \"rgb\",\n target_size=(height, width),\n batch_size=batchSize,\n class_mode=\"binary\" if classes == 1 else \"categorical\")\n\nvalDatagen = ImageDataGenerator(rescale=1. / 255)\n\nvalGenerator = valDatagen.flow_from_directory(\n datasetValPath,\n color_mode=\"grayscale\" if depth == 1 else \"rgb\",\n target_size=(height, width),\n batch_size=batchSize,\n class_mode=\"binary\" if classes == 1 else \"categorical\")\n\nwith open(\"classIndicesTrain.txt\", \"w\") as file:\n print(\"indice de classes data treino:\\n\", trainGenerator.class_indices)\n file.write(json.dumps(trainGenerator.class_indices))\nwith open(\"classIndicesVal.txt\", \"w\") as file:\n print(\"indice de classes data validação:\\n\", valGenerator.class_indices)\n file.write(json.dumps(valGenerator.class_indices))\n\n\n# callbacks\ncheckPointSaverBest = ModelCheckpoint(prefix+\"_bestacc.hdf5\", monitor='val_acc', verbose=1,\n save_best_only=True, save_weights_only=False, mode='auto', period=1)\ncheckPointSaverBestloss = ModelCheckpoint(prefix+\"_bestloss.hdf5\", monitor='val_loss', verbose=1, save_best_only=True,\n save_weights_only=False, mode='auto', period=1)\ncheckPointSaver = ModelCheckpoint(prefix + \"_ckp_{epoch}.hdf5\", verbose=1, save_best_only=False,\n save_weights_only=False, period=10)\n\ntb = TensorBoard(log_dir='logsTB', histogram_freq=0, batch_size=batchSize, write_graph=True,\n write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None,\n embeddings_metadata=None)\n\nif __name__ == \"__main__\":\n pass\n\n# criação da rede\nopt = Adadelta()\nmodel = cnnModel.build(width=None, height=None, depth=depth, classes=classes)\nmodel.compile(loss=\"binary_crossentropy\" if classes == 1 else \"categorical_crossentropy\",\n optimizer=opt, metrics=[\"accuracy\"])\nmodel.summary()\nmodel.fit_generator(\n trainGenerator,\n steps_per_epoch=len(imagesTrainPaths) // batchSize,\n epochs=epochs,\n validation_data=valGenerator,\n validation_steps=len(imagesValPaths),\n callbacks=[checkPointSaverBest, checkPointSaver, checkPointSaverBestloss, tb],\n workers=8,\n 
    max_queue_size=40)\n\n\n\n\n\n\n\n","sub_path":"CAM/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137049977","text":"from unittest import TestCase\n\nfrom django.conf import settings\nfrom django.test import override_settings\nfrom core.message_handlers import are_items_unique\nfrom core.message_handlers import store_subtask\nfrom core.models import Subtask\nfrom core.tests.utils import ConcentIntegrationTestCase\nfrom core.tests.utils import parse_iso_date_to_timestamp\nfrom core.utils import hex_to_bytes_convert\n\n\nclass TestMessageHandlers(TestCase):\n    def test_that_function_returns_true_when_ids_are_different(self):\n        response = are_items_unique([1, 2, 3, 4, 5])\n        self.assertTrue(response)\n\n    def test_that_function_returns_false_when_ids_are_the_same(self):\n        response = are_items_unique([1, 2, 3, 2, 5])\n        self.assertFalse(response)\n\n\n@override_settings(\n    CONCENT_MESSAGING_TIME=10,  # seconds\n)\nclass TestMessagesStored(ConcentIntegrationTestCase):\n    def setUp(self):\n        super().setUp()\n        self.compute_task_def = self._get_deserialized_compute_task_def(\n            task_id='1',\n            deadline=\"2017-12-01 11:00:00\"\n        )\n\n        self.task_to_compute_timestamp = \"2017-12-01 10:00:00\"\n        self.task_to_compute = self._get_deserialized_task_to_compute(\n            timestamp=self.task_to_compute_timestamp,\n            compute_task_def=self.compute_task_def,\n        )\n\n        self.report_computed_task_timestamp = \"2017-12-01 11:01:00\"\n        self.report_computed_task = self._get_deserialized_report_computed_task(\n            timestamp=self.report_computed_task_timestamp,\n            task_to_compute=self.task_to_compute,\n        )\n        self.provider_public_key = hex_to_bytes_convert(self.task_to_compute.provider_public_key)\n        self.requestor_public_key = hex_to_bytes_convert(self.task_to_compute.requestor_public_key)\n\n    def test_that_messages_are_stored_with_correct_timestamps(self):\n        subtask = store_subtask(\n            task_id=self.task_to_compute.compute_task_def['task_id'],\n            subtask_id=self.task_to_compute.compute_task_def['subtask_id'],\n            provider_public_key=self.provider_public_key,\n            requestor_public_key=self.requestor_public_key,\n            state=Subtask.SubtaskState.FORCING_REPORT,\n            next_deadline=int(self.task_to_compute.compute_task_def['deadline']) + settings.CONCENT_MESSAGING_TIME,\n            task_to_compute=self.task_to_compute,\n            report_computed_task=self.report_computed_task,\n        )\n        self.assertEqual(\n            parse_iso_date_to_timestamp(subtask.task_to_compute.timestamp.isoformat()),\n            parse_iso_date_to_timestamp(self.task_to_compute_timestamp)\n        )\n        self.assertEqual(\n            parse_iso_date_to_timestamp(subtask.report_computed_task.timestamp.isoformat()),\n            parse_iso_date_to_timestamp(self.report_computed_task_timestamp)\n        )\n","sub_path":"concent_api/core/tests/test_unit_message_handlers.py","file_name":"test_unit_message_handlers.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116658359","text":"from random import uniform\n\nfrom django.core.cache import cache\n\nfrom submission.models import SubmissionStatus, Submission\nfrom utils.permission import is_problem_manager\n\nUSER_TOTAL_COUNT = 'u{user}_c{contest}_total_count'\nUSER_TOTAL_LIST = 'u{user}_c{contest}_total_list'\nUSER_AC_COUNT = 'u{user}_c{contest}_ac_count'\nUSER_AC_DIFF_COUNT = 'u{user}_c{contest}_ac_diff'\nUSER_AC_LIST = 'u{user}_c{contest}_ac_list'\n\n
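# Fetch one cached stat for a user/contest pair, recomputing all of that user's cached stats on a miss.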
\ndef _get_or_invalidate(user_id, contest_id, cache_name):\n    t = cache.get(cache_name)\n    if t is None:\n        update_user(user_id, contest_id)\n        return cache.get(cache_name)\n    else:\n        return t\n\n\ndef get_accept_submission_count(user_id, contest_id=0):\n    cache_name = USER_AC_COUNT.format(user=user_id, contest=contest_id)\n    return _get_or_invalidate(user_id, contest_id, cache_name)\n\n\ndef get_accept_problem_count(user_id, contest_id=0):\n    cache_name = USER_AC_DIFF_COUNT.format(user=user_id, contest=contest_id)\n    return _get_or_invalidate(user_id, contest_id, cache_name)\n\n\ndef get_accept_problem_list(user_id, contest_id=0):\n    cache_name = USER_AC_LIST.format(user=user_id, contest=contest_id)\n    return _get_or_invalidate(user_id, contest_id, cache_name)\n\n\ndef get_total_submission_count(user_id, contest_id=0):\n    cache_name = USER_TOTAL_COUNT.format(user=user_id, contest=contest_id)\n    return _get_or_invalidate(user_id, contest_id, cache_name)\n\n\ndef get_attempted_problem_list(user_id, contest_id=0):\n    cache_name = USER_TOTAL_LIST.format(user=user_id, contest=contest_id)\n    return _get_or_invalidate(user_id, contest_id, cache_name)\n\n\ndef invalidate_user(user_id, contest_id=0):\n    cache.delete_many([USER_TOTAL_COUNT.format(user=user_id, contest=contest_id),\n                       USER_TOTAL_LIST.format(user=user_id, contest=contest_id),\n                       USER_AC_COUNT.format(user=user_id, contest=contest_id),\n                       USER_AC_DIFF_COUNT.format(user=user_id, contest=contest_id),\n                       USER_AC_LIST.format(user=user_id, contest=contest_id)])\n\n\ndef update_user(user_id, contest_id=0):\n    cache_time = 300 * uniform(0.6, 1)\n    if contest_id > 0:\n        cache_time = 60 * uniform(0.6, 1)\n        submission_filter = Submission.objects.filter(author_id=user_id, contest_id=contest_id).all()\n    else:\n        submission_filter = Submission.objects.filter(author_id=user_id).all()\n    ac_filter = submission_filter.filter(status=SubmissionStatus.ACCEPTED).all()\n\n    total_count = submission_filter.count()\n    total_list = list(submission_filter.order_by().values_list(\"problem_id\", flat=True).distinct())\n    accept_count = ac_filter.count()\n    accept_list = list(ac_filter.order_by().values_list(\"problem_id\", flat=True).distinct())\n    accept_diff = len(accept_list)\n\n    cache.set(USER_TOTAL_COUNT.format(user=user_id, contest=contest_id), total_count, cache_time)\n    cache.set(USER_TOTAL_LIST.format(user=user_id, contest=contest_id), total_list, cache_time)\n    cache.set(USER_AC_COUNT.format(user=user_id, contest=contest_id), accept_count, cache_time)\n    cache.set(USER_AC_DIFF_COUNT.format(user=user_id, contest=contest_id), accept_diff, cache_time)\n    cache.set(USER_AC_LIST.format(user=user_id, contest=contest_id), accept_list, cache_time)\n\n\ndef is_problem_accepted(user, problem):\n    return is_problem_manager(user, problem) or (user.is_authenticated and\n                                                 user.submission_set.filter(problem=problem, status=SubmissionStatus.ACCEPTED).exists())\n","sub_path":"submission/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"523552451","text":"import tkinter as tk\r\n\r\nroot = tk.Tk() # create the main window\r\nroot.geometry('200x200') # set the window size\r\n\r\nbutton1 = tk.Button(\r\n    root, text='ボタン1').place(x=0, y=0) # place at the window's top-left corner\r\n\r\nbutton2 = tk.Button( # 50px from the left edge,\r\n    root, text='ボタン2').place(x=50, y=50) # and 50px from the top edge\r\n\r\nbutton3 = tk.Button( # 100px from the left edge,\r\n    root, text='ボタン3').place(x=100, y=100) # and 100px from the top edge
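\r\n\r\n# NOTE: place() pins each widget at absolute pixel coordinates, so this layout does not adapt on resize;\r\n# also, .place() returns None, so button1/button2/button3 are all None here.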
\r\n\r\nroot.mainloop()\r\n","sub_path":"_otherbook/Python_tutorial/chap10/10_01/button_pack_place.py","file_name":"button_pack_place.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114187159","text":"# -*- coding: utf-8 -*-\n# @Author: claravania\n# @Date:   2019-02-10 15:16:10\n# @Last Modified by:   claravania\n# @Last Modified time: 2019-03-20 10:29:35\n\nfrom typing import Dict, Optional\nfrom overrides import overrides\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport sys\n\nfrom allennlp.common import Params\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules import FeedForward, TextFieldEmbedder\nfrom allennlp.models.model import Model\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\nfrom allennlp.nn import util\nfrom allennlp.training.metrics import CategoricalAccuracy\n\n\n@Model.register(\"mlp_classifier_contrastive\")\nclass MlpClassifierContrastive(Model):\n\t\"\"\"\n\tThis is a simple feedforward classifier. Given embeddings of two tokens, we want to predict some linguistic feature (tag).\n\tWe concatenate the embedding of both tokens and then feed it to the classifier.\n\n\tParameters\n\t----------\n\tclassifier_feedforward : ``FeedForward``\n\tinitializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)\n\t\tUsed to initialize the model parameters.\n\tregularizer : ``RegularizerApplicator``, optional (default=``None``)\n\t\tIf provided, will be used to calculate the regularization penalty during training.\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t vocab: Vocabulary,\n\t\t\t\t text_field_embedder: TextFieldEmbedder,\n\t\t\t\t classifier_feedforward: FeedForward,\n\t\t\t\t initializer: InitializerApplicator = InitializerApplicator(),\n\t\t\t\t regularizer: Optional[RegularizerApplicator] = None) -> None:\n\n\t\tsuper(MlpClassifierContrastive, self).__init__(vocab, regularizer)\n\n\t\tself.text_field_embedder = text_field_embedder\n\t\tself.num_classes = self.vocab.get_vocab_size(\"labels\")\n\t\tself.classifier_feedforward = classifier_feedforward\n\n\t\tself.metrics = {\"accuracy\": CategoricalAccuracy()}\n\t\tself.loss = torch.nn.CrossEntropyLoss()\n\t\tinitializer(self)\n\n\n\t@overrides\n\tdef forward(self,\n\t\t\t\ttoken1: str,\n\t\t\t\ttoken2: str,\n\t\t\t\tlabel: torch.LongTensor = None) -> Dict[str, torch.Tensor]:\n\t\t# pylint: disable=arguments-differ\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\ttoken1 : Variable, input token 1, required\n\t\ttoken2 : Variable, input token 2, required\n\t\tlabel : Variable, optional (default = None)\n\t\t\tA variable representing the label for each instance in the batch.\n\t\tReturns\n\t\t-------\n\t\tAn output dictionary consisting of:\n\t\tclass_probabilities : torch.FloatTensor\n\t\t\tA tensor of shape ``(batch_size, num_classes)`` representing a distribution over the\n\t\t\tlabel classes for each instance.\n\t\tloss : torch.FloatTensor, optional\n\t\t\tA scalar loss to be optimised.\n\t\t\"\"\"\n\t\tembedded_token_1 = self.text_field_embedder(token1)\n\t\tembedded_token_1 = torch.squeeze(embedded_token_1, dim=1)\n\n\t\tembedded_token_2 = self.text_field_embedder(token2)\n\t\tembedded_token_2 = torch.squeeze(embedded_token_2, dim=1)\n\n\t\tembedded_token = torch.cat((embedded_token_1, embedded_token_2), 1)\n
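\t\t# the token pair is represented by concatenating both embeddings along the feature dimension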
\n\t\tlogits = self.classifier_feedforward(embedded_token)\n\t\toutput_dict = {'logits': logits}\n\t\t\n\t\tif label is not None:\n\t\t\tloss = self.loss(logits, label)\n\t\t\tfor metric in self.metrics.values():\n\t\t\t\tmetric(logits, label)\n\t\t\toutput_dict['loss'] = loss\n\n\t\treturn output_dict\n\n\n\t@overrides\n\tdef decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n\t\t\"\"\"\n\t\tDoes a simple argmax over the class probabilities, converts indices to string labels, and\n\t\tadds a ``\"label\"`` key to the dictionary with the result.\n\t\t\"\"\"\n\t\tclass_probabilities = F.softmax(output_dict['logits'], dim=-1)\n\t\toutput_dict['class_probabilities'] = class_probabilities\n\n\t\tpredictions = class_probabilities.cpu().data.numpy()\n\t\targmax_indices = np.argmax(predictions, axis=-1)\n\t\tlabels = [self.vocab.get_token_from_index(x, namespace=\"labels\") for x in argmax_indices]\n\t\toutput_dict['label'] = labels\n\t\treturn output_dict\n\n\n\t@overrides\n\tdef get_metrics(self, reset: bool = False) -> Dict[str, float]:\n\t\treturn {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}\n\n\t\t\n\n","sub_path":"intrinsic/evaluation/classifiers/models/mlp_classifier_contrastive.py","file_name":"mlp_classifier_contrastive.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108953218","text":"from odoo.exceptions import UserError\nfrom odoo import models, fields, api, _\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass IrCron(models.Model):\n    _inherit = 'ir.cron'\n\n    mk_instance_id = fields.Many2one(\"mk.instance\", string=\"Marketplace Instance\", ondelete='cascade')\n\n    def unlink(self):\n        for record in self:\n            if record.mk_instance_id:\n                raise UserError(_('You cannot delete Marketplace Cron because it is associated with Marketplace Instance: {}.'.format(record.mk_instance_id.name)))\n        res = super(IrCron, self).unlink()\n        return res\n\n    def write(self, vals):\n        for record in self:\n            if record.mk_instance_id and ('code' in vals or 'model_id' in vals or 'state' in vals):\n                raise UserError(_('You cannot modify some fields of Marketplace Cron because it is associated with Marketplace Account: {}.'.format(record.mk_instance_id.name)))\n        res = super(IrCron, self).write(vals)\n        return res\n\n    def create_marketplace_cron(self, mk_instance_id, name, method_name='', model_name='', interval_type='minutes', interval_number=20):\n        vals = {'name': name,\n                'active': False,\n                'numbercall': -1,\n                'interval_number': interval_number,\n                'interval_type': interval_type,\n                'nextcall': fields.Datetime.to_string(datetime.now() + relativedelta(**{interval_type: interval_number})),\n                'code': \"model.{}({})\".format(method_name, mk_instance_id.id),\n                'state': 'code',\n                'model_id': self.env['ir.model'].search([('model', '=', model_name)]).id,\n                'mk_instance_id': mk_instance_id.id,\n                'user_id': self.env.user.id}\n        self.create(vals)\n        return True\n\n    def setup_schedule_actions(self, mk_instance_id):\n        \"\"\"\n        Call the marketplace-specific hook method to set up marketplace crons; each marketplace app just needs to define that hook method.\n        :param mk_instance_id: Recordset of mk.instance\n        :return: True\n        e.g.
 def shopify_setup_schedule_actions(self, mk_instance_id):\n        \"\"\"\n        if hasattr(mk_instance_id, '%s_setup_schedule_actions' % mk_instance_id.marketplace):\n            getattr(mk_instance_id, '%s_setup_schedule_actions' % mk_instance_id.marketplace)(mk_instance_id)\n        return True\n","sub_path":"base_marketplace/models/ir_cron.py","file_name":"ir_cron.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59066002","text":"# import msvcrt\r\nimport random\r\n\r\nprint(\"loading LibreOfficeFile\")\r\n\r\n\r\nclass LibreOfficeFile:\r\n\r\n    def __init__(self, fdcr):\r\n        self.fdcr = fdcr\r\n        self.current_feuille = None\r\n        self.feuilles = {}\r\n\r\n    def save_colonne(self, num_col):\r\n        if self.current_feuille:\r\n            self.feuilles[self.current_feuille].append(num_col)\r\n\r\n        else:\r\n            print(\"No sheet is active!\")\r\n\r\n    def ajouter_feuille(self, nom_feuille):\r\n        if nom_feuille not in self.feuilles:\r\n            self.current_feuille = nom_feuille\r\n            self.feuilles[nom_feuille] = []\r\n            fin = 0\r\n            for i in range(random.randint(1, 5)):\r\n                debut = random.randint(fin+1, fin+10)\r\n                fin = debut + random.randint(1, 5)\r\n                for col in range(debut, fin):\r\n                    self.save_colonne(col)\r\n\r\n            print(f\"Sheet *{nom_feuille}* added with columns: {self.feuilles[nom_feuille]}\")\r\n\r\n        else:\r\n            print(f\"*{nom_feuille}* already present with {self.feuilles[nom_feuille]}\")\r\n\r\n    def restore_colonnes(self):\r\n        print(f\"restoring: {len(self.feuilles)} sheets\")\r\n        for feuille in self.feuilles:\r\n            print(\"  \", feuille, end=\" : \")\r\n            for num_col in self.feuilles[feuille]:\r\n                print(f\"{num_col}, \", end=\"\")\r\n\r\n            print()\r\n\r\n        self.feuilles.clear()\r\n        print(\"columns restored!\")\r\n\r\n    def __enter__(self):\r\n        return self\r\n\r\n    def __exit__(self, *args):\r\n        for i, arg in enumerate(args):\r\n            if arg: \r\n                print(i, arg)\r\n\r\n        self.restore_colonnes()\r\n\r\n    def __repr__(self):\r\n        return f\"print : {self.feuilles}\\n\"\r\n\r\n\r\ntry:\r\n    with LibreOfficeFile(\"objFdCR\") as fichier:\r\n\r\n        for num in range(10):\r\n            fichier.ajouter_feuille(f\"Feuille {random.randint(2, 10)}\")\r\n\r\n        # print(fichier)\r\n\r\n        a = 1/10\r\n\r\n\r\nexcept Exception as e:\r\n    print(e)\r\n\r\nprint(fichier)\r\n\r\n# msvcrt.getch()\r\n","sub_path":"libreoffice.py","file_name":"libreoffice.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"612826191","text":"from SimpleCanvas import Canvas\nfrom effects import Colors\nfrom Photo import Photo\nfrom errors import *\nfrom urllib.request import urlopen\nfrom urllib.error import URLError\nfrom bs4 import BeautifulSoup\nfrom random import randint\nfrom copy import deepcopy\nfrom time import sleep\nimport numpy as np\nimport cv2\n\n\nclass GalleryGenerator:\n    def __init__(self, width: int = 1500, height: int = 1000):\n        self._width = width\n        self._height = height\n        self._canvas = np.zeros((height, width, 3), np.uint8)\n        self._numeric_canvas = Canvas(width, height)\n        self._main_page = \"https://source.unsplash.com/featured/?\"\n        self._photos = []\n        self._approved_photos = []\n        self._photos_limit = 5\n\n\n    def _create_canvas(self, background: str):\n        \"\"\"\n        Creating an empty canvas.\n        \"\"\"\n        if background in Colors.keys():\n            self._canvas = np.zeros((self._height, self._width, 3), np.uint8)\n            self._canvas[:,:] = Colors[background]\n        else:
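\n            # 'background' is not a known color name here, so treat it as a search topic and fetch a matching photo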
\n            background, response_code = self._download_photo(background)\n            self._canvas = self._resize_photo(Photo(background), self._width, self._height).image()\n        self._background = Photo(deepcopy(self._canvas), 0, 0, self._width, self._height, \"background\")\n\n\n    def show_canvas(self):\n        \"\"\"\n        Showing the canvas with photos.\n        \"\"\"\n        cv2.imshow(\"gallery\", self._canvas)\n\n\n    def canvas(self):\n        \"\"\"\n        Returning canvas as ndarray.\n        \"\"\"\n        return self._canvas\n\n\n    def set_canvas(self, new_canvas: np.ndarray):\n        \"\"\"\n        Setting new canvas.\n        \"\"\"\n        self._canvas = new_canvas\n\n\n    def _check_difference(self, photo_1: np.ndarray, photo_2: np.ndarray):\n        \"\"\"\n        Checking whether two photos are identical.\n        in:\n            photo_1: NUMPY_ndarray\n            photo_2: NUMPY_ndarray\n        out:\n            BOOL, True if there is no difference\n        \"\"\"\n        difference = cv2.subtract(photo_1, photo_2)\n        b, g, r = cv2.split(difference)\n        if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:\n            return True\n        return False\n\n\n    def _compare_photo(self, the_photo: np.ndarray):\n        \"\"\"\n        Checking if the photo is not in the chosen photos.\n        in:\n            photo\n        out:\n            BOOL, True if the photo is not among the chosen photos\n        \"\"\"\n        for photo in self._photos:\n            if photo.image().shape != the_photo.shape:\n                continue  # different shapes => not the same\n            \n            difference = self._check_difference(photo.image(), the_photo)\n            if difference:\n                return False\n\n        return True  # there are differences\n\n\n    def _download_photo(self, topic: str):\n        \"\"\"\n        Downloading photo on the topic.\n        \"\"\"\n        response = urlopen(self._main_page+topic.replace(' ', '-'))\n        code = response.status\n        image = np.asarray(bytearray(response.read()), dtype = \"uint8\")\n        image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n        return image, code\n\n\n    def _find_photo(self, topic: str):\n        \"\"\"\n        Finding a new photo which is not in the chosen photos.\n        \"\"\"\n        difference = False\n        number_of_loop = 0\n        while not difference:\n            photo, response_code = self._download_photo(topic)\n            difference = self._compare_photo(photo)\n            if number_of_loop >= self._photos_limit:\n                break\n            number_of_loop += 1\n        return Photo(photo, None, None, photo.shape[1], photo.shape[0], \"photo\")\n\n\n    def _resize_shapes(self, photo: Photo, divider: int = 3):\n        \"\"\"\n        Resizing shapes of photo.\n        \"\"\"\n        width, height = photo.width()//divider, photo.height()//divider\n        return width, height\n\n\n    def _resize_photo(self, photo: Photo, new_width: int, new_height: int):\n        \"\"\"\n        Returning resized photo.\n        \"\"\"\n        resized_image = cv2.resize(photo.image(), (new_width, new_height))\n        new_photo = Photo(resized_image, photo.x(), photo.y(), new_width, new_height, photo.name())\n        return new_photo\n\n\n    def resized_canvas(self, new_width: int, new_height: int):\n        \"\"\"\n        Returning resized canvas\n        \"\"\"\n        return cv2.resize(self._canvas, (new_width, new_height))\n\n\n    def _add_photo(self, photo: Photo):\n        \"\"\"\n        Adding the photo to the canvas if there is space for it.\n        \"\"\"\n        width, height = self._resize_shapes(photo)\n        \n        cords = self._numeric_canvas.find_free_place(width, height)\n        if not cords:  # there is no free space for this photo :C\n            return None\n        \n        x, y = cords\n        photo.setx(x)\n        photo.sety(y)\n        resized_photo = self._resize_photo(photo, width, height)\n        self._canvas[y:y+height, x:x+width] = resized_photo.image()\n        self._approved_photos.append(resized_photo)\n\n\n    def background(self):\n        \"\"\"\n        Return background as Photo class.\n        \"\"\"\n        return self._background\n\n
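\n    # only photos that actually fit on the canvas end up in _approved_photos (see _add_photo)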
\n    def photos(self):\n        \"\"\"\n        Return the list of photos placed on the canvas.\n        \"\"\"\n        return self._approved_photos\n\n\n    def check_topic(self, topic: str):\n        \"\"\"\n        Checking if the topic is valid (has enough photos on Unsplash).\n        in:\n            topic\n        out:\n            BOOL, True if the topic is valid\n        \"\"\"\n        url = f\"https://unsplash.com/s/photos/{topic.replace(' ', '-')}\"\n        try:\n            request = urlopen(url)\n        except URLError:\n            assert UnsplashConnectError\n            return False, -1\n        soup = BeautifulSoup(request.read(), \"html.parser\")\n        response = soup.findAll(\"span\", {\"class\": \"_3ruL8\"})\n        try:\n            count = response[0].get_text()\n            count = int(count)  # count is an int number (<1000)\n            return (count > 10, count)\n        except ValueError:  # count is bigger than 1000 (1.0k)\n            return True, 300\n\n\n    def cut_canvas(self):\n        \"\"\"\n        Cutting the canvas down to the minimal possible size.\n        \"\"\"\n        new_width, new_height = self._numeric_canvas.cut_canvas()\n        crop_canvas = self._canvas[0: new_height, 0: new_width]\n        crop_canvas = cv2.cvtColor(crop_canvas, cv2.COLOR_BGR2RGB)\n        self._canvas = crop_canvas\n\n\n    def generate_gallery(self, topic: str, number_of_photos: int = 9, background: str = \"Black\"):\n        \"\"\"\n        Generating a new gallery with photos about the given topic.\n        \"\"\"\n        topic_bool, _ = self.check_topic(topic)\n        number_of_loop = 0\n        self._create_canvas(background)\n        while topic_bool and (len(self._photos) < number_of_photos):\n            if not self._numeric_canvas.is_free_space():\n                break\n            \n            new_photo = self._find_photo(topic)\n            self._add_photo(new_photo)\n            \n            self._photos.append(new_photo)\n\n            number_of_loop += 1\n            if number_of_loop >= self._photos_limit:\n                break\n\n\n    def save_gallery(self, file_name: str = \"my_gallery.jpg\"):\n        \"\"\"\n        Saving the gallery as file_name\n        \"\"\"\n        cv2.imwrite(file_name, self.canvas())\n\n\n    \n\nif __name__ == \"__main__\":\n    gen = GalleryGenerator(1000, 800)\n\n    gen.generate_gallery(topic = \"new york\", background = \"Black\")\n    # gen.cut_canvas()\n    gallery = gen.canvas()\n    \n    gen.show_canvas()\n    \n    gen._numeric_canvas.show()\n","sub_path":"GalleryGenerator.py","file_name":"GalleryGenerator.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296793449","text":"'''\nhttps://leetcode.com/problems/add-binary/description/\n'''\n\n\ndef addBinary(a, b):\n    \"\"\"\n    :type a: str\n    :type b: str\n    :rtype: str\n    \"\"\"\n    a = [int(content) for content in list(a)]\n    b = [int(content) for content in list(b)]\n    if(len(a) > len(b)):\n        for i in range(len(a)-len(b)):\n            b.insert(0,0)\n    else:\n        for i in range(len(b)-len(a)):\n            a.insert(0,0)\n    res = []\n    size = -(len(b)+1)\n    pos = -1\n    carry = False\n    while(pos > size):\n        if(a[pos] == 0 and b[pos] == 0):\n            if(carry):\n                res.insert(0,1)\n                carry = False\n            else:\n                res.insert(0,0)\n            pos -= 1\n        \n        elif(a[pos] == 1 and b[pos] == 1):\n            if(carry):\n                res.insert(0,1)\n            else:\n                res.insert(0,0)\n            carry = True\n            pos -= 1\n        \n        else:\n            if(carry):\n                res.insert(0,0)\n                carry = True\n            else:\n                res.insert(0,1)\n            pos -= 1\n\n    if(carry):\n        res.insert(0,1)\n    return \"\".join(list(map(str, res)))\n    \nprint(addBinary(\"1010\",\"1011\"))\n","sub_path":"LeetCode/1.
Easy/Add Binary.py","file_name":"Add Binary.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"156066618","text":"import json\nfrom datetime import datetime\nimport discord\nfrom discord.ext import commands\n\nclass MiErrors():\n def __init__(self, bot):\n self.bot = bot\n\n async def on_command_error(self, ctx, error):\n # Ignore CommandNotFound\n if isinstance(error, commands.CommandNotFound):\n return\n\n # MAPI cooldown is active\n if str(error) == \"Command raised an exception: CooldownActive: Cooldown is active\":\n await ctx.send(embed=discord.Embed(color=0xff0000).add_field(name=\"API Cooldown\", value=\"This command is being ratelimited by the Mila Bot API, please try again in a few seconds!\"))\n\n # Not enough arguments\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(embed=discord.Embed(color=0xff0000).add_field(name=\"Input Error\", value=f\"You did not provide enough arguments! Check the usage instructions then try again!\\n\\n**Usage:** {Utils.usage(ctx.command)}\").set_footer(text=\"<> = Required | [] = Optional | (x|y|z) = Aliases\"))\n\n # Arguments aren't of correct type\n if isinstance(error, commands.BadArgument):\n await ctx.send(embed=discord.Embed(color=0xff0000).add_field(name=\"Input Error\", value=f\"You did not provide the correct arguements! Check the usage instructions then try again!\\n\\n**Usage:** {Utils.usage(ctx.command)}\").set_footer(text=\"<> = Required | [] = Optional | (x|y|z) = Aliases\"))\n\n # Command is on cooldown\n if isinstance(error, commands.CommandOnCooldown):\n await ctx.send(embed=discord.Embed(color=0xff0000).add_field(name=\"Cooldown Active\", value=f\"This command is on cooldown, check the waiting time before trying again!\\n\\n**Retry after:** {round(error.retry_after)} seconds\"))\n\n # Invoker lacks needed permissions\n if isinstance(error, commands.MissingPermissions):\n if getattr(error, \"missing_perms\"):\n await ctx.send(embed=discord.Embed(color=0xff0000).add_field(name=\"Missing Permissions\", value=f\"You are missing required permissions to use this command! Check that you are authorized to do this!\\n\\n**Missing permissions:** {[x for x in error.missing_perms]}\"))\n\n # Bot lacks needed permissions\n if isinstance(error, commands.BotMissingPermissions) or isinstance(error, discord.Forbidden):\n await ctx.send(embed=discord.Embed(color=0xff0000).add_field(name=\"Missing Permissions\", value=f\"I am missing required permissions to execute this action! 
 Check that I have permission to do this!\\n\\n**Missing permissions:** {'|'.join([f'`{x}`' for x in error.missing_perms])}\"))\n\n        # Check failure\n        if isinstance(error, commands.CheckFailure):\n            owner = False\n            nsfw = False\n            for x in ctx.command.checks:\n                # Bot developer\n                if str(x).startswith(\".predicate\"):\n                    owner = True\n                    break\n\n                # NSFW command\n                if str(x).startswith(\" 0:\n            print(\"Worker mission assigned {}\".format(self.worker_missions[0].action))\n            return self.worker_missions.pop(0)\n        else:\n            return self.__create_new_worker_mission__()\n    elif unit_type == bc.UnitType.Healer:\n        if len(self.healer_missions) > 0:\n            print(\"Healer mission assigned\")\n            return self.healer_missions.pop(0)\n        else:\n            return self.__create_new_healer_mission__()\n    elif unit_type == bc.UnitType.Factory:\n        if len(self.factory_missions) > 0:\n            print(\"Factory mission assigned\")\n            return self.factory_missions.pop(0)\n        else:\n            return self.__create_new_factory_mission__()\n    elif unit_type == bc.UnitType.Rocket:\n        if len(self.rocket_missions) > 0:\n            print(\"Rocket mission assigned\")\n            return self.rocket_missions.pop(0)\n        else:\n            return self.__create_new_rocket_mission__()\n    else:\n        if len(self.combat_missions) > 0:\n            print(\"Combat mission assigned\")\n            return self.combat_missions.pop(0)\n        else:\n            return self.__create_new_combat_mission__()\n\n    def CreateBuildMission(self,structure):\n        new_mission = Mission()\n        new_mission.action = Missions.Build\n        new_mission.info = MissionInfo()\n        new_map_location = structure.location.map_location().clone()\n        x = random.randint(-1,1)\n        y = random.randint(-1,1)\n        if x == 0 and y == 0:\n            x = 1\n        new_mission.info.map_location = bc.MapLocation(bc.Planet.Earth,new_map_location.x + x,new_map_location.y + y )\n        new_mission.info.map_location = new_map_location\n        new_mission.info.unit_id = structure.id\n        new_mission.info.unit = structure\n        if structure.unit_type == bc.UnitType.Rocket:\n            new_mission.info.isRocket = True\n        return new_mission\n\n    def CreateFactoryBlueprintMission(self, location):\n        new_mission = Mission()\n        new_mission.action = Missions.CreateBlueprint\n        new_mission.info = MissionInfo()\n        #map_location = bc.MapLocation(self.game_controller.planet(), 0, 0)\n        #map_location.x = random.randint(0, 12)\n        #map_location.y = random.randint(0, 12)\n        new_mission.info.map_location = location # TODO get open location from the map\n        \n        return new_mission\n\n    def CreateRocketBlueprintMission(self,location):\n        new_mission = Mission()\n        new_mission.action = Missions.CreateBlueprint\n        new_mission.info = MissionInfo()\n        new_mission.info.isRocket = True\n        # map_location = bc.MapLocation(self.game_controller.planet(), 0, 0)\n        # map_location.x = random.randint(0, 12)\n        #map_location.y = random.randint(0, 12)\n        new_mission.info.map_location = location # TODO get open location from the map\n        #self.rocketCount += 1\n        self.MustBuildRocket = False\n        return new_mission\n\n    def GetMarsLocation(self):\n        location = self.map_controller.GetRandomMarsNode()\n        return location\n\n    def __create_new_worker_mission__(self):\n        #Determine what mission to assign based on the current strategy\n        if self.strategy_controller.unitStrategy == UnitStrategies.Default:\n\n            #Mine Karbonite\n            new_mission = Mission()\n            new_mission.action = Missions.Mining\n            map_location = bc.MapLocation(self.game_controller.planet(), 0, 0)\n            map_location.x = random.randint(0, 10)\n            map_location.y = random.randint(0, 10)\n            new_mission.info = map_location # TODO get mining location from map\n            return new_mission\n\n    def
 __create_new_healer_mission__(self):\n\n        if self.strategy_controller.unitStrategy == UnitStrategies.Default:\n            chance = random.randint(0, 100)\n            if chance > 50:\n                if len(self.game_controller.my_units()) > 1:\n                    new_mission = Mission()\n                    new_mission.action = Missions.FollowUnit\n                    new_mission.info = self.game_controller.my_units()[0]\n                    # TODO create logic for acquiring a target to follow\n                    return new_mission\n                else:\n                    new_mission = Mission()\n                    new_mission.action = Missions.Idle\n                    return new_mission\n            else:\n                new_mission = Mission()\n                new_mission.action = Missions.Idle\n                return new_mission\n\n    def __create_new_combat_mission__(self):\n\n        if self.strategy_controller.unitStrategy == UnitStrategies.Default:\n            chance = random.randint(1, 100)\n            if chance > 0:\n                new_mission = Mission()\n                new_mission.action = Missions.RandomMovement\n                return new_mission\n            elif chance > 25:\n                new_mission = Mission()\n                new_mission.action = Missions.Patrol\n                new_mission.info = MissionInfo()\n                map_location = bc.MapLocation(self.game_controller.planet(), 0, 0)\n                #TODO better patrol location\n                map_location.x = random.randint(0, 20)\n                map_location.y = random.randint(0, 20)\n                new_mission.info.map_location = map_location\n                return new_mission\n            else:\n                new_mission = Mission()\n                new_mission.action = Missions.Idle\n                return new_mission\n\n    def __create_new_factory_mission__(self):\n        production_chance = None\n        if self.strategy_controller.unitStrategy == UnitStrategies.Default:\n            production_chance = [50, 0, 40, 20, 0] # Workers and Knights\n            #production_chance = [80, 60, 40, 20, 0]\n            #Balanced production chance\n\n        chance = random.randint(1, 100)\n        #if not self.MustBuildRocket and chance > production_chance[0]:\n        #    new_mission = Mission()\n        #    new_mission.action = Missions.TrainBot\n        #    new_mission.info = bc.UnitType.Worker\n        #    return new_mission\n        #elif not self.MustBuildRocket and chance > production_chance[1]:\n        #    new_mission = Mission()\n        #    new_mission.action = Missions.TrainBot\n        #    new_mission.info = bc.UnitType.Knight\n        #    return new_mission\n        #elif not self.MustBuildRocket and chance > production_chance[2]:\n        #    new_mission = Mission()\n        #    new_mission.action = Missions.TrainBot\n        #    new_mission.info = bc.UnitType.Healer\n        #    return new_mission\n        if not self.MustBuildRocket and \\\n                self.game_controller.karbonite() >= bc.UnitType.Ranger.factory_cost():\n            new_mission = Mission()\n            new_mission.action = Missions.TrainBot\n            new_mission.info = bc.UnitType.Ranger\n            return new_mission\n        #elif not self.MustBuildRocket and chance > production_chance[4]:\n        #    new_mission = Mission()\n        #    new_mission.action = Missions.TrainBot\n        #    new_mission.info = bc.UnitType.Mage\n        #    return new_mission\n        else:\n            new_mission = Mission()\n            new_mission.action = Missions.Idle\n            return new_mission\n\n    def __create_new_rocket_mission__(self):\n        \n        if self.strategy_controller.unitStrategy == UnitStrategies.Default:\n            chance = random.randint(1, 100)\n            if chance > 0:\n                new_mission = Mission()\n                new_mission.action = Missions.RandomMovement\n                return new_mission\n            elif chance > 25:\n                new_mission = Mission()\n                new_mission.action = Missions.Patrol\n                new_mission.info = MissionInfo()\n                map_location = bc.MapLocation(self.game_controller.planet(), 0, 0)\n                #TODO better patrol location\n                map_location.x = random.randint(0, 20)\n                map_location.y = random.randint(0, 20)\n                new_mission.info.map_location = map_location\n                return new_mission\n            else:\n                new_mission = Mission()\n                new_mission.action = Missions.Idle\n                return 
new_mission","sub_path":"bc18-scaffold/OnshoreBattlebot2018/Controllers/MissionController.py","file_name":"MissionController.py","file_ext":"py","file_size_in_byte":11657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"223046741","text":"import torch\nimport time\nimport nltk\n\nnltk.download('punkt')\n\n\nclass TranslationError(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass Translator:\n def __init__(self):\n self.models_loaded = False\n self._model_de_en = None\n self._model_en_de = None\n\n def initialize_models(self):\n print(\"Loading de2en model...\")\n self._model_de_en = self._initialize_model('transformer.wmt19.de-en')\n print(\"Loading en2de model...\")\n self._model_en_de = self._initialize_model('transformer.wmt19.en-de')\n\n self.models_loaded = True\n\n print(self.translate('Das Modell ist nun geladen', 'en'))\n\n @staticmethod\n def _initialize_model(model_name):\n load_start = time.time()\n model = torch.hub.load('pytorch/fairseq:v0.10.2', model_name,\n checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt',\n tokenizer='moses', bpe='fastbpe')\n model.eval()\n load_end = time.time()\n print(\"Model loaded in {} seconds\".format(load_end - load_start))\n if torch.cuda.is_available():\n if torch.cuda.get_device_properties(0).total_memory > 11900000000:\n print(\"CUDA available and GPU memory appears sufficient - loading de2en model into GPU...\")\n model.cuda()\n else:\n print(\"CUDA available but GPU memory is lower than the recommended 12GB. Running from RAM...\")\n else:\n print(\"CUDA not available, running model from RAM...\")\n\n return model\n\n def translate(self, text, target_language):\n if not self.models_loaded:\n raise TranslationError(\"Translation model not yet loaded\")\n\n return self._translate_text(text, target_language)\n\n def _translate_text(self, text, target_language):\n model = self._model_en_de\n if target_language == 'en':\n model = self._model_de_en\n elif target_language != 'de':\n raise ValueError(\"Currently, only 'en' and 'de' are supported as language\")\n\n text_list = self._tokenize_and_split_sentences(text)\n text_list_translated = model.translate(text_list)\n text_list_translated = self._fix_empty_lines(text_list, text_list_translated)\n return ' '.join(text_list_translated)[:-1]\n\n @staticmethod\n def _tokenize_and_split_sentences(unprocessed_string):\n lines = unprocessed_string.split('\\n')\n processed_sentences = []\n for line in lines:\n sentences = nltk.tokenize.sent_tokenize(line)\n for sentence in sentences:\n processed_sentences.append(sentence + \" \")\n processed_sentences.append(\"\\n\")\n return processed_sentences\n\n @staticmethod\n def _fix_empty_lines(input_string_list, output_string_list):\n for i in range(len(input_string_list)):\n if input_string_list[i] == \"\\n\":\n output_string_list[i] = \"\\n\"\n return output_string_list\n","sub_path":"src/main/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49909116","text":"from random import randrange\n\nlengthDifficultyList = {'K':{'Easy':3, 'Normal':5, 'Hard':7, 'gongcha':2}, 'E':{'Easy':5, 'Normal':8, 'Hard':11, 'gongcha':3}}\n\n\nclass Word():\n def __init__(self, language='E'):\n self.words = []\n self.count = 0\n self.language = language\n\n if self.language == 'K':\n file = open('wordK.txt', 'r')\n elif self.language == 'E':\n file = 
open('wordE.txt', 'r')\n        else:\n            file = open('wordKE.txt', 'r')\n\n        for line in file.readlines():\n            self.words.append(line.rstrip())\n            self.count += 1\n        file.close()\n\n    def randFromDB(self, difficulty='Easy'):\n        randomWord = self.words[randrange(self.count)]\n        self.language = 'E' if 'a' <= randomWord <= 'z' or 'A' <= randomWord <= 'Z' else 'K' # Only matters in KE mode (Korean + English): true when the randomly drawn word is English\n        limitLen = lengthDifficultyList[self.language][difficulty]\n        while not (limitLen - lengthDifficultyList[self.language]['gongcha'] <= len(randomWord) <= limitLen):\n            randomWord = self.words[randrange(self.count)]\n            self.language = 'E' if 'a' <= randomWord <= 'z' or 'A' <= randomWord <= 'Z' else 'K'\n        return randomWord\n","sub_path":"MoleGame/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"598450604","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    wakatime.stats\n    ~~~~~~~~~~~~~~\n\n    Stats about files\n\n    :copyright: (c) 2013 Alan Hamlett.\n    :license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport logging\nimport os\nimport sys\n\nif sys.version_info[0] == 2:\n    sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages', 'pygments2'))\nelse:\n    sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages', 'pygments3'))\nfrom pygments.lexers import guess_lexer_for_filename\n\n\nlog = logging.getLogger(__name__)\n\n\ndef guess_language(file_name):\n    lexer = None\n    try:\n        with open(file_name) as f:\n            lexer = guess_lexer_for_filename(file_name, f.read(512000))\n    except:\n        pass\n    if lexer:\n        return str(lexer.name)\n    else:\n        return None\n\n\ndef number_lines_in_file(file_name):\n    lines = 0\n    try:\n        with open(file_name) as f:\n            for line in f:\n                lines += 1\n    except IOError:\n        return None\n    return lines\n\n\ndef get_file_stats(file_name):\n    stats = {\n        'language': guess_language(file_name),\n        'lines': number_lines_in_file(file_name),\n    }\n    return stats\n","sub_path":"wakatime/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"439523245","text":"# You can also import scripts that you put into the folder with controller\nfrom rcj_soccer_robot import RCJSoccerRobot, TIME_STEP\n\nimport my_functions\n# -----------------------------------------------------------------------------------------\n# ----------------------------------------------------------------IMPORT BUILT_IN LIBRARIES\n# Feel free to import built-in libraries here\n\n# ----------------------------------------------------------------- DO NOT CHANGE OR DELETE\nimport math\n\nOP_GOAL = [-0.856, 0] # Opponent's goal position [x, y]\nOWN_GOAL = [0.856, 0] # Own's goal position [x, y]\ncounter = 0\nscore = [0, 0]\nGOAL_FLAG = False\nTIME_FLAG = False\nIMPORT_FLAG = False\nGOAL_DEFAULT_DIST = 0.4\ngoal_time = 0\nSTOP_NEAR_GOALKEEPER = True\nKICK_FLAG = 0\nKICK_INTENSITY_DEFAULT = 5\nMIRROR_FLAG = True\nCONTANDO_FLAG = False\nBALL_AREA = [0, 0, 0, 0]\nINICIO = 0\nASSISTENCIA = True # follows the ball's line when attacking\nSUPORTE_GOLEIRO = False # helps the goalkeeper with 'body play'\n\n\n# ------------------------------------------------------------------------------------------\n\nclass MyRobot2(RCJSoccerRobot):\n    def run(self, counter, score, goal_time, GOAL_FLAG, TIME_FLAG, IMPORT_FLAG, KICK_FLAG, CONTANDO_FLAG, BALL_AREA,\n            INICIO):\n        # ----------------------------------------------------------------------------\n        if not IMPORT_FLAG:\n            # NOTE: both branches currently import the same module\n            if self.name[0] == 'Y':\n                import my_functions\n            else:\n                import my_functions\n            IMPORT_FLAG = True\n        # ----------------------------------------------------------------------------\n        while self.robot.step(TIME_STEP) != -1:\n            if self.is_new_data():\n                data = self.get_new_data()\n                # --------------------------------------------------- INITS CREATED BY ARMADILLOS\n                game_clock, counter = my_functions.game_time(counter)\n                score, goal_time, GOAL_FLAG, TIME_FLAG = my_functions.scoreboard(data, score, game_clock, goal_time,\n                                                                                 GOAL_FLAG, TIME_FLAG)\n\n                mais_proximo_do_gol = my_functions.theChoosenGoalKeeper(self, data)\n                mais_proximo_da_bola = my_functions.theChoosenOne(self, data)\n                distancias_do_meio, eu_sou_mais_proximo_do_meio = my_functions.mais_proximo_do_meio(self, data)\n                distancias_do_gol_adv, eu_sou_mais_proximo_do_gol_adv = my_functions.mais_proximo_do_gol_adv(self, data)\n\n                adv_mais_proximo, ladrao = my_functions.adv_mais_px(self, data)\n                jogador_mais_proximo, tabela = my_functions.jogador_mais_px(self, data)\n                TRUNCAMENTO_FLAG, CONTANDO_FLAG, BALL_AREA, INICIO = my_functions.truncamento(data, game_clock,\n                                                                                              CONTANDO_FLAG, BALL_AREA,\n                                                                                              INICIO)\n\n                if self.name[0] == 'Y':\n                    OP_GOAL = [0.75, 0] # Opponent's goal position [x, y]\n                    OWN_GOAL = [-0.75, 0] # Own's goal position [x, y]\n\n                    meus_gols = score[0]\n                    adv_gols = score[1]\n                else:\n                    OP_GOAL = [-0.75, 0] # Opponent's goal position [x, y]\n                    OWN_GOAL = [0.75, 0] # Own's goal position [x, y]\n                    meus_gols = score[1]\n                    adv_gols = score[0]\n\n                # print(self.name + \" COD1\")\n                if (mais_proximo_do_gol == True and mais_proximo_da_bola == True):\n                    if (jogador_mais_proximo == False and mais_proximo_da_bola == False and adv_mais_proximo == True):\n                        # print(self.name + \" COD2\")\n                        KICK_FLAG = my_functions.artilheiro(self, data, STOP_NEAR_GOALKEEPER, KICK_FLAG,\n                                                            KICK_INTENSITY_DEFAULT)\n                    else:\n                        # print(self.name +\" CONTINUO GOLEIRO\")\n                        # print(self.name + \" COD3\")\n                        my_functions.upg_goleiro(self, data, GOAL_DEFAULT_DIST)\n                elif (mais_proximo_do_gol == True and mais_proximo_da_bola == False):\n                    my_functions.upg_goleiro(self, data, GOAL_DEFAULT_DIST)\n                    # print(self.name + \" COD4\")\n                elif ((mais_proximo_do_gol == False and mais_proximo_da_bola == True) or (\n                        eu_sou_mais_proximo_do_gol_adv == True and mais_proximo_da_bola == True)): # check before or after the center\n                    # print(self.name + \" COD5\")\n                    KICK_FLAG = my_functions.artilheiro(self, data, STOP_NEAR_GOALKEEPER, KICK_FLAG,\n                                                        KICK_INTENSITY_DEFAULT)\n                elif mais_proximo_do_gol == False and mais_proximo_da_bola == False and eu_sou_mais_proximo_do_meio == True:\n                    # print(self.name + \" COD6\")\n                    CONTANDO_FLAG, BALL_AREA, INICIO = my_functions.banheira2(self, data, game_clock, CONTANDO_FLAG,\n                                                                              BALL_AREA, INICIO, ASSISTENCIA,\n                                                                              SUPORTE_GOLEIRO)\n                elif mais_proximo_do_gol == False and mais_proximo_da_bola == True and eu_sou_mais_proximo_do_meio == True:\n                    KICK_FLAG = my_functions.artilheiro(self, data, STOP_NEAR_GOALKEEPER, KICK_FLAG,\n                                                        KICK_INTENSITY_DEFAULT)\n                    # print(self.name + \" COD7\")\n                else:\n                    KICK_FLAG = my_functions.artilheiro(self, data, STOP_NEAR_GOALKEEPER, KICK_FLAG,\n                                                        KICK_INTENSITY_DEFAULT)\n                    # print(self.name + \" COD8\")\n\n                # ********************************************************************\n                # WRITE YOUR CODE HERE ***********************************************\n\n","sub_path":"rcj_soccer_team_yellow 
(SESI)/robot2.py","file_name":"robot2.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91072433","text":"# maximum path sum 1\n\nimport numpy as np\n\ntriangle = [[75],\n[95, 64],\n[17, 47, 82],\n[18, 35, 87, 10],\n[20, 4, 82, 47, 65],\n[19, 1, 23, 75, 3, 34],\n[88, 2, 77, 73, 7, 63, 67],\n[99, 65, 4, 28, 6, 16, 70, 92],\n[41, 41, 26, 56, 83, 40, 80, 70, 33],\n[41, 48, 72, 33, 47, 32, 37, 16, 94, 29],\n[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],\n[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],\n[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],\n[63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],\n[4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]]\n\n\n\ndef copy2DList(theList):\n new = []\n row=[]\n for i in theList:\n row=[]\n for j in i:\n row.append(j)\n new.append(row)\n return new\n\ndef checkPath(path):\n # checks if path is valid\n for i in range(1,len(path)):\n diff=path[i]-path[i-1]\n if diff>1 or diff<-1:\n print(\"\\n\\nError there is a jump somewhere\")\n return False\n elif path[i]>i:\n # here i wanna throw an index out of bounds exception...\n print(\"\\n\\nError index out of bounds\")\n return False\n return True\n\ndef initSumTriangle(triangle):\n for i in range(len(triangle)-1,0,-1):\n # for each row starting from the bottom and ending at the third last\n for j in range(len(triangle[i])-1):\n # for each element of this excluding the last one \n # (we sum the adjacent pairs and add this to the above thing)\n left = triangle[i][j]\n right = triangle[i][j+1]\n if left>=right:\n triangle[i-1][j]+=left\n else:\n triangle[i-1][j]+=right\n return triangle\n\ndef displayTriangle(triangle):\n for i in triangle:\n print(i)\n\ndef findPath(tri):\n # takes sumTriangle array and decides what the path should be\n path=[0]\n for i in range(1,len(tri)):\n left = tri[i][path[-1]]\n right = tri[i][path[-1]+1]\n if left>right:\n path.append(path[-1])\n else:\n path.append(path[-1]+1)\n return path\n\ndef heaviestPath(path,tri):\n # takes path and tri and returns the corresponding sum and then weights\n best=[]\n for i in range(len(tri)):\n best.append(tri[i][path[i]])\n return np.sum(best), best\n\n\n# main\n\n# this will be a triangle with each row gets the longest path added to it\nsumTriangle = copy2DList(triangle)\nsumTriangle = initSumTriangle(sumTriangle)\n\ndisplayTriangle(sumTriangle) # the answer is the top one here \ndisplayTriangle(triangle)\n\n\n# sequence of numbers which code for indices in path, it always starts with 0\npath = findPath(sumTriangle)\nif not checkPath(path):\n print(\"AAAAAAAAAAAHHHHHHHHHHH\"*100)\n\n# display the heaviest path\nbest = heaviestPath(path,triangle)\nprint(\"the best path is\\n\",path)\nprint(\"\\nthis path gives\\n\",best)\n#print(\"\\nthe sum is\", theSum)\n\n","sub_path":"python/misc/maximumpathsum1.py","file_name":"maximumpathsum1.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"205146297","text":"#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-\n\nfrom collections import namedtuple\nimport os.path as path\n\nfrom bes.system.check import check\nfrom bes.common.node import node\nfrom bes.property.cached_property import cached_property\nfrom bes.common.string_util import string_util\nfrom bes.key_value.key_value import key_value\nfrom bes.key_value.key_value_list import 
key_value_list\nfrom bes.system.log import log\nfrom bes.common.variable import variable\n\nfrom rebuild.recipe.recipe_data_manager import recipe_data_manager\nfrom rebuild.recipe.recipe_error import recipe_error\nfrom rebuild.recipe.recipe_parser_util import recipe_parser_util\nfrom rebuild.recipe.recipe_util import recipe_util\nfrom rebuild.recipe.value.masked_value import masked_value\nfrom rebuild.recipe.value.masked_value_list import masked_value_list\nfrom rebuild.recipe.value.value_origin import value_origin\nfrom rebuild.recipe.recipe_data_manager import recipe_data_manager\n\nfrom .ingest_method import ingest_method\n\nclass ingest_entry(namedtuple('ingest_entry', 'name, version, description, data, variables, method')):\n\n def __new__(clazz, name, version, description, data, variables, method):\n check.check_string(name)\n check.check_string(version)\n check.check_ingest_method(method)\n check.check_string(description, allow_none = True)\n check.check_masked_value_list(data, allow_none = True)\n check.check_masked_value_list(variables, allow_none = True)\n return clazz.__bases__[0].__new__(clazz, name, version, description, data, variables, method)\n\n def __str__(self):\n return self.to_string().strip() + '\\n'\n\n @cached_property\n def builtin_variables(self):\n return {\n 'NAME': self.name,\n 'VERSION': self.version,\n }\n\n def to_string(self, depth = 0, indent = 2):\n return recipe_util.root_node_to_string(self.to_node(), depth = depth, indent = indent)\n \n def to_node(self):\n root = node('entry {} {}'.format(self.name, self.version))\n root.add_child('')\n if self.description:\n root.children.append(recipe_util.description_to_node(self.description))\n root.add_child('')\n if self.data:\n root.children.append(recipe_util.variables_to_node('data', self.data))\n root.add_child('')\n if self.variables:\n root.children.append(recipe_util.variables_to_node('variables', self.variables))\n root.add_child('')\n if self.method:\n root.children.append(self.method.to_node())\n root.add_child('')\n return root\n\n def resolve_variables(self, system):\n if not self.variables:\n return key_value_list()\n return self.variables.resolve(system, 'key_values')\n \n def resolve_data(self, system):\n if not self.data:\n return []\n result = []\n for value in self.data:\n if value.mask_matches(system):\n result.append(tuple(value.value.value))\n return result\n\n def resolve_method_values(self, system, global_variables = None):\n check.check_dict(global_variables, allow_none = True)\n substitutions = self.resolve_variables(system).to_dict()\n substitutions.update(self.builtin_variables)\n substitutions.update(global_variables or {})\n result = self.method.resolve_values(system)\n result.substitute_variables(substitutions, patterns = variable.BRACKET)\n dm = recipe_data_manager()\n dm.set_from_tuples(self.resolve_data(system))\n for i, kv in enumerate(result):\n result[i] = key_value(kv.key, dm.substitute(kv.value))\n return result\n\n def data_substitutions(self, system):\n dm = recipe_data_manager()\n dm.set_from_tuples(self.resolve_data(system))\n return dm.substitutions()\n\n def download(self, system, global_variables, cache_dir, dest_dir):\n args = self.resolve_method_values(system, global_variables).to_dict()\n args['cache_dir'] = cache_dir\n return self.method.download(args)\n\n @property\n def origin(self):\n return value_origin.get_origin(self)\n\n @origin.setter\n def origin(self, origin):\n value_origin.set_origin(self, origin)\n\n @property\n def ingest_file(self):\n ingest_file = 
getattr(self, '_ingest_file', None)\n if not ingest_file:\n raise ValueError('ingest_file not set: {}'.format(str(self)))\n return ingest_file\n\n @ingest_file.setter\n def ingest_file(self, ingest_file):\n setattr(self, '_ingest_file', ingest_file)\n \ncheck.register_class(ingest_entry, include_seq = False)\n","sub_path":"lib/rebuild/ingest/ingest_entry.py","file_name":"ingest_entry.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75169635","text":"# The Keras model loading function does not play well with\n# Pathlib at the moment, so we are using the old os module\n# style\n\nimport os\nfrom pathlib import Path\n\nPWD = os.path.dirname(os.path.abspath(__file__))\nPACKAGE_ROOT = os.path.abspath(os.path.join(PWD, '..'))\nDATASET_DIR = os.path.join(PACKAGE_ROOT, 'datasets')\nTRAINED_MODEL_DIR = os.path.join(PACKAGE_ROOT, 'trained_models')\n# Allows you to specify a different folder for the source data\n# Useful for training the model on Kaggle\nDATA_FOLDER = os.environ.get('DATA_FOLDER', os.path.join(DATASET_DIR, 'test_data'))\n# Changed from: DATA_FOLDER = os.path.join(DATASET_DIR, 'v2-plant-seedlings-dataset')\n\n# MODEL PERSISTING\nMODEL_NAME = 'cnn_model'\nPIPELINE_NAME = 'cnn_pipe'\nCLASSES_NAME = 'classes'\nENCODER_NAME = 'encoder'\n\n# MODEL FITTING\nIMAGE_SIZE = 150 # 50 for testing, 150 for final model\nBATCH_SIZE = 10\nEPOCHS = int(os.environ.get('EPOCHS', 1)) # 1 for testing, 8 for final model\n\n# FOR TESTING ON LIMITED DATA\n# Normally, the following variable would be 12. However, when the training data\n# set does not contain at least one of each of the 12 plant types we need to \n# change the number here. This is only for testing purposes.\nNUM_OF_CLASSES = len({\n image_file.parent.name for image_file in Path(DATA_FOLDER).glob(\"*/*.png\")\n})\n# If we're just testing the code, we'll just use 1 sample for the test set\nPROP_OF_DATA_TEST = 0.2 \nif NUM_OF_CLASSES < 12:\n PROP_OF_DATA_TEST = 1\n NUM_OF_CLASSES = NUM_OF_CLASSES - PROP_OF_DATA_TEST\n\nwith open(os.path.join(PACKAGE_ROOT, 'VERSION')) as version_file:\n _version = version_file.read().strip()\n\nMODEL_FILE_NAME = f'{MODEL_NAME}_{_version}.h5'\nMODEL_PATH = os.path.join(TRAINED_MODEL_DIR, MODEL_FILE_NAME)\n\nPIPELINE_FILE_NAME = f'{PIPELINE_NAME}_{_version}.pkl'\nPIPELINE_PATH = os.path.join(TRAINED_MODEL_DIR, PIPELINE_FILE_NAME)\n\nCLASSES_FILE_NAME = f'{CLASSES_NAME}_{_version}.pkl'\nCLASSES_PATH = os.path.join(TRAINED_MODEL_DIR, CLASSES_FILE_NAME)\n\nENCODER_FILE_NAME = f'{ENCODER_NAME}_{_version}.pkl'\nENCODER_PATH = os.path.join(TRAINED_MODEL_DIR, ENCODER_FILE_NAME)\n","sub_path":"packages/neural_network_model/neural_network_model/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502037789","text":"#!./venv/bin/python\nimport os\nfrom contextual_robustness import ContextualRobustnessTest\nfrom contextual_robustness.transforms import test_transforms as transforms\nfrom contextual_robustness.datasets import load_gtsrb\nfrom contextual_robustness.utils import parse_indexes\n\n# reduce tensorflow log level\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef main(models, transform_names, outdir, sample_indexes):\n # load dataset\n _, _, X_test, Y_test, _ = load_gtsrb()\n\n # analyze each model on each transform\n for transform in transform_names:\n transform_name = 
transform.capitalize()\n for m in models:\n model_name = f'Model{m}'\n model_path = f'./models/gtsrb/model{m}.h5'\n print(f'{(\"-\"*80)}\\nAnalyzing {model_name} {transform_name}\\n{(\"-\"*80)}')\n cr = ContextualRobustnessTest(\n model_path=model_path,\n model_name=model_name,\n X=X_test,\n Y=Y_test,\n transform_fn=transforms[transform]['fn'],\n transform_args=transforms[transform]['args'],\n transform_name=transform_name,\n sample_indexes=sample_indexes\n )\n cr.analyze(epsilons_outpath=os.path.join(outdir, f'model{m}-{transform}.csv'))\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument('-m', '--models',\n nargs='+',\n default=['1a', '1b', '2a', '2b', '3a', '3b'],\n help='model(s) to analyze')\n parser.add_argument('-t', '--transforms',\n nargs='+',\n default=['haze', 'contrast', 'blur'],\n help='image transform(s) to test')\n parser.add_argument('-o', '--outdir',\n default='./results/gtsrb/test/data',\n help='output directory')\n parser.add_argument('-s', '--sampleindexes',\n nargs='*',\n default=[],\n help='list of indexes and/or ranges of samples to test (e.g. 1 2 10-20 100-110)')\n args = parser.parse_args()\n \n main(args.models, args.transforms, args.outdir, parse_indexes(args.sampleindexes))\n","sub_path":"examples/gtsrb_test_analysis.py","file_name":"gtsrb_test_analysis.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64629649","text":"import numpy as np\n\n\nclass AcronymBatcherLoader:\n def __init__(self, df, batch_size=32):\n self.batch_size = batch_size\n self.N = df.shape[0]\n self.data = df\n self.batch_ct, self.batches = 0, None\n\n def num_batches(self):\n return len(self.batches)\n\n def has_next(self):\n return self.batch_ct < self.num_batches()\n\n def extract_context_ids(self, ids, center_idx, target_window):\n start_idx = max(0, center_idx - target_window)\n end_idx = min(len(ids), center_idx + target_window + 1)\n\n left_context = ids[start_idx:center_idx]\n right_context = ids[center_idx + 1:end_idx]\n\n section_boundary_left = np.where(left_context <= 0)[0]\n section_boundary_right = np.where(right_context <= 0)[0]\n\n left_trunc_idx = 0 if len(section_boundary_left) == 0 else section_boundary_left[-1] + 1\n right_trunc_idx = len(right_context) if len(section_boundary_right) == 0 else section_boundary_right[0]\n\n left_context_truncated = left_context[left_trunc_idx:]\n right_context_truncated = right_context[:right_trunc_idx]\n\n return np.concatenate([left_context_truncated, right_context_truncated])\n\n def get_prev_batch(self):\n return self.batches[self.batch_ct - 1]\n\n def next(self, vocab, sf_tokenized_lf_map):\n batch = self.batches[self.batch_ct]\n self.batch_ct += 1\n batch_size = batch.shape[0]\n sf_ids = np.zeros([batch_size, ], dtype=int)\n target_lf_ids = np.zeros([batch_size, ], dtype=int)\n max_context_len = max([len(tt.split()) for tt in batch['trimmed_tokens'].tolist()])\n num_outputs = [len(sf_tokenized_lf_map[sf]) for sf in batch['sf'].tolist()]\n context_ids = np.zeros([batch_size, max_context_len])\n max_output_length = max(num_outputs)\n max_lf_len = 5\n lf_ids = np.zeros([batch_size, max_output_length, max_lf_len], dtype=int)\n lf_token_ct = np.zeros([batch_size, max_output_length])\n max_lf_token_ct = 0\n for batch_idx, (_, row) in enumerate(batch.iterrows()):\n row = row.to_dict()\n sf_ids[batch_idx] = vocab.get_id(row['sf'].lower())\n # Find target_sf index in 
sf_lf_map\n target_lf_ids[batch_idx] = row['used_target_lf_idx']\n context_id_seq = vocab.get_ids(row['trimmed_tokens'].split())\n context_ids[batch_idx, :len(context_id_seq)] = context_id_seq\n candidate_lfs = sf_tokenized_lf_map[row['sf']]\n for lf_idx, lf_toks in enumerate(candidate_lfs):\n lf_id_seq = vocab.get_ids(lf_toks)\n lf_id_seq_trunc = lf_id_seq[:min(max_lf_len, len(lf_id_seq))]\n num_toks = len(lf_id_seq_trunc)\n lf_ids[batch_idx, lf_idx, :num_toks] = lf_id_seq_trunc\n lf_token_ct[batch_idx, lf_idx] = num_toks\n max_lf_token_ct = max(max_lf_token_ct, num_toks)\n\n return (sf_ids, context_ids, lf_ids, target_lf_ids, lf_token_ct), num_outputs\n\n def reset(self, shuffle=True):\n self.batch_ct = 0\n if shuffle:\n self.data = self.data.sample(frac=1).reset_index(drop=True)\n self.batches = np.array_split(self.data, self.N // self.batch_size)\n","sub_path":"acronyms/acronym_batcher.py","file_name":"acronym_batcher.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292264567","text":"from config import *\nimport copy\nfrom point import Point\nfrom time import time\n\n\nclass PowerUp:\n\n def __init__(self, brick, ball):\n self.x = brick.x\n self.y = brick.y\n self.x_velocity = ball.x_velocity\n self.y_velocity = ball.y_velocity\n\n @staticmethod\n def power():\n print(\"Power Up Activated\")\n\n @staticmethod\n def unpower():\n print(\"Power Up De-activated\")\n\n def oldmove(self, paddle):\n ret_val = 0\n next_y = self.y + POWER_VEL\n\n if next_y == paddle.y and (paddle.x - paddle.length // 2) <= self.x <= (paddle.x + paddle.length // 2):\n ret_val = 1\n\n self.y += POWER_VEL\n\n if self.y >= SCREEN_ROWS - 1:\n ret_val = -1\n\n return ret_val\n\n def move(self, paddle):\n ret_val = 0\n collision = True\n cur_ball = Point(self.x, self.y)\n next_ball = Point(self.x + self.x_velocity, self.y + self.y_velocity)\n i = 0\n while collision and i < 10:\n collision = False\n i += 1\n\n # Collision with paddle\n paddle_left = Point(paddle.x - paddle.length // 2 - 0.5, paddle.y - 0.5)\n paddle_right = Point(paddle.x + paddle.length // 2 + 0.5, paddle.y - 0.5)\n if Point.is_intersecting(cur_ball, next_ball, paddle_left, paddle_right) and cur_ball.y != paddle_left.y:\n return 1\n\n # Collision with walls\n if next_ball.x <= 0.5:\n collision = True\n self.v_reflection(0.5, next_ball)\n cur_ball.x = 0.5\n cur_ball.y = Point.find_intersect_x(cur_ball, next_ball, cur_ball.x)\n\n if next_ball.x >= SCREEN_COLS - 1.5:\n collision = True\n self.v_reflection(SCREEN_COLS - 1.5, next_ball)\n cur_ball.x = SCREEN_COLS - 1.5\n cur_ball.y = Point.find_intersect_x(cur_ball, next_ball, cur_ball.x)\n\n if next_ball.y <= UPPER_WALL + 0.5:\n collision = True\n self.h_reflection(UPPER_WALL + 0.5, next_ball)\n cur_ball.y = UPPER_WALL + 0.5\n cur_ball.x = Point.find_intersect_y(cur_ball, next_ball, cur_ball.y)\n\n if next_ball.y >= SCREEN_ROWS - 1:\n ret_val = -1\n\n self.x = next_ball.x\n self.y = next_ball.y\n\n self.y_velocity = min(POWER_VEL_CAP, self.y_velocity + GRAVITY)\n return ret_val\n\n def h_reflection(self, y, next_ball):\n self.y_velocity = -1 * self.y_velocity\n next_ball.y = next_ball.y - 2 * (next_ball.y - y)\n\n def v_reflection(self, x, next_ball):\n self.x_velocity = -1 * self.x_velocity\n next_ball.x = next_ball.x - 2 * (next_ball.x - x)\n\n\nclass ExpandPaddle(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"E\"\n self.color = 
POWER_COLOR['ExpandPaddle']\n\n @staticmethod\n def power(paddle, balls):\n paddle.length += PAD_LEN_EXTENSION\n\n if paddle.x - paddle.length // 2 <= 0:\n paddle.x = 1 + paddle.length // 2\n if paddle.x + paddle.length // 2 >= SCREEN_COLS - 1:\n paddle.x = SCREEN_COLS - 2 - paddle.length // 2\n return True\n\n @staticmethod\n def unpower(paddle, balls):\n paddle.length -= PAD_LEN_EXTENSION\n\n\nclass ShrinkPaddle(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"S\"\n self.color = POWER_COLOR['ShrinkPaddle']\n\n @staticmethod\n def power(paddle, balls):\n if paddle.length > 3:\n paddle.length -= PAD_LEN_SHRINK\n return True\n return False\n\n @staticmethod\n def unpower(paddle, balls):\n paddle.length += PAD_LEN_SHRINK\n\n\nclass BallMultiplier(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"M\"\n self.color = POWER_COLOR['BallMultiplier']\n\n @staticmethod\n def power(paddle, balls):\n new_balls = []\n for ball in balls:\n ball1 = copy.deepcopy(ball)\n ball1.x_velocity += 1\n ball2 = copy.deepcopy(ball)\n ball2.x_velocity -= 1\n new_balls.append(ball1)\n new_balls.append(ball2)\n return new_balls\n\n\nclass FastBall(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"F\"\n self.color = POWER_COLOR['FastBall']\n\n @staticmethod\n def power(paddle, balls):\n for ball in balls:\n if ball.y_velocity > 0:\n ball.y_velocity += FAST_BALL_VEL\n else:\n ball.y_velocity -= FAST_BALL_VEL\n return True\n\n @staticmethod\n def unpower(paddle, balls):\n for ball in balls:\n if ball.y_velocity > 0:\n ball.y_velocity -= FAST_BALL_VEL\n else:\n ball.y_velocity += FAST_BALL_VEL\n\n\nclass ThruBall(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"T\"\n self.color = POWER_COLOR['ThruBall']\n\n @staticmethod\n def power(paddle, balls):\n for ball in balls:\n ball.strength = MAX_BALL_STRENGTH\n return True\n\n @staticmethod\n def unpower(paddle, balls):\n for ball in balls:\n ball.strength = 1\n\n\nclass PaddleGrab(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"G\"\n self.color = POWER_COLOR['PaddleGrab']\n\n @staticmethod\n def power(paddle, balls):\n paddle.is_sticky = True\n return True\n\n @staticmethod\n def unpower(paddle, balls):\n paddle.is_sticky = False\n\n\nclass ShootingPaddle(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"I\"\n self.color = POWER_COLOR['ShootingPaddle']\n\n @staticmethod\n def power(paddle, balls):\n paddle.is_shooter = True\n paddle.last_bullet = time() - BULLET_TIMEOUT\n return True\n\n @staticmethod\n def unpower(paddle, balls):\n paddle.is_shooter = False\n\n\nclass FireBall(PowerUp):\n\n def __init__(self, brick, ball):\n super().__init__(brick, ball)\n self.char = \"Y\"\n self.color = POWER_COLOR[\"FireBall\"]\n\n @staticmethod\n def power(paddle, balls):\n for ball in balls:\n ball.is_fireball = True\n return True\n\n @staticmethod\n def unpower(paddle, balls):\n for ball in balls:\n ball.is_fireball = False\n paddle.is_shooter = False\n","sub_path":"powerup.py","file_name":"powerup.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540897345","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\" Functions and utilities for exploring and manipulating data coverages.\n\nModule can be used alone 
or as part of a Snakemake workflow.\n"""\nimport logging\nimport numpy as np\nimport numpy.ma as ma\nimport rasterio\n\nfrom timeit import default_timer as timer\n\n\ndef create_value_coverage(input_raster, output_raster, compress='DEFLATE',\n                          verbose=False, logger=None):\n    \"\"\" Create a binary raster based on informative cell values.\n\n    All values that have information in input_raster are given value 1 in the\n    output_raster.\n\n    :param input_raster: String path to input raster.\n    :param output_raster: String path to output raster.\n    :param compress: String compression level used for the output raster.\n    :param verbose: Boolean indicating how much information is printed out.\n    :param logger: logger object to be used.\n    :return: Boolean success.\n    \"\"\"\n    # 1. Setup --------------------------------------------------------------\n\n    all_start = timer()\n\n    if not logger:\n        logging.basicConfig()\n        llogger = logging.getLogger('create_value_coverage')\n        llogger.setLevel(logging.DEBUG if verbose else logging.INFO)\n    else:\n        llogger = logger\n\n    # 2. Read and process raster ---------------------------------------------\n    # Read raster bands directly to Numpy arrays.\n    with rasterio.open(input_raster) as raster:\n        llogger.info(\"Reading and processing raster {}\".format(input_raster))\n        input_nodata = raster.nodata\n        # Read in the data\n        src = raster.read(1, masked=True)\n        mask = ma.getmask(src)\n        llogger.debug(\"Number of informative cells: {}\".format(np.sum(~mask)))\n        # Binarize input where there are values\n        np.place(src, mask == False, 1)\n\n        profile = raster.profile\n        profile.update(dtype=rasterio.uint8, count=1, compress=compress,\n                       nodata=255)\n\n        # Since we're saving a byte, replace old NoData value with 255\n        np.place(src, src == input_nodata, 255)\n\n        with rasterio.open(output_raster, 'w', **profile) as dst:\n            llogger.info(\"Writing output raster {}\".format(output_raster))\n            dst.write_mask(mask)\n            dst.write(src.astype(rasterio.uint8), 1)\n\n    all_end = timer()\n    all_elapsed = round(all_end - all_start, 2)\n    llogger.info(\" [TIME] Binarizing took {} sec\".format(all_elapsed))\n\n\ndef expand_value_coverage(input_raster, expand_raster, output_raster,\n                          union=False, compress='DEFLATE', verbose=False,\n                          logger=None):\n    \"\"\" Expand a raster based on occurrence of informative cells in another.\n\n    Argument \"union\" can be used to define if only the mask of the\n    expand raster should be used, or a union between the masks of the input\n    and expand rasters.\n\n    :param input_raster: String path to input raster.\n    :param expand_raster: String path to mask raster.\n    :param output_raster: String path to output raster.\n    :param union: Boolean, should the masks of input_raster and expand_raster\n        be unioned.\n    :param compress: String compression level used for the output raster.\n    :param verbose: Boolean indicating how much information is printed out.\n    :param logger: logger object to be used.\n    :return: Boolean success.\n    \"\"\"\n    # 1. Setup --------------------------------------------------------------\n\n    all_start = timer()\n\n    if not logger:\n        logging.basicConfig()\n        llogger = logging.getLogger('maskvalue_coverage')\n        llogger.setLevel(logging.DEBUG if verbose else logging.INFO)\n    else:\n        llogger = logger\n\n    # 2. Read and process raster ---------------------------------------------\n\n    # First, get the mask and dtype from the mask raster\n    expand_raster = rasterio.open(expand_raster)\n    expand_raster_src = expand_raster.read(1, masked=True)\n    expand_mask = expand_raster_src.mask\n\n    # Read raster bands directly to Numpy arrays.\n    with rasterio.open(input_raster) as raster:\n        llogger.info(\"Reading and processing raster {}\".format(input_raster))\n        input_nodata = raster.nodata\n\n        # Read in the data\n        src = raster.read(1, masked=True)\n        src_dtype = src.dtype\n        src_mask = src.mask\n\n        # Perform a union on the masks if needed\n        if union:\n            llogger.info(\"[NOTE] Using union of masks\")\n            expand_mask = ma.mask_or(expand_mask, src_mask)\n\n        llogger.debug(\"Number of informative cells in the data: {}\".format(np.sum(~src_mask)))\n        llogger.debug(\"Number of informative cells in the expand mask: {}\".format(np.sum(~expand_mask)))\n\n        # Change the mask and the underlying values\n        src.mask = expand_mask\n        src.data[src.mask] = input_nodata\n        # There might be some NoData values lurking around, replace them with\n        # zero.\n        src.data[src == input_nodata] = 0.0\n        \n        profile = raster.profile\n        profile.update(dtype=src_dtype, count=1, compress=compress,\n                       nodata=input_nodata)\n\n        with rasterio.open(output_raster, 'w', **profile) as dst:\n            llogger.info(\"Writing output raster {}\".format(output_raster))\n            dst.write(src.astype(src_dtype), 1)\n\n    all_end = timer()\n    all_elapsed = round(all_end - all_start, 2)\n    llogger.info(\" [TIME] Masking took {} sec\".format(all_elapsed))\n","sub_path":"src/03_post_processing/data_coverage.py","file_name":"data_coverage.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"204026000","text":"\"\"\" Urls for welbornprod sub-apps. 
\"\"\"\nfrom django.conf.urls import patterns, include, url\nfrom apps import views as appviews\n\n# Patterns for apps.\nurlpatterns = patterns('',\n # apps index\n url(r'^/?$', appviews.view_index),\n # phonewords\n url(r'^[Pp]hone[Ww]ords/?',\n include('apps.phonewords.urls')),\n # paste\n url(r'^[Pp]aste/?',\n include('apps.paste.urls')),\n )\n","sub_path":"apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"65906087","text":"import os\nimport glob\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig()\nlogger.setLevel(logging.INFO)\nlogger.setLevel(logging.DEBUG)\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nclass Image(models.Model):\n \"\"\" Represents an Avatar of the champion.\n The name is missleading here but for now it is not renamed avatar.\n \"\"\"\n # TODO: rename to avatar or create Avatar a subclass of Image.\n name = models.CharField(max_length = 200)\n human_name = models.CharField(\n max_length = 200,\n null = True,\n )\n path = models.CharField(max_length = 200)\n regex = static(\n settings.STATIC_URL,\n document_root=settings.STATIC_ROOT\n )[0]\n subfolder = 'avatars'\n\n @classmethod\n def full_file_path(cls, champion_name):\n return os.path.join(\n cls.subfolder, '{}_Square_0.png'.format(champion_name.capitalize())\n )\n\n def __unicode__(self):\n return self.name\n\n def file_exists(self):\n \"\"\" Tests if the registered file exists.\n\n :rtype: bool\n \"\"\"\n res = self.regex.resolve(\n os.path.join(\n settings.STATIC_URL[1:],\n self.path\n )\n )\n\n if res:\n kwargs = res.kwargs\n return os.path.exists(\n os.path.join(\n kwargs['document_root'],\n kwargs['path']\n )\n )\n else:\n return res\n\n class Meta:\n ordering = ('name',)\n\n\nclass Skin(Image):\n \"\"\" Represents the skins of champion.\n Skin is a large picture with the different style of the champion.\n \"\"\"\n subfolder = 'skins'\n\n @classmethod\n def file_paths(cls, champion_name):\n \"\"\" Getting all the path of skins.\n\n :type champion_name: string\n :param champion_name: name of the champion the skins are related to.\n\n :rtype: list\n :returns: all the paths to the skins.\n \"\"\"\n paths = glob.glob(\n os.path.join(\n settings.STATIC_ROOT,\n cls.subfolder,\n '{}_*.jpg'.format(champion_name.capitalize())\n )\n )\n logger.debug('Skin paths: {}'.format(paths))\n return paths\n\n @classmethod\n def full_file_path(cls, file_path):\n return os.path.join(cls.subfolder, file_path)\n\n\nclass Tag(models.Model):\n \"\"\" Represents a tag that will characterize.\"\"\"\n name = models.CharField(max_length = 200)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n ordering = ('name',)\n\n\nclass Champion(models.Model):\n \"\"\" Represents a champion of the game. 
\"\"\"\n name = models.CharField(max_length = 200)\n avatar = models.OneToOneField(\n Image,\n related_name = 'champion_avatar',\n null = True,\n blank = True\n )\n skins = models.ManyToManyField(\n Skin,\n related_name = 'champion_skins',\n null = True,\n blank = True\n )\n tags = models.ManyToManyField(\n Tag,\n related_name = 'tags_set',\n null = True,\n blank = True\n )\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n ordering = ('name',)\n\n def update(self):\n \"\"\" Try to find skins and avatar of the champion.\"\"\"\n\n if self.avatar is None:\n avatar = Image(\n path = Image.full_file_path(self.name),\n name = self.name\n )\n avatar.save()\n self.avatar = avatar\n self.save()\n\n if not self.avatar.file_exists():\n logger.warning(\n '{} avatar doesn\\'t exists ({})'.format(\n self.name,\n self.avatar.path\n )\n )\n\n for skin_file in Skin.file_paths(self.name):\n skin_name = os.path.split(skin_file)[-1]\n logger.debug('{}: {}'.format(skin_name, skin_file))\n\n # Using a try except here to make it easier\n # if the try runs that mean a skin has been already created\n # for this file and linked to the champion\n # So that mean we are all good here.\n try:\n skin = self.skins.get(name = skin_name)\n logger.debug('{} already linked to {}'.format(skin, self.name))\n\n # if ObjectDoesNotExist exception is catch that mean the skin doesn't exist\n # and has to be created.\n except ObjectDoesNotExist:\n skin = Skin(\n path = Skin.full_file_path(skin_name),\n name = skin_name\n )\n\n skin.save()\n self.skins.add(skin)\n self.save()\n logger.debug(\n '{} has been linked to {}'.format(skin, self.name)\n )\n\n\n\n\n\n\n\n\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"141223009","text":"# coding=utf-8\n# Copyright (c) 2021 THUML @ Tsinghua University\n# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# Copyright 2023 The HuggingFace Inc. team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Autoformer model.\"\"\"\n\nimport math\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutput,\n ModelOutput,\n SampleTSPredictionOutput,\n Seq2SeqTSPredictionOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput\nfrom ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings\nfrom .configuration_autoformer import AutoformerConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"AutoformerConfig\"\n\n\n@dataclass\nclass AutoFormerDecoderOutput(ModelOutput):\n \"\"\"\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Trend tensor for each time series.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if\n `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n input) to speed up sequential decoding.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor = None\n trend: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass AutoformerModelOutput(ModelOutput):\n \"\"\"\n Autoformer model output that contains the additional trend output.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the decoder of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Trend tensor for each time series.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n 
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):\n Shift values of each time series' context window which is used to give the model inputs of the same\n magnitude and then used to shift back to the original magnitude.\n scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):\n Scaling values of each time series' context window which is used to give the model inputs of the same\n magnitude and then used to rescale back to the original magnitude.\n static_features: (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*):\n Static features of each time series' in a batch which are copied to the covariates at inference time.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor = None\n trend: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n loc: Optional[torch.FloatTensor] = None\n scale: Optional[torch.FloatTensor] = None\n static_features: Optional[torch.FloatTensor] = None\n\n\nAUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"huggingface/autoformer-tourism-monthly\",\n # See all Autoformer models at https://huggingface.co/models?filter=autoformer\n]\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Autoformer\nclass AutoformerFeatureEmbedder(nn.Module):\n \"\"\"\n Embed a sequence of categorical features.\n\n Args:\n cardinalities (`list[int]`):\n List of cardinalities of the categorical features.\n embedding_dims (`list[int]`):\n List of embedding dimensions of the categorical features.\n \"\"\"\n\n def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:\n super().__init__()\n\n self.num_features = len(cardinalities)\n self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])\n\n def forward(self, features: torch.Tensor) -> torch.Tensor:\n if self.num_features > 1:\n # we slice the last dimension, giving an array of length\n # self.num_features with shape (N,T) or (N)\n cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)\n else:\n cat_feature_slices = [features]\n\n return torch.cat(\n [\n embed(cat_feature_slice.squeeze(-1))\n for 
embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)\n ],\n dim=-1,\n )\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeries->Autoformer\nclass AutoformerStdScaler(nn.Module):\n \"\"\"\n Standardize features by calculating the mean and scaling along some given dimension `dim`, and then normalizes it\n by subtracting from the mean and dividing by the standard deviation.\n\n Args:\n dim (`int`):\n Dimension along which to calculate the mean and standard deviation.\n keepdim (`bool`, *optional*, defaults to `False`):\n Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it.\n minimum_scale (`float`, *optional*, defaults to 1e-5):\n Default scale that is used for elements that are constantly zero along dimension `dim`.\n \"\"\"\n\n def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5):\n super().__init__()\n if not dim > 0:\n raise ValueError(\"Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0\")\n self.dim = dim\n self.keepdim = keepdim\n self.minimum_scale = minimum_scale\n\n @torch.no_grad()\n def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n denominator = weights.sum(self.dim, keepdim=self.keepdim)\n denominator = denominator.clamp_min(1.0)\n loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator\n\n variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator\n scale = torch.sqrt(variance + self.minimum_scale)\n return (data - loc) / scale, loc, scale\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeries->Autoformer\nclass AutoformerMeanScaler(nn.Module):\n \"\"\"\n Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data\n accordingly.\n\n Args:\n dim (`int`):\n Dimension along which to compute the scale.\n keepdim (`bool`, *optional*, defaults to `False`):\n Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it.\n default_scale (`float`, *optional*, defaults to `None`):\n Default scale that is used for elements that are constantly zero. 
If `None`, we use the scale of the batch.\n minimum_scale (`float`, *optional*, defaults to 1e-10):\n Default minimum possible scale that is used for any item.\n \"\"\"\n\n def __init__(\n self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10\n ):\n super().__init__()\n self.dim = dim\n self.keepdim = keepdim\n self.minimum_scale = minimum_scale\n self.default_scale = default_scale\n\n @torch.no_grad()\n def forward(\n self, data: torch.Tensor, observed_indicator: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n # shape: (N, [C], T=1)\n ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)\n num_observed = observed_indicator.sum(self.dim, keepdim=True)\n\n scale = ts_sum / torch.clamp(num_observed, min=1)\n\n # If `default_scale` is provided, we use it, otherwise we use the scale\n # of the batch.\n if self.default_scale is None:\n batch_sum = ts_sum.sum(dim=0)\n batch_observations = torch.clamp(num_observed.sum(0), min=1)\n default_scale = torch.squeeze(batch_sum / batch_observations)\n else:\n default_scale = self.default_scale * torch.ones_like(scale)\n\n # apply default scale where there are no observations\n scale = torch.where(num_observed > 0, scale, default_scale)\n\n # ensure the scale is at least `self.minimum_scale`\n scale = torch.clamp(scale, min=self.minimum_scale)\n scaled_data = data / scale\n\n if not self.keepdim:\n scale = scale.squeeze(dim=self.dim)\n\n return scaled_data, torch.zeros_like(scale), scale\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeries->Autoformer\nclass AutoformerNOPScaler(nn.Module):\n \"\"\"\n Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data.\n\n Args:\n dim (`int`):\n Dimension along which to compute the scale.\n keepdim (`bool`, *optional*, defaults to `False`):\n Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it.\n \"\"\"\n\n def __init__(self, dim: int, keepdim: bool = False):\n super().__init__()\n self.dim = dim\n self.keepdim = keepdim\n\n def forward(\n self, data: torch.Tensor, observed_indicator: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)\n loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)\n return data, loc, scale\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average\ndef weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:\n \"\"\"\n Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,\n meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.\n\n Args:\n input_tensor (`torch.FloatTensor`):\n Input tensor, of which the average must be computed.\n weights (`torch.FloatTensor`, *optional*):\n Weights tensor, of the same shape as `input_tensor`.\n dim (`int`, *optional*):\n The dim along which to average `input_tensor`.\n\n Returns:\n `torch.FloatTensor`: The tensor with values averaged along the specified `dim`.\n \"\"\"\n if weights is not None:\n weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))\n sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), 
min=1.0)\n return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights\n else:\n return input_tensor.mean(dim=dim)\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll\ndef nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Computes the negative log likelihood loss from input distribution with respect to target.\n \"\"\"\n return -input.log_prob(target)\n\n\n# Copied from transformers.models.bart.modeling_bart._make_causal_mask\ndef _make_causal_mask(\n input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0\n):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz, tgt_len = input_ids_shape\n mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)\n mask_cond = torch.arange(mask.size(-1), device=device)\n mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)\n mask = mask.to(dtype)\n\n if past_key_values_length > 0:\n mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)\n return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)\n\n\n# Copied from transformers.models.bart.modeling_bart._expand_mask\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n bsz, src_len = mask.size()\n tgt_len = tgt_len if tgt_len is not None else src_len\n\n expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)\n\n inverted_mask = 1.0 - expanded_mask\n\n return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)\n\n\n# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Autoformer\nclass AutoformerSinusoidalPositionalEmbedding(nn.Embedding):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\"\"\"\n\n def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:\n super().__init__(num_positions, embedding_dim)\n self.weight = self._init_weight(self.weight)\n\n @staticmethod\n def _init_weight(out: nn.Parameter) -> nn.Parameter:\n \"\"\"\n Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in\n the 2nd half of the vector. 
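That is, they occupy the slice 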
[dim // 2:]\n \"\"\"\n n_pos, dim = out.shape\n position_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]\n )\n out.requires_grad = False # set early to avoid an error in pytorch-1.8+\n sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1\n out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))\n out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))\n out.detach_()\n return out\n\n @torch.no_grad()\n def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:\n \"\"\"`input_ids_shape` is expected to be [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids_shape[:2]\n positions = torch.arange(\n past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device\n )\n return super().forward(positions)\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesValueEmbedding with TimeSeries->Autoformer\nclass AutoformerValueEmbedding(nn.Module):\n def __init__(self, feature_size, d_model):\n super().__init__()\n self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)\n\n def forward(self, x):\n return self.value_projection(x)\n\n\n# Class based on\n# https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L39\n# where AutoformerSeriesDecompositionLayer is series_decomp + moving_average\nclass AutoformerSeriesDecompositionLayer(nn.Module):\n \"\"\"\n Returns the trend and the seasonal parts of the time series. Calculated as:\n\n x_trend = AvgPool(Padding(X)) and x_seasonal = X - x_trend\n \"\"\"\n\n def __init__(self, config: AutoformerConfig):\n super().__init__()\n self.kernel_size = config.moving_average\n self.avg = nn.AvgPool1d(kernel_size=self.kernel_size, stride=1, padding=0)\n\n def forward(self, x):\n \"\"\"Input shape: Batch x Time x EMBED_DIM\"\"\"\n # padding on the both ends of time series\n num_of_pads = (self.kernel_size - 1) // 2\n front = x[:, 0:1, :].repeat(1, num_of_pads, 1)\n end = x[:, -1:, :].repeat(1, num_of_pads, 1)\n x_padded = torch.cat([front, x, end], dim=1)\n\n # calculate the trend and seasonal part of the series\n x_trend = self.avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1)\n x_seasonal = x - x_trend\n return x_seasonal, x_trend\n\n\n# Class based on\n# https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L6\n# where AutoformerLayernorm is my_Layernorm\nclass AutoformerLayernorm(nn.Module):\n \"\"\"\n Special designed layer normalization for the seasonal part, calculated as: AutoformerLayernorm(x) = nn.LayerNorm(x)\n - torch.mean(nn.LayerNorm(x))\n \"\"\"\n\n def __init__(self, config: AutoformerConfig):\n super().__init__()\n self.layernorm = nn.LayerNorm(config.d_model)\n\n def forward(self, x):\n x_hat = self.layernorm(x)\n bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)\n return x_hat - bias\n\n\nclass AutoformerAttention(nn.Module):\n \"\"\"\n AutoCorrelation Mechanism with the following two phases:\n (1) period-based dependencies discovery (2) time delay aggregation\n This block replace the canonical self-attention mechanism.\n \"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n autocorrelation_factor: int = 3,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n 
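# `autocorrelation_factor` controls how many time-delay candidates are kept during\n        # aggregation: `forward` selects the top `int(self.autocorrelation_factor * math.log(time_length))`\n        # autocorrelation delays (see the time delay aggregation step below).\n        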
self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n\n if (self.head_dim * num_heads) != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim**-0.5\n self.is_decoder = is_decoder\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n self.autocorrelation_factor = autocorrelation_factor\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n\n bsz, tgt_len, _ = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states)\n # get key, value proj\n # `past_key_value[0].shape[2] == key_value_states.shape[1]`\n # is checking that the `sequence_length` of the `past_key_value` is the same as\n # the provided `key_value_states` to support prefix tuning\n if (\n is_cross_attention\n and past_key_value is not None\n and past_key_value[0].shape[2] == key_value_states.shape[1]\n ):\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n\n # (1) period-based dependencies discovery\n # Resize (truncation or zero filling)\n queries_time_length = query_states.size(1)\n values_time_length = value_states.size(1)\n if queries_time_length > values_time_length:\n query_states = query_states[:, : (queries_time_length - values_time_length), :]\n zeros = torch.zeros_like(query_states).float()\n value_states = torch.cat([value_states, zeros], dim=1)\n key_states = torch.cat([key_states, zeros], dim=1)\n else:\n value_states = value_states[:, :queries_time_length, :]\n key_states = key_states[:, :queries_time_length, :]\n\n query_states_fft = torch.fft.rfft(query_states, n=tgt_len, dim=1)\n key_states_fft = torch.fft.rfft(key_states, n=tgt_len, dim=1)\n attn_weights = query_states_fft * torch.conj(key_states_fft)\n attn_weights = torch.fft.irfft(attn_weights, n=tgt_len, dim=1) # Autocorrelation(Q,K)\n\n src_len = key_states.size(1)\n channel = key_states.size(2)\n\n if attn_weights.size() != (bsz * self.num_heads, tgt_len, channel):\n raise ValueError(\n f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, channel)}, but is\"\n f\" {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if layer_head_mask is not None:\n if layer_head_mask.size() != (self.num_heads,):\n raise ValueError(\n f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is\"\n f\" {layer_head_mask.size()}\"\n )\n attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, channel)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, channel)\n\n if output_attentions:\n # this operation is a bit awkward, but it's required to\n # make sure that attn_weights keeps its gradient.\n # In order to do so, attn_weights have to be reshaped\n # twice and have to be reused in the following\n attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, channel)\n attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, channel)\n else:\n attn_weights_reshaped = None\n\n # time delay aggregation\n time_length = value_states.size(1)\n autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel)\n\n # find top k autocorrelations delays\n top_k = int(self.autocorrelation_factor * math.log(time_length))\n autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1)) # bsz x tgt_len\n if self.training:\n autocorrelations_mean_on_bsz = torch.mean(autocorrelations_mean_on_head_channel, dim=0)\n _, top_k_delays_index = torch.topk(autocorrelations_mean_on_bsz, top_k)\n top_k_autocorrelations = torch.stack(\n [autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in 
range(top_k)], dim=-1\n )\n else:\n top_k_autocorrelations, top_k_delays_index = torch.topk(\n autocorrelations_mean_on_head_channel, top_k, dim=1\n )\n\n top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1) # bsz x top_k\n\n # compute aggregation: value_states.roll(delay) * top_k_autocorrelations(delay)\n if not self.training:\n # used for compute values_states.roll(delay) in inference\n tmp_values = value_states.repeat(1, 2, 1)\n init_index = (\n torch.arange(time_length)\n .view(1, -1, 1)\n .repeat(bsz * self.num_heads, 1, channel)\n .to(value_states.device)\n )\n\n delays_agg = torch.zeros_like(value_states).float() # bsz x time_length x channel\n for i in range(top_k):\n # compute value_states roll delay\n if not self.training:\n tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat(\n self.num_heads, tgt_len, channel\n )\n value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay)\n else:\n value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1)\n\n # aggregation\n top_k_autocorrelations_at_delay = (\n top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel)\n )\n delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay\n\n attn_output = delays_agg.contiguous()\n\n if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )\n\n attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(1, 2)\n\n # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be\n # partitioned across GPUs when using tensor-parallelism.\n attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output, attn_weights_reshaped, past_key_value\n\n\nclass AutoformerEncoderLayer(nn.Module):\n def __init__(self, config: AutoformerConfig):\n super().__init__()\n self.embed_dim = config.d_model\n self.self_attn = AutoformerAttention(\n embed_dim=self.embed_dim,\n num_heads=config.encoder_attention_heads,\n dropout=config.attention_dropout,\n autocorrelation_factor=config.autocorrelation_factor,\n )\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)\n self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = AutoformerLayernorm(config)\n self.decomp1 = AutoformerSeriesDecompositionLayer(config)\n self.decomp2 = AutoformerSeriesDecompositionLayer(config)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: torch.FloatTensor,\n layer_head_mask: torch.FloatTensor,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n output_attentions (`bool`, 
*optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n hidden_states, attn_weights, _ = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n output_attentions=output_attentions,\n )\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n # added layer norm here as an improvement\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, _ = self.decomp1(hidden_states)\n\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states, _ = self.decomp2(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n if hidden_states.dtype == torch.float16 and (\n torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()\n ):\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass AutoformerDecoderLayer(nn.Module):\n def __init__(self, config: AutoformerConfig):\n super().__init__()\n self.embed_dim = config.d_model\n\n self.self_attn = AutoformerAttention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n autocorrelation_factor=config.autocorrelation_factor,\n )\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.encoder_attn = AutoformerAttention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n autocorrelation_factor=config.autocorrelation_factor,\n )\n self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)\n self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = AutoformerLayernorm(config)\n\n self.decomp1 = AutoformerSeriesDecompositionLayer(config)\n self.decomp2 = AutoformerSeriesDecompositionLayer(config)\n self.decomp3 = AutoformerSeriesDecompositionLayer(config)\n\n # source: https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/layers/Autoformer_EncDec.py#L128\n self.trend_projection = nn.Conv1d(\n in_channels=self.embed_dim,\n out_channels=config.feature_size,\n kernel_size=3,\n stride=1,\n padding=1,\n padding_mode=\"circular\",\n bias=False,\n )\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n cross_attn_layer_head_mask: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = True,\n ) -> Tuple[torch.FloatTensor, 
Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (`torch.FloatTensor`):\n cross attention input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of\n size `(decoder_attention_heads,)`.\n past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n use_cache: (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the `present_key_value` state to be used for subsequent\n decoding.\n \"\"\"\n residual = hidden_states\n\n # Self Attention\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n output_attentions=output_attentions,\n )\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states, trend1 = self.decomp1(hidden_states)\n # added layer norm here as an improvement\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n layer_head_mask=cross_attn_layer_head_mask,\n past_key_value=cross_attn_past_key_value,\n output_attentions=output_attentions,\n )\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states, trend2 = self.decomp2(hidden_states)\n # added layer norm here as an improvement\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = 
self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states, trend3 = self.decomp3(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n if encoder_hidden_states is not None:\n residual_trend = trend1 + trend2 + trend3\n else:\n residual_trend = trend1 + trend3\n residual_trend = self.trend_projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)\n outputs = ((hidden_states, residual_trend),)\n\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs\n\n\nclass AutoformerPreTrainedModel(PreTrainedModel):\n config_class = AutoformerConfig\n base_model_prefix = \"model\"\n main_input_name = \"past_values\"\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n std = self.config.init_std\n if isinstance(module, (nn.Linear, nn.Conv1d)):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, AutoformerSinusoidalPositionalEmbedding):\n pass\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (AutoformerDecoder, AutoformerEncoder)):\n module.gradient_checkpointing = value\n\n\nAUTOFORMER_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`AutoformerConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nAUTOFORMER_INPUTS_DOCSTRING = r\"\"\"\n Args:\n past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Past values of the time series, that serve as context in order to predict the future. These values may\n contain lags, i.e. additional values from the past which are added in order to serve as \"extra context\".\n The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as\n `static_categorical_features`, `static_real_features`, `past_time_features`).\n\n The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.\n\n Missing values need to be replaced with zeros.\n\n past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):\n Optional time features, which the model internally will add to `past_values`. These could be things like\n \"month of year\", \"day of the month\", etc. encoded as vectors (for instance as Fourier features). These\n could also be so-called \"age\" features, which basically help the model know \"at which point in life\" a\n time-series is. 
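For instance, an age feature can simply count the number of time steps elapsed since the first observation of the series. 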
Age features have small values for distant past time steps and increase monotonically the\n more we approach the current time step.\n\n These features serve as the \"positional encodings\" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional time features.\n\n The Autoformer only learns additional embeddings for `static_categorical_features`.\n\n past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in\n `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).\n\n static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):\n Optional static categorical features for which the model will learn an embedding, which it will add to the\n values of the time series.\n\n Static categorical features are features which have the same value for all time steps (static over time).\n\n A typical example of a static categorical feature is a time series ID.\n\n static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):\n Optional static real features which the model will add to the values of the time series.\n\n Static real features are features which have the same value for all time steps (static over time).\n\n A typical example of a static real feature is promotion information.\n\n future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):\n Future values of the time series, that serve as labels for the model. The `future_values` is what the\n Transformer needs to learn to output, given the `past_values`.\n\n See the demo notebook and code snippets for details.\n\n Missing values need to be replaced with zeros.\n\n future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):\n Optional time features, which the model internally will add to `future_values`. These could be things like\n \"month of year\", \"day of the month\", etc. encoded as vectors (for instance as Fourier features). These\n could also be so-called \"age\" features, which basically help the model know \"at which point in life\" a\n time-series is. Age features have small values for distant past time steps and increase monotonically the\n more we approach the current time step.\n\n These features serve as the \"positional encodings\" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional features.\n\n The Autoformer only learns additional embeddings for `static_categorical_features`.\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Mask to avoid performing attention on certain token indices. 
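In most cases this argument can be left unset. 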
By default, a causal mask will be used, to\n make sure the model can only look at previous inputs in order to predict the future.\n\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerEncoder with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer\nclass AutoformerEncoder(AutoformerPreTrainedModel):\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n [`AutoformerEncoderLayer`].\n\n Args:\n config: AutoformerConfig\n \"\"\"\n\n def __init__(self, config: AutoformerConfig):\n super().__init__(config)\n\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n if config.prediction_length is None:\n raise ValueError(\"The `prediction_length` config needs to be specified.\")\n\n self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)\n self.embed_positions = AutoformerSinusoidalPositionalEmbedding(\n config.context_length + config.prediction_length, config.d_model\n )\n self.layers = nn.ModuleList([AutoformerEncoderLayer(config) for _ in range(config.encoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(config.d_model)\n\n self.gradient_checkpointing = False\n # Initialize weights and apply final processing\n self.post_init()\n\n def forward(\n self,\n attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutput]:\n r\"\"\"\n Args:\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n hidden_states = self.value_embedding(inputs_embeds)\n embed_pos = self.embed_positions(inputs_embeds.size())\n\n hidden_states = self.layernorm_embedding(hidden_states + embed_pos)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n\n # expand attention_mask\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)\n\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n # check if head_mask has a correct number of layers specified if desired\n if head_mask is not None:\n if head_mask.size()[0] != (len(self.layers)):\n raise ValueError(\n f\"The head_mask should be specified for {len(self.layers)} layers, but it is for\"\n f\" {head_mask.size()[0]}.\"\n )\n\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n to_drop = False\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop: # skip the layer\n to_drop = True\n\n if to_drop:\n layer_outputs = (None, None)\n else:\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(encoder_layer),\n hidden_states,\n attention_mask,\n (head_mask[idx] if head_mask is not None else None),\n )\n else:\n layer_outputs = encoder_layer(\n hidden_states,\n attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n output_attentions=output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n\nclass AutoformerDecoder(AutoformerPreTrainedModel):\n \"\"\"\n Transformer decoder consisting of `config.decoder_layers` layers. 
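Unlike a plain Transformer decoder, it also accumulates a residual trend emitted by every layer into a separate trend output. 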
Each layer is a [`AutoformerDecoderLayer`]\n\n Args:\n config: AutoformerConfig\n \"\"\"\n\n def __init__(self, config: AutoformerConfig):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n if config.prediction_length is None:\n raise ValueError(\"The `prediction_length` config needs to be specified.\")\n\n self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)\n self.embed_positions = AutoformerSinusoidalPositionalEmbedding(\n config.context_length + config.prediction_length, config.d_model\n )\n self.layers = nn.ModuleList([AutoformerDecoderLayer(config) for _ in range(config.decoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(config.d_model)\n\n # https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/models/Autoformer.py#L74\n self.seasonality_projection = nn.Linear(config.d_model, config.feature_size)\n\n self.gradient_checkpointing = False\n # Initialize weights and apply final processing\n self.post_init()\n\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(\n input_shape,\n inputs_embeds.dtype,\n device=inputs_embeds.device,\n past_key_values_length=past_key_values_length,\n ).to(inputs_embeds.device)\n\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(\n inputs_embeds.device\n )\n combined_attention_mask = (\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask\n )\n\n return combined_attention_mask\n\n def forward(\n self,\n trend: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, AutoFormerDecoderOutput]:\n r\"\"\"\n Args:\n trend (`torch.FloatTensor` of shape `(batch_size, prediction_length, feature_size)`, *optional*):\n The trend sequence to be fed to the decoder.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. 
Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing\n cross-attention on hidden heads. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of\n shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If `use_cache` is True, `past_key_values` key value states are returned and can be used to speed up\n decoding (see `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n input_shape = inputs_embeds.size()[:-1]\n\n # expand encoder attention mask\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n\n hidden_states = self.value_embedding(inputs_embeds)\n embed_pos = self.embed_positions(\n inputs_embeds.size(), past_key_values_length=self.config.context_length - self.config.label_length\n )\n hidden_states = self.layernorm_embedding(hidden_states + embed_pos)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None\n next_decoder_cache = () if use_cache else None\n\n # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired\n for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], [\"head_mask\", \"cross_attn_head_mask\"]):\n if attn_mask is not None:\n if attn_mask.size()[0] != (len(self.layers)):\n raise ValueError(\n f\"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for\"\n f\" {head_mask.size()[0]}.\"\n )\n\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, output_attentions, use_cache)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(decoder_layer),\n hidden_states,\n attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n head_mask[idx] if head_mask is not None else None,\n cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,\n None,\n )\n else:\n layer_outputs = decoder_layer(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n cross_attn_layer_head_mask=(\n cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None\n ),\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n (hidden_states, residual_trend) = layer_outputs[0]\n trend = trend + residual_trend\n\n if use_cache:\n next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n\n # project seasonality representation\n hidden_states = self.seasonality_projection(hidden_states)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, trend, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]\n if v is not None\n )\n return AutoFormerDecoderOutput(\n last_hidden_state=hidden_states,\n trend=trend,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"The bare Autoformer Model outputting raw hidden-states without any specific head on top.\",\n AUTOFORMER_START_DOCSTRING,\n)\nclass AutoformerModel(AutoformerPreTrainedModel):\n def __init__(self, config: AutoformerConfig):\n super().__init__(config)\n\n if config.scaling == \"mean\" or config.scaling is True:\n self.scaler = AutoformerMeanScaler(dim=1, keepdim=True)\n elif config.scaling == \"std\":\n self.scaler = AutoformerStdScaler(dim=1, keepdim=True)\n else:\n self.scaler = AutoformerNOPScaler(dim=1, keepdim=True)\n\n if config.num_static_categorical_features > 0:\n self.embedder = AutoformerFeatureEmbedder(\n cardinalities=config.cardinality, embedding_dims=config.embedding_dimension\n )\n\n # transformer encoder-decoder and mask initializer\n self.encoder = AutoformerEncoder(config)\n self.decoder = AutoformerDecoder(config)\n\n # used for decoder seasonal and trend initialization\n self.decomposition_layer = AutoformerSeriesDecompositionLayer(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @property\n def _past_length(self) -> int:\n return self.config.context_length + max(self.config.lags_sequence)\n\n def get_lagged_subsequences(\n self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0\n ) -> torch.Tensor:\n \"\"\"\n Returns lagged subsequences of a given sequence. Returns a tensor of shape (batch_size, subsequences_length,\n feature_size, indices_length), containing lagged subsequences. 
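One subsequence of length `subsequences_length` is extracted for every lag in `config.lags_sequence` and stacked along the last dimension. 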
Specifically, lagged[i, j, :, k] = sequence[i,\n -indices[k]-subsequences_length+j, :].\n\n Args:\n sequence (`torch.Tensor` or shape `(batch_size, context_length,\n feature_size)`): The sequence from which lagged subsequences should be extracted.\n subsequences_length (`int`):\n Length of the subsequences to be extracted.\n shift (`int`, *optional* defaults to 0):\n Shift the lags by this amount back in the time index.\n \"\"\"\n\n # calculates the indices of the lags by subtracting the shift value from the given lags_sequence\n indices = [lag - shift for lag in self.config.lags_sequence]\n\n # checks if the maximum lag plus the length of the subsequences exceeds the length of the input sequence\n sequence_length = sequence.shape[1]\n if max(indices) + subsequences_length > sequence_length:\n raise ValueError(\n f\"lags cannot go further than history length, found lag {max(indices)} \"\n f\"while history length is only {sequence_length}\"\n )\n\n # extracts the lagged subsequences from the input sequence using the calculated indices\n lagged_values = []\n for lag_index in indices:\n begin_index = -lag_index - subsequences_length\n end_index = -lag_index if lag_index > 0 else None\n lagged_values.append(sequence[:, begin_index:end_index, ...])\n\n # return as stacked tensor in the feature dimension\n return torch.stack(lagged_values, dim=-1)\n\n def create_network_inputs(\n self,\n past_values: torch.Tensor,\n past_time_features: torch.Tensor,\n static_categorical_features: Optional[torch.Tensor] = None,\n static_real_features: Optional[torch.Tensor] = None,\n past_observed_mask: Optional[torch.Tensor] = None,\n future_values: Optional[torch.Tensor] = None,\n future_time_features: Optional[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Creates the inputs for the network given the past and future values, time features, and static features.\n\n Args:\n past_values (`torch.Tensor`):\n A tensor of shape `(batch_size, past_length, input_size)` containing the past values.\n past_time_features (`torch.Tensor`):\n A tensor of shape `(batch_size, past_length, num_features)` containing the past time features.\n static_categorical_features (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, num_categorical_features)` containing the static categorical\n features.\n static_real_features (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, num_real_features)` containing the static real features.\n past_observed_mask (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed\n values in the past.\n future_values (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values.\n\n Returns:\n A tuple containing the following tensors:\n - reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags *\n input_size)` containing the lagged subsequences of the inputs.\n - features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing the\n concatenated static and time features.\n - loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input\n values.\n - scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input\n values.\n - static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` 
containing the\n concatenated static features.\n \"\"\"\n # time feature\n time_feat = (\n torch.cat(\n (\n past_time_features[:, self._past_length - self.config.context_length :, ...],\n future_time_features,\n ),\n dim=1,\n )\n if future_values is not None\n else past_time_features[:, self._past_length - self.config.context_length :, ...]\n )\n\n # target\n if past_observed_mask is None:\n past_observed_mask = torch.ones_like(past_values)\n\n context = past_values[:, -self.config.context_length :]\n observed_context = past_observed_mask[:, -self.config.context_length :]\n _, loc, scale = self.scaler(context, observed_context)\n\n inputs = (\n (torch.cat((past_values, future_values), dim=1) - loc) / scale\n if future_values is not None\n else (past_values - loc) / scale\n )\n\n # static features\n log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()\n log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()\n static_feat = torch.cat((log_abs_loc, log_scale), dim=1)\n\n if static_real_features is not None:\n static_feat = torch.cat((static_real_features, static_feat), dim=1)\n if static_categorical_features is not None:\n embedded_cat = self.embedder(static_categorical_features)\n static_feat = torch.cat((embedded_cat, static_feat), dim=1)\n expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)\n\n # all features\n features = torch.cat((expanded_static_feat, time_feat), dim=-1)\n\n # lagged features\n subsequences_length = (\n self.config.context_length + self.config.prediction_length\n if future_values is not None\n else self.config.context_length\n )\n lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)\n lags_shape = lagged_sequence.shape\n reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)\n\n if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:\n raise ValueError(\n f\"input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match\"\n )\n return reshaped_lagged_sequence, features, loc, scale, static_feat\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=AutoformerModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n past_values: torch.Tensor,\n past_time_features: torch.Tensor,\n past_observed_mask: torch.Tensor,\n static_categorical_features: Optional[torch.Tensor] = None,\n static_real_features: Optional[torch.Tensor] = None,\n future_values: Optional[torch.Tensor] = None,\n future_time_features: Optional[torch.Tensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n output_hidden_states: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n use_cache: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[AutoformerModelOutput, Tuple]:\n r\"\"\"\n Returns:\n\n Examples:\n\n ```python\n >>> from huggingface_hub import hf_hub_download\n >>> import torch\n >>> from transformers import AutoformerModel\n\n >>> file = hf_hub_download(\n ... 
repo_id=\"hf-internal-testing/tourism-monthly-batch\", filename=\"train-batch.pt\", repo_type=\"dataset\"\n ... )\n >>> batch = torch.load(file)\n\n >>> model = AutoformerModel.from_pretrained(\"huggingface/autoformer-tourism-monthly\")\n\n >>> # during training, one provides both past and future values\n >>> # as well as possible additional features\n >>> outputs = model(\n ... past_values=batch[\"past_values\"],\n ... past_time_features=batch[\"past_time_features\"],\n ... past_observed_mask=batch[\"past_observed_mask\"],\n ... static_categorical_features=batch[\"static_categorical_features\"],\n ... future_values=batch[\"future_values\"],\n ... future_time_features=batch[\"future_time_features\"],\n ... )\n\n >>> last_hidden_state = outputs.last_hidden_state\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_inputs, temporal_features, loc, scale, static_feat = self.create_network_inputs(\n past_values=past_values,\n past_time_features=past_time_features,\n past_observed_mask=past_observed_mask,\n static_categorical_features=static_categorical_features,\n static_real_features=static_real_features,\n future_values=future_values,\n future_time_features=future_time_features,\n )\n\n if encoder_outputs is None:\n enc_input = torch.cat(\n (\n transformer_inputs[:, : self.config.context_length, ...],\n temporal_features[:, : self.config.context_length, ...],\n ),\n dim=-1,\n )\n encoder_outputs = self.encoder(\n inputs_embeds=enc_input,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n if future_values is not None:\n # Decoder inputs\n # seasonality and trend from context length\n seasonal_input, trend_input = self.decomposition_layer(\n transformer_inputs[:, : self.config.context_length, ...]\n )\n mean = (\n torch.mean(transformer_inputs[:, : self.config.context_length, ...], dim=1)\n .unsqueeze(1)\n .repeat(1, self.config.prediction_length, 1)\n )\n zeros = torch.zeros(\n [transformer_inputs.shape[0], self.config.prediction_length, transformer_inputs.shape[2]],\n device=enc_input.device,\n )\n\n decoder_input = torch.cat(\n (\n torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1),\n temporal_features[:, self.config.context_length - self.config.label_length :, ...],\n ),\n dim=-1,\n )\n trend_init = torch.cat(\n (\n torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1),\n temporal_features[:, self.config.context_length - self.config.label_length :, ...],\n ),\n dim=-1,\n )\n\n decoder_outputs = self.decoder(\n trend=trend_init,\n inputs_embeds=decoder_input,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n head_mask=decoder_head_mask,\n 
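# note: the decoder receives the seasonal component as inputs_embeds and threads the trend stream (trend_init) through its layers, returning a refined trend alongside the hidden states\n            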
cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n else:\n decoder_outputs = AutoFormerDecoderOutput()\n\n if not return_dict:\n return decoder_outputs + encoder_outputs + (loc, scale, static_feat)\n\n return AutoformerModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n trend=decoder_outputs.trend,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n loc=loc,\n scale=scale,\n static_features=static_feat,\n )\n\n\n@add_start_docstrings(\n \"The Autoformer Model with a distribution head on top for time-series forecasting.\",\n AUTOFORMER_START_DOCSTRING,\n)\nclass AutoformerForPrediction(AutoformerPreTrainedModel):\n def __init__(self, config: AutoformerConfig):\n super().__init__(config)\n self.model = AutoformerModel(config)\n if config.distribution_output == \"student_t\":\n self.distribution_output = StudentTOutput(dim=config.input_size)\n elif config.distribution_output == \"normal\":\n self.distribution_output = NormalOutput(dim=config.input_size)\n elif config.distribution_output == \"negative_binomial\":\n self.distribution_output = NegativeBinomialOutput(dim=config.input_size)\n else:\n raise ValueError(f\"Unknown distribution output {config.distribution_output}\")\n\n self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.feature_size)\n self.target_shape = self.distribution_output.event_shape\n\n if config.loss == \"nll\":\n self.loss = nll\n else:\n raise ValueError(f\"Unknown loss function {config.loss}\")\n\n # Initialize weights of distribution_output and apply final processing\n self.post_init()\n\n def output_params(self, decoder_output):\n return self.parameter_projection(decoder_output[:, -self.config.prediction_length :, :])\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n @torch.jit.ignore\n def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:\n sliced_params = params\n if trailing_n is not None:\n sliced_params = [p[:, -trailing_n:] for p in params]\n return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)\n\n @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqTSPredictionOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n past_values: torch.Tensor,\n past_time_features: torch.Tensor,\n past_observed_mask: torch.Tensor,\n static_categorical_features: Optional[torch.Tensor] = None,\n static_real_features: Optional[torch.Tensor] = None,\n future_values: Optional[torch.Tensor] = None,\n future_time_features: Optional[torch.Tensor] = None,\n future_observed_mask: Optional[torch.Tensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: 
Optional[List[torch.FloatTensor]] = None,\n output_hidden_states: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n use_cache: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Seq2SeqTSPredictionOutput, Tuple]:\n r\"\"\"\n Returns:\n\n Examples:\n\n ```python\n >>> from huggingface_hub import hf_hub_download\n >>> import torch\n >>> from transformers import AutoformerForPrediction\n\n >>> file = hf_hub_download(\n ... repo_id=\"hf-internal-testing/tourism-monthly-batch\", filename=\"train-batch.pt\", repo_type=\"dataset\"\n ... )\n >>> batch = torch.load(file)\n\n >>> model = AutoformerForPrediction.from_pretrained(\"huggingface/autoformer-tourism-monthly\")\n\n >>> # during training, one provides both past and future values\n >>> # as well as possible additional features\n >>> outputs = model(\n ... past_values=batch[\"past_values\"],\n ... past_time_features=batch[\"past_time_features\"],\n ... past_observed_mask=batch[\"past_observed_mask\"],\n ... static_categorical_features=batch[\"static_categorical_features\"],\n ... static_real_features=batch[\"static_real_features\"],\n ... future_values=batch[\"future_values\"],\n ... future_time_features=batch[\"future_time_features\"],\n ... )\n\n >>> loss = outputs.loss\n >>> loss.backward()\n\n >>> # during inference, one only provides past values\n >>> # as well as possible additional features\n >>> # the model autoregressively generates future values\n >>> outputs = model.generate(\n ... past_values=batch[\"past_values\"],\n ... past_time_features=batch[\"past_time_features\"],\n ... past_observed_mask=batch[\"past_observed_mask\"],\n ... static_categorical_features=batch[\"static_categorical_features\"],\n ... static_real_features=batch[\"static_real_features\"],\n ... future_time_features=batch[\"future_time_features\"],\n ... 
)\n\n >>> mean_prediction = outputs.sequences.mean(dim=1)\n ```\"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if future_values is not None:\n use_cache = False\n\n outputs = self.model(\n past_values=past_values,\n past_time_features=past_time_features,\n past_observed_mask=past_observed_mask,\n static_categorical_features=static_categorical_features,\n static_real_features=static_real_features,\n future_values=future_values,\n future_time_features=future_time_features,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n use_cache=use_cache,\n return_dict=return_dict,\n )\n\n prediction_loss = None\n params = None\n if future_values is not None:\n # outputs.last_hidden_state and trend\n # loc is 3rd last and scale is 2nd last output\n params = self.output_params(outputs[0] + outputs[1])\n distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])\n\n loss = self.loss(distribution, future_values)\n\n if future_observed_mask is None:\n future_observed_mask = torch.ones_like(future_values)\n\n if len(self.target_shape) == 0:\n loss_weights = future_observed_mask\n else:\n loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)\n\n prediction_loss = weighted_average(loss, weights=loss_weights)\n\n if not return_dict:\n outputs = ((params,) + outputs[2:]) if params is not None else outputs[2:]\n return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs\n\n return Seq2SeqTSPredictionOutput(\n loss=prediction_loss,\n params=params,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n loc=outputs.loc,\n scale=outputs.scale,\n static_features=outputs.static_features,\n )\n\n @torch.no_grad()\n def generate(\n self,\n past_values: torch.Tensor,\n past_time_features: torch.Tensor,\n future_time_features: torch.Tensor,\n past_observed_mask: Optional[torch.Tensor] = None,\n static_categorical_features: Optional[torch.Tensor] = None,\n static_real_features: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n ) -> SampleTSPredictionOutput:\n r\"\"\"\n Greedily generate sequences of sample predictions from a model with a probability distribution head.\n\n Parameters:\n past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):\n Past values of the time series, that serve as context in order to predict the future. The sequence size\n of this tensor must be larger than the `context_length` of the model, since the model will use the\n larger size to construct lag features, i.e. 
additional values from the past which are added in order to\n serve as \"extra context\".\n\n The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if\n no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest\n look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length\n of the past.\n\n The `past_values` is what the Transformer encoder gets as input (with optional additional features,\n such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).\n\n Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.\n\n For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number\n of variates in the time series per time step.\n past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):\n Required time features, which the model internally will add to `past_values`. These could be things\n like \"month of year\", \"day of the month\", etc. encoded as vectors (for instance as Fourier features).\n These could also be so-called \"age\" features, which basically help the model know \"at which point in\n life\" a time-series is. Age features have small values for distant past time steps and increase\n monotonically the more we approach the current time step. Holiday features are also a good example of\n time features.\n\n These features serve as the \"positional encodings\" of the inputs. So contrary to a model like BERT,\n where the position encodings are learned from scratch internally as parameters of the model, the Time\n Series Transformer requires additional time features to be provided. The Time Series Transformer only\n learns additional embeddings for `static_categorical_features`.\n\n Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these\n features must be known at prediction time.\n\n The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.\n future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):\n Required time features for the prediction window, which the model internally will add to sampled\n predictions. These could be things like \"month of year\", \"day of the month\", etc. encoded as vectors\n (for instance as Fourier features). These could also be so-called \"age\" features, which basically help\n the model know \"at which point in life\" a time-series is. Age features have small values for distant\n past time steps and increase monotonically the more we approach the current time step. Holiday features\n are also a good example of time features.\n\n These features serve as the \"positional encodings\" of the inputs. So contrary to a model like BERT,\n where the position encodings are learned from scratch internally as parameters of the model, the Time\n Series Transformer requires additional time features to be provided. The Time Series Transformer only\n learns additional embeddings for `static_categorical_features`.\n\n Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these\n features must be known at prediction time.\n\n The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.\n past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected\n in `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).\n\n static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):\n Optional static categorical features for which the model will learn an embedding, which it will add to\n the values of the time series.\n\n Static categorical features are features which have the same value for all time steps (static over\n time).\n\n A typical example of a static categorical feature is a time series ID.\n static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):\n Optional static real features which the model will add to the values of the time series.\n\n Static real features are features which have the same value for all time steps (static over time).\n\n A typical example of a static real feature is promotion information.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers.\n\n Return:\n [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of\n samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for\n multivariate predictions.\n \"\"\"\n outputs = self(\n static_categorical_features=static_categorical_features,\n static_real_features=static_real_features,\n past_time_features=past_time_features,\n past_values=past_values,\n past_observed_mask=past_observed_mask,\n future_time_features=None,\n future_values=None,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=True,\n use_cache=False,\n )\n\n decoder = self.model.get_decoder()\n enc_last_hidden = outputs.encoder_last_hidden_state\n loc = outputs.loc\n scale = outputs.scale\n static_feat = outputs.static_features\n\n num_parallel_samples = self.config.num_parallel_samples\n repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)\n repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)\n\n repeated_past_values = (\n past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc\n ) / repeated_scale\n\n time_features = torch.cat((past_time_features, future_time_features), dim=1)\n\n expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_features.shape[1], -1)\n features = torch.cat((expanded_static_feat, time_features), dim=-1)\n repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)\n\n repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)\n\n lagged_sequence = self.model.get_lagged_subsequences(\n sequence=repeated_past_values, 
subsequences_length=self.config.context_length\n )\n lags_shape = lagged_sequence.shape\n reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)\n seasonal_input, trend_input = self.model.decomposition_layer(reshaped_lagged_sequence)\n\n mean = torch.mean(reshaped_lagged_sequence, dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1)\n zeros = torch.zeros(\n [reshaped_lagged_sequence.shape[0], self.config.prediction_length, reshaped_lagged_sequence.shape[2]],\n device=reshaped_lagged_sequence.device,\n )\n\n decoder_input = torch.cat(\n (\n torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1),\n repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...],\n ),\n dim=-1,\n )\n trend_init = torch.cat(\n (\n torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1),\n repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...],\n ),\n dim=-1,\n )\n decoder_outputs = decoder(\n trend=trend_init, inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden\n )\n decoder_last_hidden = decoder_outputs.last_hidden_state\n trend = decoder_outputs.trend\n params = self.output_params(decoder_last_hidden + trend)\n distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)\n future_samples = distr.sample()\n\n return SampleTSPredictionOutput(\n sequences=future_samples.reshape(\n (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,\n )\n )\n","sub_path":"src/transformers/models/autoformer/modeling_autoformer.py","file_name":"modeling_autoformer.py","file_ext":"py","file_size_in_byte":109505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"339900706","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom logging import getLogger\nfrom ykdl.compact import Request, urlopen\nfrom ykdl.util import log\nfrom .html import fake_headers\n\nlogger = getLogger(\"downloader\")\n\ntry:\n from concurrent.futures import ThreadPoolExecutor\n MultiThread = True\nexcept:\n MultiThread = False\n logger.warning(\"failed to import ThreadPoolExecutor!\")\n logger.warning(\"multithread download disabled!\")\n logger.warning(\"please install concurrent.futures from https://github.com/agronholm/pythonfutures !\")\n\ndef simple_hook(arg1, arg2, arg3):\n if arg3 > 0:\n percent = int(arg1 * arg2 * 100 / arg3)\n if percent > 100:\n percent = 100\n sys.stdout.write('\\r %3d' % percent + '%')\n sys.stdout.flush()\n else:\n sys.stdout.write('\\r' + str(round(arg1 * arg2 / 1048576, 1)) + 'MB')\n sys.stdout.flush()\n\ndef save_url(url, name, ext, status, part = None, reporthook = simple_hook):\n if part is None:\n print(\"Download: \" + name)\n name = name + '.' + ext\n else:\n print(\"Download: \" + name + \" part %d\" % part)\n name = name + '_%d_.' 
% part + ext\n bs = 1024*8\n size = -1\n read = 0\n blocknum = 0\n open_mode = 'wb'\n req = Request(url, headers = fake_headers)\n response = urlopen(req, None)\n if \"content-length\" in response.headers:\n size = int(response.headers[\"Content-Length\"])\n if os.path.exists(name):\n filesize = os.path.getsize(name)\n if filesize == size:\n print('Skipped: file already downloaded')\n if part is None:\n status[0] = 1\n else:\n status[part] = 1\n return\n elif -1 != size:\n req.add_header('Range', 'bytes=%d-' % filesize)\n blocknum = int(filesize / bs)\n response = urlopen(req, None)\n open_mode = 'ab'\n reporthook(blocknum, bs, size)\n with open(name, open_mode) as tfp:\n while True:\n block = response.read(bs)\n if not block:\n break\n read += len(block)\n tfp.write(block)\n blocknum += 1\n reporthook(blocknum, bs, size)\n if part is None:\n status[0] = 1\n else:\n status[part] = 1\n\ndef save_urls(urls, name, ext, jobs=1):\n status = [0] * len(urls)\n if len(urls) == 1:\n save_url(urls[0], name, ext, status)\n if 0 in status:\n logger.error(\"download failed\")\n return not 0 in status\n if not MultiThread:\n for no, u in enumerate(urls):\n save_url(u, name, ext, status, part = no)\n else:\n with ThreadPoolExecutor(max_workers=jobs) as worker:\n for no, u in enumerate(urls):\n worker.submit(save_url, u, name, ext, status, part = no)\n worker.shutdown()\n i = 0\n for a in status:\n if a == 0:\n logger.error(\"downloader failed at part {}\".format(i))\n i += 1\n return not 0 in status\n","sub_path":"ykdl/util/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"382563121","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/1/29 14:13\n# @Author : Jclian91\n# @File : model_evaluate.py\n# @Place : Yangpu, Shanghai\nimport json\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import classification_report\nfrom transformers import BertConfig, BertForSequenceClassification, BertTokenizer\n\nfrom params import *\nfrom model_train import convert_text_to_ids, seq_padding, test_file\n\n# read label id dict\nwith open(\"{}_label2id.json\".format(dataset), \"r\", encoding=\"utf-8\") as g:\n label_id_dict = json.loads(g.read())\n id_label_dict = {v: k for k, v in label_id_dict.items()}\n\n# load model\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nconfig = BertConfig.from_pretrained(\"../bert-base-chinese\", num_labels=num_labels, hidden_dropout_prob=hidden_dropout_prob)\nmodel = BertForSequenceClassification.from_pretrained(\"../bert-base-chinese\", config=config)\nmodel.to(device)\nstate_dict = torch.load('{}_cls.pth'.format(dataset))\nmodel.load_state_dict(state_dict)\ntokenizer = BertTokenizer(\"../bert-base-chinese/vocab.txt\")\n\n# read test file\ntest_df = pd.read_csv(test_file)\ncontents, true_labels = test_df[\"content\"].tolist(), test_df[\"label\"].tolist()\n\n# model evaluate\npred_labels = []\nfor i, text in enumerate(contents):\n print(\"predict {} samples\".format(i+1))\n input_ids, token_type_ids = convert_text_to_ids(tokenizer, [text], max_sequence_length)\n # print(input_ids, token_type_ids)\n input_ids = seq_padding(tokenizer, input_ids)\n token_type_ids = seq_padding(tokenizer, token_type_ids)\n input_ids, token_type_ids = input_ids.long(), token_type_ids.long()\n input_ids, token_type_ids = input_ids.to(device), token_type_ids.to(device)\n output = model(input_ids=input_ids, 
token_type_ids=token_type_ids)\n label_id = np.argmax(output[0].detach().cpu().numpy(), axis=1)[0]\n pred_labels.append(id_label_dict[label_id])\n\n\n# print evaluate output\nprint(classification_report(true_labels, pred_labels, digits=4))","sub_path":"transformers_learning/text_classification/model_evaluate.py","file_name":"model_evaluate.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230889703","text":"#!/usr/bin/env python2.7\n\nfrom bottle import route, run, Bottle, request\nimport random\n\n''' Define the app '''\ntile_calc_app = Bottle()\n\n@tile_calc_app.get('/')\n@tile_calc_app.get('/tilecalc')\ndef main_page():\n ''' Display main form '''\n html = '''\n

<html><body>\n    Welcome to Tile Calc\n    <br>\n    Enter the width, length, and price per unit\n    <br>\n    <form method=\"post\">\n Width: <input type=\"text\" name=\"width\">\n    <br>\n Length: <input type=\"text\" name=\"length\">\n    <br>\n Cost Per Unit: <input type=\"text\" name=\"cost_per_unit\">\n    <br>\n <input type=\"submit\">\n    </form>\n    Testimonial from our customers: %s\n    </body></html>\n '''\n return html % get_cust_feedback()\n\n@tile_calc_app.post('/')\n@tile_calc_app.post('/tilecalc')\ndef result_page():\n ''' Display the error or results page '''\n try:\n width = float(request.forms.get('width'))\n length = float(request.forms.get('length'))\n cost_per_unit = float(request.forms.get('cost_per_unit'))\n html = '''\n    <html><body>\n    Total cost: %s\n    <br>\n <a href=\"/tilecalc\">Go back</a>\n </body></html>\n ''' % (width * length * cost_per_unit)\n\n except:\n html = '''\n    <html><body>\n    ERROR: All inputs must be ints or floats\n    <br>\n <a href=\"/tilecalc\">Go back</a>\n </body></html>\n '''\n return html\n\ndef get_cust_feedback():\n ''' Teen talk barbie really likes our app! '''\n cust_feedback = (\n \"Will we ever have enough clothes?\",\n \"I love shopping!\",\n \"Wanna have a pizza party?\",\n \"Math class is tough!\"\n )\n return random.choice(cust_feedback)\n\n''' Run the app '''\nrun(tile_calc_app, host='localhost', port=8081, debug=True)\n","sub_path":"cost-of-tile/skumpf/tile_calc.py","file_name":"tile_calc.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"308216117","text":"from util import *\n\n\n@apply\ndef apply(eq_limit):\n tangent, (epsilon, _0, dir) = eq_limit.of(Equal[Limit, Infinity])\n assert _0 == 0 and dir > 0\n delta = tangent * epsilon\n fx1, fx = delta.of(Expr - Expr)\n for x in fx.free_symbols:\n if fx1 == fx._subs(x, x + epsilon):\n break\n else:\n raise\n\n return Limit[epsilon:0:1](fx) > fx\n\n\n@prove\ndef prove(Eq):\n from axiom import calculus, algebra, sets\n\n x, epsilon = Symbol(real=True)\n f = Function(real=True)\n Eq << apply(Equal(Limit[epsilon:0:1]((f(x + epsilon) - f(x)) / epsilon), oo))\n\n Eq << calculus.eq.imply.any_all.limit_definition.apply(Eq[0], 'chi')\n\n Eq << Eq[-1].this.expr.apply(algebra.all.imply.all_et)\n\n Eq << Eq[-1].this.find(Element).apply(sets.el_interval.imply.gt)\n\n Eq << Eq[-1].this.expr.expr.apply(algebra.gt_zero.gt.imply.gt.mul)\n Eq << Eq[-1].this.expr.expr.apply(algebra.gt.transport, lhs=0)\n\n\nif __name__ == '__main__':\n run()\n# created on 2020-04-28\n","sub_path":"axiom/calculus/limit_is_infinite/imply/gt/limit.py","file_name":"limit.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"359176161","text":"#!/usr/bin/env python2\n# XXX: Refactor to a command line tool and remove pylint disable\n\"\"\"Merge columns of multiple experiments by gene id.\"\"\"\nfrom __future__ import absolute_import\n\nimport argparse\nimport csv\nimport os\nimport sys\nfrom itertools import chain\n\nimport utils\n\nparser = argparse.ArgumentParser(\n description=\"Merge columns of multiple experiments by gene id.\"\n)\nparser.add_argument(\"files\", nargs=\"*\", help=\"expression files\")\nparser.add_argument(\"--experiments\", nargs=\"+\", help=\"experiment ids\")\nparser.add_argument(\"--genes\", nargs=\"+\", help=\"filter genes\")\nparser.add_argument(\n \"--intersection\", action=\"store_true\", help=\"merge by intersection of gene ids\"\n)\nparser.add_argument(\"--out\", help=\"output file\")\n\n\nargs = parser.parse_args()\n\n# if args.experiments and len(args.experiments) != len(args.files):\n# raise ValueError(\"Number of experiments must match the number of files\")\n\n\ngenes = set()\nexpressions = []\nheaders = []\nop = set.intersection if args.intersection else set.union\noffset = 0\n\nfor f in args.files:\n if not os.path.isfile(f):\n exit(1)\n\n base, ext = os.path.splitext(f)\n delimiter = \";\" if ext == \".csv\" else \"\\t\"\n\n with utils.gzopen(f) as csvfile:\n reader = csv.reader(csvfile, delimiter=delimiter)\n header = reader.next()[1:]\n headers.append(\n args.experiments[offset : offset + len(header)]\n if args.experiments\n else header\n )\n offset += len(headers[-1])\n expressions.append(dict((r[0], r[1:]) for r in reader))\n genes = (\n set(expressions[-1].keys())\n if args.intersection and not genes\n else op(genes, expressions[-1].keys())\n )\n\nif args.genes:\n genes = 
genes.intersection(args.genes)\n\ngenes = sorted(genes)\nhe = zip(headers, expressions)\nrows = [\n dict(chain.from_iterable([zip(h, e[g]) for h, e in he if g in e]), **{\"Gene\": g})\n for g in genes\n]\nfhandler = open(args.out, \"wb\") if args.out else sys.stdout\n\nwriter = csv.DictWriter(\n fhandler, [\"Gene\"] + [h for subheader in headers for h in subheader], delimiter=\"\\t\"\n)\nwriter.writeheader()\nwriter.writerows(rows)\n","sub_path":"resolwe_bio/tools/expressionmerge.py","file_name":"expressionmerge.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191701217","text":"import numpy as np\nimport csv\nimport time\nimport datetime\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution3D, MaxPooling3D, AveragePooling3D\nfrom keras import backend as K\n\nfrom keras.utils import np_utils\n\n\ntrainTestIDs = []\ntrainTestLabels = []\nvalidationIDs = []\nwith open('stage1_labels.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n trainTestIDs.append(row['id'])\n trainTestLabels.append(row['cancer'])\n\nwith open('stage1_sample_submission.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n validationIDs.append(row['id'])\n\ntrainingRatio = 0.90\nnumTrainTestAll = len(trainTestIDs)\nnumTrain = int(np.floor(trainingRatio*numTrainTestAll))\nnumTest = numTrainTestAll-numTrain\nnumValid = len(validationIDs)\n\nrandInds = np.random.permutation(numTrainTestAll)\nindsTrain = randInds[0:numTrain]\nindsTest = randInds[numTrain:numTrainTestAll]\n\nimg_rows=33\nimg_sli=49\nnb_classes=2\nnb_epoch=30\n\nfileNmPrefix1 = '/home/zdestefa/data/segFilesResizedResNetAct48/resnetFeats_'\nfileNmPrefix2 = '/home/zdestefa/data/segFilesResizedResNetAct49/resnetFeats_'\nimg_cols1=512\nimg_cols2=2048\n\ndef getFeatureData(ids,fileNmPrefix):\n fileName = fileNmPrefix + ids + '.npy'\n dataFromFile = np.load(fileName)\n returnData = np.reshape(dataFromFile,(dataFromFile.shape[0],\n dataFromFile.shape[1],\n dataFromFile.shape[2]*dataFromFile.shape[3]))\n return returnData\n\ndef dataGenerator(patIDnumbers, patLabels, indsUse,img_cols,fileNmPrefix):\n while 1:\n for ind in range(len(indsUse)):\n patID = patIDnumbers[indsUse[ind]]\n XCur = getFeatureData(patID,fileNmPrefix)\n if K.image_dim_ordering() == 'th':\n XCur = XCur.reshape(1, 1, img_rows, img_cols, img_sli)\n else:\n XCur = XCur.reshape(1, img_rows, img_cols, img_sli, 1)\n YCur = int(patLabels[indsUse[ind]])\n YUse = np_utils.to_categorical(YCur, nb_classes)\n #print(\"Ind:\" + str(ind))\n yield (XCur.astype('float32'),YUse)\n\ndef validDataGenerator(img_cols,fileNmPrefix):\n while 1:\n for ind in range(len(validationIDs)):\n patID = validationIDs[ind]\n XCur = getFeatureData(patID,fileNmPrefix)\n if K.image_dim_ordering() == 'th':\n XCur = XCur.reshape(1, 1, img_rows, img_cols, img_sli)\n else:\n XCur = XCur.reshape(1, img_rows, img_cols, img_sli, 1)\n yield (XCur.astype('float32'))\n\ndef getInputShape(img_cols):\n return (1, img_rows, img_cols, img_sli)\n\n\ndef trainAndValidateNN(img_cols,fileNmPefix):\n model = Sequential()\n\n #filter blocks to compres the info\n initKernel=(1,img_cols,1)\n model.add(Convolution3D(2, initKernel[0], initKernel[1], initKernel[2],\n border_mode='valid',input_shape=getInputShape(img_cols)))\n model.add(Activation('sigmoid'))\n model.add(MaxPooling3D(pool_size=(img_rows, 1, img_sli)))\n 
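# MIL-style head: the 1 x img_cols x 1 convolution scores every (row, slice) instance, the sigmoid squashes the scores,\n    # and max-pooling over all rows and slices keeps only the strongest instance per class before the 2-way output\n    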
model.add(Flatten())\n\n model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n model.fit_generator(dataGenerator(trainTestIDs, trainTestLabels, indsTrain,img_cols,fileNmPefix),\n samples_per_epoch=1000,nb_epoch=nb_epoch, nb_val_samples=50,verbose=1,\n validation_data=dataGenerator(trainTestIDs, trainTestLabels,\n indsTest,img_cols,fileNmPefix))\n\n yValidPred = model.predict_generator(validDataGenerator(img_cols,fileNmPefix),\n val_samples=len(validationIDs))\n pred = yValidPred[:,1]\n\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d__%H_%M_%S')\n fileName = 'submissions/resNetPlusMIL_' + st + '.csv'\n\n with open(fileName, 'w') as csvfile:\n fieldnames = ['id', 'cancer']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for ind in range(len(validationIDs)):\n writer.writerow({'id': validationIDs[ind], 'cancer': str(pred[ind])})\n\ntrainAndValidateNN(img_cols1,fileNmPrefix1)\ntrainAndValidateNN(img_cols2,fileNmPrefix2)\n\n\n","sub_path":"CUR_ResNetFeatsToPred_MIL.py","file_name":"CUR_ResNetFeatsToPred_MIL.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"445476244","text":"import datetime\r\nfrom functools import wraps\r\n\r\nfrom dateutil import parser\r\n\r\nfrom common import utils\r\nfrom common.logger import logger\r\n\r\n\r\ndef utc_to_bj(func):\r\n \"\"\"\r\n Convert a UTC time, or a naive time without timezone info, to UTC+8 (Beijing time) by simply replacing the timezone\r\n :param func:\r\n :return:\r\n \"\"\"\r\n\r\n def wrapper(*args, **kw):\r\n result = func(*args, **kw)\r\n if result is None:\r\n return result\r\n if not isinstance(result, datetime.datetime):\r\n try:\r\n result = parser.parse(result)\r\n except Exception as e:\r\n logger.error(\"invalid args to dateutil parse :%s\", e)\r\n return None\r\n tz_str = result.strftime(\"%z\")\r\n if tz_str in ['+0000']:\r\n result = result.replace(tzinfo=datetime.timezone.utc).astimezone(\r\n datetime.timezone(datetime.timedelta(hours=8)))\r\n elif tz_str in ['']:\r\n result = result.replace(tzinfo=datetime.timezone(datetime.timedelta(hours=8)))\r\n return result\r\n\r\n return wrapper\r\n\r\n\r\ndef date_fac(span=5):\r\n \"\"\"\r\n Generate the start/end datetimes when they are missing; parse them when they are given as strings\r\n :param span:\r\n :param func:\r\n :return:\r\n \"\"\"\r\n\r\n def outer_wrapper(func):\r\n\r\n @wraps(func)\r\n def wrapper(*args, **kw):\r\n\r\n if \"span\" in kw.keys():\r\n span_spec = kw[\"span\"]\r\n else:\r\n span_spec = span\r\n if \"end_dt\" not in kw.keys():\r\n kw[\"end_dt\"] = datetime.datetime.today()\r\n elif isinstance(kw[\"end_dt\"], str):\r\n kw[\"end_dt\"] = utils.parse_dt(kw[\"end_dt\"])\r\n\r\n if \"start_dt\" not in kw.keys():\r\n # local import to avoid a circular-import error between kdb and decorator\r\n from kdataapp import kdb\r\n kw[\"start_dt\"] = kdb.get_last_n_trade_cal(kw[\"end_dt\"], span_spec)\r\n elif isinstance(kw[\"start_dt\"], str):\r\n kw[\"start_dt\"] = utils.parse_dt(kw[\"start_dt\"])\r\n\r\n kw[\"start_dt\"] = kw[\"start_dt\"].replace(hour=0, minute=0, second=0)\r\n logger.info(\"args:%s,kw:%s\", args, kw)\r\n result = func(*args, **kw)\r\n\r\n return result\r\n\r\n return wrapper\r\n\r\n return outer_wrapper\r\n\r\n\r\ndef chrod_protect(func):\r\n @wraps(func)\r\n def wrapper(*args, **kw):\r\n try:\r\n result = func(*args, **kw)\r\n except Exception as e:\r\n logger.error(\"while invoking %s, args:%r, kw:%r, error occurred: %s \", func.__name__, args, kw, e)\r\n logger.exception(e)\r\n result = False\r\n\r\n return result\r\n\r\n return 
wrapper\r\n","sub_path":"common/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"578720879","text":"#! /usr/bin/python\n\nimport re\nimport argparse\nimport sys\nimport os\nimport json\nimport subprocess\nimport tempfile\nimport time\nimport socket\n\ngverbose = \"\"\ngresults = {}\n\ngchild_stdout = open('gchild_stdout', 'w')\ngchild_stderr = open('gchild_stderr', 'w')\n\n\n# helper functions\ndef get_next_port():\n # increment the counter file with each call\n # write to file for debug\n fh = open('/home/ubuntu/bibifi/rundir/port', 'r+')\n count_str = fh.read()\n count_int = int(count_str)\n count_int += 1\n fh.seek(0) # reset the location in the file to beginning\n fh.write(str(count_int))\n fh.close()\n return count_int\n\n\ndef connect_to_server(port):\n s = socket.socket() # Create a socket object\n s.connect((\"localhost\", port))\n return s\n\n\ndef send_input(s, inp):\n s.send(inp)\n\n\ndef readlines(sock, recv_buffer=4096, delim='\\n'):\n buffer = ''\n data = True\n while data:\n data = sock.recv(recv_buffer)\n buffer += data\n while buffer.find(delim) != -1:\n line, buffer = buffer.split('\\n', 1)\n yield line\n return\n\n\ndef read_test():\n f = open(testFile, 'r')\n test = json.loads(f.read())\n f.close()\n return test\n\n\ndef runServer(server, port):\n # print \"--\" + server + \"--\"\n # print type(server)\n p = subprocess.Popen([server, str(port)]\n # )\n ,\n stdout=gchild_stdout,\n stderr=gchild_stderr)\n ##out,err = p.communicate())\n # gchild_stdout, gchild_stderr = p.communicate()\n\n\n time.sleep(2)\n\n p.poll()\n # print( p.returncode)\n if p.returncode == 63:\n return runServer(server, port + 1)\n\n return (p, port)\n\n\ndef stopServer(p):\n # print \"one\"\n p.terminate()\n cnt = 0\n while cnt < 3:\n stat = p.poll()\n # print stat\n if stat == None:\n time.sleep(1)\n cnt += 1\n print(\"waited for %d second\" % cnt)\n else:\n cnt = 10\n\n # p.wait()\n if p.poll() is None: # still alive -> kill\n p.kill()\n\n print(\"server exited with return code: \" + str(p.returncode))\n\n\ndef serializeProgram(program):\n plist = list(program)\n l = len(plist)\n\n for i in range(l - 1, -1, -1):\n if (ord(plist[i]) == 10): # new line\n plist[i] = \"n\"\n plist.insert(i, chr(92)) # \"\\\"\n if (ord(plist[i]) == 34): # -\"-\n plist.insert(i, chr(92)) # \"\\\"\n\n # print \"%2d: -%s-%d-\" % (i,alist[i],ord(alist[i]))\n\n return (\"\".join(plist))\n\n\ndef extract_team_num(server):\n match = re.search(r'\\/(\\d{3,4})\\/', server)\n return match.group(1)\n\n\ndef compare_results():\n print(\"================== COMPARE ===================================\")\n for i in range(0, len(gresults)):\n # print i\n for j in range(0, len(gresults)):\n # print \" \" + str(j)\n if j > i:\n print(\"compare keys %s vs %s\" % (gresults.keys()[i], gresults.keys()[j]))\n for k in range(0, len(gresults[gresults.keys()[0]])):\n # print \" \" + str(k)\n elem1 = gresults[gresults.keys()[i]]\n elem2 = gresults[gresults.keys()[j]]\n if (elem1[k] == [] or elem2[k] == []):\n empty = \" (one is empty)\"\n else:\n empty = \"\"\n if (elem1[k] == elem2[k]):\n print(\"%2d: MATCH\" % k)\n else:\n print(\"%2d: DO NOT MATCH%s\" % (k, empty))\n print(\"%s: %s\" % (gresults.keys()[i], elem1[k]))\n print(\"%s: %s\" % (gresults.keys()[j], elem2[k]))\n\n print(\"=====================================================\")\n\n\n################################################################\ndef Init():\n # this procedure is used for initializations of what\n # ever is needed\n global gverbose\n\n test = \"\"\n init_file = \"\"\n servers = \"\"\n gverbose = \"\"\n test_path = \"\"\n filt_server = []\n only_server = []\n\n # handling arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--test', '-t',\n help='the name of the test that is being run')\n parser.add_argument('--test_path', '-p',\n help='path to where the tests are stored')\n parser.add_argument('--verbose', '-v',\n help='spits out also the communication with the server',\n action='store_true')\n parser.add_argument('--init_file', '-i',\n help='points to the init file that holds the definitions of locations for the program. default is init.file in the run directory')\n parser.add_argument('--servers', '-s',\n help='points to the list of servers to run the test upon.')\n parser.add_argument('--filter_server', '-f',\n help='do not run for this server. can take multiple of these. conflicts with -o, but the program does not protect from this.',\n action='append')\n parser.add_argument('--only_server', '-o',\n help='run only for this server. can take multiple of these. conflicts with -f, but no protection in the program itself.',\n action='append')\n\n args = parser.parse_args()\n test = args.test\n init_file = args.init_file\n servers = args.servers\n gverbose = args.verbose\n test_path = args.test_path\n filt_server = args.filter_server\n only_server = args.only_server\n\n # print init_file\n # print type(init_file)\n\n\n if init_file == None:\n fh = open('init.file', 'r')\n else:\n fh = open(init_file, 'r')\n\n for line in fh:\n # print line.rstrip(\"\\n\")\n match = re.search(r'^(\\w+) (\\S+)', line)\n if match:\n if match.group(1) == \"test\":\n if test == None:\n test = match.group(2)\n if match.group(1) == \"test_path\":\n if test_path == None:\n test_path = match.group(2)\n # if match.group(1) == \"init_file\":\n # if init_file == False:\n # init_file = match.group(2)\n if match.group(1) == \"servers\":\n if servers == None:\n servers = match.group(2)\n\n fh.close()\n\n print(\"using as test: %s\" % test)\n print(\"using as test path: %s\" % test_path)\n print(\"using as init_file: %s\" % init_file)\n print(\"using as servers: %s\" % servers)\n print(\"using as gverbose: %s\" % gverbose)\n\n return test, init_file, servers, test_path, filt_server, only_server\n\n\n#######################################################\ndef run_test_on_server(progs, server):\n global gresults\n # spec = read_test()\n\n snum = extract_team_num(server)\n res_list = []\n\n # print server\n port_sent = get_next_port()\n (p, port) = runServer(server.rstrip(\"\\n\"), port_sent)\n # (p, port) = runServer(\"/home/osboxes/MYSTUFF/breakers/840/build/server\",8900)\n\n # progs = spec['programs']\n\n prog_cnt = 1\n for prog in progs:\n # send program\n # prog = proginfo['program']\n s = connect_to_server(port)\n print(\"===> sending program \")\n # print(\"Printing prog:\")\n # for i in prog.splitlines(True):\n # print(\"^^\" + i)\n # print(prog.splitlines(True))\n for i in prog.splitlines(True):\n print(i.rstrip(\"\\n\"))\n send_input(s, prog)\n # get output\n print(\"===> receiving output:\")\n results = \"[\"\n oneline = False\n for line in readlines(s):\n # print(line)\n if (oneline):\n results += \", \"\n results += line\n oneline = True\n results += \"]\"\n s.close()\n if gverbose:\n print(\"===============================\")\n print(results)\n print(\"===============================\")\n\n res = json.loads(results)\n res_list.append(res)\n # output = proginfo['output']\n output = {\"status\": \"INVALID - FOR TEST PURPOSE ONLY\"}\n if (res == output):\n print(\"===> output MATCHES\")\n else:\n print(\"===> output DOES NOT match\")\n prog_cnt += 1\n\n gresults[snum] = res_list\n stopServer(p)\n\n\n########################################################################\n\ndef main():\n print(\"Starting ... \")\n print(\"======================================================\")\n print(\"\")\n\n test, init_file, servers, test_path, filt_server, only_server = Init()\n\n # print test\n # print init_file\n print(servers)\n\n testfile = test_path + \"/\" + test + \".scr\"\n outfile = test_path + \"/\" + test + \".out\"\n\n print(\"======================================================\")\n print(\"Working on test %s\" % testfile)\n print(\"Expecting output from %s\" % outfile)\n print(\"======================================================\")\n\n progs1 = []\n progs2 = []\n f = open(testfile, 'r')\n build_prog = \"\"\n for line in f:\n match = re.search(r'^\\s*$', line)\n if match:\n continue\n print(line.rstrip(\"\\n\"))\n build_prog = build_prog + line\n match = re.search(r'^\\*\\*\\*', line)\n if match:\n progs1.append(serializeProgram(build_prog))\n progs2.append(build_prog)\n # print \"--\" + build_prog\n build_prog = \"\"\n\n f.close()\n\n # for p in progs1:\n # print p\n # for p in progs2:\n # print p\n\n # build the test\n # read the test and create a json file out of it\n # spec = { \"arguments\" : { \"argv\" : [\"%PORT%\" ]} }\n\n # print json.dumps(spec, sort_keys=True, indent=4, separators=(',', ': '))\n\n ## send to server in\n\n if type(filt_server) != type([]):\n filt_server = []\n if type(only_server) != type([]):\n only_server = []\n\n f = open(servers, 'r')\n for s in f:\n match = re.search(r'^\\s*$', s)\n if match:\n continue\n match = re.search(r'^#', s)\n if match:\n continue\n tmp1 = 0\n for h in filt_server:\n if h in s:\n print(\"Filtered %s\" % h)\n tmp1 = 1\n continue\n if tmp1 == 1:\n continue\n\n tmp2 = 0\n if only_server != []:\n for h in only_server:\n if h in s:\n tmp2 = 1\n if tmp2 == 0:\n continue\n\n print(\"================================================================\")\n print(s)\n print(\"================================================================\")\n run_test_on_server(progs2, s)\n\n f.close()\n\n print(json.dumps(gresults, sort_keys=True,\n indent=4, separators=(',', ': ')))\n\n if (gchild_stdout):\n gchild_stdout.close()\n\n if (gchild_stderr):\n gchild_stderr.close()\n\n compare_results()\n\n\n#######################################################\n\nmain()\n\n#######################################################\n","sub_path":"ref/run-test.py","file_name":"run-test.py","file_ext":"py","file_size_in_byte":11473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"337198108","text":"from stdmodandoption import *\nimport plot_setup as PS\nimport collections\nfrom crdenfromdata import crdenfromdata\n\nkeystore=['plot1','plot2','plot3','plot4','plot5','plot6']\n\ndef crdenfromdata_testinput(subdict):\n startno=subdict['startno']\n Nsnap=subdict['Nsnap']\n snapsep=subdict['snapsep']\n wanted=subdict['wanted']\n dirneed=np.array(subdict['dirneed'])\n fmeat=subdict['fmeat']\n dirdict = collections.defaultdict(dict)\n dshape=dirneed.shape\n if dirneed.ndim==1:\n 
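# note: a 1-D dirneed produces a single column of stacked panels, while a 2-D dirneed maps to a rows x columns grid\n        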
ncols=1\n nrows=dshape[0]\n else:\n ncols=dshape[1]\n nrows=dshape[0]\n keylist = dirneed\n if dirneed.ndim==1:\n enulist = list(enumerate(dirneed))\n else:\n enulist = list(np.ndenumerate(dirneed))\n for (index, runtodo) in enulist:\n dirdict[keylist[index]]=[runtodo]\n noplots=len(dirdict.keys())\n fig, gs = PS.setupgs(nrows=nrows, ncols=ncols)\n for (index, key) in enulist:\n items=dirdict[key]\n for j, runtodo in enumerate(items):\n ssdict = collections.defaultdict(dict)\n ssdict = subdict\n ssdict['runtodo'] = runtodo\n plotdict = crdenfromdata(ssdict)\n xlab = plotdict[wanted]['xlab'];\n ylab = plotdict[wanted]['ylab'];\n ptitle = plotdict[wanted]['ptitle']\n runtitle = plotdict[wanted]['runtitle']\n filename = plotdict[wanted]['filename'];\n linelist = plotdict[wanted]['linelist']\n for k,inkey in enumerate(linelist):\n xnl = plotdict[wanted]['xnl'][inkey];\n ynl = plotdict[wanted]['ynl'][inkey];\n labelneed = plotdict[wanted]['labelneed'];\n color = plotdict[wanted]['color'][inkey];\n lsn = plotdict[wanted]['lsn'][inkey];\n lw = plotdict[wanted]['lw'][inkey];\n marker = plotdict[wanted]['marker'][inkey];\n linelabel = plotdict[wanted]['linelab'][inkey];\n legendneed = 1\n ax = plt.subplot(gs[index])\n if dirneed.ndim==1:\n if index==nrows-1: \n label = linelabel\n else:\n label = '_nolegend_'\n else:\n if index[0]==nrows-1: \n label = linelabel\n else:\n label = '_nolegend_'\n ax.plot(xnl,ynl,label=label,lw=lw,ls=lsn,color=color,marker=marker)\n if dirneed.ndim==1:\n if index<nrows-1:\n xlab=''\n ax.tick_params(labelbottom='off')\n if index>0:\n title=''\n else:\n title=runtitle\n else:\n if index[1]>0: \n ylab=''\n ax.tick_params(labelleft='off')\n if index[0]<nrows-1:\n xlab=''\n ax.tick_params(labelbottom='off')\n if index[0]>0:\n title=''\n else:\n title=runtitle \n ax.text(0.8, 0.9, ptitle, horizontalalignment='center',\n verticalalignment='center', transform=ax.transAxes,fontsize=22)\n logx=0; logy=1;\n locneed='lower left'\n if wanted=='vr':\n logy=1; locneed='upper left'\n PS.miscsetup(ax,logx=logx,logy=logy,xlab=xlab,ylab=ylab,legendneed=legendneed,\\\n labfs=22,legfs=12,title=title,locneed=locneed)\n if wanted=='pr':\n ax.set_ylim([1e-14,5e-9])\n if wanted=='pz':\n ax.set_ylim([1e-17,5e-10])\n if wanted=='vr':\n ax.set_ylim([1.0,100.0])\n if wanted=='vz':\n ax.set_ylim([0.1,600.0])\n if wanted=='vturr':\n ax.set_ylim([1.0,500.0])\n \n if dirneed.ndim==1: \n if not index==nrows-1:\n if not noplots==1:\n ax.legend().set_visible(False)\n else: \n if not index[0]==nrows-1:\n if not noplots==1:\n ax.legend().set_visible(False)\n PS.finishsave(plt,filename,subplotadjust=0,tightbbox=1)\n return None\n","sub_path":"tools/ISMmainplotfunction/ISMtools/crdenfromdata_testinput.py","file_name":"crdenfromdata_testinput.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"595430410","text":"from collections import defaultdict\n\nfrom sqlalchemy import func, and_\nfrom sqlalchemy.orm import aliased, contains_eager, joinedload\n\nfrom pycroft.lib.logging import log_room_event, log_event\nfrom pycroft.model import session\nfrom pycroft.model.facilities import Room\nfrom pycroft.model.host import Host\nfrom pycroft.model.session import with_transaction\nfrom pycroft.model.user import User\n\n\nclass RoomAlreadyExistsException(Exception):\n pass\n\n\ndef get_overcrowded_rooms(building_id=None):\n \"\"\"\n :param building_id: Limit to rooms of the building.\n Returns a dict of overcrowded rooms with their inhabitants\n :return: dict\n \"\"\"\n\n oc_rooms_filter = []\n if building_id is not None:\n 
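# limit the overcrowded-room search to one building when building_id is given\n        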
oc_rooms_filter.append(Room.building_id == building_id)\n\n # rooms containing multiple users each of which has a host in the room\n oc_rooms_query = (\n Room.q.join(User)\n .join(Host).filter(User.room_id == Host.room_id)\n .filter(*oc_rooms_filter)\n .group_by(Room.id).having(func.count(User.id) > 1)\n .subquery()\n )\n\n user = aliased(User)\n\n # room can be extracted from the subquery\n oc_room = contains_eager(user.room, alias=oc_rooms_query)\n\n query = (\n session.session.query(user)\n # only include users living in overcrowded rooms\n .join(oc_rooms_query)\n # only include users that have a host in their room\n .join(Host,\n and_(user.id == Host.owner_id, user.room_id == Host.room_id))\n .options(oc_room)\n .options(oc_room.joinedload(Room.building))\n .options(joinedload(user.current_properties))\n )\n\n rooms = defaultdict(list)\n for user in query.all():\n rooms[user.room.id].append(user)\n\n return rooms\n\n\n@with_transaction\ndef create_room(building, level, number, processor, inhabitable=True):\n if Room.q.filter_by(number=number, level=level, building=building).first() is not None:\n raise RoomAlreadyExistsException()\n\n room = Room(number=number,\n level=level,\n inhabitable=inhabitable,\n building=building)\n\n log_room_event(\"Room created.\", processor, room)\n\n return room\n\n\n@with_transaction\ndef edit_room(room, number, inhabitable, processor):\n if room.number != number:\n if Room.q.filter_by(number=number, level=room.level, building=room.building).filter(Room.id!=room.id).first() is not None:\n raise RoomAlreadyExistsException()\n\n log_room_event(\"Renamed room from {} to {}.\".format(room.number, number), processor, room)\n\n room.number = number\n\n if room.inhabitable != inhabitable:\n log_room_event(\"Changed inhabitable status to {}.\".format(str(inhabitable)), processor, room)\n\n room.inhabitable = inhabitable\n\n return room\n\n\ndef get_room(building_id, level, room_number):\n return Room.q.filter_by(number=room_number,\n level=level, building_id=building_id).one_or_none()\n","sub_path":"pycroft/lib/facilities.py","file_name":"facilities.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225365235","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('main/', views.main_visitor, name='main_visitor'),\n path('main/stores/',views.showStores,name='showStores'),\n path('main/stores//', views.storeInfo, name='storeInfo'),\n path('main/stores//showReview', views.showReview, name='showReview'),\n path('main/stores//order//', views.cakeOrder, name='cakeOrder'),\n path('login/',views.visitorlogin, name='visitorlogin')\n ]","sub_path":"visitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634268440","text":"\"\"\"Production Flask settings.\"\"\"\n\nfrom .base import * # noqa\n\n\nSECRET_KEY = 'production key'\n\nSERVER_NAME = 'newalbums.priver.org'\n\nSQLALCHEMY_DATABASE_URI = 'postgres://newalbums@localhost/newalbums'\nSQLALCHEMY_BINDS = {\n 'musicbrainz': 'postgres://musicbrainz@localhost/musicbrainz',\n}\n","sub_path":"newalbums/config/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"79998230","text":"# force floating point division. 
Can still use integer with //\nfrom __future__ import division\n# other good compatibility recquirements for python3\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n# use OS for file IO and the like\nimport os\n# use numpy for array operations\nimport numpy as np\n# Use CSV for writing human readable files\nimport csv\nfrom scipy.optimize import curve_fit\n# path!\nimport ntpath\n# for getting formatted times\nimport time\n\n# flags for recquiring data\nfileFlag=\"--file\"\nnFlag=\"--n\"\n\ndef digitize(data,maxBins):\n bins,stepSize = smartBins(data,maxBins=maxBins)\n return np.digitize(data,bins,right=True)\n\ndef smartBins(data,minStep=None,includeExtraRight=True,maxBins=100):\n # includeExtraRight: if we need to include the 'rightmost bin edge'\n # get the minimum step in the data\n uniVals = np.unique(data)\n if (uniVals.size == 1):\n stepSize = uniVals[0]/2\n else:\n stepSize = min(np.abs(np.diff(uniVals)))\n # threshhold the bins, if we need to, based on a reasonable number\n if (minStep is None and maxBins is not None):\n minStep = (uniVals[-1]-uniVals[0])/maxBins\n stepSize = max(stepSize,minStep)\n minV = np.min(data)\n maxV = np.max(data)\n # create bins of size minSize, including the endpoints\n start = minV-stepSize/2\n end = maxV+stepSize if includeExtraRight else maxV + stepSize/2\n bins = np.arange(start,end,stepSize)\n # return the bins and bin sizes\n return bins,stepSize\n\ndef whereclose(arr,val):\n minIdx = np.argmin( np.abs(arr-val) )\n return minIdx,arr[minIdx]\n\ndef column_stack(toFlat,filePath = None,labels=None,fmt='%s',delim='\\t,\\t',\n comments='#',newline='\\n',fillChar=' '):\n # column stacks everything you give it by flattening and\n # then using numpy's column stack\n # fillChar is what we put in the footer and comments if we see the delimiter\n newArr = []\n for arg in toFlat:\n newArr.append(np.array(list(arg),dtype=np.object).flatten())\n arr = np.column_stack(newArr)\n if (filePath is not None):\n if labels is None:\n labels = [str(i) for i in range(len(toFlat)) ]\n # POST: have labels\n header = delim.join(labels)\n footer = (\"(c) Patrick Heenan patrick.raymond.heenan@gmail.com.\" + \n \"Generated on {:s}\").format(getTimeStamp())\n footer = footer.replace(delim,fillChar)\n comments = comments.replace(delim,fillChar) + newline\n np.savetxt(filePath, arr, fmt=fmt, delimiter=delim,footer=footer,\n header=header,newline=newline,comments=comments)\n return arr\n\n\n# Stats\ndef RSQ(predicted,actual):\n # given predicted and actual values, get the RSQ\n meanObs = np.mean(actual)\n SS_Res = np.sum((predicted-actual)**2)\n SS_Tot = np.sum((actual-meanObs)**2)\n return 1 - SS_Res/SS_Tot\n\ndef lineIntersect(slope1,intercept1,slope2,intercept2):\n return (intercept1-intercept2)/(slope2-slope1)\n\n# assumes that aThenBX are lists for the two lines\ndef lineIntersectParam(aThenB1,aThenB2):\n return lineIntersect(aThenB1[0],aThenB1[1],aThenB2[0],aThenB2[1])\n\n\ndef linModel(xData,a,b):\n # y = ax+b\n return np.array(xData)*a+b\n\ndef GenFit(x,y,model=linModel,**kwargs):\n params,Cov = curve_fit(f=model,xdata=x,ydata=y,**kwargs)\n # the square root of the diagonal elements are the standard deviations\n paramsStd = np.sqrt(np.diag(Cov))\n predicted = model(x,*params)\n return params,paramsStd,predicted\n\ndef fitInfo(x,y,units=['',''],model=linModel,varStr=['a','b'],\n modelStr=\"y=a*x+b\"\n ,degFit=1,fmtStr=\".3g\",full=False,simplify=True,**kwargs):\n # get all the information you could 
want about the fit.\n # XXX TODO: add in support for non linear models.\n # x: observed x\n # y: observed y\n # units: units of the variables in varStr\n # varStr: parameters of the fit. goes from high degree to low \n # modelStr: describing the model.\n # degFit: degree of the model\n # fmtStr: formatting of the data\n # full : if we should return all the data\n params,paramsStd,predicted = GenFit(x,y,model,**kwargs)\n R_SQ = RSQ(predicted,y)\n # if RSQ is very close to 1 (XXX add parameter?) don't display, since\n # we are likely not interested in an actual fit...\n if (not simplify or abs(R_SQ-1) > 1.e-6):\n modelStr += \"\\nRSQ: {:.3f}\".format(R_SQ)\n for label,mean,stdev,unitTmp in zip(varStr,params,paramsStd,units):\n tempMStr = (\"\\n{:5s}={:\" + fmtStr + \"}\").format(label,mean)\n # if the stdev is finite and sensible, or we were told not to simplify, add it\n if ((np.isfinite(stdev) and stdev >= 0)\n or not simplify):\n tempMStr += \"+/-{:.1g}\".format(stdev)\n modelStr += tempMStr\n # add the units (if we have any)\n if (len(unitTmp) > 0):\n modelStr += \"[{:s}]\".format(unitTmp)\n if (full):\n return predicted,modelStr,params,paramsStd,R_SQ\n else:\n return predicted,modelStr\n\ndef getSanitaryPath(path,includeSep = True):\n # return the sanitized path plus an os-dependent separator,maybe\n toRet =os.path.normpath(path)\n if (includeSep):\n return toRet + os.sep\n else:\n # don't include the separator\n return toRet\n\ndef file_name_from_path(path):\n return ntpath.basename(path)\n \ndef getFileFromPath(path):\n return file_name_from_path(path)\n\ndef getBasePath(path):\n return getSanitaryPath(os.path.dirname(path))\n\ndef dirExists(directory):\n return os.path.exists(getSanitaryPath(directory))\n\ndef makeTrialDir(base,label=None,time=True):\n basePath = getSanitaryPath(base)\n # start the full path just as the base\n fullPath = ensureDirExists(basePath)\n # add if necessary\n if (label is not None):\n fullPath += getSanitaryPath(label)\n if time:\n # add the timestamp, then make sure to add the separator\n fullPath += getTimeStamp() + os.sep\n fullPath = ensureDirExists(fullPath)\n return fullPath\n\ndef getTimeStamp(fmt=\"%d_%m_%Y_%H:%M:%S\"):\n \"\"\"\n Returns: the current time stamp, formatted as fmt\n \"\"\"\n return time.strftime(fmt)\n\ndef ensureDirExists(directory):\n \"\"\"\n if directory doesn't exist, makes it \n\n Args:\n directory: which directory we want \n Returns:\n the sanitized path name \n \"\"\"\n # make the directory if it isn't there!\n sanit = getSanitaryPath(directory)\n if not dirExists(sanit):\n os.makedirs(sanit)\n return sanit\n\ndef isfile(filename):\n \"\"\"\n Returns: true iff filename is actually a file \n \"\"\"\n sanit = getSanitaryPath(filename,False)\n return os.path.isfile(sanit)\n\ndef ensurePathExists(globalOutput,subPaths):\n ensureDirExists(globalOutput)\n path = globalOutput\n for nextPath in subPaths:\n path += os.sep + nextPath\n path = getSanitaryPath(path)\n ensureDirExists(path)\n return path + os.sep\n\ndef getAllFiles(path,ext=None):\n \"\"\"\n Gets all files with extension \"ext\" in path\n\n Args:\n path: which path\n ext: extension the file must end with. 
if none, gets all\n Returns:\n list of path-appended filenames\n \"\"\"\n # add the trailing slash\n path = os.path.join(path, '')\n filesRaw = os.listdir(path)\n filesFull = [path + f for f in filesRaw\n if (ext is None or f.endswith(ext))]\n return filesFull\n\ndef humanReadableSave(listToSave,fileName,header):\n # if opening a file object, use newline ='' , according to:\n # https://docs.python.org/3/library/csv.html#id2\n with open(fileName + \".csv\",\"w\",newline='') as f:\n writeObj = csv.writer(f)\n # XXX move this? right now, try and catch. probably should just\n # check if the first element is a list.\n try:\n writeObj.writerows(listToSave)\n except (csv.Error) as e:\n # must not be a list\n writeObj.writerows([listToSave])\n \n\ndef saveAll(matricesToSave,labels,thisPath,saveCSVForHumans=True):\n # matricesToSave: a list of N matrices to save\n # labels: a list of labels to put in the headers of the matrices\n # global output: a single, global output folder\n # thispath: a list of N strings giving sub-folders under the global output \n path = globalIO.getOutputDir(thisPath)\n for i,mat in enumerate(matricesToSave):\n fName = path + labels[i]\n ReportMessage(\"Saving \" + labels[i])\n np.save(fName,mat)\n # only save CSV if they want it\n if (saveCSVForHumans):\n humanReadableSave(mat,fName,labels[i])\n # XXX: probably want something like this \n # http://stackoverflow.com/questions/14037540/writing-a-python-list-of-lists-to-a-csv-file\n\ndef ensureEnds(strV,ext):\n if not (strV.endswith(ext)):\n return strV + ext\n else:\n return strV\n\ndef toLatexStr(numpyArr,fmt=\"{:s}\"):\n \"\"\"\n Returns a numpy array as a string see:\n tex.stackexchange.com/questions/54990/convert-numpy-array-into-tabular\n \n Args:\n the array\n Returns:\n The string to use \n \"\"\"\n return \" \\\\\\\\\\n\".join([\" & \".join( (fmt.format(str(c))\n for c in line))\n for line in numpyArr])\n","sub_path":"GenUtilities.py","file_name":"GenUtilities.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"109869495","text":"# -*- coding: utf-8 -*-\n\nimport simple_draw as sd\n\n\ndef draw_house(house_left, house_bottom, house_right, house_top):\n left_bottom = sd.get_point(house_left, house_bottom)\n right_top = sd.get_point(house_right, house_top)\n right_bottom = sd.get_point(house_right, house_bottom)\n right_top_plus = sd.get_point(house_right + 26, house_top)\n sd.rectangle(left_bottom, right_top, color=sd.COLOR_DARK_RED, width=0)\n for i in range(int((house_right - house_left) / 25)):\n for j in range(int((house_top - house_bottom) / 12) + 1):\n brick = sd.rectangle(sd.get_point((i * 25 + j % 2 * 12) + house_left, (0 + j * 12) + house_bottom),\n sd.get_point((25 + i * 25 + j % 2 * 12) + house_left, (12 + j * 12) + house_bottom),\n color=sd.COLOR_WHITE, width=1)\n sd.rectangle(right_bottom, right_top_plus, color=sd.background_color, width=0)\n roof_left_x = house_left - (house_right - house_left) / 6\n roof_right_x = house_right + (house_right - house_left) / 6\n house_center_x = house_left + (house_right - house_left) / 2\n roof_top_y = house_top + (house_top - house_bottom) / 2\n roof_points = []\n roof_points.append(sd.get_point(house_center_x, roof_top_y))\n roof_points.append(sd.get_point(roof_left_x, house_top))\n roof_points.append(sd.get_point(roof_right_x, house_top))\n sd.polygon(roof_points, color=sd.COLOR_RED, width=0, )\n left_bottom_window = sd.get_point(house_left + 50, house_bottom + 50)\n 
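# The window is inset 50 px from each wall; a white frame and a vertical\n # white divider are drawn on top of the green pane below.\n 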
right_top_window = sd.get_point(house_right - 50, house_top - 50)\n sd.rectangle(left_bottom_window, right_top_window, color=sd.COLOR_GREEN, width=0)\n sd.rectangle(left_bottom_window, right_top_window, color=sd.COLOR_WHITE, width=5)\n sd.line(sd.get_point(house_center_x, house_top - 50), sd.get_point(house_center_x, house_bottom + 50),\n color=sd.COLOR_WHITE, width=5)\n\n","sub_path":"lesson_005/my_draw/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"163040043","text":"from django.shortcuts import render, get_object_or_404, redirect, reverse\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom utils.slack import slack_notify\n\nfrom .models import Message, Report, Sharing\nfrom .forms import MessageForm, ReportForm\nfrom accounts.models import MyUser\nfrom books.models import Book\n\n\ndef chat(request):\n user = request.user\n\n # only users that have a sharing history with the current user!\n # all_user = MyUser.objects.all().exclude(pk=user.pk)\n # nested list (list > dict > list)\n # ongoing = [\n # { \n # 'username': ,\n # 'sharing_info': list( dict {pk, book_title}),\n # },\n # ...\n # ]\n sharing = Sharing.objects.filter(Q(userA=user)|Q(userB=user))\n if sharing == None:\n return render(request,'chat/chat.html',{'user':user})\n\n ongoing = list()\n finished = list()\n\n all_user = dict()\n\n for s in sharing:\n if s.userA == user:\n username = s.userB.nickname\n all_user[s.userB.pk] = username\n else:\n username = s.userA.nickname\n all_user[s.userA.pk] = username\n\n s.book.title = s.book.title.split('(')[0]\n\n new_neighbor = True\n # books still being shared (ongoing)\n if s.isFinished == False:\n for item in ongoing:\n if item['username'] == username:\n # the neighbor already has an entry in the list, so append to their book list\n new_neighbor = False\n item['sharing_info'].append(\n {s.pk : s.book}\n )\n\n # books whose sharing is finished\n else:\n for item in finished:\n if item['username'] == username:\n new_neighbor = False\n item['sharing_info'].append(\n {s.pk : s.book}\n )\n\n # the neighbor is not in the list yet, so append a new entry\n if new_neighbor == True:\n sharing_info = list()\n sharing_info.append(\n {s.pk : s.book}\n )\n\n chat = dict()\n chat['username'] = username\n chat['sharing_info'] = sharing_info\n \n if s.isFinished == False:\n ongoing.append(chat)\n else:\n finished.append(chat)\n \n # print(ongoing)\n # print(finished)\n # for item in finished:\n # for i in item['sharing_info']:\n # for key,value in i.items():\n # print(type(key))\n # print(value)\n # print(type(value))\n\n report_form = ReportForm()\n\n print(all_user)\n return render(request,'chat/chat.html',{'sharing':'True','ongoing':ongoing, 'finished':finished,'all_user':all_user,'report_form':report_form})\n\ndef sharing(request,book_pk,neighbor_pk):\n \n user = request.user\n neighbor = get_object_or_404(MyUser,pk=neighbor_pk)\n book = get_object_or_404(Book,pk=book_pk)\n\n try:\n q1 = Sharing.objects.get(Q(userA=user)&Q(userB=neighbor)&Q(book=book))\n except Sharing.DoesNotExist:\n q1 = None\n \n try:\n q2 = Sharing.objects.get(Q(userB=user)&Q(userA=neighbor)&Q(book=book))\n except Sharing.DoesNotExist:\n q2 = None\n\n if q1:\n if q1.isFinished == False:\n print('This book is already being shared')\n else:\n q1.isFinished = False\n print('Sharing of this book was already completed')\n return redirect('chat')\n\n elif q2:\n if q2.isFinished == False:\n print('This book is already being shared')\n else:\n q2.isFinished = False\n print('Sharing of this book was already completed')\n return redirect('chat')\n \n else:\n sharing = Sharing.objects.create(\n userA = 
user,\n userB = neighbor,\n book = book,\n isFinished = False\n )\n return redirect('listMessage',sharing.pk)\n\ndef set_sharing_state(request,sharing_pk):\n sharing = get_object_or_404(Sharing,pk=sharing_pk)\n\n if sharing.isFinished == False:\n sharing.isFinished = True\n sharing.save()\n return redirect('chat')\n\n return redirect('chat')\n\ndef chatroom(request,sharing_pk):\n # move from the chat main page into the chat room\n print(sharing_pk)\n \n sharing = get_object_or_404(Sharing,pk=sharing_pk)\n message_list = Message.objects.filter(sharing=sharing)\n received = message_list.filter(receiver=request.user)\n for r in received:\n r.receiver_isRead = True\n r.save()\n\n return redirect('listMessage',sharing_pk)\n\ndef message(request,pk):\n # when the user enters the chat room, \n # mark every message received before my last sent message as read\n \n cur_user = request.user\n sharing = Sharing.objects.get(pk=pk)\n if sharing.userA == cur_user:\n receiver = sharing.userB.nickname\n else:\n receiver = sharing.userA.nickname\n\n message_list = Message.objects.filter(sharing=sharing) # message log\n\n received = message_list.filter(receiver=request.user) # received messages\n sent = message_list.filter(sender=request.user) # sent messages\n\n form = MessageForm()\n if request.method == 'POST':\n content = request.POST.get('content')\n\n message = Message.objects.create(\n sharing = sharing,\n sender = cur_user,\n content = content,\n receiver_isRead = False,\n sender_isRead = True\n )\n if sharing.userA == cur_user:\n message.receiver = sharing.userB\n else:\n message.receiver = sharing.userA\n message.save()\n\n return render(request,'chat/message.html',{'messages':message_list,'receiver': receiver, 'form':form})\n\n\ndef chat_guide(request):\n return render(request,'report/chat_guideline.html')\n\ndef service_guide(request):\n return render(request,'report/service_guideline.html') \n\n\ndef report(request):\n if request.method == 'POST':\n report_user_pk = request.POST.get('user')\n report1 = request.POST.get('report1')\n report2 = request.POST.get('report2')\n report3 = request.POST.get('report3')\n content = request.POST.get('content')\n\n report_user = get_object_or_404(MyUser,pk=report_user_pk)\n print(report_user.nickname)\n if report1 == None:\n report1 = False\n if report2 == None:\n report2 = False\n if report3 == None:\n report3 = False\n\n report = Report.objects.create(\n user = report_user,\n report1 = report1,\n report2 = report2,\n report3 = report3,\n content = content\n )\n # send a report-received notification to Slack\n attachments = [{\n \"color\": \"#FF0000\",\n \"title\": \"Report received\",\n \"text\": \"{}\".format(report.content)\n }]\n slack_message = \"[Report] A report against {} has been received.\".format(report_user.nickname)\n slack_notify(slack_message,\"#random\",username='report bot',attachments=attachments)\n\n return redirect('chat')","sub_path":"ChaekHwaJeom/chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1701389","text":"from .fixtures import app_client\nimport pytest\n\npytest.fixture(scope='module')(app_client)\n\n\ndef test_homepage(app_client):\n _, response = app_client.get('/.json')\n assert response.status == 200\n assert response.json.keys() == {'test_tables': 0}.keys()\n d = response.json['test_tables']\n assert d['name'] == 'test_tables'\n assert d['tables_count'] == 7\n\n\ndef test_database_page(app_client):\n response = app_client.get('/test_tables.json', gather_request=False)\n data = response.json\n assert 'test_tables' == data['database']\n assert [{\n 
'columns': ['content'],\n 'name': '123_starts_with_digits',\n 'count': 0,\n 'hidden': False,\n 'foreign_keys': {'incoming': [], 'outgoing': []},\n 'label_column': None,\n }, {\n 'columns': ['pk', 'content'],\n 'name': 'Table With Space In Name',\n 'count': 0,\n 'hidden': False,\n 'foreign_keys': {'incoming': [], 'outgoing': []},\n 'label_column': None,\n }, {\n 'columns': ['pk', 'f1', 'f2', 'f3'],\n 'name': 'complex_foreign_keys',\n 'count': 1,\n 'foreign_keys': {\n 'incoming': [],\n 'outgoing': [{\n 'column': 'f3',\n 'other_column': 'id',\n 'other_table': 'simple_primary_key'\n }, {\n 'column': 'f2',\n 'other_column': 'id',\n 'other_table': 'simple_primary_key'\n }, {\n 'column': 'f1',\n 'other_column': 'id',\n 'other_table': 'simple_primary_key'\n }],\n },\n 'hidden': False,\n 'label_column': None,\n }, {\n 'columns': ['pk1', 'pk2', 'content'],\n 'name': 'compound_primary_key',\n 'count': 1,\n 'hidden': False,\n 'foreign_keys': {'incoming': [], 'outgoing': []},\n 'label_column': None,\n }, {\n 'columns': ['content', 'a', 'b', 'c'],\n 'name': 'no_primary_key',\n 'count': 201,\n 'hidden': False,\n 'foreign_keys': {'incoming': [], 'outgoing': []},\n 'label_column': None,\n }, {\n 'columns': ['pk', 'content'],\n 'name': 'simple_primary_key',\n 'count': 3,\n 'hidden': False,\n 'foreign_keys': {\n 'incoming': [{\n 'column': 'id',\n 'other_column': 'f3',\n 'other_table': 'complex_foreign_keys'\n }, {\n 'column': 'id',\n 'other_column': 'f2',\n 'other_table': 'complex_foreign_keys'\n }, {\n 'column': 'id',\n 'other_column': 'f1',\n 'other_table': 'complex_foreign_keys'\n }],\n 'outgoing': [],\n },\n 'label_column': None,\n }, {\n 'columns': ['pk', 'content'],\n 'name': 'table/with/slashes.csv',\n 'count': 1,\n 'hidden': False,\n 'foreign_keys': {'incoming': [], 'outgoing': []},\n 'label_column': None,\n }] == data['tables']\n\n\ndef test_custom_sql(app_client):\n response = app_client.get(\n '/test_tables.jsono?sql=select+content+from+simple_primary_key',\n gather_request=False\n )\n data = response.json\n assert {\n 'sql': 'select content from simple_primary_key',\n 'params': {}\n } == data['query']\n assert [\n {'content': 'hello'},\n {'content': 'world'},\n {'content': ''}\n ] == data['rows']\n assert ['content'] == data['columns']\n assert 'test_tables' == data['database']\n assert not data['truncated']\n\n\ndef test_sql_time_limit(app_client):\n response = app_client.get(\n '/test_tables.jsono?sql=select+sleep(0.5)',\n gather_request=False\n )\n assert 400 == response.status\n assert 'interrupted' == response.json['error']\n\n\ndef test_custom_sql_time_limit(app_client):\n response = app_client.get(\n '/test_tables.jsono?sql=select+sleep(0.01)',\n gather_request=False\n )\n assert 200 == response.status\n response = app_client.get(\n '/test_tables.jsono?sql=select+sleep(0.01)&_sql_time_limit_ms=5',\n gather_request=False\n )\n assert 400 == response.status\n assert 'interrupted' == response.json['error']\n\n\ndef test_invalid_custom_sql(app_client):\n response = app_client.get(\n '/test_tables.json?sql=.schema',\n gather_request=False\n )\n assert response.status == 400\n assert response.json['ok'] is False\n assert 'Statement must be a SELECT' == response.json['error']\n\n\ndef test_table_json(app_client):\n response = app_client.get('/test_tables/simple_primary_key.jsono', gather_request=False)\n assert response.status == 200\n data = response.json\n assert data['query']['sql'] == 'select * from simple_primary_key order by pk limit 51'\n assert data['query']['params'] == {}\n assert 
data['rows'] == [{\n 'pk': '1',\n 'content': 'hello',\n }, {\n 'pk': '2',\n 'content': 'world',\n }, {\n 'pk': '3',\n 'content': '',\n }]\n\n\ndef test_table_with_slashes_in_name(app_client):\n response = app_client.get('/test_tables/table%2Fwith%2Fslashes.csv.jsono', gather_request=False)\n assert response.status == 200\n data = response.json\n assert data['rows'] == [{\n 'pk': '3',\n 'content': 'hey',\n }]\n\n\n@pytest.mark.parametrize('path,expected_rows,expected_pages', [\n ('/test_tables/no_primary_key.jsono', 201, 5),\n ('/test_tables/paginated_view.jsono', 201, 5),\n ('/test_tables/123_starts_with_digits.jsono', 0, 1),\n])\ndef test_paginate_tables_and_views(app_client, path, expected_rows, expected_pages):\n fetched = []\n count = 0\n while path:\n response = app_client.get(path, gather_request=False)\n count += 1\n fetched.extend(response.json['rows'])\n path = response.json['next_url']\n if path:\n assert response.json['next'] and path.endswith(response.json['next'])\n assert count < 10, 'Possible infinite loop detected'\n\n assert expected_rows == len(fetched)\n assert expected_pages == count\n\n\n@pytest.mark.parametrize('path,expected_rows', [\n ('/test_tables/simple_primary_key.json?content=hello', [\n ['1', 'hello'],\n ]),\n ('/test_tables/simple_primary_key.json?content__contains=o', [\n ['1', 'hello'],\n ['2', 'world'],\n ]),\n ('/test_tables/simple_primary_key.json?content__exact=', [\n ['3', ''],\n ]),\n ('/test_tables/simple_primary_key.json?content__not=world', [\n ['1', 'hello'],\n ['3', ''],\n ]),\n])\ndef test_table_filter_queries(app_client, path, expected_rows):\n response = app_client.get(path, gather_request=False)\n assert expected_rows == response.json['rows']\n\n\ndef test_max_returned_rows(app_client):\n response = app_client.get(\n '/test_tables.jsono?sql=select+content+from+no_primary_key',\n gather_request=False\n )\n data = response.json\n assert {\n 'sql': 'select content from no_primary_key',\n 'params': {}\n } == data['query']\n assert data['truncated']\n assert 100 == len(data['rows'])\n\n\ndef test_view(app_client):\n response = app_client.get('/test_tables/simple_view.jsono', gather_request=False)\n assert response.status == 200\n data = response.json\n assert data['rows'] == [{\n 'upper_content': 'HELLO',\n 'content': 'hello',\n }, {\n 'upper_content': 'WORLD',\n 'content': 'world',\n }, {\n 'upper_content': '',\n 'content': '',\n }]\n\n\ndef test_row(app_client):\n response = app_client.get('/test_tables/simple_primary_key/1.jsono', gather_request=False)\n assert response.status == 200\n assert [{'pk': '1', 'content': 'hello'}] == response.json['rows']\n\n\ndef test_row_foreign_key_tables(app_client):\n response = app_client.get('/test_tables/simple_primary_key/1.json?_extras=foreign_key_tables', gather_request=False)\n assert response.status == 200\n assert [{\n 'column': 'id',\n 'count': 1,\n 'other_column': 'f3',\n 'other_table': 'complex_foreign_keys'\n }, {\n 'column': 'id',\n 'count': 0,\n 'other_column': 'f2',\n 'other_table': 'complex_foreign_keys'\n }, {\n 'column': 'id',\n 'count': 1,\n 'other_column': 'f1',\n 'other_table': 'complex_foreign_keys'\n }] == response.json['foreign_key_tables']\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":8422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"572757425","text":"\n\ndef GetUglyNumber_Solution(index):\n # write code here\n if index == 0:\n return 0\n dp = [0] * index\n a,b,c = 0,0,0\n 
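# a, b and c index the next ugly numbers still to be multiplied by 2, 3 and 5;\n # taking the minimum candidate and advancing every pointer that produced it\n # keeps dp sorted and skips duplicates such as 6 = 2*3 = 3*2\n 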
dp[0] = 1\n for i in range(1,index):\n n2,n3,n5 = dp[a] * 2,dp[b] * 3,dp[c] * 5\n dp[i] = min(n2,n3,n5)\n if dp[i] == n2: a += 1\n if dp[i] == n3: b += 1\n if dp[i] == n5: c += 1\n return dp[index-1]","sub_path":"CodingInterviews/49.丑数.py","file_name":"49.丑数.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374424840","text":"def find(n):\n if table[n] == n:\n return n\n else:\n return find(table[n])\n\n\nN, M = map(int, input().split())\npoint = [list(map(int, input().split())) for _ in range(N)]\nedge = [list(map(int, input().split())) for _ in range(M)]\n\ntable = [n for n in range(N+1)]\nfor e in edge:\n f, b = e\n if table[b] == b:\n table[b] = f\n else:\n table[b] = find(table[b])\n\ndict = {}\nfor i in range(1, len(table)):\n if dict.get(table[i]) == None:\n dict[table[i]] = [point[i-1]]\n else:\n dict[table[i]].append(point[i-1])\nmaxV = 0\nfor val in dict.values():\n X, Y = [], []\n for v in val:\n X.append(v[0])\n Y.append(v[1])\n lx = max(X) - min(X)\n ly = max(Y) - min(Y)\n temp = (lx + ly)*2\n maxV = max(maxV, temp)\nprint(maxV)\n","sub_path":"Coding_Test/SW_mastro/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300453263","text":"\"\"\"\n#How to use:\n#errorProject is a project with no min and a large max. Students that can't be assigned to a project end up here.\nerrorProject = Project(\"Error\", 0, 0, 100, 1, 1, [], 0, 0)\n#if SDI is in spring, remember to set\nspringMode=1\n#otherwise, make sure\nspringMode=0\n\n#Start with\nprojectList=[]\nid=1\n#Then do\nprojectList.append(Project(name, id, min, max, canDoSummer, canDoFall, listOfStudents, priority, current))\nid=id+1\n#name is a string. id is the ID of the project, starting at 1. min and max are the minimum and maximum numbers of students. \n#canDoSummer and canDoFall are 1 if the project can be continued in summer and fall respectively, and 0 if it can't.\n#If SDI is not during the spring, set both of these to 1 for all projects.\n#listOfStudents should be empty [] when creating the project, even if students have already been chosen. Same with current being 0.\n#This will be taken care of when students are being assigned to projects, as long as Student.lockedInto is set correctly.\n#priority is what priority the project has when having students assigned. The higher, the more important. priority = -1 means no students will be assigned.\n\n#Start with\nstudentList=[]\n#Then do\nstudentList.append(Student(name, priorityList, gpa, bootcamp, canDoSummer, canDoFall, currentProject, lockedInto, blockedList))\n#name is a string. \n#priorityList is a list of the priorities the student gave to each project. For example, if the student placed project 5 at priority 2, the 5th entry in priorityList will be 2.\n#The lower the priority, the more the student wants the project.\n#bootcamp is 1 if the student attended bootcamp and 0 otherwise.\n#canDoSummer and canDoFall are 1 if the student can take SDII in summer and fall respectively, and 0 if they can't.\n#If SDI is not during the spring, set both of these to 1 for all students.\n#currentProject should be -1 for all students, even those that have been chosen for projects\n#lockedInto is the value that determines whether a student has been pre-assigned to a project. 
***This is 1 less than the project's ID!*** It starts at 0!\n#lockedInto = -1 for students that have not been chosen for projects.\n#blockedList includes any projects the student may not be assigned to. It is a list of ints such as [0, 4, 2]. These also start at 0.\n\nProject.assignStudents(studentList, projectList, errorProject)\n#Use this static method when you have some students who need to be assigned.\n\nProject.randomizeAndAssign(studentList, projectList, errorProject)\n#Use this static method if you want a different configuration of students. \n#It reassigns students that were assigned by assignStudents, but not those that have been lockedInto projects\n\nProject.reassignFromUnmet(studentList, projectList, errorProject)\n#Use this static method to reassign all students from projects that have failed to meet the minimum student requirement.\n#If you run this and then randomizeAndAssign, the projects that didn't have enough students will become locked.\n\nProject.printAllProjects(projectList, studentList, errorProject, springMode)\n#prints all the information about what students have been assigned to what projects and various statistics\n\"\"\"\n\nfrom random import shuffle\nimport sys\nimport json\nimport datetime, os\nimport traceback\n\nclass Project(object):\n def __init__(self, projectName, id, min, max, canDoSummer, canDoFall, listOfStudents, priority, current):\n self.projectName=projectName\n self.id=id\n self.min=min\n self.max=max\n self.canDoFall=canDoFall\n self.canDoSummer=canDoSummer\n self.listOfStudents=listOfStudents\n self.priority=priority\n self.current=current\n \n @staticmethod\n def assignStudents(studentList, projectList, errorProject):\n \"\"\"maxPriority\n for x in projectList:\n if x.priority>maxPriority:\n maxPriority=x.priority\"\"\"\n for x in studentList:\n x.optimize(projectList, errorProject)\n \n @staticmethod\n def reassignFromUnmet(studentList, projectList, errorProject):\n maxMinSize=0\n for x in projectList:\n if x.min > maxMinSize:\n maxMinSize=x.min\n for z in range(maxMinSize): #this must be at least the minimum size of the largest group\n for x in projectList:\n if x.current < x.min and x.current <= z and x.priority != -1:\n x.priority = -1\n x.current=0\n for y in x.listOfStudents:\n y.currentProject=-1\n #print(y.currentProject + \"reassign\")\n y.optimize(projectList, errorProject)\n x.listOfStudents=[]\n \n @staticmethod\n def randomizeAndAssign(studentList, projectList, errorProject):\n shuffle(studentList)\n for a in studentList:\n a.currentProject=-1\n #print(a.currentProject + \"randomize\")\n for a in projectList:\n a.current=0\n a.listOfStudents=[]\n Project.assignStudents(studentList, projectList, errorProject)\n \n @staticmethod\n def printAllProjects(projectList, studentList, errorProject, springMode):\n i=0\n j=0\n for a in projectList:\n #a.printList()\n if springMode == 1:\n summer=a.canDoSummer\n fall=a.canDoFall\n for b in a.listOfStudents:\n summer=summer*b.canDoSummer\n fall=fall*b.canDoFall\n j=j+1\n if a.priority >=0:\n i=i+a.max\n #f.write(\"This project currently does not have enough students.\\n\")\n #errorProject.printList()\n \"\"\"if i < len(studentList):\n print(\"There are currently not enough assignment slots for every student.\")\"\"\"\n got1=0\n got2=0\n got3=0\n for a in studentList:\n if a.currentProject>=0:\n if a.priorityList[a.currentProject]==3:\n got3=got3+1\n if a.priorityList[a.currentProject]==2:\n got2=got2+1\n if a.priorityList[a.currentProject]==1:\n got1=got1+1\n # print(\"{} out of {} students 
got their first choice.\".format(got1, len(studentList)))\n # print(\"{} out of {} students got their second choice.\".format(got2, len(studentList)))\n #print(\"{} out of {} students got their third choice.\".format(got3, len(studentList)))\n\n #f.write(\"{} out of {} students got their first choice.\\n\".format(got1, len(studentList)))\n #f.write(\"{} out of {} students got their second choice.\\n\".format(got2, len(studentList)))\n #f.write(\"{} out of {} students got their third choice.\\n\".format(got3, len(studentList)))\n date = datetime.datetime.now()\n fileName = date.strftime(\"%Y-%m-%d_%H_%M_%S.txt\")\n fileName = dir_path = os.path.dirname(os.path.realpath(__file__)) + \"/Schedule_Runs/\" + sys.argv[2] + \"/\" + sys.argv[3] + \"/\" + fileName\n with open(fileName, \"w+\") as f:\n temp = {}\n temp['studentsFirstChoice'] = got1\n temp['studentsSecondChoice'] = got2\n temp['studentsThirdChoice'] = got3\n temp['totalStudents'] = len(studentList)\n temp['studentList'] = studentList\n temp['projectList'] = projectList\n f.write(json.dumps(temp, default=lambda x: x.__dict__))\n \n\n #def printList(self):\n #f = open(\"run.txt\", \"a+\")\n #print(\"\\nName: {} ID: {} Number of Students: {} Priority: {}\".format(self.projectName, self.id, self.current, self.priority))\n #f.write(\"\\nName: {} ID: {} Number of Students: {} Priority: {}\\n\".format(self.name, self.id, self.current, self.priority))\n #for x in self.listOfStudents:\n #print(\"{} {} Summer: {} Fall: {}\".format(x.name, x.priorityList[self.id-1], x.canDoSummer, x.canDoFall))\n #f.write(\"{} {} Summer: {} Fall: {}\\n\".format(x.name, x.priorityList[self.id-1], x.canDoSummer, x.canDoFall))\n # if x.lockedInto != -1:\n #print(\"This student is locked into this project.\")\n #f.write(\"This student is locked into this project.\\n\")\n # if self.priority==-1:\n # print(\"This project is locked.\")\n #f.write(\"This project is locked.\\n\")\n\n def reprioritize(self, projectList, studentList):\n while (self.current<self.min):\n # find the unlocked student who most wants this project among those\n # currently sitting on lower-priority projects\n bestPrior=len(projectList)+1\n bestPriorStudent=None\n bestPriorSet=0\n for b in studentList:\n if b.priorityList[self.id]<bestPrior and b.currentProject>0 and projectList[b.currentProject].priority < self.priority and b.lockedInto == -1:\n bestPrior=b.priorityList[self.id]\n bestPriorStudent=b\n bestPriorSet=1\n if bestPriorSet == 0:\n # nobody can be pulled in; stop instead of looping forever\n return\n if bestPriorSet == 1:\n summer=self.canDoSummer\n fall=self.canDoFall\n for c in self.listOfStudents:\n summer=summer*c.canDoSummer\n fall=fall*c.canDoFall\n if summer == 1 or fall == 1:\n projectList[bestPriorStudent.currentProject].listOfStudents.remove(bestPriorStudent)\n projectList[bestPriorStudent.currentProject].current=projectList[bestPriorStudent.currentProject].current-1\n self.current=self.current+1\n self.listOfStudents.append(bestPriorStudent)\n bestPriorStudent.currentProject=self.id\n return\n\ndef GetStudentListFromJson(prjID, stdList):\n assignList = []\n for std in stdList:\n if std.currentProject == prjID:\n assignList.append(std)\n else:\n pass\n return assignList\n\n \n\ndef ConvertJsonFileToAlgData(cacheObj):\n prjList = []\n stdList = []\n ret = {}\n for std in cacheObj['studentList']:\n name2 = std['Name']\n ID2 = std['ID']\n pri2 = std['PriorityList']\n gpa2 = std['GPA']\n bc2 = std['BC']\n fall2 = std['CanDoFall']\n summer2 = std['CanDoSummer']\n curr2 = std['ProjectID']\n lock = std['Locked']\n if(lock == 15):\n print(\"scheduler\")\n print(curr2)\n block = std['BlockedList']\n #print(lock)\n #print(\"space\")\n #print(std)\n if(fall2 == 0 and summer2 == 0):\n fall2 = 1\n stdList.append(Student(name2, ID2, pri2, gpa2, bc2, summer2, fall2, curr2, lock, block))\n\n for prj in cacheObj['projectList']:\n name = prj['Name']\n ID = prj['ID']\n 
Min = prj['Min']\n Max = prj['Max']\n fall = prj['CanDoFall']\n summer = prj['CanDoSummer']\n pri = prj['Priority']\n curr = prj['CurrentStudents']\n if(fall == 0 and summer == 0):\n fall = 1\n if cacheObj['runBefore'] == 1:\n sList = GetStudentListFromJson(ID, stdList)\n prjList.append(Project(name, ID, Min, Max, summer, fall, sList, pri, curr))\n else:\n prjList.append(Project(name, ID, Min, Max, summer, fall, [], pri, curr))\n ret[\"prjList\"] = prjList\n ret[\"stdList\"] = stdList\n return ret\n\nclass Student(object):\n def __init__(self, name, id, priorityList, gpa, bootcamp, canDoSummer, canDoFall, currentProject, lockedInto, blockedList):\n self.name = name\n self.priorityList = priorityList\n self.gpa=gpa\n self.bootcamp=bootcamp\n self.canDoFall=canDoFall\n self.canDoSummer=canDoSummer\n self.currentProject=currentProject\n self.lockedInto=lockedInto\n self.blockedList=blockedList\n self.id = id\n \n def place(self, projectList, errorProject):\n if self.lockedInto != -1 and self.currentProject == -1:\n projectList[self.lockedInto].listOfStudents.append(self)\n projectList[self.lockedInto].current+=1\n self.currentProject = self.lockedInto\n return\n if self.currentProject <= -1 or projectList[self.currentProject].priority == -1 or self.currentProject in self.blockedList:\n for y in range(1, max(self.priorityList)+1):\n z=countX(self.priorityList, y)\n for a in z:\n if projectList[a].current < projectList[a].max and projectList[a].priority!=-1 and a not in self.blockedList:\n summer=projectList[a].canDoSummer*self.canDoSummer\n fall=projectList[a].canDoFall*self.canDoFall\n for b in projectList[a].listOfStudents:\n summer=summer*b.canDoSummer\n fall=fall*b.canDoFall\n if summer == 1 or fall == 1:\n if self.currentProject >= 0:\n projectList[self.currentProject].current=projectList[self.currentProject].current-1\n projectList[self.currentProject].listOfStudents.remove(self)\n projectList[a].current=projectList[a].current+1\n projectList[a].listOfStudents.append(self)\n if self.currentProject==-2:\n errorProject.listOfStudents.remove(self)\n errorProject.current=errorProject.current-1\n self.currentProject=a\n return\n for y in range(1, max(self.priorityList)+1):\n z=countX(self.priorityList, y)\n for a in z:\n for c in projectList[a].listOfStudents:\n if self.priorityList[a] < c.priorityList[a] and c.lockedInto == -1 and a not in self.blockedList:\n # bump the student who wants this project less and re-place them\n if self.currentProject >= 0:\n projectList[self.currentProject].current=projectList[self.currentProject].current-1\n projectList[self.currentProject].listOfStudents.remove(self)\n if self.currentProject==-2:\n errorProject.listOfStudents.remove(self)\n errorProject.current=errorProject.current-1\n projectList[a].listOfStudents.remove(c)\n projectList[a].listOfStudents.append(self)\n self.currentProject=a\n c.currentProject=-1\n c.place(projectList, errorProject)\n return\n # nothing fits at all: park the student in the error project\n if self.currentProject == -1:\n errorProject.listOfStudents.append(self)\n errorProject.current=errorProject.current+1\n self.currentProject=-2\n\n def optimize(self, projectList, errorProject):\n if self.currentProject >= 0 and self.lockedInto == -1:\n for y in range(1, self.priorityList[self.currentProject]):\n z=countX(self.priorityList,y)\n for a in z:\n if projectList[a].current < projectList[a].max and projectList[a].priority != -1 and projectList[a].priority>=projectList[self.currentProject].priority and a not in self.blockedList:\n summer=projectList[a].canDoSummer*self.canDoSummer\n fall=projectList[a].canDoFall*self.canDoFall\n for c in projectList[a].listOfStudents:\n summer=summer*c.canDoSummer\n fall=fall*c.canDoFall\n if summer == 1 or fall == 1:\n projectList[self.currentProject].listOfStudents.remove(self)\n projectList[self.currentProject].current=projectList[self.currentProject].current-1\n projectList[a].current=projectList[a].current+1\n projectList[a].listOfStudents.append(self)\n self.currentProject=a\n return\n for b in projectList[a].listOfStudents:\n if b.priorityList[b.currentProject] + self.priorityList[self.currentProject] > b.priorityList[self.currentProject] + self.priorityList[b.currentProject] and a not in self.blockedList:\n summer1=projectList[a].canDoSummer*self.canDoSummer\n fall1=projectList[a].canDoFall*self.canDoFall\n summer2=projectList[self.currentProject].canDoSummer*b.canDoSummer\n 
fall2=projectList[self.currentProject].canDoFall*b.canDoFall\n projectList[self.currentProject].listOfStudents.remove(self)\n projectList[a].listOfStudents.remove(b)\n for c in projectList[a].listOfStudents:\n summer1=summer1*c.canDoSummer\n fall1=fall1*c.canDoFall\n for c in projectList[self.currentProject].listOfStudents:\n summer2=summer2*c.canDoSummer\n fall2=fall2*c.canDoFall\n if (summer1 == 1 or fall1 == 1) and (summer2 == 1 or fall2 == 1):\n #print(self.name, self.currentProject)\n projectList[self.currentProject].listOfStudents.append(b)\n projectList[a].listOfStudents.append(self)\n temp = b.currentProject\n b.currentProject = self.currentProject\n self.currentProject = temp\n b.optimize(projectList, errorProject)\n return\n else:\n projectList[self.currentProject].listOfStudents.append(self)\n projectList[a].listOfStudents.append(b)\n \n# Python code to create a list containing the index of every time a specific value appears in the list\ndef countX(lst, x): \n count = []\n for y in range(len(lst)): \n if (lst[y] == x): \n count.append(y)\n return count \n \ndef isInt(s):\n try:\n int(s,10)\n return True\n except ValueError:\n return False\n\n#Test 1: basic test (one student per project, uses optimization)\n\"\"\"\njosh = Student(\"Josh\", [1, 3, 4, 5, 2], 4.0, 1, 1, 1, -1)\njade = Student(\"Jade\", [5, 1, 3, 4, 2], 4.0, 1, 1, 1, -1)\njohn = Student(\"John\", [4, 5, 1, 3, 2], 4.0, 1, 1, 1, -1)\njane = Student(\"Jane\", [3, 4, 5, 1, 2], 4.0, 1, 1, 1, -1)\njude = Student(\"Jude\", [1, 2, 3, 4, 5], 4.0, 1, 1, 1, -1)\nstudentList = [josh, jade, john, jane, jude]\n\nproject1 = Project(\"1\", 1, 1, 1, 1, 1, [], 0, 0)\nproject2 = Project(\"2\", 2, 1, 1, 1, 1, [], 0, 0)\nproject3 = Project(\"3\", 3, 1, 1, 1, 1, [], 0, 0)\nproject4 = Project(\"4\", 4, 1, 1, 1, 1, [], 0, 0)\nproject5 = Project(\"5\", 5, 1, 1, 1, 1, [], 0, 0)\nprojectList = [project1, project2, project3, project4, project5]\"\"\"\n\n\n#Test 2: test for minimum group size.\n\"\"\"\njosh = Student(\"Josh\", [1, 3, 4, 5, 2], 4.0, 1, 1, 1, -1, -1, [])\njade = Student(\"Jade\", [5, 1, 3, 4, 2], 4.0, 1, 1, 1, -1, -1, [])\njohn = Student(\"John\", [4, 5, 1, 3, 2], 4.0, 1, 1, 1, -1, -1, [])\njane = Student(\"Jane\", [3, 4, 5, 1, 2], 4.0, 1, 1, 1, -1, -1, [])\njude = Student(\"Jude\", [1, 2, 3, 4, 5], 4.0, 1, 1, 1, -1, -1, [])\njuan = Student(\"Juan\", [3, 4, 5, 2, 1], 4.0, 1, 1, 1, -1, -1, [])\njose = Student(\"Jose\", [2, 1, 4, 5, 3], 4.0, 1, 1, 1, -1, -1, [])\nstudentList = [josh, jade, john, jane, jude, juan, jose]\n\nproject1 = Project(\"1\", 1, 2, 3, 1, 1, [], 0, 0)\nproject2 = Project(\"2\", 2, 2, 3, 1, 1, [], 0, 0)\nproject3 = Project(\"3\", 3, 2, 3, 1, 1, [], 0, 0)\nproject4 = Project(\"4\", 4, 2, 3, 1, 1, [], 0, 0)\nproject5 = Project(\"5\", 5, 2, 3, 1, 1, [], 0, 0)\nprojectList = [project1, project2, project3, project4, project5]\"\"\"\n\n#Test 3: test of summer and fall (a song of fire and ice, or in florida, fire and more fire)\n\"\"\"\nproject1 = Project(\"1\", 1, 1, 1, 0, 1, [], 0)\nproject2 = Project(\"2\", 2, 1, 1, 1, 1, [], 0)\nproject3 = Project(\"3\", 3, 1, 1, 1, 0, [], 0)\nproject4 = Project(\"4\", 4, 1, 1, 1, 1, [], 0)\nproject5 = Project(\"5\", 5, 1, 1, 0, 1, [], 0)\nprojectList = [project1, project2, project3, project4, project5]\n\njuan = Student(\"Juan\", [3, 2, 4, 5, 1], 4.0, 1, 1, 0, -1)\njade = Student(\"Jade\", [2, 3, 5, 1, 4], 4.0, 1, 1, 1, -1)\njohn = Student(\"John\", [5, 1, 2, 4, 3], 4.0, 1, 1, 1, -1)\njose = Student(\"Jose\", [3, 4, 1, 2, 5], 4.0, 1, 0, 1, -1)\njude = Student(\"Jude\", [4, 5, 1, 
3, 2], 4.0, 1, 1, 1, -1)\nstudentList = [juan, jade, john, jose, jude]\n\n#p1 - Jose 3rd choice\n#p2 - Juan 1st choice\n#p3 - Jude 1st choice\n#p4 - Jade 3rd choice\n#p5 - John 2nd choice\n#3+1+1+3+2 = 10 for all final rankings summed together\"\"\"\n\n#Test 4: Fall 2018\n\"\"\"\nprojectNames = [\"Dance\",\"Color\",\"Data\",\"Real\",\"ELLE\",\"SEE\",\"E-RASSOR\",\"Arcade\",\"Agile\",\"Sound\",\"3D Arm\",\"AUVSI\",\"Blockchain\",\"Slavery\",\"Starcraft\",\"Voice\",\"Recommend\",\"Boat\",\"Task\",\"AVAST\",\"E-GOAT\",\"ARPD\",\"FPL Drone\",\"Image ML\",\"Beach\",\"Gravity\",\"Parking\",\"Boost\",\"Carebit\",\"CAP\",\"Sherlock\",\"Reality\",\"Knights\",\"Athlete\",\"Forage\",\"D&D\",\"Tinder\",\"Microscope\",\"Laser\",\"Tour\",\"Turbo+\",\"Vacay\",\"Outfitter\",\"Sheet\",\"Vegan\",\"Indoor Nav\"]\ni = 1\nprojectList=[]\nfor a in projectNames:\n projectList.append(Project(a, i, 3, 5, 1, 1, [], 0, 0))\n i=i+1\n\nfor a in projectList:\n if a.name == \"E-RASSOR\": #ID=7\n a.max=10\n if a.name == \"Starcraft\": #ID=15\n a.max=15\n if a.name == \"AVAST\": #ID = 20\n a.max = 8\n if a.name == \"E-GOAT\": #ID = 21\n a.max = 8\n if a.name == \"Gravity\": #ID = 26\n a.max = 6\"\"\"\n \ntry:\n studentList=[]\n projectList=[]\n springMode=0\n with open(str(sys.argv[1]), 'r', encoding='utf-8-sig') as f:\n dat = f.read()\n cacheObj = {}\n data = json.loads(dat)\n cacheObj['projectList'] = data['projectList']\n cacheObj['studentList'] = data['studentList']\n cacheObj['term'] = data['term']\n cacheObj['runBefore'] = 0\n ret2 = ConvertJsonFileToAlgData(cacheObj)\n studentList = ret2['stdList']\n #print(studentList[0].name)\n projectList = ret2['prjList']\n if(cacheObj['term'].lower() == \"spring\"):\n springMode = 1\nexcept Exception as e:\n exc_info = sys.exc_info()\n print(''.join(traceback.format_exception(*exc_info)))\n\n\n\nerrorProject = Project(\"Error\", 0, 0, 100, 1, 1, [], 0, 0)\n\"\"\"maxPriority=0\n\nfor x in projectList:\n if x.priority>maxPriority:\n maxPriority=x.priority\n\nfirstPlaceChoices=[]\nfor a in projectList:\n firstPlaceChoices.append(0)\n\nfor x in studentList:\n #x.place(projectList, errorProject)\n y=countX(x.priorityList,1)\n for a in y:\n firstPlaceChoices[a]=firstPlaceChoices[a]+1\n \nfor a in projectList:\n print(\"{} students chose project {}, {}, as their first choice.\".format(firstPlaceChoices[a.id-1], a.id, a.name))\"\"\"\n \nProject.assignStudents(studentList, projectList, errorProject)\n\"\"\"\nval = -1\nwhile val != \"c\":\n val = input(\"Enter the id number of the project you want to change, or 'c' to continue. \")\n if isInt(val):\n i = int(val,10)-1\n for a in studentList:\n if a.priorityList[i] == 1:\n print(\"{} Bootcamp: {} GPA: {}\".format(a.name, a.bootcamp, a.gpa))\n val = input(\"Type 'max' to change the maximum number of students that can be assigned to this project, 'a' to assign a student to this project, or 'b' to go back. \")\n if val == \"max\":\n val=input(\"Enter the new maximum number of students. \")\n if isInt(val):\n projectList[i].max=int(val,10)\n elif val == \"a\":\n val=input(\"Enter the name of the student you want to assign to this project. 
\")\n studentFound=0\n foundStudent=studentList[0]\n for a in studentList:\n if a.name == val:\n studentFound=1\n foundStudent=a\n if studentFound == 1:\n foundStudent.lockedInto=i\n if i != foundStudent.currentProject:\n projectList[i].current=projectList[i].current+1\n projectList[i].listOfStudents.append(foundStudent)\n foundStudent.currentProject=i\n elif studentFound == 0:\n print(\"Student not found.\")\n elif val != \"b\":\n print(\"Invalid input.\")\n\nfor x in studentList:\n x.place(projectList, errorProject)\n\nfor x in studentList:\n x.optimize(projectList, errorProject)\"\"\"\n \n\"\"\"for a in projectList:\n a.printList()\nerrorProject.printList()\"\"\"\n\nval = -1\n\nwhile val != \"0\":\n #print(\"The current commands that are supported are:\")\n #print(\"(0): Quit\")\n #print(\"(1): Reassign all students from projects with fewer than the minimum amount of students\")#\n #print(\"(2): Choose a project and allow no students to be assigned to it\")\n #print(\"(3): Undo a command of type 1 or 2\")\n #print(\"(4): Require that a project be populated by at least the minimum number of students, or undo a previous action of this type.\")\n #print(\"(5): Require that a student be assigned to a specific project, or undo a previous action of this type.\")\n #print(\"(6): Prevent a student from being assigned to one or more specific projects, or undo a previous action of this type.\")\n #print(\"(7): Change the number of mimimum or maximum students for a project.\")\n #print(\"(8): Randomize the order of students and run the algorithm again.\")#\n #print(\"(9): Show the current status of all projects and students, including statistics.\")#\n #val = input(\"Type the number, then enter, to activate a command. \")\n Project.reassignFromUnmet(studentList, projectList, errorProject)\n #print(studentList[0].name + \"1\")\n Project.randomizeAndAssign(studentList, projectList, errorProject)\n #print(studentList[0].name + \"2\")\n Project.reassignFromUnmet(studentList, projectList, errorProject)\n #print(studentList[0].name + \"3\")\n Project.printAllProjects(projectList, studentList, errorProject, springMode)\n #print(studentList[0].name + \"4\")\n break\n if val == \"1\":\n Project.reassignFromUnmet(studentList, projectList, errorProject)\n if val == \"2\":\n while val != \"b\":\n print(\"The following projects are able to have students assigned to them:\")\n for a in projectList:\n if a.priority != -1:\n a.printList()\n val = input(\"Type a project ID and press enter to reassign all students assigned to a project to new projects and prevent new students from being assigned to it, or type b and press enter to return to the previous menu. \")\n if (isInt(val)):\n i=int(val,10)-1\n if projectList[i].priority == -1:\n print(\"This project has already been locked.\")\n elif i>=len(projectList) or i < 0:\n print(\"There is no project with this ID in the system.\")\n else:\n projectList[i].priority = -1\n projectList[i].current=0\n for y in projectList[i].listOfStudents:\n y.currentProject=-1\n y.optimize(projectList, errorProject)\n projectList[i].listOfStudents=[]\n else:\n print(\"Invalid entry.\")\n if val == \"3\":\n while val != \"b\":\n print(\"The following projects are unable to have students assigned to them.\")\n for a in projectList:\n if a.priority == -1:\n a.printList()\n val = input(\"Type a project ID and press enter to allow students to be assigned to that project again, or type b and press enter to return to the previous menu. 
\")\n if isInt(val):\n i=int(val, 10)-1\n if i>=len(projectList) or i < 0:\n print(\"There is no project with this ID in the system.\")\n elif projectList[i].priority == -1:\n projectList[i].priority = 0\n for x in studentList:\n x.optimize(projectList, errorProject)\n else:\n print(\"This project ID is not on the list of blocked projects.\")\n else:\n print(\"Invalid entry.\")\n \"\"\"for a in projectList:\n a.printList()\n errorProject.printList()\"\"\"\n if val == \"4\":\n while val != \"b\":\n print(\"These are the projects and their current priority values.\")\n for a in projectList:\n print(\"\\nName: {} ID: {} Number of Students: {} Priority: {}\".format(a.name, a.id, a.current, a.priority))\n #errorProject.printList()\n val = input(\"Type the ID of a project to change its priority value, or type b to return to the previous menu. \")\n if isInt(val):\n i=int(val,10)-1\n if i>=len(projectList) or i < 0:\n print(\"There is no project with this ID in the system.\")\n else:\n print(\"Project ID {}, {} currently has priority {}\".format(i+1, projectList[i].name, projectList[i].priority))\n print(\"Please enter its new priority value. The higher the value, the earlier it will be filled. Default priority is 0\")\n print(\"You can also stop projects from being filled by setting their priority to -1\")\n val = input(\"The value must be an integer. \")\n if isInt(val):\n j=int(val,10)\n projectList[i].priority=j\n print(\"Project {}'s priority has successfully been set to {}\".format(i+1, j))\n if (j=len(projectList) or i < -1:\n print(\"Invalid input.\")\n else:\n foundStudent.lockedInto=i\n if i != -1 and i != foundStudent.currentProject:\n projectList[i].current=projectList[i].current+1\n projectList[foundStudent.currentProject].current=projectList[foundStudent.currentProject].current-1\n projectList[foundStudent.currentProject].listOfStudents.remove(foundStudent)\n projectList[i].listOfStudents.append(foundStudent)\n foundStudent.currentProject=i\n if projectList[i].current>projectList[i].max:\n worstPrior=projectList[i].studentList[0].priorityList[i]\n worstPriorStudent=projectList[i].studentList[0]\n for b in projectList[i].listOfStudents:\n if b.priorityList[i] > worstPrior and b.lockedInto == -1:\n worstPrior = b.priorityList[i]\n worstPriorStudent = b\n projectList[i].current = projectList[i].current-1\n projectList[i].listOfStudents.remove(worstPriorStudent)\n worstPriorStudent.currentProject=-1\n worstPriorStudent.optimize(projectList, errorProject)\n if i == -1:\n foundStudent.optimize(projectList, errorProject)\n for b in projectList:\n b.printList()\n elif val != \"b\":\n print(\"Invalid input.\")\n if val == \"6\":\n val = input(\"Type the name of a student you would like to lock out of a project or type b to return to the previous menu. \")\n if val != \"b\":\n studentFound=0\n foundStudent=studentList[0]\n for a in studentList:\n if a.name == val:\n studentFound=1\n foundStudent=a\n if studentFound == 1:\n while val != \"b\":\n print(\"Student {} is currently locked out of the following projects:\".format(foundStudent.name))\n for b in foundStudent.blockedList:\n print(b+1)\n print(\"Type the id of a project. If {} is already blocked from it, they will be unblocked. Otherwise, they will be blocked.\".format(foundStudent.name))\n val = input(\"Or type b to return to the menu. 
\")\n if isInt(val):\n i = int(val)-1\n if i in foundStudent.blockedList:\n foundStudent.blockedList.remove(i)\n foundStudent.optimize(projectList, errorProject)\n elif i < len(projectList) and i >= 0:\n foundStudent.blockedList.append(i)\n foundStudent.optimize(projectList, errorProject)\n else:\n print(\"Invalid input.\")\n elif val != \"b\":\n print(\"Invalid input.\")\n break\n if val == \"7\":\n while val != \"b\":\n print(\"Type the ID of the project you want to edit, or ? to see a list of every project's minimum and maximum, or b to go back to the previous menu.\")\n val = input(\"Note that if the minimum increases but is no longer met, students won't be reassigned immediately. You will need to run (1) from the main menu. \")\n if val == \"?\":\n for a in projectList:\n print(\"Project {}: {}. Min: {} Max: {}\".format(a.id,a.name, a.min, a.max))\n elif isInt(val):\n i = int(val)-1\n if i<=0 or i>len(projectList):\n print(\"Invalid input.\")\n else:\n print(\"Project {}: {}. Min: {} Max: {}\".format(projectList[i].id, projectList[i].name, projectList[i].min, projectList[i].max))\n val = input(\"Type min to change the minimum, max to change the maximum, or b to go back. \")\n if (val == \"min\"):\n val = input(\"Enter the new minimum. \")\n if isInt(val):\n projectList[i].min=val\n if (val == \"max\"):\n val=input(\"Input the new maximum. \")\n if isInt(val):\n projectList[i].max=val\n while projectList[i].cur>projectList[i].max and projectList[i].current>0:\n worstPrior=projectList[i].studentList[0].priorityList[i]\n worstPriorStudent=projectList[i].studentList[0]\n for b in projectList[i].listOfStudents:\n if b.priorityList[i] > worstPrior and b.lockedInto == -1:\n worstPrior = b.priorityList[i]\n worstPriorStudent = b\n projectList[i].current = projectList[i].current-1\n projectList[i].listOfStudents.remove(worstPriorStudent)\n worstPriorStudent.currentProject=-1\n worstPriorStudent.optimize(projectList, errorProject)\n elif val != \"b\":\n print(\"Invalid input.\")\n else:\n print(\"Invalid input.\")\n elif val != \"b\":\n print(\"Invalid input.\")\n if val == \"8\":\n Project.randomizeAndAssign(studentList, projectList, errorProject)\n if val == \"9\":\n Project.printAllProjects(projectList, studentList, errorProject, springMode)\n \"\"\"i=0\n j=0\n for a in projectList:\n a.printList()\n if springMode == 1:\n summer=a.canDoSummer\n fall=a.canDoFall\n for b in a.listOfStudents:\n summer=summer*b.canDoSummer\n fall=fall*b.canDoFall\n if summer == 1 and fall == 1:\n print(\"This project can happen in summer or fall.\")\n elif summer == 1:\n print(\"This project should happen in summer.\")\n elif fall == 1:\n print(\"This project should happen in fall.\")\n else:\n print(\"I'm not really sure when this project should happen. 
Sorry.\")\n print(\"{} students chose this project as their first choice.\".format(firstPlaceChoices[j]))\n j=j+1\n if a.priority >=0:\n i=i+a.max\n if a.min > a.current:\n print(\"This project currently does not have enough students.\")\n errorProject.printList()\n if i < len(studentList):\n print(\"There are currently not enough assignment slots for every student.\")\n got1=0\n got2=0\n got3=0\n for a in studentList:\n if a.currentProject>=0:\n if a.priorityList[a.currentProject]==3:\n got3=got3+1\n if a.priorityList[a.currentProject]==2:\n got2=got2+1\n if a.priorityList[a.currentProject]==1:\n got1=got1+1\n print(\"{} out of {} students got their first choice.\".format(got1, len(studentList)))\n print(\"{} out of {} students got their second choice.\".format(got2, len(studentList)))\n print(\"{} out of {} students got their third choice.\".format(got3, len(studentList)))\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"API/tutorial/quickstart/Scheduler/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":39355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4343607","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2021 CERN.\n# Copyright (C) 2021 Northwestern University.\n#\n# Invenio-RDM-Records is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Vocabulary fixtures module.\"\"\"\n\nimport csv\nimport json\nfrom os.path import splitext\n\nimport yaml\nfrom invenio_vocabularies.proxies import current_service\n\n\n#\n# Data iterators\n#\nclass DataIterator:\n \"\"\"Data iterator base class.\"\"\"\n\n def __init__(self, data_file):\n \"\"\"Initialize iterator.\"\"\"\n self._data_file = data_file\n\n\nclass YamlIterator(DataIterator):\n \"\"\"YAML data iterator that loads records from YAML files.\"\"\"\n\n def __iter__(self):\n \"\"\"Iterate over records.\"\"\"\n with open(self._data_file) as fp:\n data = yaml.load(fp)\n if data: # Allow empty files\n for entry in data:\n yield entry\n\n\nclass CSVIterator(DataIterator):\n \"\"\"CSV data iterator that loads records from CSV files.\"\"\"\n\n def map_row(self, header, row):\n \"\"\"Map a CSV row into a record.\"\"\"\n entry = {}\n for attr, value in zip(header, row):\n if attr == 'tags':\n value = [x.strip() for x in value.split(',')]\n keys = attr.split('__')\n if len(keys) == 1:\n entry[keys[0]] = value\n elif len(keys) == 2:\n if keys[0] not in entry:\n entry[keys[0]] = {}\n entry[keys[0]][keys[1]] = value\n return entry\n\n def __iter__(self):\n \"\"\"Iterate over records.\"\"\"\n with open(self._data_file) as fp:\n reader = csv.reader(fp, delimiter=';', quotechar='\"')\n header = next(reader)\n for row in reader:\n yield self.map_row(header, row)\n\n\nclass JSONLinesIterator(DataIterator):\n \"\"\"JSON Lines data iterator that loads records from JSON Lines files.\"\"\"\n\n def __iter__(self):\n \"\"\"Iterate over records.\"\"\"\n with open(self._data_file) as fp:\n for line in fp:\n yield json.loads(line)\n\n\n#\n# Fixture\n#\nclass VocabulariesFixture:\n \"\"\"Vocabularies fixture.\"\"\"\n\n def __init__(self, identity, search_path, filename):\n \"\"\"Initialize the fixture.\"\"\"\n self._search_path = search_path\n self._filename = filename\n self._identity = identity\n\n def load(self):\n \"\"\"Load the fixture.\"\"\"\n with open(self._search_path.path(self._filename)) as fp:\n data = yaml.load(fp)\n for id_, entry in data.items():\n self.load_vocabulary(id_, 
entry)\n\n def load_vocabulary(self, id_, entry):\n \"\"\"Load a single vocabulary.\"\"\"\n pid_type = entry['pid-type']\n # Create the vocabulary type\n current_service.create_type(self._identity, id_, pid_type)\n # Load the data file\n data_file_path = entry.get('data-file')\n if data_file_path: # Creates pid_type, no data yet\n data_file = self._search_path.path(data_file_path)\n self.load_datafile(id_, data_file)\n\n def load_datafile(self, id_, data_file):\n \"\"\"Load the records from the data file.\"\"\"\n for record in self.iter_datafile(data_file):\n record['type'] = id_\n # TODO: edit out languages which is not configured by the system\n current_service.create(self._identity, record)\n\n def iter_datafile(self, data_file):\n \"\"\"Get a row iterator for a given data file.\"\"\"\n ext = splitext(data_file)[1].lower()\n if ext == '.yaml':\n return YamlIterator(data_file)\n elif ext == '.csv':\n return CSVIterator(data_file)\n elif ext == '.jsonl':\n return JSONLinesIterator(data_file)\n raise RuntimeError(f'Unknown data format: {ext}')\n","sub_path":"invenio_rdm_records/fixtures/vocabularies.py","file_name":"vocabularies.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"273471776","text":"# Lab 01: word-guessing starter in Python\nimport random\n\nword_list = ['answer', 'python', 'hello', 'world', 'environment', 'event', 'external']\nend_process = ''\nprint('{: ^40}\\n{: ^40}'.format('Welcome to the word guessing game', 'Rearrange the letters below into a correct word.'))\n\n\ndef end(end_flag):\n if (end_flag == 'N' or end_flag == 'n'):\n return 0\n elif (end_flag == 'Y' or end_flag == 'y'):\n return 1\n else:\n print('Invalid input character!')\n\n\nwhile (True):\n get_or_word = word_list[random.randint(0, len(word_list) - 1)]\n random_word = get_or_word\n # pick the source word from the word list\n out_of_order_word = ''\n for i in range(len(get_or_word)):\n position = random.randint(0, len(random_word) - 1)\n out_of_order_word += random_word[position]\n random_word = random_word[:position] + random_word[(position + 1):]\n print('The scrambled word is: ' + out_of_order_word)\n if (input('Your guess: ') != get_or_word):\n print('Sorry, that is not correct!')\n while (input('Guess again: ') != get_or_word):\n print('Sorry, that is not correct!')\n print('Great! You guessed it!')\n if (end(input('Continue? Y/N: '))):\n continue\n else:\n break\n","sub_path":"python_program_design/ch2/jubleGame-qh6109118148.py","file_name":"jubleGame-qh6109118148.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131863353","text":"import xxhash\nimport mmap\nimport time\nimport ctypes as ct\n\nfrom multiprocessing import Queue\nfrom itertools import chain\n\nfrom gcd.etc import as_many\nfrom gcd.work import Batcher\n\n\n_model = ct.pydll['daat/_model.so']\n\nNumber, NA = '##', '__' # Special cats for numeric and unavailable vals.\n\n\ndef _hid(strs):\n return xxhash.xxh64('@@'.join(strs)).intdigest()\n\n\nclass _Observation(ct.Structure):\n\n _fields_ = [\n ('nfeats', ct.c_size_t),\n ('feats', ct.POINTER(ct.c_uint64)),\n ('vals', ct.POINTER(ct.c_float)),\n ('response', ct.c_float),\n ('weight', ct.c_float),\n ('time', ct.c_double),\n ('original', ct.py_object)\n ]\n\n\nclass _Term(ct.Structure):\n\n _fields_ = [\n ('feat', ct.py_object),\n ('nattrs', ct.c_size_t),\n ('iattrs', ct.POINTER(ct.c_size_t)),\n ('nmodels', ct.c_size_t),\n ('imodels', ct.POINTER(ct.c_size_t))\n ]\n\n\nclass _Attribute(ct.Structure):\n\n _fields_ = [\n ('name', ct.py_object),\n ('cat', ct.py_object),\n ('val', ct.c_float)\n ]\n\n\nclass 
Terms(tuple):\n\n def __new__(cls, *terms):\n terms = sorted({tuple(sorted(as_many(t))) for t in terms})\n return tuple.__new__(cls, terms)\n\n def __add__(self, terms):\n other = Terms(*as_many(terms))\n return Terms(*chain(self, other))\n\n __radd__ = __add__\n\n def __sub__(self, terms):\n other = Terms(*as_many(terms))\n return Terms(*(t for t in self if t not in other))\n\n def __rsub__(self, terms):\n other = Terms(*as_many(terms))\n return other - self\n\n def __mul__(self, terms):\n other = Terms(*as_many(terms))\n return Terms(*(t1 + t2 for t1 in self for t2 in other))\n\n __rmul__ = __mul__\n\n def imul(self, *terms):\n other = Terms(*terms)\n return Terms(*(set(t1 + t2) for t1 in self for t2 in other))\n\n def __pow__(self, degree):\n return self._pow(lambda m1, m2: m1 * m2, degree)\n\n def ipow(self, degree):\n return self._pow(lambda m1, m2: m1.imul(*m2), degree)\n\n def __mod__(self, degree):\n return Terms(*(t for t in self.ipow(degree) if len(t) == degree))\n\n def _pow(self, fun, degree):\n terms = self\n while degree > 1:\n terms = fun(terms, self)\n degree -= 1\n return terms\n\nT, I, O = Terms, Terms(()), Terms()\n\n\nclass Model:\n\n def __init__(self, terms, response_attr='response', weight_attr='weight',\n time_attr='time'):\n self.terms = terms\n self.term_attrs = tuple(set().union(*terms))\n self.response_attr = response_attr\n self.weight_attr = weight_attr\n self.time_attr = time_attr\n self.hid = _hid(str(_hid(t)) for t in terms +\n (response_attr, weight_attr, time_attr))\n\n\nclass Modeler:\n\n def __init__(self, models, storer=None, attach=False):\n self.models = models = as_many(models)\n terms = tuple(set().union(*(m.terms for m in models)))\n self._terms = (_Term * len(terms))()\n attrs = tuple(set().union(*(m.term_attrs for m in models)))\n self._attrs = (_Attribute * len(attrs))(\n *(_Attribute(a) for a in attrs))\n for term, cterm in zip(terms, self._terms):\n cterm.feat = [str(_hid(term))] + [None] * len(term)\n iattrs = [attrs.index(a) for a in term]\n cterm.nattrs = len(iattrs)\n cterm.iattrs = (ct.c_size_t * len(iattrs))(*iattrs)\n imodels = [models.index(m) for m in models if term in m.terms]\n cterm.nmodels = len(imodels)\n cterm.imodels = (ct.c_size_t * len(imodels))(*imodels)\n if storer:\n storer.add_terms(terms)\n self._add_feat = storer.add_feat\n else:\n self._add_feat = _hid\n self._attach = attach\n\n def model(self, obs):\n for attr in self._attrs:\n val = getattr(obs, attr.name, None)\n if type(val) is float:\n attr.cat = Number\n attr.val = val\n else:\n attr.cat = NA if val is None else str(val)\n attr.val = 1\n\n outs = (_Observation * len(self.models))()\n for model, out in zip(self.models, outs):\n out.nfeats = 0\n out.feats = (ct.c_uint64 * len(model.terms))()\n out.vals = (ct.c_float * len(model.terms))()\n out.response = getattr(obs, model.response_attr)\n out.weight = getattr(obs, model.weight_attr, 1)\n out.time = getattr(obs, model.time_attr, time.time())\n out.original = obs if self._attach else None\n\n _model.model(self._attrs, outs, self._terms,\n ct.c_size_t(len(self._terms)),\n ct.py_object(self._add_feat))\n\n return [(m.hid, o) for m, o in zip(self.models, outs)]\n\n\nclass FeatureStorer:\n\n def __init__(self, store, timer, max_queue=10000, cache_bits=22):\n self._store = store\n self._create_batcher(timer, max_queue)\n self._create_cache(cache_bits)\n\n def add_terms(self, terms):\n self._store.add_terms(zip(map(_hid, terms), terms))\n\n def add_feat(self, feat):\n hid = _hid(feat) or 1 # 0 means not cached.\n 
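# _cache is a direct-mapped table indexed by the low cache_bits of the\n        # hash; a slot that is empty (0) or holds a different hash means the\n        # feature may be unstored, so it is queued below and evicts that slot.\n        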
cached_hid = self._cache[hid & self._mask]\n if cached_hid == 0 or cached_hid != hid:\n self._cache[hid & self._mask] = hid\n self._batcher.add((hid, int(feat[0]), feat[1:]))\n return hid\n\n def _create_batcher(self, timer, max_queue):\n def handle(batch):\n if batch: # Remove duplicated hids.\n batch = {feat[0]: feat for feat in batch}.values()\n self._store.add(batch)\n self._batcher = Batcher(timer, handle, queue=Queue(max_queue)).start()\n\n def _create_cache(self, cache_bits):\n size = 2 ** cache_bits\n self._mask = size - 1\n data = mmap.mmap(-1, 8 * size)\n self._cache = (ct.c_uint64 * size).from_buffer(data)\n ct.memset(self._cache, 0, 8 * size)\n for feat_hid in self._store.sample_hids(size):\n self._cache[feat_hid & self._mask] = feat_hid\n","sub_path":"daat/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539762355","text":"# 이분탐색 : 징검다리 Level 4\n# https://programmers.co.kr/learn/courses/30/lessons/43236\n\n# 바위를 최대 n개 제거한 후 거리의 최솟값이 mid라 가정했을 경우 올바르게 나오는지 검사\ndef binarySearch(rocks, left, right, n):\n # 범위 값이 잘못 되는 경우 0을 리턴\n if left > right:\n return 0\n mid = (left + right) // 2\n\n # 제거할 바위의 개수를 세준다\n cnt = 0\n isUse = [False for i in range(len(rocks))]\n for i in range(len(rocks) - 1):\n if isUse[i]:\n continue\n for j in range(i+1, len(rocks) - 1):\n # 바위 사이의 거리가 mid 보다 작다면 j번째 바위를 제거해 주어야 한다\n if rocks[j] - rocks[i] < mid:\n cnt += 1\n isUse[j] = True\n else:\n break\n\n if cnt > n:\n return binarySearch(rocks, left, mid - 1, n)\n else:\n return max(mid, binarySearch(rocks, mid + 1, right, n))\n\ndef solution(distance, rocks, n):\n # 징검다리에 시작 지점과 도착 지점의 정보를 추가한다\n rocks.append(distance)\n rocks.append(0)\n # 징검다리에 있는 바위의 좌표를 기준으로 정렬\n rocks.sort()\n # [정답, 제거할 수 있는 바위의 개수]\n # 이분 탐색을 통해 문제를 해결할 수 있다\n return binarySearch(rocks, 0, distance, n)","sub_path":"python/programmers/징검다리.py","file_name":"징검다리.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636130157","text":"\"\"\"\n Given an array of integers with possible duplicates, randomly output the index of a given target number.\n You can assume that the given target number must exist in the array.\n\n Note:\n The array size can be very large. Solution that uses too much extra space will not pass the judge.\n\n Example:\n\n int[] nums = new int[] {1,2,3,3,3};\n Solution solution = new Solution(nums);\n\n // pick(3) should return either index 2, 3, or 4 randomly. Each index should have equal probability of returning.\n solution.pick(3);\n\n // pick(1) should return 0. 
Since in the array only nums[0] is equal to 1.\n solution.pick(1);\n\n\n\n\n https://www.cnblogs.com/strugglion/p/6424874.html\n\n\n\"\"\"\n\nimport random\n\n\nclass Solution:\n\n def __init__(self, nums):\n \"\"\"\n :type nums: List[int]\n \"\"\"\n self.nums = nums\n\n def pick(self, target):\n \"\"\"\n :type target: int\n :rtype: int\n \"\"\"\n result = -1\n count = 0\n for i, num in enumerate(self.nums):\n if target != num:\n continue\n rnd = random.randint(0, count)\n if rnd == 0:\n result = i\n count += 1\n return result\n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(nums)\n# param_1 = obj.pick(target)\n\ndef main():\n nums = [1, 2, 2, 5, 6, 7, 7, 5, 8, 9]\n obj = Solution(nums)\n result = obj.pick(5)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/pl/atlantischi/leetcode/medium/Medium398.py","file_name":"Medium398.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"122770662","text":"# -*- coding:utf-8 -*-\nimport os\nimport codecs\n\ndef fun(filepath):\n arr=[]\n for root,dirs,files in os.walk(filepath):\n for fn in files:\n arr.append(root+\"\\\\\"+fn)\n return arr\n\ndef read(path):\n f=open(path,encoding=\"utf8\")\n #count=0\n data=[]\n for line in f.readlines():\n data.append(line)\n return data\n\ndef wry(lis,path):\n count=0\n f=codecs.open(path,'a','utf16')#.encoding('utf16')\n for i in lis:\n f.write(str(i)+'\\n')\n count=count+1\n f.close()\n return count\n\ndef main():\n filepath=\"D:\\校园\\信息131\\大三下\\计算机检索\\实验1shiyan\\2015.1.16-2015.1.18\"\n uipath=unicode(filepath,'utf8')\n for i in fun(uipath):\n print(i)\n\nif __name__=='__main__':\n main()","sub_path":"CRF/实验/实验2/code/Statistical/Statistical/Statistical.py","file_name":"Statistical.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530145675","text":"import numpy,re\r\n\r\nnumbers=[\"ZERO\", \"ONE\", \"TWO\", \"THREE\", \"FOUR\", \"FIVE\", \"SIX\", \"SEVEN\", \"EIGHT\", \"NINE\"]\r\na=[]\r\nfor i in xrange(ord('A'),ord('Z')+1):\r\n temp=[]\r\n for j in xrange(len(numbers)):\r\n temp+=[len(re.findall(chr(i),numbers[j]))]\r\n temp+=[0]\r\n a+=[temp]\r\na=numpy.array(a)\r\n\r\nt=input()\r\nfor ti in xrange(t):\r\n s=raw_input()\r\n b=[]\r\n for i in xrange(ord('A'),ord('Z')+1):\r\n b+=[len(re.findall(chr(i),s))]\r\n b=numpy.array(b)\r\n x=numpy.linalg.lstsq(a,b)\r\n answer=\"\"\r\n for i in xrange(10):\r\n if x[0][i]>0.5:\r\n for j in xrange(int(round(x[0][i]))):\r\n answer+=str(i)\r\n print(\"Case #%d: %s\"%(ti+1,answer))\r\n","sub_path":"solutions_5648941810974720_1/Python/lessilife/gcj1ba.py","file_name":"gcj1ba.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88294057","text":"from wisdem.commonse import NFREQ, eps, gravity\n\nRIGID = 1e30\nNREFINE = 3\nNPTS_SOIL = 10\n\n\ndef get_nfull(npts, nref=NREFINE):\n n_full = int(1 + nref * (npts - 1))\n return n_full\n\n\ndef get_npts(nFull, nref=NREFINE):\n npts = int(1 + (nFull - 1) / nref)\n return npts\n","sub_path":"wisdem/towerse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243986239","text":"class Solution:\r\n def solveNQueens(self, n: int) -> [[str]]:\r\n col=[False for 
_ in range(n)]#存储皇后排行在哪一列\r\n dia1=[False for _ in range(2*n-1)]#第一个对角线,个数都为2*n-1\r\n dia2=[False for _ in range(2*n-1)]#第二个对角线\r\n\r\n res=[]#存放最终结果\r\n row=[]#存放每一次皇后在第几列\r\n def generateboard(n,row):#将第几列的元素表示成题目要求的样子\r\n l=[]\r\n for i in range(n):\r\n l.append('.' * row[i]+'Q'+'.'*(n-row[i]-1))\r\n return l\r\n\r\n def putQueue(index,row:list):#index表示在摆放第几行的皇后,row表示将皇后放在第几列\r\n if index==n:\r\n res.append(generateboard(n,row))\r\n return\r\n for j in range(n):#将第index行的皇后放在第j列\r\n if not col[j] and not dia1[index+j] and not dia2[index-j+n-1]:#当前第index行j列这个位置,行,列及两个斜对角线上没有皇后\r\n row.append(j)\r\n col[j]=True\r\n dia1[index+j]=True\r\n dia2[index-j+n-1]=True\r\n putQueue(index+1,row)\r\n #接下来进行回溯\r\n col[j] = False\r\n dia1[index + j] = False\r\n dia2[index - j + n - 1] = False\r\n row.pop()\r\n return\r\n putQueue(0,row)\r\n print(res)\r\n return res\r\na=Solution()\r\na.solveNQueens(n=4)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"��号题/051.py","file_name":"051.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"542111292","text":"import pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nimport streamlit as st\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nst.write(\"\"\"\r\n# Penguin Prediction App\r\n\r\n## This app predicts the **Palmer Penguin** species!\r\n\r\nData obtained from the [palmer penguins library](https://github.com/allisonhorst/palmerpenguins) in R by Allison Horst. \r\n\"\"\")\r\n\r\n# creating the sidebar\r\nst.sidebar.header(\"User Input Features\")\r\nst.sidebar.markdown(\"\"\"\r\n[Example CSV input file](https://raw.githubusercontent.com/dataprofessor/data/master/penguins_example.csv)\r\n\"\"\")\r\n\r\nupload_file = st.sidebar.file_uploader(\"Upload your csv file here\", type=[\"csv\"])\r\n\r\n\r\n# sliders part as a function\r\ndef user_input_sliders():\r\n island = st.sidebar.selectbox('Island', ('Biscoe', 'Dream', 'Torgersen'))\r\n sex = st.sidebar.selectbox('Sex', ('male', 'female'))\r\n bill_length = st.sidebar.slider('Bill Length (mm)', 32.0, 60.0, 46.5)\r\n bill_depth = st.sidebar.slider(\"Bill Depth (mm)\", 13.0, 22.0, 14.2)\r\n flipper_length = st.sidebar.slider(\"Flipper Length (mm)\", 171.0, 232.0, 198.5)\r\n body_mass = st.sidebar.slider(\"Body mass (g)\", 2600, 6400, 3830)\r\n data = {\r\n 'island': island,\r\n 'sex': sex,\r\n 'bill_length_mm':bill_length,\r\n 'bill_depth_mm':bill_depth,\r\n 'flipper_length_mm':flipper_length,\r\n 'body_mass_g':body_mass\r\n }\r\n features = pd.DataFrame(data,index=[0])\r\n return features\r\n\r\n# condition to get input values\r\n\r\n\r\nif upload_file is not None:\r\n input_df = pd.read_csv(upload_file)\r\nelse:\r\n input_df = user_input_sliders()\r\n\r\n# showing the user input\r\nst.subheader(\"User Input Features\")\r\nif upload_file is not None:\r\n st.write(input_df)\r\nelse:\r\n st.write('Awaiting CSV file to be uploaded. 
Currently using example input parameters (shown below).')\r\n st.write(input_df)\r\n\r\n# importing the penguins file and processing\r\npenguins_raw = pd.read_csv('penguins_cleaned.csv')\r\npenguins = penguins_raw.drop(columns=['species'])\r\ndf = pd.concat([input_df, penguins], axis=0)\r\n\r\n# Encoding of ordinal features\r\n# https://www.kaggle.com/pratik1120/penguin-dataset-eda-classification-and-clustering\r\nencode = ['sex','island']\r\nfor col in encode:\r\n dummy = pd.get_dummies(df[col], prefix=col)\r\n df = pd.concat([df,dummy], axis=1)\r\n del df[col]\r\ndf = df[:1]\r\n\r\n# reads in saved classification model\r\nload_clf = pickle.load(open('penguins_clf.pkl','rb'))\r\n\r\n# prediction\r\nprediction = load_clf.predict(df)\r\nprediction_prob = load_clf.predict_proba(df)\r\n\r\n# displaying the prediction\r\nst.subheader(\"Prediction\")\r\npenguin_species = np.array((['Adelie','Chinstrap','Gentoo']))\r\nst.write(penguin_species[prediction])\r\n\r\nst.subheader(\"Prediction Probability\")\r\nst.write(prediction_prob)\r\n\r\n\r\n","sub_path":"penguins_classifier_app.py","file_name":"penguins_classifier_app.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74116114","text":"# tests.test_compliance\n\nimport nose.tools as nose\nimport glob\nimport json\nimport jsonschema\nimport os.path\nimport pep8\nimport radon.complexity as radon\n\n\ndef test_pep8():\n file_paths = glob.iglob('*/*.py')\n for file_path in file_paths:\n style_guide = pep8.StyleGuide(quiet=True)\n total_errors = style_guide.input_file(file_path)\n test_pep8.__doc__ = '{} should comply with PEP 8'.format(file_path)\n fail_msg = '{} does not comply with PEP 8'.format(file_path)\n yield nose.assert_equal, total_errors, 0, fail_msg\n\n\ndef test_complexity():\n file_paths = glob.iglob('*/*.py')\n for file_path in file_paths:\n with open(file_path, 'r') as file:\n blocks = radon.cc_visit(file.read())\n for block in blocks:\n test_doc = '{} ({}) should have a low cyclomatic complexity score'\n test_complexity.__doc__ = test_doc.format(\n block.name, file_path)\n fail_msg = '{} ({}) has a cyclomatic complexity of {}'.format(\n block.name, file_path, block.complexity)\n yield nose.assert_less_equal, block.complexity, 10, fail_msg\n\n\ndef test_json():\n schemas = {\n 'schema-languages': 'yvs/data/languages.json',\n 'schema-defaults': 'yvs/data/defaults.json',\n 'schema-chapters': 'yvs/data/bible/chapters.json',\n 'schema-bible': 'yvs/data/bible/language-*.json'\n }\n for schema_name, data_path_pattern in schemas.iteritems():\n schema_path = 'yvs/data/schema/{}.json'.format(schema_name)\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n data_paths = glob.iglob(data_path_pattern)\n for data_path in data_paths:\n with open(data_path) as data_file:\n data = json.load(data_file)\n test_json.__doc__ = '{} should comply with schema'.format(\n os.path.relpath(data_path, 'yvs/data'))\n validator = jsonschema.validate(data, schema)\n yield nose.assert_is_none, validator\n","sub_path":"tests/test_compliance.py","file_name":"test_compliance.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"492639668","text":"import numpy as np\n\n\nclass Game:\n def __init__(self):\n self.board = np.array([0, 0, 0, 0, 0])\n self.balance = 1500\n self.bet = 40\n self.level = 1\n self.coin_value = 0.5\n self.values = np.array([[0, 0, 
0, 0, 0, 0, 0, 0, 0, 0],\n [4, 2, 0, 0, 0, 0, 0, 0, 0, 0],\n [100, 40, 30, 20, 20, 10, 10, 5, 5, 5],\n [300, 200, 150, 100, 75, 50, 40, 30, 25, 20],\n [500, 400, 300, 250, 200, 160, 140, 120, 110, 100]])\n self.symbols = ['W', 'P', 'D', 'Z', 'M', 'A', 'K', 'Q', 'J', '1']\n\n def gen_board(self):\n self.board = np.random.randint(0,10,(3,5))\n\n def print_board(self):\n new_board = []\n for line in self.board:\n new_line = []\n for element in line:\n new_line.append(self.symbols[element])\n new_board.append(new_line)\n print(np.array(new_board))\n\n def print_balance(self):\n print(self.balance)\n\n def check_line(self, line):\n win = 0\n value = 0\n for element in line:\n if element != 0:\n line = np.where(line==0, element, line)\n value = element\n break\n for i in range(4):\n if line[i] == line[i+1]:\n win += 1\n else:\n break\n if self.values[win][value] > 0:\n print('value', self.symbols[value])\n print('win', self.values[win][value])\n return self.values[win][value]*self.level*self.coin_value\n\n def spin(self):\n self.balance -= g.bet*g.level*g.coin_value\n self.gen_board()\n self.print_board()\n for i in range(3):\n self.balance += self.check_line(self.board[i])\n print('Balance:', self.balance)\n\n\nif __name__ == \"__main__\":\n g = Game()\n print('Balance:', g.balance)\n print('Bet:', g.bet*g.level*g.coin_value)\n endgame = ''\n while endgame != 'q':\n g.spin()\n endgame = input('Enter q to exit:')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121627584","text":"# 用于实验8:在Bilibili数据集上实现新的训练方法,使用每个序列的中间一位节点的输出作为这一节点的预测,\n# 使用pairwise训练的方法,训练时选择正负样本的各自邻域节点分别组成一个序列输入attention,\n# 测试时用滑动窗口的方法获取每个样本的邻域并计算样本的输出。\n\nimport os\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport math\nimport json\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, mean_squared_error\nimport random\nimport logging\nimport argparse\nimport Transformer_v2\nfrom Transformer_v2 import self_attention\n\nSERVER = 0\n\nclass Path:\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', default='1',type=str)\n parser.add_argument('--num_heads',default=32,type=int)\n parser.add_argument('--num_blocks',default=5,type=int)\n parser.add_argument('--seq_len',default=15,type=int)\n parser.add_argument('--bc',default=4,type=int)\n parser.add_argument('--dropout',default='0.1',type=float)\n parser.add_argument('--gpu_num',default=1,type=int)\n if SERVER == 0:\n parser.add_argument('--msd', default='SelfAttention', type=str)\n else:\n parser.add_argument('--msd', default='model_bilibili_SA', type=str)\nhparams = Path()\nparser = hparams.parser\nhp = parser.parse_args()\n\nif SERVER == 0:\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nelse:\n tf.logging.set_verbosity(tf.logging.ERROR)\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = hp.gpu\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# global paras\nPRESTEPS = 0\nWARMUP_STEP = 4000\nMIN_TRAIN_STEPS = 0\nMAXSTEPS = 20000\nPHASES_STEPS = [2000]\nPHASES_LR = [1e-6,1e-7]\nHIDDEN_SIZE = 128 # for lstm\nDROP_OUT = hp.dropout\n\nEVL_EPOCHS = 1 # epochs for evaluation\nL2_LAMBDA = 0.005 # weightdecay loss\nGRAD_THRESHOLD = 10.0 # gradient threshold\nMAX_F1 = 0.21\n\nGPU_NUM = hp.gpu_num\nBATCH_SIZE = hp.bc\nD_MODEL = Transformer_v2.D_MODEL\nSEQ_LEN = hp.seq_len\nNUM_BLOCKS = hp.num_blocks\nNUM_HEADS = hp.num_heads\n\nV_NUM = 2 
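\n# Sanity guard (hedged addition): get_batch_train pairs one positive with one\n# negative sample per step, so gpu_num * bc is assumed to be even there.\nassert (GPU_NUM * BATCH_SIZE) % 2 == 0, 'gpu_num * bc must be even for pos/neg pairing'\n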
# 3D卷积的最高一维\nV_HEIGHT = 7\nV_WIDTH = 7\nV_CHANN = 512\n\nA_NUM = 6 # 一个clip的A_NUM个spectro,运算时需要并入batch,保持2D卷积操作的3D输入张量\nA_HEIGHT = 8\nA_WIDTH = 8\nA_CHANN = 128\n\nload_ckpt_model = True\n\nif SERVER == 0:\n # path for JD server\n LABEL_PATH = r'/public/data0/users/hulinkang/bilibili/label_record_zmn_24s.json'\n FEATURE_BASE = r'/public/data0/users/hulinkang/bilibili/feature/'\n visual_model_path = '../model_HL/pretrained/sports1m_finetuning_ucf101.model'\n audio_model_path = '../model_HL/pretrained/MINMSE_0.019'\n model_save_dir = r'/public/data0/users/hulinkang/model_HL/'+hp.msd+'/'\n ckpt_model_path = '../model_HL/SelfAttention_3/STEP_30000'\n # ckpt_model_path = '../model_HL/SelfAttention_1/MAXF1_0.286_0'\n\nelse:\n # path for USTC server\n LABEL_PATH = '//data//linkang//bilibili//label_record_zmn_24s.json'\n FEATURE_BASE = '//data//linkang//bilibili//feature//'\n visual_model_path = '../../model_HL/mosi_pretrained/sports1m_finetuning_ucf101.model'\n audio_model_path = '../../model_HL_v2/mosi_pretrained/MINMSE_0.019'\n model_save_dir = r'/data/linkang/model_HL_v3/'+hp.msd+'/'\n # ckpt_model_path = '../../model_HL_v3/model_bilibili_SA_2/STEP_9000'\n ckpt_model_path = '../../model_HL_v3/model_bilibili_SA_6l/STEP_27000'\n\nlogging.basicConfig(level=logging.INFO)\n\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)\n\ndef load_label(label_path):\n file = open(label_path,'r')\n label_record = json.load(file)\n file.close()\n return label_record\n\ndef load_data(label_record, feature_base):\n # 装载所有特征,划分测试集\n # 增加对所有正样本和负样本的索引,处理score使所有元素都大于零\n vids = list(label_record.keys())\n data_train = {}\n data_valid = {}\n data_test = {}\n for vid in vids:\n logging.info('-'*20+str(vid)+'-'*20)\n # load data & label\n # label顺序:train-valid-test\n visual_path = feature_base + vid + r'/features_visual_ovr.npy'\n audio_path = feature_base + vid + r'/features_audio_ovr.npy'\n visual = np.load(visual_path).reshape((-1, V_NUM, V_HEIGHT, V_WIDTH, V_CHANN))\n audio = np.load(audio_path).reshape((-1, A_NUM, A_HEIGHT, A_WIDTH, A_CHANN))\n labels = np.array(label_record[vid]['label'])\n scores = np.array(label_record[vid]['score'])\n scores = scores - np.min(scores) + 1e-6 # 将scores调整到最小值为1e-6,方差仍为1\n\n # split train & valid & test set\n valid_pos = round(len(labels) * 0.6)\n test_pos = round(len(labels) * 0.8)\n\n temp_train = {}\n temp_train['visual'] = visual[:valid_pos]\n temp_train['audio'] = audio[:valid_pos]\n temp_train['labels'] = labels[:valid_pos]\n temp_train['scores'] = scores[:valid_pos]\n temp_train['pos_index'] = np.where(temp_train['labels'] > 0)[0] # 正样本索引\n temp_train['neg_index'] = np.where(temp_train['labels'] < 1)[0] # 负样本索引\n data_train[vid] = temp_train\n\n temp_valid = {}\n temp_valid['visual'] = visual[valid_pos:test_pos]\n temp_valid['audio'] = audio[valid_pos:test_pos]\n temp_valid['labels'] = labels[valid_pos:test_pos]\n temp_valid['scores'] = scores[valid_pos:test_pos]\n temp_valid['pos_index'] = np.where(temp_valid['labels'] > 0)[0]\n temp_valid['neg_index'] = np.where(temp_valid['labels'] < 1)[0]\n data_valid[vid] = temp_valid\n\n temp_test = {}\n temp_test['visual'] = visual[test_pos:]\n temp_test['audio'] = audio[test_pos:]\n temp_test['labels'] = labels[test_pos:][:len(temp_test['visual'])] # 截断\n temp_test['scores'] = 
scores[test_pos:][:len(temp_test['visual'])] # 截断\n temp_test['pos_index'] = np.where(temp_test['labels'] > 0)[0]\n temp_test['neg_index'] = np.where(temp_test['labels'] < 1)[0]\n data_test[vid] = temp_test\n\n logging.info('Data(train, valid, test): '+str(temp_train['visual'].shape)+str(temp_valid['audio'].shape)+str(temp_test['labels'].shape))\n logging.info('Scores(train, valid, test): '+str(len(temp_train['scores']))+str(len(temp_valid['scores']))+str(len(temp_test['scores'])))\n\n return data_train, data_valid, data_test\n\ndef train_scheme_build_v3(data_train,seq_len):\n # 根据正负样本制定的train_scheme,取每个样本的左右领域与样本共同构成一个序列,分别得到正样本序列与负样本序列\n # 在getbatch时数据用零填充,score也用零填充,在attention计算时根据score将负无穷输入softmax,消除padding片段对有效片段的影响\n # 正负样本序列生成后随机化,直接根据step确定当前使用哪个序列,正负各取一个计算pairwise loss\n # train_scheme = [pos_list=(vid,seq_start,seq_end,sample_pos,sample_label),neg_list=()]\n\n pos_list = []\n neg_list = []\n for vid in data_train:\n label = data_train[vid]['labels']\n pos_index = data_train[vid]['pos_index']\n neg_index = data_train[vid]['neg_index']\n vlength = len(label)\n # 遍历正样本索引与负样本索引中的所有样本,计算其邻域索引范围,分别加入两个列表\n for sample_pos in pos_index:\n seq_start = sample_pos - int(seq_len / 2)\n seq_end = seq_start + seq_len\n seq_start = max(0,seq_start) # 截断\n pos_list.append((vid,seq_start,seq_end,sample_pos,1))\n for sample_pos in neg_index:\n seq_start = sample_pos - int(seq_len / 2)\n seq_end = seq_start + seq_len\n seq_start = max(0, seq_start) # 截断\n neg_list.append((vid,seq_start,seq_end,sample_pos,0))\n\n random.shuffle(pos_list)\n random.shuffle(neg_list)\n return (pos_list,neg_list)\n\ndef get_batch_train(data,train_scheme,step,gpu_num,bc,seq_len):\n # 按照train-scheme制作batch,每次选择gpu_num*bc个序列返回,要求每个bc中一半是正样本一半是负样本,交替排列\n # 每个序列构成一个sample,故共有gpu_num*bc个sample,每个gpu上计算bc个sample的loss\n # 返回gpu_num*bc个label,对应每个sample中一个片段的标签\n # 同时返回一个取样位置序列sample_pos,顺序记录每个sample中标签对应的片段在序列中的位置,模型输出后根据sample_pos计算loss\n # 根据step顺序读取pos_list与neg_list中的序列并组合为batch_index,再抽取对应的visual,audio,score与label\n pos_list,neg_list = train_scheme\n pos_num = len(pos_list)\n neg_num = len(neg_list)\n\n # 生成batch_index与sample_pos\n batch_index = []\n sample_poses = []\n batch_labels = [] # only for check\n for i in range(int(gpu_num * bc / 2)): # gpu_num*bc应当为偶数\n pos_position = (step * int(gpu_num * bc / 2) + i) % pos_num # 当前在pos_list中的起始位置\n neg_position = (step * int(gpu_num * bc / 2) + i) % neg_num # 当前在neg_list中的起始位置\n # 读正样本\n vid,seq_start,seq_end,sample_pos,sample_label = pos_list[pos_position]\n batch_index.append((vid,seq_start,seq_end,sample_pos))\n sample_poses.append(sample_pos - seq_start)\n batch_labels.append(sample_label)\n # 读负样本\n vid, seq_start, seq_end, sample_pos, sample_label = neg_list[neg_position]\n batch_index.append((vid, seq_start, seq_end, sample_pos))\n sample_poses.append(sample_pos - seq_start)\n batch_labels.append(sample_label)\n\n # 根据索引读取数据,并做padding\n visuals = []\n audios = []\n scores = []\n labels = []\n for i in range(len(batch_index)):\n vid,seq_start,seq_end,sample_pos = batch_index[i]\n vlength = len(data[vid]['labels'])\n seq_end = min(vlength,seq_end) # 截断\n padding_len = seq_len - (seq_end - seq_start)\n visual = data[vid]['visual'][seq_start:seq_end]\n audio = data[vid]['audio'][seq_start:seq_end]\n score = data[vid]['scores'][seq_start:seq_end]\n if padding_len > 0:\n visual_pad = np.zeros((padding_len, V_NUM, V_HEIGHT, V_WIDTH, V_CHANN))\n audio_pad = np.zeros((padding_len, A_NUM, A_HEIGHT, A_WIDTH, A_CHANN))\n score_pad = np.zeros((padding_len,))\n visual = 
np.vstack((visual,visual_pad)) # 统一在后侧padding\n audio = np.vstack((audio,audio_pad))\n score = np.hstack((score, score_pad))\n visuals.append(visual)\n audios.append(audio)\n scores.append(score)\n labels.append(data[vid]['labels'][sample_pos])\n visuals = np.array(visuals).reshape((gpu_num * bc, seq_len, V_NUM, V_HEIGHT, V_WIDTH, V_CHANN))\n audios = np.array(audios).reshape((gpu_num * bc, seq_len, A_NUM, A_HEIGHT, A_WIDTH, A_CHANN))\n scores = np.array(scores).reshape((gpu_num * bc, seq_len))\n labels = np.array(labels).reshape((gpu_num * bc,))\n sample_poses = np.array(sample_poses).reshape((gpu_num * bc,))\n\n # check\n if np.sum(labels - np.array(batch_labels)) != 0:\n logging.info('Label Mismatch: %d' % step)\n return visuals, audios, scores, labels, sample_poses\n\ndef test_scheme_build(data_test,seq_len):\n # 与train_schem_build一致,但是不区分正负样本,也不做随机化\n seq_list = []\n test_vids = []\n for vid in data_test:\n label = data_test[vid]['labels']\n vlength = len(label)\n # 顺序将每个片段的邻域加入列表中,记录片段在序列中的位置以及片段标签\n for sample_pos in range(vlength):\n seq_start = sample_pos - int(seq_len / 2)\n seq_end = seq_start + seq_len\n seq_start = max(0, seq_start) # 截断\n seq_list.append((vid, seq_start, seq_end, sample_pos, label[sample_pos]))\n test_vids.append(vid) # 记录vid顺序用于evaluation\n return seq_list, test_vids\n\ndef get_batch_test(data,test_scheme,step,gpu_num,bc,seq_len):\n # 与get_batch_test一致,每次选择gpu_num*bc个序列返回,但是保持原有顺序\n seq_list = test_scheme\n\n # 生成batch_index与sample_pos\n batch_index = []\n sample_poses = [] # 取样点在序列中的相对位置\n batch_labels = [] # only for check\n for i in range(gpu_num * bc): # 每次预测gpu_num*bc个片段\n position = (step * gpu_num * bc + i) % len(seq_list) # 当前起始位置,经过最后一个视频末尾后折返,多余的序列作为padding\n # 读取样本\n vid,seq_start,seq_end,sample_pos,sample_label = seq_list[position]\n batch_index.append((vid,seq_start,seq_end,sample_pos))\n sample_poses.append(sample_pos - seq_start)\n batch_labels.append(sample_label)\n\n # 根据索引读取数据,并做padding\n visuals = []\n audios = []\n scores = []\n labels = []\n for i in range(len(batch_index)):\n vid,seq_start,seq_end,sample_pos = batch_index[i]\n vlength = len(data[vid]['labels'])\n seq_end = min(vlength,seq_end) # 截断\n padding_len = seq_len - (seq_end - seq_start)\n visual = data[vid]['visual'][seq_start:seq_end]\n audio = data[vid]['audio'][seq_start:seq_end]\n score = data[vid]['scores'][seq_start:seq_end]\n if padding_len > 0:\n visual_pad = np.zeros((padding_len, V_NUM, V_HEIGHT, V_WIDTH, V_CHANN))\n audio_pad = np.zeros((padding_len, A_NUM, A_HEIGHT, A_WIDTH, A_CHANN))\n score_pad = np.zeros((padding_len,))\n visual = np.vstack((visual,visual_pad)) # 统一在后侧padding\n audio = np.vstack((audio,audio_pad))\n score = np.hstack((score, score_pad))\n visuals.append(visual)\n audios.append(audio)\n scores.append(score)\n labels.append(data[vid]['labels'][sample_pos])\n visuals = np.array(visuals).reshape((gpu_num * bc, seq_len, V_NUM, V_HEIGHT, V_WIDTH, V_CHANN))\n audios = np.array(audios).reshape((gpu_num * bc, seq_len, A_NUM, A_HEIGHT, A_WIDTH, A_CHANN))\n scores = np.array(scores).reshape((gpu_num * bc, seq_len))\n labels = np.array(labels).reshape((gpu_num * bc,))\n sample_poses = np.array(sample_poses).reshape((gpu_num * bc,))\n\n # check\n if np.sum(labels - np.array(batch_labels)) != 0:\n logging.info('Label Mismatch: %d' % step)\n return visuals, audios, scores, labels, sample_poses\n\ndef _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var\n\ndef 
_variable_with_weight_decay(name, shape, wd):\n var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())\n if wd is not None:\n weight_decay = tf.nn.l2_loss(var)*wd\n tf.add_to_collection('weightdecay_losses', weight_decay)\n return var\n\ndef conv3d(name, l_input, w, b):\n return tf.nn.bias_add(\n tf.nn.conv3d(l_input, w, strides=[1, 1, 1, 1, 1], padding='SAME'),\n b\n )\n\ndef score_pred(visual,audio,score,sample_poses,visual_weights,visual_biases,audio_weights,audio_biases,drop_out,training):\n # audio convolution\n audio_feat = tf.reshape(audio,shape=(-1,A_HEIGHT,A_WIDTH,A_CHANN)) # 6b*8*8*128\n audio_conv5 = tf.nn.conv2d(audio_feat, audio_weights['wc5'], [1, 1, 1, 1], padding='SAME')\n audio_conv5 = tf.nn.relu(tf.nn.bias_add(audio_conv5, audio_biases['bc5']))\n audio_out = tf.nn.max_pool(audio_conv5, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME') # 6b*4*4*256\n\n # visual convolution\n visual_conv5 = conv3d('conv5b', visual, visual_weights['wc5b'], visual_biases['bc5b'])\n visual_conv5 = tf.nn.relu(visual_conv5, 'relu5b')\n visual_pool5 = tf.nn.max_pool3d(visual_conv5,ksize=[1,2,2,2,1],strides=[1,2,2,2,1],padding='SAME')\n visual_pool6 = tf.transpose(visual_pool5, perm=(0, 4, 2, 3, 1))\n visual_pool6 = tf.nn.max_pool3d(visual_pool6, ksize=[1, 4, 1, 1, 1], strides=[1, 4, 1, 1, 1], padding='SAME')\n visual_pool6 = tf.transpose(visual_pool6, perm=(0, 4, 2, 3, 1)) # b*1*4*4*128\n visual_out = tf.squeeze(visual_pool6, axis=1) # b*4*4*128\n\n # bilinear pooling\n A = tf.transpose(audio_out,perm=[0,3,1,2]) # 6b*256*4*4\n shape_A = A.get_shape().as_list()\n A = tf.reshape(A,shape=[-1,A_NUM*shape_A[1],shape_A[2]*shape_A[3]]) # b*1536*16\n B = visual_out\n shape_B = B.get_shape().as_list()\n B = tf.reshape(B,shape=[-1,shape_B[1]*shape_B[2],shape_B[3]]) # b*16*128\n I = tf.matmul(A,B) # b*1536*128\n shape_I = I.get_shape().as_list()\n x = tf.reshape(I,shape=(-1,shape_I[1]*shape_I[2])) # b*196608\n y = tf.multiply(tf.sign(x), tf.sqrt(tf.abs(x))) # b*196608\n z = tf.nn.l2_normalize(y, dim=1) # b*196608\n\n # self-attention\n # z形式为bc*seq_len个clip\n # 对encoder来说每个gpu上输入bc*seq_len*d,即每次输入bc个序列,每个序列长seq_len,每个元素维度为d\n # 在encoder中将输入的序列映射到合适的维度\n seq_input = tf.reshape(z,shape=(BATCH_SIZE,SEQ_LEN,-1)) # bc*seq_len*196608\n logits, attention_list = self_attention(seq_input, score, SEQ_LEN, NUM_BLOCKS,\n NUM_HEADS, drop_out, training) # bc*seq_len\n # logits = tf.clip_by_value(tf.reshape(tf.sigmoid(logits), [-1, 1]), 1e-6, 0.999999) # (bc*seq_len,1)\n\n target = tf.one_hot(indices=sample_poses,depth=logits.get_shape().as_list()[-1],on_value=1,off_value=0)\n target = tf.cast(target,dtype=tf.float32)\n logits = tf.reduce_sum(logits * target, axis=1) # 只保留取样位置的值\n logits = tf.reshape(logits, [-1,1])\n\n return logits, attention_list\n\ndef _loss(sp,sn,delta):\n zeros = tf.constant(0,tf.float32,shape=[sp.get_shape().as_list()[0],1])\n delta_tensor = tf.constant(delta,tf.float32,shape=[sp.get_shape().as_list()[0],1])\n u = 1 - sp + sn\n lp = tf.maximum(zeros,u)\n condition = tf.less(u,delta_tensor)\n v = tf.square(lp)*0.5\n w = lp*delta-delta*delta*0.5\n loss = tf.where(condition,x=v,y=w)\n return tf.reduce_mean(loss)\n\ndef tower_loss_huber(name_scope,preds,labels):\n # 每一组相邻的分段计算一次loss,取平均\n cij_list = []\n for i in range(BATCH_SIZE - 1):\n condition = tf.greater(labels[i],labels[i+1])\n sp = tf.where(condition,preds[i],preds[i+1])\n sn = tf.where(condition,preds[i+1],preds[i])\n cij = _loss(sp,sn,3)\n cij_list.append(cij)\n cost = cij_list[0]\n for i in range(1,len(cij_list)):\n cost = 
cost + cij_list[i]\n cost = cost / len(cij_list)\n weight_decay_loss = tf.reduce_mean(tf.get_collection('weightdecay_losses'))\n total_loss = cost + weight_decay_loss\n\n return tf.reduce_mean(total_loss)\n\ndef average_gradients(tower_grads):\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n grads = []\n for g, _ in grad_and_vars:\n expanded_g = tf.expand_dims(g, 0)\n grads.append(expanded_g)\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\ndef evaluation(pred_scores, data_test, test_ids, seq_len):\n # 根据预测的分数和对应的标签计算aprf以及mse\n # 输入模型训练时的总bc,用于计算测试数据中填充部分的长度\n preds_c = list(pred_scores[0])\n for i in range(1, len(pred_scores)):\n preds_c = preds_c + list(pred_scores[i])\n\n pos = 0\n label_pred_all = np.array(())\n label_true_all = np.array(())\n for vid in test_ids:\n labels = data_test[vid]['labels'].reshape((-1,))\n # 计算padding,提取preds中的有效预测部分\n vlength = len(labels)\n preds = preds_c[pos:pos + vlength]\n preds = np.array(preds).reshape((-1,))\n pos += vlength\n # predict\n hlnum = int(np.sum(labels))\n preds_list = list(preds)\n preds_list.sort(reverse=True)\n threshold = preds_list[hlnum]\n labels_pred = np.zeros_like(preds)\n for i in range(len(labels_pred)):\n if preds[i] > threshold :#and np.sum(labels_pred) < hlnum:\n labels_pred[i] = 1\n label_true_all = np.concatenate((label_true_all, labels))\n label_pred_all = np.concatenate((label_pred_all, labels_pred))\n\n a = accuracy_score(label_true_all, label_pred_all)\n p = precision_score(label_true_all, label_pred_all)\n r = recall_score(label_true_all, label_pred_all)\n f = f1_score(label_true_all, label_pred_all)\n logging.info('APRF: %.3f,%.3f,%.3f,%.3f,%d,%d' % (\n a, p, r, f, np.sum(label_true_all), np.sum(label_pred_all)))\n return a,p,r,f\n\ndef model_search(model_save_dir):\n def takestep(name):\n return int(name.split('-')[0].split('S')[-1])\n # 找到要验证的模型名称\n model_to_restore = []\n for root,dirs,files in os.walk(model_save_dir):\n for file in files:\n if file.endswith('.meta'):\n model_name = file.split('.meta')[0]\n step = int(model_name.split('-')[0].split('S')[-1])\n f1 = model_name.split('-')[-1]\n if step > 50000 or f1.startswith('F'):\n model_to_restore.append(os.path.join(root, model_name))\n model_to_restore = list(set(model_to_restore))\n model_to_restore.sort(key=takestep)\n return model_to_restore\n\ndef run_training(data_train, data_test, model_path, test_mode):\n if not os.path.exists(model_save_dir):\n os.makedirs(model_save_dir)\n max_f1 = MAX_F1\n\n with tf.Graph().as_default():\n global_step = tf.train.get_or_create_global_step()\n # placeholders\n visual_holder = tf.placeholder(tf.float32,shape=(BATCH_SIZE * GPU_NUM,\n SEQ_LEN,\n V_NUM,\n V_HEIGHT,\n V_WIDTH,\n V_CHANN))\n audio_holder = tf.placeholder(tf.float32,shape=(BATCH_SIZE * GPU_NUM,\n SEQ_LEN,\n A_NUM,\n A_HEIGHT,\n A_WIDTH,\n A_CHANN))\n scores_holder = tf.placeholder(tf.float32, shape=(BATCH_SIZE * GPU_NUM, SEQ_LEN))\n labels_holder = tf.placeholder(tf.float32,shape=(BATCH_SIZE * GPU_NUM,))\n sample_poses_holder = tf.placeholder(tf.int32,shape=(BATCH_SIZE * GPU_NUM,))\n dropout_holder = tf.placeholder(tf.float32,shape=())\n training_holder = tf.placeholder(tf.bool,shape=())\n\n # parameters\n with tf.variable_scope('var_name') as var_scope:\n weights = {\n 'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.0005),\n }\n biases = {\n 'bc5b': _variable_with_weight_decay('bc5b', [512], 
0.000),\n }\n with tf.variable_scope('var_name_audio') as var_scope_audio:\n audio_weights = {\n 'wc5': _variable_with_weight_decay('au_wc5', [3, 3, 128, 256], L2_LAMBDA),\n }\n audio_biases = {\n 'bc5': _variable_with_weight_decay('au_bc5', [256], 0.0000),\n }\n # with tf.variable_scope('var_name_fusion') as var_name_fusion:\n # fusion_weights = {\n # 'wd1': _variable_with_weight_decay('wd1', [32768, 1024], L2_LAMBDA),\n # 'wd2': _variable_with_weight_decay('wd2', [1024, 512], L2_LAMBDA),\n # 'wd3': _variable_with_weight_decay('wd3', [512, 256], L2_LAMBDA),\n # 'wd4': _variable_with_weight_decay('wd4', [256, 64], L2_LAMBDA),\n # 'wout': _variable_with_weight_decay('wout', [64, 1], L2_LAMBDA),\n # }\n # fusion_biases = {\n # 'bd1': _variable_with_weight_decay('bd1', [1024], 0.0000),\n # 'bd2': _variable_with_weight_decay('bd2', [512], 0.0000),\n # 'bd3': _variable_with_weight_decay('bd3', [256], 0.0000),\n # 'bd4': _variable_with_weight_decay('bd4', [64], 0.0000),\n # 'bout': _variable_with_weight_decay('bout', [1], 0.0000),\n # }\n\n varlist_visual = list(weights.values()) + list(biases.values())\n varlist_audio = list(audio_weights.values()) + list(audio_biases.values())\n # training operations\n # lr = noam_scheme(LR_TRAIN,global_step,WARMUP_STEP)\n lr = tf.train.piecewise_constant(global_step,PHASES_STEPS,PHASES_LR)\n opt_train = tf.train.AdamOptimizer(lr)\n\n # graph building\n tower_grads_train = []\n logits_list = []\n loss_list = []\n attention_list = []\n for gpu_index in range(GPU_NUM):\n with tf.device('/gpu:%d' % gpu_index):\n visual = visual_holder[gpu_index * BATCH_SIZE:(gpu_index + 1) * BATCH_SIZE, :, :, :, :, :]\n visual = tf.reshape(visual,shape=(BATCH_SIZE*SEQ_LEN,V_NUM,V_HEIGHT,V_WIDTH,V_CHANN))\n audio = audio_holder[gpu_index * BATCH_SIZE:(gpu_index + 1) * BATCH_SIZE, :, :, :, :, :]\n audio = tf.reshape(audio,shape=(BATCH_SIZE*SEQ_LEN,A_NUM,A_HEIGHT,A_WIDTH,A_CHANN))\n labels = labels_holder[gpu_index * BATCH_SIZE:(gpu_index + 1) * BATCH_SIZE,]\n scores = scores_holder[gpu_index * BATCH_SIZE:(gpu_index + 1) * BATCH_SIZE, :]\n sample_poses = sample_poses_holder[gpu_index * BATCH_SIZE:(gpu_index + 1) * BATCH_SIZE,]\n\n # predict scores\n # logits, atlist_one = score_pred(visual,audio,scores,weights,biases,audio_weights,audio_biases,\n # fusion_weights,fusion_biases,dropout_holder,training_holder)\n logits, atlist_one = score_pred(visual, audio, scores, sample_poses, weights, biases, audio_weights, audio_biases,\n dropout_holder, training_holder)\n logits_list.append(logits)\n attention_list += atlist_one # 逐个拼接各个卡上的attention_list\n # calculate loss & gradients\n loss_name_scope = ('gpud_%d_loss' % gpu_index)\n loss = tower_loss_huber(loss_name_scope, logits, labels)\n varlist = tf.trainable_variables() # 全部训练\n varlist = list(set(varlist) - set(varlist_visual) - set(varlist_audio))\n # varlist = varlist + list(biases.values()) + list(audio_biases.values())\n # grads_train = opt_train.compute_gradients(loss, varlist)\n # thresh = GRAD_THRESHOLD # 梯度截断 防止爆炸\n # grads_train_cap = [(tf.clip_by_value(grad, -thresh, thresh), var) for grad, var in grads_train]\n # tower_grads_train.append(grads_train_cap)\n loss_list.append(loss)\n # grads_t = average_gradients(tower_grads_train)\n # train_op = opt_train.apply_gradients(grads_t, global_step=global_step)\n if test_mode == 1:\n train_op = tf.no_op()\n\n # session\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = 
tf.global_variables_initializer()\n sess.run(init)\n\n # load model\n saver_visual = tf.train.Saver(varlist_visual)\n saver_audio = tf.train.Saver(varlist_audio)\n saver_visual.restore(sess, visual_model_path)\n saver_audio.restore(sess, audio_model_path)\n\n saver_overall = tf.train.Saver(max_to_keep=100)\n if load_ckpt_model:\n logging.info(' Ckpt Model Restoring: '+model_path)\n saver_overall.restore(sess, model_path)\n logging.info(' Ckpt Model Resrtored !')\n\n # train & test preparation\n train_scheme = train_scheme_build_v3(data_train, SEQ_LEN)\n epoch_step = math.ceil(len(train_scheme[1]) / (BATCH_SIZE * GPU_NUM)) # 所有负样本都计算过一次作为一个epoch\n test_scheme, test_vids = test_scheme_build(data_test,SEQ_LEN)\n max_test_step = math.ceil(len(test_scheme) / BATCH_SIZE / GPU_NUM)\n\n # Beging training\n ob_loss = []\n timepoint = time.time()\n for step in range(MAXSTEPS):\n visual_b, audio_b, score_b, label_b, sample_pose_b = get_batch_train(data_train, train_scheme,\n step,GPU_NUM,BATCH_SIZE,SEQ_LEN)\n observe = sess.run([tf.no_op()] + loss_list + logits_list + attention_list + [global_step, lr],\n feed_dict={visual_holder: visual_b,\n audio_holder: audio_b,\n scores_holder: score_b,\n labels_holder: label_b,\n sample_poses_holder: sample_pose_b,\n dropout_holder: DROP_OUT,\n training_holder: True})\n\n loss_batch = np.array(observe[1:1+GPU_NUM])\n ob_loss.append(loss_batch) # 卡0和卡1返回的是来自同一个batch的两部分loss,求平均\n\n # save checkpoint & evaluate\n epoch = step / epoch_step\n if step % epoch_step == 0 or (step + 1) == MAXSTEPS:\n if step == 0 and test_mode == 0:\n continue\n duration = time.time() - timepoint\n timepoint = time.time()\n loss_array = np.array(ob_loss)\n ob_loss.clear()\n logging.info(' Step %d: %.3f sec' % (step, duration))\n logging.info(' Evaluate: '+str(step)+' Epoch: '+str(epoch))\n logging.info(' Average Loss: '+str(np.mean(loss_array))+' Min Loss: '+str(np.min(loss_array))+' Max Loss: '+str(np.max(loss_array)))\n\n # 按顺序预测测试集中每个视频的每个分段,全部预测后在每个视频内部排序,计算指标\n pred_scores = [] # 每个batch输出的预测得分\n for test_step in range(max_test_step):\n visual_b, audio_b, score_b, label_b, sample_pose_b = get_batch_test(data_test, test_scheme,\n test_step, GPU_NUM, BATCH_SIZE, SEQ_LEN)\n logits_temp_list, att_ob_list = sess.run([logits_list, attention_list], feed_dict={visual_holder: visual_b,\n audio_holder: audio_b,\n scores_holder: score_b,\n sample_poses_holder: sample_pose_b,\n training_holder: False,\n dropout_holder: 0})\n for preds in logits_temp_list:\n pred_scores.append(preds.reshape((-1)))\n a, p, r, f = evaluation(pred_scores, data_test, test_vids, SEQ_LEN)\n # logging.info('Accuracy: %.3f, Precision: %.3f, Recall: %.3f, F1: %.3f' % (a, p, r, f))\n return\n return\n\ndef main(self):\n label_record = load_label(LABEL_PATH)\n data_train, data_valid, data_test = load_data(label_record, FEATURE_BASE)\n print('Data loaded !')\n\n models_to_restore = model_search(model_save_dir)\n for i in range(len(models_to_restore)):\n print('-' * 20, i, models_to_restore[i].split('/')[-1], '-' * 20)\n ckpt_model_path = models_to_restore[i]\n run_training(data_train, data_test, ckpt_model_path,1) # for testing\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"SAMFv4_check.py","file_name":"SAMFv4_check.py","file_ext":"py","file_size_in_byte":32672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"334773504","text":"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\nimport numpy as np\nimport os\nimport xml.etree.ElementTree as ET\nfrom fvcore.common.file_io import PathManager\n\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import BoxMode\nfrom PIL import Image\nimport math\nimport json\n\n__all__ = [\"register_nuscenes\"]\n\n\n# fmt: off\nCLASS_NAMES = [\n \"Pedestrian\", \"Car\", \"Cycle\", \"Cyclist\", \"Bus\", \n \"Truck\", \"Construction\", \"Trailer\",\n \"Barrier\", \"Cone\",\n]\n# fmt: on\ncategory_dict = {'Pedestrian' : 0,\n 'Car' : 1,\n 'Bicycle' : 2,\n 'Motorcycle' : 2,\n 'Bicyclist' : 3,\n 'Motorcyclist' : 3,\n 'Bus' : 4,\n 'Truck' : 5, \n 'Construction' : 6,\n 'Trailer' : 7,\n 'Barrier' : 8,\n 'Cone' : 9,\n 'Van' : -1,\n }\n\ndef is_vehicle(num):\n return category_dict['Barrier'] != num and category_dict['Cone'] != num\n\ndef load_nuscenes_instances(data_dir, out_dir, subsets, subsets_files):\n print(\"VAL FILE: %s\" % val_file)\n print(\"TRAIN FILE: %s\\n\" % train_file)\n\n json_name = 'annotations_nuscenes_%s.json'\n\n img_id = 0\n ann_id = 0\n\n for sub_set, subset_name in zip(subsets, subsets_files):\n ann_dir = os.path.join(data_dir, 'label')\n im_dir = os.path.join(data_dir, 'image') #images_dataset\n print('Starting %s' % ann_dir)\n print('Starting %s\\n' % im_dir)\n\n dataset_dicts = []\n rings = {}\n\n with open(subset_name, \"r\") as f:\n im_list = f.read().splitlines()\n for filename in im_list:\n if not anns_in_ring(ann_dir, filename[1:]):\n # print('Empty ring, continuing.')\n continue\n complete_name_im = os.path.join(im_dir, filename + '.jpg')\n complete_name_ann = os.path.join(ann_dir, filename + '.txt')\n \n if not os.path.exists(complete_name_im): \n continue\n\n record = {}\n record['image_id'] = img_id\n img_id += 1\n\n im = Image.open(complete_name_im)\n # print(im.size)\n record['width'] = int(im.size[0])\n record['height'] = int(im.size[1])\n\n record['file_name'] = complete_name_im\n # images.append(image) \n\n if os.path.exists(complete_name_ann): \n pre_objs = np.genfromtxt(complete_name_ann, delimiter=' ',\n names=['token', 'type', 'truncated', 'occluded', 'alpha', 'bbox_xmin', 'bbox_ymin',\n 'bbox_xmax', 'bbox_ymax', 'dimensions_1', 'dimensions_2', 'dimensions_3',\n 'location_1', 'location_2', 'location_3', 'rotation_y'], dtype=None, encoding='ascii')\n\n if (pre_objs.ndim < 1):\n pre_objs = np.array(pre_objs, ndmin=1)\n else:\n pre_objs= []\n annotations = []\n\n for obj in pre_objs:\n\n if (category_dict[obj['type']] != -1):\n ann = {}\n ann_id += 1\n ann['token'] = obj['token']\n ann['category_id'] = int(category_dict[obj['type']])\n ann['bbox'] = [int(obj['bbox_xmin']), int(obj['bbox_ymin']), math.fabs(obj['bbox_xmax'] - obj['bbox_xmin']), math.fabs(obj['bbox_ymax'] - obj['bbox_ymin'])]\n ann['segmentation'] = [[int(obj['bbox_xmin']), int(obj['bbox_ymin']), \n int(obj['bbox_xmin']), int(obj['bbox_ymax']), \n int(obj['bbox_xmax']), int(obj['bbox_ymax']), \n int(obj['bbox_xmax']), int(obj['bbox_ymin'])]]\n ann['iscrowd'] = 0\n annotations.append(ann)\n\n if len(dataset_dicts) % 500 == 0:\n print(\"Processed %s images\" % (len(dataset_dicts)))\n\n if not filename[1:] in rings:\n rings[filename[1:]] = {}\n rings[filename[1:]][filename[0]] = len(annotations)\n record['annotations'] = annotations\n dataset_dicts.append(record)\n\n check_rings(rings)\n\n outfile_name = os.path.join(out_dir, json_name % (sub_set + '_nuscyc'))\n print(\"Processed %s images\" % (len(dataset_dicts)))\n\n with open(outfile_name, 'w') as outfile:\n 
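# each record already bundles the image metadata with its COCO-style\n            # annotation list, so the whole split is dumped as a single JSON array\n            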
outfile.write(json.dumps(dataset_dicts))\n\ndef anns_in_ring(ann_dir, filename):\n anns = 0\n for i in range(0,6):\n complete_name_ann = os.path.join(ann_dir, str(i) + filename + '.txt')\n if os.path.exists(complete_name_ann): \n anns += 1\n return anns > 0\n\ndef check_rings(ann_dict):\n for ring in ann_dict:\n print('ring', len(ann_dict[ring]), ring)\n assert len(ann_dict[ring]) == 6\n\n sum_ring = 0\n for cam in ann_dict[ring]:\n sum_ring += ann_dict[ring][cam]\n\n assert sum_ring > 0\n print(sum_ring)\n\ndef load_nuscenes_instance(data_dir, split):\n img_id = 0\n ann_id = 0\n json_name = 'annotations_nuscyc_%s_token.json'\n\n ann_dir = os.path.join(data_dir, 'label')\n im_dir = os.path.join(data_dir, 'image') #images_dataset\n print('Starting %s' % ann_dir)\n print('Starting %s\\n' % im_dir)\n\n dataset_dicts = []\n rings = {}\n\n split_file = os.path.join(data_dir, '%s.txt' %split)\n with open(split_file, \"r\") as f:\n im_list = f.read().splitlines()\n for filename in im_list:\n if not anns_in_ring(ann_dir, filename[1:]):\n # print('Empty ring, continuing.')\n continue\n complete_name_im = os.path.join(im_dir, filename + '.jpg')\n complete_name_ann = os.path.join(ann_dir, filename + '.txt')\n \n if not os.path.exists(complete_name_im): \n continue\n\n record = {}\n record['image_id'] = img_id\n img_id += 1\n\n im = Image.open(complete_name_im)\n record['width'] = int(im.size[0])\n record['height'] = int(im.size[1])\n record['file_name'] = complete_name_im\n\n if os.path.exists(complete_name_ann): \n pre_objs = np.genfromtxt(complete_name_ann, delimiter=' ',\n names=['token', 'type', 'truncated', 'occluded', 'alpha', 'bbox_xmin', 'bbox_ymin',\n 'bbox_xmax', 'bbox_ymax', 'dimensions_1', 'dimensions_2', 'dimensions_3',\n 'location_1', 'location_2', 'location_3', 'rotation_y'], dtype=None, encoding='ascii')\n\n if (pre_objs.ndim < 1):\n pre_objs = np.array(pre_objs, ndmin=1)\n else:\n pre_objs = []\n annotations = []\n\n for obj in pre_objs:\n if (category_dict[obj['type']] != -1):\n ann = {}\n ann_id += 1\n ann['token'] = obj['token']\n ann['category_id'] = int(category_dict[obj['type']])\n ann['bbox'] = [int(obj['bbox_xmin']), int(obj['bbox_ymin']), math.fabs(obj['bbox_xmax'] - obj['bbox_xmin']), math.fabs(obj['bbox_ymax'] - obj['bbox_ymin'])]\n ann['segmentation'] = [[int(obj['bbox_xmin']), int(obj['bbox_ymin']), \n int(obj['bbox_xmin']), int(obj['bbox_ymax']), \n int(obj['bbox_xmax']), int(obj['bbox_ymax']), \n int(obj['bbox_xmax']), int(obj['bbox_ymin'])]]\n ann['iscrowd'] = 0\n ann[\"bbox_mode\"] = BoxMode.XYWH_ABS\n annotations.append(ann)\n\n if len(dataset_dicts) % 500 == 0:\n print(\"Processed %s images\" % (len(dataset_dicts)))\n\n if not filename[1:] in rings:\n rings[filename[1:]] = {}\n rings[filename[1:]][filename[0]] = len(annotations)\n record['annotations'] = annotations\n dataset_dicts.append(record)\n\n check_rings(rings)\n print(\"Processed %s images\" % (len(dataset_dicts)))\n\n outfile_name = os.path.join(data_dir, json_name % (split))\n with open(outfile_name, 'w') as outfile:\n outfile.write(json.dumps(dataset_dicts))\n\n return dataset_dicts\n\ndef upload_nuscenes():\n files_list = [\n '/raid/datasets/token_nuscenes/train_ring.txt',\n '/raid/datasets/token_nuscenes/val_ring.txt', \n '/raid/datasets/token_nuscenes/trainval_ring.txt', \n '/raid/datasets/token_nuscenes/minitrain_ring.txt', \n '/raid/datasets/token_nuscenes/minival_ring.txt', \n ]\n\n splits = ['train', 'val', 'trainval', 'minitrain', 'minival']\n 
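# NOTE: out_dir here, and the val_file/train_file names printed inside\n    # load_nuscenes_instances, are not defined in this module; they appear\n    # to rely on globals that must be set before this helper can run.\n    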
load_nuscenes_instances('/raid/datasets/token_nuscenes', out_dir, splits, files_list)\n\ndef register_nuscenes(name, dirname, split):\n json_name = 'annotations_nuscyc_%s_token.json'\n outfile_name = os.path.join(dirname, json_name % split)\n\n if not os.path.exists(outfile_name):\n load_and_register_nuscenes(name, dirname, split)\n else:\n with open(outfile_name) as json_file:\n data = json.load(json_file)\n\n for record in data:\n for ann in record['annotations']:\n ann[\"bbox_mode\"] = BoxMode.XYWH_ABS\n\n DatasetCatalog.register(name, lambda: data)\n MetadataCatalog.get(name).set(\n thing_classes=CLASS_NAMES, dirname=dirname, split=split\n )\n\ndef load_and_register_nuscenes(name, dirname, split):\n DatasetCatalog.register(name, lambda: load_nuscenes_instance(dirname, split))\n MetadataCatalog.get(name).set(\n thing_classes=CLASS_NAMES, dirname=dirname, split=split\n )\n","sub_path":"detectron2/data/datasets/nuscenes.py","file_name":"nuscenes.py","file_ext":"py","file_size_in_byte":9899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230728741","text":"class CityScrapersLoggingPipeline(object):\n \"\"\"\n Dummy logging pipeline. Enabled by default, it reminds developers to\n turn on some kind of backend storage pipeline.\n \"\"\"\n\n def process_item(self, item, spider):\n spider.logger.warn(\n 'Processing {0}. Enable a database pipeline to save items.'.format(\n item.get('title', 'No title found')\n )\n )\n return item\n","sub_path":"city_scrapers/pipelines/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"307213486","text":"from django.contrib import admin\n\nfrom .models import Event\n\n\nclass AdminEvent(admin.ModelAdmin):\n list_display = (\n 'user_id',\n 'category',\n 'date',\n 'start_time',\n 'end_time',\n 'agent_role',\n 'status',\n )\n\n\nadmin.site.register(Event, AdminEvent)\n","sub_path":"planning/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194568817","text":"'''\nA simple PyREM script to run iperf between two machines.\n'''\n\nimport time\nfrom pyrem.host import RemoteHost\n\n# Declare two hosts.\nHOST1 = RemoteHost('tradewars.cs.washington.edu')\nHOST2 = RemoteHost('spyhunter.cs.washington.edu')\n\n# Create tasks to be run on the hosts.\nserver = HOST1.run(['iperf -s'], quiet=True)\nclient = HOST2.run(['iperf -c tradewars.cs.washington.edu'])\n\n# Start the server task.\nserver.start()\n\n# Wait for servers to be ready.\ntime.sleep(1)\n\n# Run the client task.\nclient.start(wait=True)\n\n# Clean up.\nserver.stop()\n","sub_path":"examples/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"280043375","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom rasa_sdk import Action\nfrom rasa_sdk.events import SlotSet\nimport pandas as pd\nimport json\nimport smtplib\nfrom email.message import EmailMessage\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom smtplib import SMTP\nimport sys\n\nZomatoData = pd.read_csv('zomato.csv',encoding=\"latin1\")\nZomatoData = 
ZomatoData.drop_duplicates().reset_index(drop=True)\nWeOperate = ['New Delhi', 'Gurgaon', 'Noida', 'Faridabad', 'Allahabad', 'Bhubaneshwar', 'Mangalore', 'Mumbai', 'Ranchi', 'Patna', 'Mysore', 'Aurangabad', 'Amritsar', 'Puducherry', 'Varanasi', 'Nagpur', 'Vadodara', 'Dehradun', 'Vizag', 'Agra', 'Ludhiana', 'Kanpur', 'Lucknow', 'Surat', 'Kochi', 'Indore', 'Ahmedabad', 'Coimbatore', 'Chennai', 'Guwahati', 'Jaipur', 'Hyderabad', 'Bangalore', 'Nashik', 'Pune', 'Kolkata', 'Bhopal', 'Goa', 'Chandigarh', 'Ghaziabad', 'Ooty', 'Gangtok', 'Shimla']\n\ndef RestaurantSearch(City,Cuisine,Price):\n TEMP = ZomatoData[(ZomatoData['Cuisines'].apply(lambda x: Cuisine.lower() in x.lower())) & (ZomatoData['City'].apply(lambda x: City.lower() in x.lower()))]\n if Price.lower()=='under300':\n TEMP1 = TEMP[TEMP['Average Cost for two']<300]\n elif Price.lower() == 'between300to700':\n TEMP1 = TEMP[(TEMP['Average Cost for two']>=300) & (TEMP['Average Cost for two']<=700)]\n else:\n TEMP1 = TEMP[TEMP['Average Cost for two']>700]\n TEMP2 = TEMP1[['Restaurant Name','Address','Average Cost for two','Aggregate rating']]\n TEMP2.sort_values(by='Aggregate rating',ascending=False,inplace=True)\n return TEMP2[['Restaurant Name','Address','Average Cost for two','Aggregate rating']]\n\nclass ActionSearchRestaurants(Action):\n def name(self):\n return 'action_search_restaurants'\n\n def run(self, dispatcher, tracker, domain):\n #config={ \"user_key\":\"f4924dc9ad672ee8c4f8c84743301af5\"}\n loc = tracker.get_slot('location').lower()\n cuisine = tracker.get_slot('cuisine').lower()\n price = tracker.get_slot('price').lower()\n \n operatingCities = [x.lower() for x in WeOperate]\n if loc.lower() in operatingCities:\n \n results = RestaurantSearch(City=loc,Cuisine=cuisine,Price=price)\n response=\"\"\n if results.shape[0] <5:\n response= \"Please try different selections? We do not have results for these values.\"\n else:\n response = \"Displaying top restaurants as per selection: \\n\"\n # print(\"Showing top rated restaurants as per your selections: \\n\")\n for restaurant in RestaurantSearch(loc,cuisine,price).iloc[:5].iterrows():\n restaurant = restaurant[1]\n \n response=response + F\"\\n Found: {restaurant['Restaurant Name']} in {restaurant['Address']} rated {restaurant['Aggregate rating']}. 
\\n\\n\"\n emailRequest = \"\\n\\nDo you want the results to be sent to you as an email?\"\n response=response + emailRequest\n else:\n response = \"We do not serve in this area yet.\"\n dispatcher.utter_message(\"-----\"+response)\n return [SlotSet('location',loc)]\n\nclass ActionSendMail(Action):\n def name(self):\n return 'action_send_mail'\n\n def run(self, dispatcher,tracker,domain):\n \n loc = tracker.get_slot('location').lower()\n cuisine = tracker.get_slot('cuisine').lower()\n price = tracker.get_slot('price').lower()\n response = RestaurantSearch(loc,cuisine,price).iloc[:10]\n \n EmailConfirmation=tracker.get_slot('emailConfirmation').lower()\n Recipient = tracker.get_slot('to')#\"tejaswinishreyadsc23@gmail.com\"\n print(\"Recipient is \",Recipient)\n if EmailConfirmation=='yes':\n if Recipient == None:\n dispatcher.utter_message(\"Please enter email ID\")\n else:\n msg = MIMEMultipart()\n msg['Subject'] = 'Top rated restaurants'\n msg['From'] = 'tejaswinishreyadsc23@gmail.com'\n msg['To']=Recipient\n \n html = \"\"\"\\\n \n \n \n {0}\n \n \n \"\"\".format(response.to_html())\n \n part1 = MIMEText(html, 'html')\n \n msg.attach(part1)\n \n server = smtplib.SMTP_SSL(\"smtp.gmail.com\", 465)\n \n server.login(\"tejaswinishreyadsc23@gmail.com\", \"chatB0T!\") \n server.sendmail(msg['From'], msg['To'] , msg.as_string())\n \n \n server.quit() \n \n dispatcher.utter_message(\"Message sent successfully. Bon apetit!\")\n return [SlotSet(\"to\",Recipient)]\n else:\n dispatcher.utter_message(\"Good day. Bon apetit!\")\n\n\n\n","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"205242415","text":"#!/usr/bin/env python3\n\nINPUT_FILE = 'input13.txt'\n# INPUT_FILE = 'example13.txt'\n\nimport unittest\nimport re\n\nDEBUG = False\n# DEBUG = True\ndef debug_print(*args, **kwargs):\n if DEBUG:\n print(*args, **kwargs)\n\nNUM_SECONDS = 2503\ndef main():\n text = get_text(INPUT_FILE)\n coords, commands = parse_text(text)\n grid = create_grid_from_coords(coords)\n for command in commands:\n grid = run_command(grid, command)\n print_grid(grid)\n\ndef print_grid(grid):\n print('\\n'.join(''.join('#' if c else '.' 
for c in row)\n for row in grid))\n\ndef run_command(grid, command):\n xy, amount = command\n debug_print(f'Running command: fold along {xy} = {amount}')\n if xy == 'x':\n num_rows = len(grid)\n num_cols = amount\n elif xy == 'y':\n num_rows = amount\n num_cols = len(grid[0])\n else:\n raise Exception(f'Unrecognized xy: {xy}')\n new_grid = create_grid(num_rows, num_cols)\n for y in range(len(grid)):\n if xy == 'y' and y > amount:\n new_y = 2 * amount - y\n else:\n new_y = y\n for x in range(len(grid[0])):\n if xy == 'x' and x > amount:\n new_x = 2 * amount - x\n else:\n new_x = x\n if grid[y][x]:\n try:\n new_grid[new_y][new_x] = True\n except Exception as e:\n print(f'Indices y: {new_y}, x: {new_x} out of range')\n raise e\n return new_grid\n\ndef create_grid_from_coords(coords):\n max_x = max(x for x, _ in coords)\n max_y = max(y for _, y in coords)\n debug_print(f'max_x: {max_x}, max_y: {max_y}')\n num_rows = max_y + 1\n num_cols = max_x + 1\n grid = create_grid(num_rows, num_cols)\n debug_print(f'num rows: {num_rows}/{len(grid)}')\n debug_print(f'num cols: {num_cols}/{len(grid[0])}')\n for x, y in coords:\n try:\n grid[y][x] = True\n except Exception as e:\n print(f'x: {x}, y: {y}')\n raise e\n return grid\n\ndef create_grid(num_rows, num_cols):\n return [[False] * num_cols for _ in range(num_rows)]\n\ndef parse_text(text):\n coords_text, commands_text = text.split('\\n\\n')\n return parse_coords(coords_text), parse_commands(commands_text)\n\ndef parse_coords(coords_text):\n return [parse_coord(line) for line in coords_text.split('\\n')]\n\ndef parse_coord(line):\n x, y = line.split(',')\n return int(x), int(y)\n\ndef parse_commands(commands_text):\n return [parse_command(line) for line in commands_text.split('\\n')]\n\nCOMMAND_PATTERN = re.compile(r'fold along (x|y)=(\\d+)')\ndef parse_command(line):\n try:\n xy, amount = COMMAND_PATTERN.match(line).groups()\n except Exception as e:\n print(f'Could not parse line: {line}')\n raise e\n return xy, int(amount)\n \ndef get_text(filename):\n with open(filename) as f:\n return f.read().strip()\n\nif __name__ == '__main__':\n main()\n","sub_path":"2021/day13b.py","file_name":"day13b.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560190318","text":"#This is code for 6.0001 pset 1 part B. 
AKA: I will never be able to afford a house!\r\n#justmillenialthings\r\n\r\nprint (\"Enter your annual salary:\")\r\nannual_salary = int(input())\r\nprint (\"Enter the percent of your salary to save, as a decimal:\")\r\nportion_saved = float(input())\r\nprint (\"Enter the cost of your dream home:\")\r\ntotal_cost = int(input())\r\nprint (\"Enter the semi-annual raise, as a decimal:\")\r\nsemi_annual_raise = float(input())\r\n\r\nportion_down_payment = 0.25\r\ncurrent_savings = 0\r\nr=0.04\r\nmonthly_salary = annual_salary/12\r\ntotal_downpayment = portion_down_payment * total_cost\r\nmonths_saved = 0\r\n\r\nwhile current_savings < total_downpayment:\r\n current_savings += current_savings*r/12+portion_saved*monthly_salary\r\n months_saved +=1\r\n if months_saved%6==0:\r\n monthly_salary+=monthly_salary*semi_annual_raise\r\n \r\n\r\nprint (\"Number of months : \"+ str(months_saved))\r\n","sub_path":"ps1b.py","file_name":"ps1b.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"463907518","text":"import numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom torch.utils import data\nimport matplotlib.pyplot as plt\n\n#这应该是个回归问题,确实自己的代码不想看\n\ndef load_array(data_arrays, batch_size, is_train=True):\n \"\"\"构造一个PyTorch数据迭代器\"\"\"\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\n\ntrain_data = pd.read_csv(r'D:\\书籍资料整理\\亚利桑那房价\\kaggle_house_pred_train.csv')\ntest_data = pd.read_csv(r'D:\\书籍资料整理\\亚利桑那房价\\kaggle_house_pred_test.csv')\nprint(train_data.shape)\nprint(test_data.shape)\n# print(train_data.head())\n\nall_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))\nprint(all_features.shape)\n\n# 若无法获得测试数据,则可根据训练数据计算均值和标准差\nnumeric_features = all_features.dtypes[all_features.dtypes != 'object'].index\nall_features[numeric_features] = all_features[numeric_features].apply(\n lambda x: (x - x.mean()) / (x.std()))\n# 在标准化数据之后,所有均值消失,因此我们可以将缺失值设置为0\nall_features[numeric_features] = all_features[numeric_features].fillna(0)\n\n# “Dummy_na=True”将“na”(缺失值)视为有效的特征值,并为其创建指示符特征\nall_features = pd.get_dummies(all_features, dummy_na=True)\n\nprint(all_features.shape)\n\n#将数据创建张量\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\n\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\n\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\n\ndef log_rmse(net, features, labels):\n # 为了在取对数时进一步稳定该值,将小于1的值设置为1\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds),\n torch.log(labels)))\n return rmse.item()\n\ndef train(net, train_features, train_labels, test_features, test_labels,\n num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = load_array((train_features, train_labels), batch_size)\n # 这里使用的是Adam优化算法\n optimizer = torch.optim.Adam(net.parameters(),\n lr = learning_rate,\n weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n 
test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\n\n#k折交叉验证\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\n\n\ndef k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay,\n batch_size):\n train_l_sum, valid_l_sum = 0, 0\n for i in range(k):\n data = get_k_fold_data(k, i, X_train, y_train)\n net = get_net()\n train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,\n weight_decay, batch_size)\n train_l_sum += train_ls[-1]\n valid_l_sum += valid_ls[-1]\n\n print(f'折{i + 1},训练log rmse{float(train_ls[-1]):f}, '\n f'验证log rmse{float(valid_ls[-1]):f}')\n return train_l_sum / k, valid_l_sum / k\n\ndef train_and_pred(train_features, test_feature, train_labels, test_data,\n num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None,\n num_epochs, lr, weight_decay, batch_size)\n\n print(f'训练log rmse:{float(train_ls[-1]):f}')\n # 将网络应用于测试集。\n preds = net(test_features).detach().numpy()\n # 将其重新格式化以导出到Kaggle\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)\nnum_epochs=10\nlr=0.1\nweight_decay=0.9\nbatch_size=10\ntrain_and_pred(train_features, test_features, train_labels, test_data,\n num_epochs, lr, weight_decay, batch_size)","sub_path":"python/深度学习/torch学习/动手学习深度学习/2.多层感知机/房价数据实战/分析代码.py","file_name":"分析代码.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488115770","text":"import sys\nimport random\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom main_ui import Ui_MainWindow\n\n\nclass Drawer(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.pushButton.clicked.connect(self.draw)\n self.drawing = False\n\n def draw(self):\n self.drawing = True\n self.update()\n\n def paintEvent(self, event):\n if not self.drawing:\n return\n\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.setBrush(QtGui.QColor(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n r = random.randint(25, 210)\n rx = random.randint(10 + r, 490 - r)\n ry = random.randint(70 + r, 490 - r)\n qp.drawEllipse(rx - r, ry - r, 2 * r, 2 * r)\n qp.end()\n\n self.drawing = False\n\n\napp = QtWidgets.QApplication(sys.argv)\ndrawer = Drawer()\ndrawer.show()\nsys.exit(app.exec())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59527271","text":"class Player(object):\n\n @staticmethod\n def move(board):\n print(board)\n while True:\n a_moves = board.available_moves()\n col = input(f\"Choose a column! 
{[x + 1 for x in board.available_moves()]}\\n\")\n col = col if col.isdigit() else -1\n col = int(col)\n if col-1 in a_moves:\n break\n else:\n print('Invalid input')\n return col - 1\n","sub_path":"connect4/agent/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"361703666","text":"import requests\nimport re\n\nclass TiantianSpider:\n def __init__(self):\n self.start_url = \"http://fund.eastmoney.com/data/rankhandler.aspx?op=ph&dt=kf&ft=all&rs=&gs=0&sc=zzf&st=desc&sd=2017-11-26&ed=2018-11-26&qdii=&tabSubtype=,,,,,&pi={}&pn=50&dx=1&v=0.09605094795229396\"\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\"}\n # 处理第一页,获取所有内容\n def parse_url(self):\n start_url = self.start_url.format(\"1\")\n # print(start_url)\n rest1 = requests.get(start_url, headers=self.headers)\n return rest1.content.decode()\n \n #提取第一页的内容,提取allPages:后面的页面数量\n def get_allpages(self, firstpage):\n # 把{}中的内容用正则匹配,返回一个列表类型\n content_list = re.findall(\"{.+}\", firstpage, re.S)\n content_dict = content_list[0]\n # 在content_list[0]中查找allPages:信息\n allPages = re.findall(\"allPages:(.*?),\",content_dict,re.S)\n return allPages\n \n #变量url\n def go_through_url(self, allPages):\n i = 1\n #所有的数据保存到一个str\n allData_final = str()\n #这里传入的allPages是个list类型的数据\n while i <= int(allPages[0]):\n # while i <= 2:\n url = self.start_url.format(str(i))\n rest = requests.get(url, headers=self.headers)\n content = rest.content.decode()\n # print(content)\n #获取每页{}中的内容\n content_list = re.findall(\"{.+}\", content, re.S)\n content_dict = content_list[0]\n #print(content_dict)\n #获取datas数据\n allData = re.findall(\"datas:\\[(.*?)\\]\", content_dict, re.S)\n #由于最后一行没有,, 添加一个逗号,以便后面分割\n allData_reg = allData[0] + (\",\")\n #利用一个大的string保存所有的结果\n allData_final += allData_reg\n # print(allData_final)\n i += 1\n return allData_final\n\n\n def save_content(self, data):\n #获取[]中每个\"\"中的内容,即每行的数据内容\n allData_Detail = re.findall(\"\\\"(.*?)\\\",\", data, re.S)\n # print(allData_Detail)\n # print(type(allData_Detail))\n # print(allData_Detail[1])\n with open(\"tiantian.txt\", \"a\", encoding=\"utf-8\") as f:\n i=0\n while i < len(allData_Detail):\n f.write(allData_Detail[i]+\"\\n\")\n i+=1\n\n def run(self):\n #接收parse_url()返回的内容\n first_page_content = self.parse_url()\n # print(first_page_content)\n #接收get_allpages(first_page_content)返回的所有页码数,传入第一个页面的内容\n allPages = self.get_allpages(first_page_content)\n # print(allPages)\n #接收get_allpages(first_page_content)返回的所有页面的数据,得到所有页面的内容,传入页面数\n allData = self.go_through_url(allPages)\n # print(allData)\n #把接收到的数据传入save_content(allData)接收并处理保存\n save_content = self.save_content(allData)\n \nif __name__ == \"__main__\":\n tiantian = TiantianSpider()\n tiantian.run()\n\n","sub_path":"爬虫/demo/tiantian.py","file_name":"tiantian.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"376865369","text":"#!/usr/bin/env python\n\n#########################################################################################\n#\n# Build a (js,css) package library based, \n# on a dependencies file, \n# using various compilers\n#\n# Python: 2 or 3 (ca. 
2012-2013)\n#########################################################################################\n\n#import pprint\nimport os, tempfile, sys, re, json\n\ntry:\n import argparse\n ap = 1\nexcept ImportError:\n import optparse\n ap = 0\n\ntry:\n import yaml\n _hasYaml_ = 1\nexcept ImportError:\n _hasYaml_ = 0\n\nclass BuildPackage:\n \"\"\"Build a (js,css) library using various compilers\"\"\"\n \n def __init__(self):\n self.Encoding = 'utf8'\n self.inputType = 'custom'\n self.compilersPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'compilers') + '/'\n self.parsersPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'parsers') + '/'\n self.IniParser = None\n self.CustomParser = None\n self.availableCompilers = {\n \n 'cssmin' : {\n 'name' : 'CSS Minifier',\n 'compiler' : 'python __{{PATH}}__cssmin.py __{{EXTRA}}__ __{{OPTIONS}}__ --input __{{INPUT}}__ --output __{{OUTPUT}}__',\n 'options' : ''\n },\n \n 'uglifyjs' : {\n 'name' : 'Node UglifyJS Compiler',\n 'compiler' : 'uglifyjs __{{INPUT}}__ __{{OPTIONS}}__ -o __{{OUTPUT}}__',\n 'options' : ''\n },\n \n 'closure' : {\n 'name' : 'Java Closure Compiler',\n 'compiler' : 'java -jar __{{PATH}}__closure.jar __{{EXTRA}}__ __{{OPTIONS}}__ --js __{{INPUT}}__ --js_output_file __{{OUTPUT}}__',\n 'options' : ''\n },\n \n 'yui' : {\n 'name' : 'Java YUI Compressor Compiler',\n 'compiler' : 'java -jar __{{PATH}}__yuicompressor.jar __{{EXTRA}}__ __{{OPTIONS}}__ --type js -o __{{OUTPUT}}__ __{{INPUT}}__',\n 'options' : ''\n }\n \n }\n self.selectedCompiler = 'uglifyjs'\n \n self.realpath = ''\n self.outputToStdOut = True\n self.depsFile = ''\n self.inFiles = []\n self.replace = None\n self.doc = None\n self.doMinify = False\n self.outFile = None\n \n def import_path(self, fullpath='./', doReload=False):\n \"\"\" \n Import a file with full path specification. Allows one to\n import from anywhere, something __import__ does not do. 
\n \"\"\"\n path, filename = os.path.split(os.path.abspath(fullpath))\n filename, ext = os.path.splitext(filename)\n \n sys.path.append(path)\n module = __import__(filename)\n \n if doReload:\n reload(module) # Might be out of date\n \n del sys.path[-1]\n \n return module\n \n def openFile(self, file, op):\n if self.Encoding: f = open(file, op, encoding=self.Encoding)\n else: f = open(file, op)\n return f\n\n def openFileDescriptor(self, file, op):\n if self.Encoding: fh = os.fdopen(file, op, encoding=self.Encoding)\n else: fh = os.fdopen(file, op)\n return fh\n\n def read(self, file):\n buffer = ''\n #f = self.openFile(file, \"r\")\n #buffer = f.read()\n #f.close()\n # http://sdqali.in/blog/2012/07/09/understanding-pythons-with/\n with self.openFile(file, \"r\") as f:\n buffer = f.read()\n return buffer\n \n def readfd(self, file):\n buffer = ''\n #f = self.openFileDescriptor(file, \"r\")\n #buffer = f.read()\n #f.close()\n # http://sdqali.in/blog/2012/07/09/understanding-pythons-with/\n with self.openFileDescriptor(file, \"r\") as f:\n buffer = f.read()\n return buffer\n \n def readLines(self, file):\n buffer = ''\n #f = self.openFile(file, \"r\")\n #buffer = f.readlines()\n #f.close()\n # http://sdqali.in/blog/2012/07/09/understanding-pythons-with/\n with self.openFile(file, \"r\") as f:\n buffer = f.readlines()\n return buffer\n \n def write(self, file, text):\n #f = self.openFile(file, \"w\")\n #f.write(text)\n #f.close()\n # http://sdqali.in/blog/2012/07/09/understanding-pythons-with/\n with self.openFile(file, \"w\") as f:\n f.write(text)\n \n def writefd(self, file, text):\n #f = self.openFileDescriptor(file, \"w\")\n #f.write(text)\n #f.close()\n # http://sdqali.in/blog/2012/07/09/understanding-pythons-with/\n with self.openFileDescriptor(file, \"w\") as f:\n f.write(text)\n \n def joinPath(self, *args): \n argslen = len(args)\n DS = os.sep\n \n if 0==argslen: return \".\"\n \n path = DS.join(args)\n plen = len(path)\n \n if 0==plen: return \".\"\n \n isAbsolute = path[0]\n trailingSlash = path[plen - 1]\n\n # http://stackoverflow.com/questions/3845423/remove-empty-strings-from-a-list-of-strings\n peices = [x for x in re.split(r'[\\/\\\\]', path) if x]\n \n new_path = []\n up = 0\n i = len(peices)-1\n while i>=0:\n last = peices[i]\n if last == \"..\":\n up = up+1\n elif last != \".\":\n if up>0: up = up-1\n else: new_path.append(peices[i])\n i = i-1\n \n path = DS.join(new_path[::-1])\n plen = len(path)\n \n if 0==plen and 0==len(isAbsolute):\n path = \".\"\n\n if 0!=plen and trailingSlash == DS:\n path += DS\n\n if isAbsolute == DS:\n return DS + path\n else:\n return path\n \n def realPath(self, file):\n if ''!=self.realpath and (file.startswith('./') or file.startswith('../') or file.startswith('.\\\\') or file.startswith('..\\\\')): \n return self.joinPath(self.realpath, file) #os.path.join(self.realpath, file) #os.path.realpath(os.path.join(self.realpath, file))\n else:\n return file\n \n # http://www.php2python.com/wiki/function.pathinfo/\n def fileext(self, file):\n #absolute_path = file #os.path.abspath(file)\n #dirname = os.path.dirname(absolute_path)\n #basename = os.path.basename(absolute_path)\n extension = os.path.splitext(file)[-1] # return \".py\"\n #filename = __file__\n #return {'dirname': dirname, 'basename': basename, 'extension': extension}\n if extension is not None:\n return extension\n return ''\n \n def parseArgs(self):\n if ap:\n parser = argparse.ArgumentParser(description=\"Build and Compress Javascript Packages\")\n parser.add_argument('--deps', 
help=\"Dependencies File (REQUIRED)\", metavar=\"FILE\")\n parser.add_argument('--compiler', help=\"uglifyjs (default) | closure | yui | cssmin, Whether to use UglifyJS, Closure, YUI Compressor or CSSMin Compiler\", default=self.selectedCompiler)\n parser.add_argument('--enc', help=\"set text encoding (default utf8)\", metavar=\"ENCODING\", default=self.Encoding)\n args = parser.parse_args()\n\n else:\n parser = optparse.OptionParser(description='Build and Compress Javascript Packages')\n parser.add_option('--deps', help=\"Dependencies File (REQUIRED)\", metavar=\"FILE\")\n parser.add_option('--compiler', dest='compiler', help=\"uglifyjs (default) | closure | yui | cssmin, Whether to use UglifyJS, Closure, YUI Compressor or CSSMin Compiler\", default=self.selectedCompiler)\n parser.add_option('--enc', dest='enc', help=\"set text encoding (default utf8)\", metavar=\"ENCODING\", default=self.Encoding)\n args, remainder = parser.parse_args()\n\n # If no arguments have been passed, show the help message and exit\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n \n # Ensure variable is defined\n try:\n args.deps\n except NameError:\n args.deps = None\n\n # If no dependencies have been passed, show the help message and exit\n if None == args.deps:\n parser.print_help()\n sys.exit(1)\n \n # fix compiler selection\n args.compiler = args.compiler.lower()\n if not ( args.compiler in self.availableCompilers): args.compiler = self.selectedCompiler\n \n return args\n \n # parse settings in hash format\n def parseHashSettings(self, settings=None):\n \n if settings:\n # parse it\n if 'DEPENDENCIES' in settings:\n deps = settings['DEPENDENCIES']\n # convert to list/array if not so\n if not isinstance(deps, list): deps = [deps]\n self.inFiles = deps\n else: \n self.inFiles = []\n \n if 'REPLACE' in settings:\n self.replace = settings['REPLACE']\n else: \n self.replace = None\n \n if ('DOC' in settings) and ('OUTPUT' in settings['DOC']):\n self.doc = settings['DOC']\n self.doc['OUTPUT'] = self.realPath(settings['DOC']['OUTPUT'])\n else: \n self.doc = None\n \n if 'MINIFY' in settings:\n self.doMinify = True\n minsets = settings['MINIFY']\n \n if 'UGLIFY' in minsets:\n opts = minsets['UGLIFY']\n # convert to list/array if not so\n if not isinstance(opts, list): opts = [opts]\n self.availableCompilers['uglifyjs']['options'] = \" \".join(opts)\n \n if 'CLOSURE' in minsets:\n opts = minsets['CLOSURE']\n # convert to list/array if not so\n if not isinstance(opts, list): opts = [opts]\n self.availableCompilers['closure']['options'] = \" \".join(opts)\n \n if 'YUI' in minsets:\n opts = minsets['YUI']\n # convert to list/array if not so\n if not isinstance(opts, list): opts = [opts]\n self.availableCompilers['yui']['options'] = \" \".join(opts)\n \n if 'CSSMIN' in minsets:\n opts = minsets['CSSMIN']\n # convert to list/array if not so\n if not isinstance(opts, list): opts = [opts]\n self.availableCompilers['cssmin']['options'] = \" \".join(opts)\n else: \n self.doMinify = False\n \n if 'OUT' in settings:\n self.outFile = self.realPath(settings['OUT'])\n self.outputToStdOut = False\n else:\n self.outFile = None\n self.outputToStdOut = True\n \n \n # parse dependencies file in INI format\n def parseIniSettings(self):\n if not self.IniParser:\n inimodule = self.import_path(os.path.join(self.parsersPath, 'ini.py'))\n self.IniParser = IniParser = inimodule.IniParser\n \n setts = IniParser.fromString(self.read(self.depsFile))\n \n if 'DEPENDENCIES' in setts:\n setts['DEPENDENCIES'] = 
setts['DEPENDENCIES']['__list__']\n if 'OUT' in setts:\n setts['OUT'] = setts['OUT']['__list__'][0]\n if 'REPLACE' in setts:\n del setts['REPLACE']['__list__']\n if 'DOC' in setts:\n del setts['DOC']['__list__']\n \n if 'MINIFY' in setts:\n minsetts = setts['MINIFY']\n \n if 'UGLIFY' in minsetts:\n setts['MINIFY']['UGLIFY'] = minsetts['UGLIFY']['__list__']\n if 'CLOSURE' in minsetts:\n setts['MINIFY']['CLOSURE'] = minsetts['CLOSURE']['__list__']\n if 'YUI' in minsetts:\n setts['MINIFY']['YUI'] = minsetts['YUI']['__list__']\n if 'CSSMIN' in minsetts:\n setts['MINIFY']['CSSMIN'] = minsetts['CSSMIN']['__list__']\n \n self.parseHashSettings( setts )\n \n # parse dependencies file in YAML format\n def parseYamlSettings(self):\n if _hasYaml_:\n self.parseHashSettings( yaml.load( self.read(self.depsFile) ) )\n else:\n print (\"PyYaml is not installed!!\")\n sys.exit(1)\n \n # parse dependencies file in JSON format\n def parseJsonSettings(self):\n # read json input\n self.parseHashSettings( json.loads( self.read(self.depsFile) ) )\n \n \n # parse dependencies file in custom format\n def parseCustomSettings(self):\n if not self.CustomParser:\n inimodule = self.import_path(os.path.join(self.parsersPath, 'custom.py'))\n self.CustomParser = CustomParser = inimodule.CustomParser\n \n setts = CustomParser.fromString(self.read(self.depsFile))\n \n #pprint.pprint(setts)\n #sys.exit(0)\n self.parseHashSettings( setts )\n \n def parse(self):\n args = self.parseArgs()\n \n # if args are correct continue\n # get real-dir of deps file\n full_path = self.depsFile = os.path.realpath(args.deps)\n self.realpath = os.path.dirname(full_path)\n self.Encoding = args.enc.lower()\n self.selectedCompiler = args.compiler\n \n ext = self.fileext(full_path).lower()\n if not len(ext): ext=\"custom\"\n \n if ext==\".json\": \n self.inputType=\".json\"\n self.parseJsonSettings()\n elif ext==\".yml\" or ext==\".yaml\": \n self.inputType=\".yaml\"\n self.parseYamlSettings()\n elif ext==\".ini\": \n self.inputType=\".ini\"\n self.parseIniSettings()\n else: \n self.inputType=\"custom\"\n self.parseCustomSettings()\n \n def doMerge(self):\n\n files = self.inFiles\n if len(files)>0:\n realpath=self.realpath\n buffer = []\n\n for filename in files:\n filename = self.realPath(filename)\n buffer.append(self.read(filename))\n\n return \"\".join(buffer)\n return \"\"\n\n \n def doReplace(self, text, replace):\n \n for k in replace:\n text = text.replace(k, replace[k])\n return text\n \n \n def doExtractDoc(self, text, doc):\n startDoc = doc['STARTDOC']\n endDoc = doc['ENDDOC']\n \n if 'TRIM' in doc: trim = doc['TRIM']\n else: trim = None\n \n docs = []\n \n # extract doc blocks\n blocks = text.split(startDoc)\n for b in range(len(blocks)):\n tmp = blocks[b].split(endDoc)\n if len(tmp)>1:\n docs.append(tmp[0])\n blocks = None\n \n # trim first chars of each doc block line\n if trim:\n trimlen = len(trim)\n for i in range(len(docs)):\n tmp = docs[i].split(\"\\n\")\n \n for j in range(len(tmp)):\n if len(tmp[j])>0 and tmp[j].startswith(trim):\n tmp[j] = tmp[j][trimlen:]\n \n docs[i] = \"\\n\".join(tmp)\n \n return \"\\n\\n\".join(docs)\n \n \n def doExtractHeader(self, text):\n header = ''\n if text.startswith('/**'):\n position = text.find(\"**/\", 0)\n header = text[0:position+3]\n elif text.startswith('/*!'):\n position = text.find(\"!*/\", 0)\n header = text[0:position+3]\n return header\n\n\n def doPreprocess(self, text):\n return text\n\n\n def doPostprocess(self, text):\n return text\n\n \n def doCompress(self, text):\n\n if '' != 
text:\n in_tuple = tempfile.mkstemp() \n out_tuple = tempfile.mkstemp()\n \n self.writefd(in_tuple[0], text)\n\n extra = ''\n if 'cssmin'==self.selectedCompiler:\n if not self.outputToStdOut:\n extra = \"--basepath \"+os.path.dirname(self.outFile)\n else:\n extra = \"\"\n elif 'yui'==self.selectedCompiler or 'closure'==self.selectedCompiler:\n extra = \"--charset \"+self.Encoding\n \n # use the selected compiler\n compiler = self.availableCompilers[self.selectedCompiler]\n cmd = compiler['compiler'].replace('__{{PATH}}__', self.compilersPath).replace('__{{EXTRA}}__', extra).replace('__{{OPTIONS}}__', compiler['options']).replace('__{{INPUT}}__', in_tuple[1]).replace('__{{OUTPUT}}__', out_tuple[1])\n err = os.system(cmd)\n # on *nix systems this is a tuple, similar to the os.wait return result\n # on windows it is an integer\n # http://docs.python.org/2/library/os.html#process-management\n # http://docs.python.org/2/library/os.html#os.wait\n # high-byte is the exit status\n if not (type(err) is int): err = 255 & (err[1]>>8)\n \n if 0==err: compressed = self.readfd(out_tuple[0])\n \n try:\n os.unlink(in_tuple[1])\n except: \n pass\n try:\n os.unlink(out_tuple[1])\n except: \n pass\n \n # some error occured\n if 0!=err: sys.exit(1)\n \n return compressed\n return ''\n\n\n def build(self):\n\n text = self.doMerge()\n header = ''\n \n #self.doPreprocess(text)\n \n if self.replace:\n text = self.doReplace(text, self.replace)\n \n if self.doc:\n self.write(os.path.join(self.doc['OUTPUT']), self.doExtractDoc(text, self.doc))\n \n sepLine = \"=\" * 65\n \n # output the build settings\n if not self.outputToStdOut:\n print (sepLine)\n print (\" Build Package \")\n print (sepLine)\n print (\" \")\n print (\"Input : \" + self.inputType);\n print (\"Encoding : \" + self.Encoding)\n if self.doMinify:\n print (\"Minify : ON\")\n print (\"Compiler : \" + self.availableCompilers[self.selectedCompiler]['name'])\n else:\n print (\"Minify : OFF\")\n print (\"Output : \" + self.outFile)\n print (\" \")\n \n if self.doMinify:\n # minify and add any header\n header = self.doExtractHeader(text)\n text = self.doCompress(text)\n\n #self.doPostprocess(text)\n \n # write the processed file\n if self.outputToStdOut: print (header + text)\n else: self.write(os.path.join(self.outFile), header + text)\n\n def Main():\n # do the process\n buildLib = BuildPackage()\n buildLib.parse()\n buildLib.build()\n\n\n# if called directly from command-line\n# do the process\nif __name__ == \"__main__\": \n BuildPackage.Main()\n","sub_path":"buildtools/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":19333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"222595711","text":"import xgboost as xgb\nfrom xgboost.sklearn import XGBClassifier\nimport return_data\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nimport warnings\nwarnings.filterwarnings(\"ignore\")\ndataset_X,dataset_Y=return_data.return_tarin_data()\n\nX_train, X_test, y_train, y_test = train_test_split(dataset_X, dataset_Y,\n test_size=0.2,\n random_state=45)\n\nxgb_val = xgb.DMatrix(X_test, label=y_test)\nxgb_train = xgb.DMatrix(X_train, label=y_train)\n\n\nxgb1 = XGBClassifier(\n learning_rate =0.05,\n n_estimators=2800,\n max_depth=5,\n min_child_weight=1,\n gamma=0.21,\n subsample=0.8,\n colsample_bytree=0.75,\n objective= 'binary:logistic',\n nthread=4,\n scale_pos_weight=1,\n 
seed=27)\nprint(\"fiting\")\nxgb1.fit(X_train,y_train)\npre=xgb1.predict(X_test)\nprint(pre)\nprint(y_test)\nacc=metrics.accuracy_score(y_test,pre)\nprint(acc)\n\nX_pre=return_data.return_test_data()\ny_pre=xgb1.predict(X_pre)\n\nwith open(\"baidu_sub21.csv\",\"w\") as f:\n for i in range(len(y_pre)):\n f.write(str(i+1)+\",\"+str(int(y_pre[i]))+\"\\n\")\nprint(\"pre over..\")\n","sub_path":"Titanic_prediction/XGB5_19(0.9999).py","file_name":"XGB5_19(0.9999).py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563696561","text":"from tkinter import *\r\n\r\nroot=Tk()\r\n\r\ndef printname():\r\n #declare headers\r\n from keras.datasets import mnist\r\n from keras import models\r\n from keras import layers\r\n from keras.utils import to_categorical\r\n #loading mnist data sets\r\n\r\n (train_image,train_lebels),(test_image,test_lebels)=mnist.load_data()\r\n\r\n\r\n train_image=train_image.reshape((60000,28*28))\r\n train_image=train_image.astype('float32')/255\r\n test_image=test_image.reshape((10000,28*28))\r\n test_image=test_image.astype('float32')/255\r\n\r\n\r\n #declare model architecture\r\n model=models.Sequential()\r\n #define the model layers\r\n model.add(layers.Dense(60,activation='relu',input_shape=(28*28,)))\r\n\r\n model.add(layers.Dense(30,activation='relu'))\r\n model.add(layers.Dense(10,activation='softmax'))\r\n\r\n #compile the program\r\n\r\n model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])\r\n\r\n\r\n\r\n train_lebels= to_categorical(train_lebels)\r\n test_lebels= to_categorical(test_lebels)\r\n #fiting the data\r\n model.fit(train_image,train_lebels,epochs=3,batch_size=500)\r\n\r\n\r\ndef evaluate():\r\n #evaluate model on test data\r\n Score=model.evaluate(test_image,test_lebels,verbose=1)\r\n print(\"test accuracy:\",Score[1]*100)\r\n model.save('2dense.h5')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nbutton=Button(root,text=\"clickme\",command=printname)\r\nbutton.pack()\r\n\r\nbutton=Button(root,text=\"clickme\",command=evaluate)\r\nbutton.pack()\r\n\r\nroot.mainloop()#it does not let the window close\r\n\r\n","sub_path":"10.keras..py","file_name":"10.keras..py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602010337","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 9 07:33:57 2021\r\n\r\n@author: Alejandro AJ\r\n\"\"\"\r\n\r\n#Programa que permite saber si un alumno Aprobó o Reprobó la asignatura.\r\nclass Estudiante:\r\n\r\n def __init__(self):\r\n self.nombre = input(\"Ingrese el nombre del estudiante:\")\r\n self.apellido = input(\"Ingrese el apellido del estudiante:\")\r\n self.nota1 = float(input(\"Ingrese la nota 1:\")) \r\n self.nota2 = float(input(\"Ingrese la nota 2:\"))\r\n self.nota3 = float(input(\"Ingrese la nota 3:\"))\r\n self.prome = 0\r\n\r\n def imprimir(self): \r\n print(\"Nombre:\",self.nombre)\r\n print(\"Apellido:\",self.apellido)\r\n print(\"Nota 1\",self.nota1)\r\n print(\"Nota 2\",self.nota2)\r\n print(\"Nota 3\",self.nota3)\r\n print(\"El promedio es:\",self.prome)\r\n \r\n def promedio(self):\r\n self.prome = (self.nota1 + self.nota2 + self.nota3)/3\r\n\r\n def promover_estudiante(self): \r\n if self.prome>3.0:\r\n print(\"Aprobó la asignatura\")\r\n else:\r\n print(\"Reprobó la asignatura\")\r\n\r\nestudiante1 = 
Estudiante()\r\nestudiante1.imprimir()\r\nestudiante1.promedio()\r\nestudiante1.promover_estudiante()","sub_path":"2. Promedio_notas1.py","file_name":"2. Promedio_notas1.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142436725","text":"#!/usr/bin/python\n\nfrom GSP import *\n\n#Slots' clickthrough rates\nslot_ctrs=dict()\nslot_ctrs[\"id1\"] = 10\nslot_ctrs[\"id2\"] = 4\n\n#Advertisers' values\nadv_values=dict()\nadv_values[\"x\"] = 10\nadv_values[\"y\"] = 11\nadv_values[\"z\"] = 12\n\n#Advertisers' bots\nadv_bots=dict()\nadv_bots[\"x\"] = best_response_competitive\nadv_bots[\"y\"] = best_response_competitive\nadv_bots[\"z\"] = best_response_competitive\n\nstep=0\nhistory=[]\nadv_bids=dict()\n\ndone=False\nmax_step=100\n\n#We repeat the auctions as long as an equilibrium has not been reached.\n#(This mean that advertisers submit the same bids in any successive repetition.)\n#If an equilibrium is not reached in short time, then we stop after max_step steps\nwhile not done and step < max_step:\n \n done = True\n for i in adv_values.keys():\n #Invoke the bots for computing the bids for each advertiser\n adv_bids[i] = adv_bots[i](i,adv_values[i],slot_ctrs,history)\n #If it is the first step or there is at least one advertiser whose bid is different from the bid submitted in the previous step,\n #then we need another iteration, otherwise we can stop\n \n if step == 0 or adv_bids[i] != history[step-1][\"adv_bids\"][i]:\n done=False\n \n \n if done:\n break\n \n #Execute the GSP auction with the bids computed above\n adv_slots, adv_pays = gsp(slot_ctrs,adv_bids)\n \n #Update the history\n history.append(dict())\n history[step][\"adv_bids\"]=dict(adv_bids)\n history[step][\"adv_slots\"]=dict(adv_slots)\n history[step][\"adv_pays\"]=dict(adv_pays)\n \n print(step, history[step])\n \n step += 1","sub_path":"social_networks/Script/Tests_GSP.py","file_name":"Tests_GSP.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586646711","text":"import tkinter\nimport random\n\nfrom frame import Frame\nfrom button import ButtonDone, ButtonCancel, ButtonAdd\nfrom utilities import parser, deparser\n\nclass TextBox(object):\n\tdef __init__(self, master, file_name, window, state='disabled', mode=None):\n\t\tself.master = master\n\t\tself.window = window\n\t\tself.mode = mode\n\n\t\twith open(file_name, 'ab+') as f:\n\t\t\tf.seek(0)\n\t\t\tlines = self._preprocess(parser(f.read()))\n\n\t\tself.text_box = tkinter.Text(\n\t\t\t\tmaster = master,\n\t\t\t\twidth = 58,\n\t\t\t\theight = len(lines)+1,\n\t\t\t\tbd = 0,\n\t\t\t)\n\n\t\tself.text_box.pack(side=tkinter.LEFT, expand=True)\n\n\t\tself.text_box.insert('1.0', ''.join(lines))\n\n\t\tself.text_box.config(state=state)\n\n\t\tif mode == 'dashboard':\n\t\t\tself.assign_button(len(lines))\n\n\tdef _preprocess(self, lines):\n\t\tif self.mode == 'recent':\n\t\t\treturn ['achievement points {}\\n\\n'.format(len(lines)*10)]+lines[-20:]\n\t\telif self.mode == 'random':\n\t\t\tif len(lines) <= 20:\n\t\t\t\treturn lines\n\n\t\t\trandom.seed()\n\n\t\t\tres, my_set = [], set()\n\t\t\twhile len(res) < 20:\n\t\t\t\tidx = random.randrange(0, len(lines))\n\t\t\t\tif idx not in my_set:\n\t\t\t\t\tmy_set.add(idx)\n\t\t\t\t\tres.append(lines[idx])\n\n\t\t\treturn res\n\t\telif self.mode == 'dashboard':\n\t\t\tres = []\n\t\t\tfor idx, line in 
enumerate(lines):\n\t\t\t\tres.append('{}.{}'.format(idx+1, line))\n\n\t\t\treturn res\n\n\tdef assign_button(self, nums):\n\t\tself.button_frame_list = []\n\t\tself.button_done_list, self.button_cancel_list = [], []\n\t\tfor idx in range(nums):\n\t\t\tbutton_frame = Frame(master=self.master, width=22, height=1, side=tkinter.TOP)\n\t\t\tself.button_frame_list.append(button_frame)\n\t\t\tself.button_done_list.append(ButtonDone(id=idx, master=button_frame.frame, text='{}.done'.format(idx+1), text_box=self, window=self.window))\n\t\t\tself.button_cancel_list.append(ButtonCancel(id=idx, master=button_frame.frame, text='{}.cancel'.format(idx+1), text_box=self, window=self.window))\n\n\t\tself.button_add = ButtonAdd(master=self.master, text_box=self, window=self.window)\n\n\tdef destroy(self):\n\t\tif self.mode == 'dashboard':\n\t\t\tself.button_add.destroy()\n\n\t\t\tfor button_done in self.button_done_list:\n\t\t\t\tbutton_done.destroy()\n\n\t\t\tfor button_cancel in self.button_cancel_list:\n\t\t\t\tbutton_cancel.destroy()\n\n\t\t\tfor button_frame in self.button_frame_list:\n\t\t\t\tbutton_frame.destroy()\n\n\t\tself.text_box.destroy()\n","sub_path":"text_box.py","file_name":"text_box.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255329417","text":"import torch\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer \nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nfrom gensim.models import KeyedVectors\n\nclass Dataset():\n def __init__(self, train_config_path):\n self.train_config_path = train_config_path\n\n print('Load word2vec')\n self.word2vec = KeyedVectors.load('./data/word2vec_fast/word2vec', mmap='r')\n\n # Run the below code only once to enable fast loading for word2vec model\n # self.word2vec = KeyedVectors.load_word2vec_format('./data/GoogleNews-vectors-negative300.bin.gz', binary=True)\n # self.word2vec.init_sims(replace=True)\n # self.word2vec.save('./data/word2vec_fast/word2vec')\n print('Done')\n\n self.lemma = WordNetLemmatizer() \n\n self._word_to_idx = {}\n self._weights = None\n self._build_vocab()\n\n def _preprocess_data_text(self, sentence, words):\n # text = sentence + ' _blank_ ' + words + ' _blank_ ' + sentence\n text = '_start_ ' + words + ' _start_sent_ ' + sentence + ' _end_'\n\n return text\n\n def _preprocess_sentence(self, sentence):\n words = word_tokenize(sentence)\n # words = [word.lower() for word in words if len(word) > 3]\n words = [self.lemma.lemmatize(word.lower()) for word in words if len(word) > 3]\n\n return words\n\n def _build_vocab(self):\n self._word_to_idx['_blank_'] = 0\n self._word_to_idx['_start_'] = 1\n self._word_to_idx['_start_sent_'] = 2\n self._word_to_idx['_end_'] = 3\n self._word_to_idx['_unk_'] = 4\n\n with open(self.train_config_path, 'r') as f:\n for line in f:\n data = line.strip().split('\\t')\n data_text = self._preprocess_data_text(data[1], data[4])\n words = self._preprocess_sentence(data_text)\n\n for word in words:\n if word not in self._word_to_idx and word in self.word2vec:\n self._word_to_idx[word] = len(self._word_to_idx)\n\n self._weights = np.random.rand(len(self._word_to_idx), 300)\n for k, v in self._word_to_idx.items():\n if k in self.word2vec:\n self._weights[v][:] = self.word2vec[k][:]\n self._weights[0][:] = np.zeros((300,))\n\n def sentenceToIdxs(self, sentence):\n words = self._preprocess_sentence(sentence)\n\n idxs = []\n for word in words:\n if word not 
in self._word_to_idx:\n idxs.append(4)\n continue\n idxs.append(self._word_to_idx[word])\n\n return idxs\n\n def get_data(self, train_config_path, test_config_path):\n X_train = []\n y_train = []\n X_test = []\n y_test = []\n\n with open(train_config_path, 'r') as f:\n for line in f:\n data = line.strip().split('\\t')\n data_text = self._preprocess_data_text(data[1], data[4])\n X_train.append((data_text,\n data[1],\n data[4],\n int(data[5]),\n int(data[6]),\n int(data[7]),\n int(data[8])))\n y_train.append(float(data[9]))\n\n with open(test_config_path, 'r') as f:\n for line in f:\n data = line.strip().split('\\t')\n data_text = self._preprocess_data_text(data[1], data[4])\n X_test.append((data_text,\n data[1],\n data[4],\n int(data[5]),\n int(data[6]),\n int(data[7]),\n int(data[8])))\n y_test.append(float(data[9]))\n\n return X_train, X_test, y_train, y_test\n\n def get_data_submission(self, submission_test_config_path):\n X = []\n\n with open(submission_test_config_path, 'r') as f:\n for line in f:\n data = line.strip().split('\\t')\n data_text = self._preprocess_data_text(data[1], data[4])\n X.append((data_text,\n data[1],\n data[4],\n int(data[5]),\n data[0]))\n\n return X\n","sub_path":"An II/dl/nlp/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426538012","text":"import time\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nCITY_DATA = { 'chicago': 'chicago.csv',\r\n 'new york city': 'new_york_city.csv',\r\n 'washington': 'washington.csv' }\r\ncitynames = ['chicago', 'new york city', 'washington']\r\nmonths = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\r\ndays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','all']\r\ndef get_filters():\r\n \"\"\"\r\n Asks user to specify a city, month, and day to analyze.\r\n\r\n Returns:\r\n (str) city - name of the city to analyze\r\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n \"\"\"\r\n print('Hello! Let\\'s explore some US bikeshare data!')\r\n\r\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\r\n print('Would you like to see data for Chicago, New York City or Washington ?')\r\n def cityname():\r\n city = str(input('Type city name :'))\r\n if city not in citynames:\r\n print('Please select correct city among Chicago, New York City or Washington.')\r\n city = cityname()\r\n return city\r\n city = cityname()\r\n\r\n # get user input for month (all, january, february, ... , june)\r\n print('Select a month january, february, march, april, may, june or all ?')\r\n def monthname():\r\n month = str(input('Type month :'))\r\n if month not in months:\r\n print('Please select correct month among january, february, march, april, may, june or all ?.')\r\n month = monthname()\r\n return month\r\n month = monthname()\r\n\r\n # get user input for day of week (all, monday, tuesday, ... 
sunday)\r\n    print('Select a day monday, tuesday, wednesday, thursday, friday, saturday, sunday or all ?')\r\n    def dayname():\r\n        day = str(input('Type day :'))\r\n        if day not in days:\r\n            print('Please select correct day among monday, tuesday, wednesday, thursday, friday, saturday, sunday or all.')\r\n            day = dayname()\r\n        return day\r\n    day = dayname()\r\n\r\n    print('-'*40)\r\n    return city, month, day\r\n\r\n\r\ndef load_data(city, month, day):\r\n    \"\"\"\r\n    Loads data for the specified city and filters by month and day if applicable.\r\n\r\n    Args:\r\n        (str) city - name of the city to analyze\r\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n    Returns:\r\n        df - Pandas DataFrame containing city data filtered by month and day\r\n    \"\"\"\r\n    # load data file into a dataframe\r\n    df = pd.read_csv(CITY_DATA[city])\r\n\r\n    # convert the Start Time column to datetime\r\n    df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n    # extract month and day of week from Start Time to create new columns\r\n    df['month'] = df['Start Time'].dt.month\r\n    df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n\r\n    # filter by month if applicable\r\n    if month != 'all':\r\n        # use the index of the months list to get the corresponding int\r\n        months = ['january', 'february', 'march', 'april', 'may', 'june']\r\n        month = months.index(month) + 1\r\n\r\n        # filter by month to create the new dataframe\r\n        df = df[df['month'] == month]\r\n\r\n    # filter by day of week if applicable\r\n    if day != 'all':\r\n        # filter by day of week to create the new dataframe\r\n        df = df[df['day_of_week'] == day.title()]\r\n\r\n    #df['Birth Year'].fillna(0,inplace = True)\r\n    return df\r\n\r\n\r\n\r\ndef time_stats(orignal_df):\r\n    \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\r\n\r\n    print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n    start_time = time.time()\r\n\r\n    # display the most common month\r\n\r\n    orignal_df['month'] = pd.DatetimeIndex(orignal_df['Start Time']).month\r\n\r\n    months_count = orignal_df['month'].value_counts()\r\n\r\n    maxV = months_count.idxmax()\r\n    months = ['january', 'february', 'march', 'april', 'may', 'june']\r\n    print('Most common month is {} and count is {}.'.format((months[maxV-1]).title(),months_count.max()))\r\n\r\n    # display the most common day of week\r\n    orignal_df['Week Day'] = pd.DatetimeIndex(orignal_df['Start Time']).weekday_name\r\n    days_count = orignal_df['Week Day'].value_counts()\r\n\r\n    maxDay = days_count.idxmax()\r\n\r\n    days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\r\n    print('Most common day of week is {} and count is {}.'.format(maxDay.title(),days_count.max()))\r\n\r\n\r\n\r\n    # display the most common start hour\r\n\r\n    orignal_df['Hours'] = pd.DatetimeIndex(orignal_df['Start Time']).hour\r\n    hours_count = orignal_df['Hours'].value_counts()\r\n\r\n    print('Most common hour is {} and count : {}'.format(hours_count.idxmax(),hours_count.max()))\r\n\r\n\r\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n    print('-'*40)\r\n\r\n\r\ndef station_stats(df):\r\n    \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\r\n\r\n    print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n    start_time = time.time()\r\n\r\n    # display most commonly used start station\r\n    Start_Station_counts = df['Start Station'].value_counts()\r\n    print('Most commonly used start station is \"{}\" and count : {}'.format(Start_Station_counts.idxmax(),Start_Station_counts.max()))\r\n    # display most commonly used end station\r\n    End_Station_counts = df['End Station'].value_counts()\r\n    print('Most commonly used end station is \"{}\" and count : {}'.format(End_Station_counts.idxmax(),End_Station_counts.max()))\r\n    # display most frequent combination of start station and end station trip\r\n    df['Start End stations'] = df['Start Station'] + df['End Station']\r\n    Start_End_Station = df['Start End stations'].value_counts()\r\n\r\n    print('Most commonly used start station and end station is \"{}\" and counts :\"{}\".'.format(Start_End_Station.idxmax(),Start_End_Station.max()))\r\n\r\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n    print('-'*40)\r\n\r\n\r\ndef trip_duration_stats(df):\r\n    \"\"\"Displays statistics on the total and average trip duration.\"\"\"\r\n\r\n    print('\\nCalculating Trip Duration...\\n')\r\n    start_time = time.time()\r\n\r\n    # display total travel time\r\n    total_time_sum = df['Trip Duration'].sum()\r\n    print('Total travel time is {}.'.format(total_time_sum))\r\n    # display mean travel time\r\n    total_time_mean = df['Trip Duration'].mean()\r\n    print('Total traveling mean time is {}.'.format(total_time_mean))\r\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n    print('-'*40)\r\n\r\n\r\ndef user_stats(df,city):\r\n    \"\"\"Displays statistics on bikeshare users.\"\"\"\r\n\r\n    print('\\nCalculating User Stats...\\n')\r\n    start_time = time.time()\r\n\r\n    # Display counts of user types\r\n    count_user = df['User Type'].value_counts()\r\n    print('Total Counts of user type are {}.'.format(count_user))\r\n    # Display counts of gender\r\n    df['Gender'].fillna('Not given',inplace=True)\r\n    count_user_gender = df['Gender'].value_counts()\r\n    print('Total Counts of user Gender type are {}.'.format(count_user_gender))\r\n\r\n\r\n    # Display earliest, most recent, and most common year of birth\r\n    birth_year = df['Birth Year'].value_counts()\r\n    if city == 'new york city' or city == 'washington':\r\n        print('Birth Year is not present for this city {}.'.format(city))\r\n\r\n    if city == 'chicago':\r\n\r\n        print('Earliest, most recent, and most common year of births are \"{}\", \"{}\" and \"{}\" of {}.'.format(df['Birth Year'].min(),df['Birth Year'].max(), birth_year.idxmax(),city))\r\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n    print('-'*40)\r\n\r\n\r\ndef main():\r\n    while True:\r\n        city, month, day = get_filters()\r\n        df = load_data(city, month, day)\r\n        orignal_df = pd.read_csv(CITY_DATA[city])\r\n        time_stats(orignal_df)\r\n        station_stats(df)\r\n        trip_duration_stats(df)\r\n        user_stats(df,city)\r\n\r\n        restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\r\n        if restart.lower() != 'yes':\r\n            break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315574836","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport requests\nimport requests.auth\nimport base64\nimport time\nimport json\nimport binascii\nfrom hashlib import sha256\nfrom urllib.parse import urlparse\nfrom datetime import datetime, timezone\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport os.path\nimport matplotlib.dates as mdates\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nfrom matplotlib.pyplot import figure\n\ndef get_balance(url, wallet_id):\n    page = urlopen(url+wallet_id)\n    soup = BeautifulSoup(page, features=\"lxml\")\n    balance = soup.body.find('div', attrs={'id' : 'currentBalance'}).text\n    balance = balance[16:]\n    return balance\n\n#Leave 'url' variable unchanged\nurl = 'https://nyzo.co/wallet?id='\n#Replace Sample with the nicknames for each of your verifiers/wallets\nnicknames = ['Sample', 'Sample', 'Sample']\n#Replace Sample with the public ID for each of your verifiers/wallets. Length of list should match length of nicknames list\nwallet_ids = ['Sample', 'Sample', 'Sample'] \n\nwallet_balances = []\nwallet_tuples = []\nwallets_dict = 0\nfor x in range(len(wallet_ids)):\n    wallet_balances.append(get_balance(url, wallet_ids[x]))\n    wallet_tuples.append(tuple((nicknames[x], float(wallet_balances[x]))))\nwallets_dict = {k: v for k, v in enumerate(wallet_tuples)}\n\ntotal_wallet_balance = 0\nfor x in range(len(wallets_dict)):\n    total_wallet_balance += wallets_dict[x][1]\ntotal_wallet_balance = round(total_wallet_balance, 6)\n\n#Initiate API connection to qTrade account\n\nclass QtradeAuth(requests.auth.AuthBase):\n    def __init__(self, key):\n        self.key_id, self.key = key.split(\":\")\n\n    def __call__(self, req):\n        # modify and return the request\n        timestamp = str(int(time.time()))\n        url_obj = urlparse(req.url)\n\n        request_details = req.method + \"\\n\"\n        request_details += url_obj.path + url_obj.params + \"\\n\"\n        request_details += timestamp + \"\\n\"\n        if req.body:\n            request_details += req.body + \"\\n\"\n        else:\n            request_details += \"\\n\"\n        request_details += self.key\n        hsh = sha256(request_details.encode(\"utf8\")).digest()\n        signature = base64.b64encode(hsh)\n        req.headers.update({\n            \"Authorization\": \"HMAC-SHA256 {}:{}\".format(self.key_id, signature.decode(\"utf8\")),\n            \"HMAC-Timestamp\": timestamp\n        })\n        return req\n\n# Create a session object to make repeated API calls easy!\napi = requests.Session()\n# Create an authenticator with your API key\napi.auth = QtradeAuth(\"\") #Your API Key goes between the quotations here\n\nall_balances = api.get(\"https://api.qtrade.io/v1/user/balances\").json()\ncold_balance = 0.0\nfor x in range(len(all_balances['data']['balances'])):\n    if all_balances['data']['balances'][x]['currency'] == 'NYZO':\n        cold_balance = float(all_balances['data']['balances'][x]['balance'])\n\non_order_balance = 0.0\nif len(all_balances['data']['order_balances']) > 0:\n    for x in range(len(all_balances['data']['order_balances'])):\n        if all_balances['data']['order_balances'][x]['currency'] == 'NYZO':\n            on_order_balance = float(all_balances['data']['order_balances'][x]['balance'])\ntotal_qTrade_balance = cold_balance + on_order_balance\ntotal_balance = total_qTrade_balance + total_wallet_balance\n\nticker = api.get(\"https://api.qtrade.io/v1/ticker/NYZO_BTC\").json()\n\nhighest_bid = float(ticker['data']['bid'])\nlowest_ask = float(ticker['data']['ask'])\nfront_of_book = lowest_ask-0.00000001\nmid_price = (highest_bid + lowest_ask)/2\n\nbest_bid_value = format(total_balance*highest_bid, '.8f')\nfront_of_book_value = format(total_balance*front_of_book, '.8f')\nmid_price_value = format(total_balance*mid_price, '.8f')\nhighest_bid = format(highest_bid, '.8f')\nfront_of_book = format(front_of_book, '.8f')\nmid_price = format(mid_price, '.8f')\ntimestamp = datetime.now(tz=None).strftime('%m-%d-%Y %H:%M')\nmessage = f\"\"\"All Wallets & Verifiers: {total_wallet_balance}\nqTrade Balance: {total_qTrade_balance}\nTotal NYZO Holdings: {total_balance}\nBest Bid Value @ {highest_bid}: {best_bid_value}\nFront Of Book Value @ {front_of_book}: {front_of_book_value}\nMid Price Value @ {mid_price}: {mid_price_value}\"\"\"\n\nprint(str(timestamp))\nfor k, v in wallets_dict.items():\n    print(str(v[0])+': '+str(v[1]))\nprint(message+'\\n')\n\ndictionary = {'Date': timestamp}\nfor x,y in wallet_tuples:\n    dictionary[x] = y\ndictionary['qTrade'] = total_qTrade_balance\ndictionary['Total'] = total_wallet_balance\ndictionary['btcValue'] = front_of_book_value\n\nif os.path.isfile('nyzo_holdings.csv'):\n    nyzo_balances_df = pd.read_csv('nyzo_holdings.csv', index_col=0)\n    new_values_df = pd.DataFrame(data=dictionary, index=[0])\n    new_values_df['Change'] = 0\n    nyzo_balances_df = nyzo_balances_df.append(new_values_df, ignore_index=True, sort=True)\n    nyzo_balances_df['Change'].iloc[-1] = (nyzo_balances_df['Total'].iloc[-1]-nyzo_balances_df['Total'].iloc[-2])\n    nyzo_balances_df.to_csv('nyzo_holdings.csv')\n    \n    fig = plt.figure(figsize=(18,10))\n    gs1 = gridspec.GridSpec(2, 1, height_ratios=[1, 1])\n\n    x_axis = nyzo_balances_df['Date']\n    ax1 = plt.subplot(gs1[0])\n    plt.plot(x_axis, nyzo_balances_df['Total'], label='Current_Total_Nyzo'+'\\n'+str(total_balance))\n    plt.ylabel('Total_Nyzo', fontdict={'fontsize': 24})\n    plt.title('Nyzo_Holdings_History', fontdict={'fontsize': 32})\n    plt.legend()\n    ax1.xaxis_date()\n    ax1.set_axisbelow(True)\n    plt.grid(b=None, which='major', axis='both')\n    plt.tick_params(axis='x', which='both', top=False, bottom=False, labelbottom=False)\n    plt.tick_params(axis='y', which='both', left=True, right=True, labelleft=True, labelright=True)\n\n    ax2 = plt.subplot(gs1[1], sharex=ax1)\n    plt.plot(x_axis, nyzo_balances_df['btcValue'])\n    plt.ylabel('BTC_Value', fontdict={'fontsize': 24})\n    plt.tick_params(axis='x', which='both', top=False, bottom=True, labeltop=False, labelbottom=True)\n    plt.tick_params(axis='y', which='both', left=True, right=True, labelleft=True, labelright=True)\n    plt.grid(b=None, which='major', axis='both')\n    ax2.set_axisbelow(True)\n    plt.autoscale(tight=True)\n    plt.tight_layout()\n    plt.subplots_adjust(top=0.85, bottom=0.07, left=0.07, right=0.93, hspace=0, wspace=0)\n    plt.savefig('nyzo_holdings.png', bbox_inches='tight',dpi=100)\n    plt.clf()\n\nelse:\n    nyzo_balances_df = pd.DataFrame(data=dictionary, index=[0])\n    nyzo_balances_df['Change'] = 0\n    nyzo_balances_df.to_csv('nyzo_holdings.csv')\n","sub_path":"nyzo_manager.py","file_name":"nyzo_manager.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"261187893","text":"from django.shortcuts import render\nimport requests\nimport json\nimport random\n\n\ndef index(request):\n\n\t# authentication\n\tuser = 'your-user-id' \n\tpw = 'your-password'\n\t\n\t# API urls\n\tbacklog_url = \"http://jira.manulife.com:8080/rest/api/2/search?jql=project%20%3D%20AEP%20AND%20status%20%3D%20Backlog\"\n\tin_progress_url = \"http://jira.manulife.com:8080/rest/api/2/search?jql=project%20%3D%20AEP%20AND%20status%20%3D%20%22In%20Progress%22\"\n\tpipeline_url = \"http://jira.manulife.com:8080/rest/api/2/search?jql=project%20%3D%20AEP%20AND%20status%20%3D%20%22Selected%20for%20Development%22\"\n\t\n\t\n\t# get backlog count\n\tbacklog_resp = requests.get(backlog_url, auth=(user,pw))\n\tjson_backlog = backlog_resp.json()\n\tbacklog = json_backlog['total']\n\t\n\t# get in progress count\n\tin_progress_resp = requests.get(in_progress_url, auth=(user,pw))\n\tjson_in_progress = in_progress_resp.json()\n\tin_progress = json_in_progress['total']\n\t\n\t# get pipeline count\n\tpipeline_resp = requests.get(pipeline_url, auth=(user,pw))\n\tjson_pipeline = pipeline_resp.json()\n\tpipeline = json_pipeline['total']\n\t\n\t\n\tarr = [0]*25\n\tcount = 25\n\t\n\tfor i in range(0, count):\n\t\tarr[i] = random.randint(1,50)\n\t\n\t# array values for sparklines\n\tbacklog_arr = [3, 6, 12, 7, 5, 2, 1, 3, 8, 9, 2, 5, 9, 3, 6, 3, 6, 2, 7, 5, 1, 2, 7, 5, 10]\n\tin_progress_arr = [3, 6, 12, 7, 5, 2, 1, 3, 8, 9, 2, 5, 9, 3, 6, 3, 6, 2, 7, 5, 1, 2, 7, 5, 10]\n\tpage_load_arr = [3, 6, 12, 7, 5, 2, 1, 3, 8, 9, 2, 5, 9, 3, 6, 3, 6, 2, 7, 5, 1, 2, 7, 5, 10]\n\tpipeline_arr = [3, 6, 12, 7, 5, 2, 1, 3, 8, 9, 2, 5, 9, 3, 6, 3, 6, 2, 7, 5, 1, 2, 7, 5, 10]\n\tpoc_flight_arr = [3, 6, 12, 7, 5, 2, 1, 3, 8, 9, 2, 5, 9, 3, 6, 3, 6, 2, 7, 5, 1, 2, 7, 5, 10]\n\tpoc_pipeline_arr = [3, 6, 12, 7, 5, 2, 1, 3, 8, 9, 2, 5, 9, 3, 6, 3, 6, 2, 7, 5, 1, 2, 7, 5, 10]\n\tpoc_pipeline_arr = arr\n\t\n\t# values to be passed to html page\n\targs = {'backlog': backlog, 'in_progress': in_progress, 'pipeline': pipeline,\n\t'backlog_vals': backlog_arr, 'in_progress_vals': in_progress_arr, 'page_load_vals': page_load_arr,\n\t'pipeline_vals': pipeline_arr, 'poc_flight_vals': poc_flight_arr, 'poc_pipeline_vals': poc_pipeline_arr}\n\t\n\treturn render(request, 'dashboard/Information Radiator/index.html', args)","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"432129148","text":"\"\"\"Module for a Plotly world map.\"\"\"\nimport base64\nimport plotly\nimport plotly.graph_objs as go\nimport plotly.io as pio\nfrom django.utils.safestring import mark_safe\n\n\ndef world_map(df, return_base64=False):\n \"\"\"Create a world map.\n @param attribute: df (dataframe) dataframe containing the data\n @param attribute: return_base64 (bool): Whether to return the result as a\n base64 string or not.\n :returns: HTML or base64 string.\"\"\"\n\n # Limit data to page views\n df = df[~df.type.str.contains('users')]\n\n scl = [[0.0, '#C5A653'],\n [0.2, '#C5A653'],\n [0.4, '#C5A653'],\n [0.6, '#C5A653'],\n [0.8, '#C5A653'],\n [1.0, '#C5A653']]\n\n data = [\n dict(\n type='choropleth',\n locations=df['country_iso_code'],\n z=df['value'],\n text=df['country'],\n colorscale=scl,\n showscale=False,\n )\n ]\n\n layout = dict(\n geo=dict(\n showframe=False,\n showcoastlines=False,\n showcountries=True,\n projection=dict(\n type='equirectangular'\n )\n ),\n autosize=False,\n width=700,\n 
height=300,\n margin=go.layout.Margin(\n l=0, # noqa: E741\n r=0,\n b=0,\n t=0,\n pad=0\n ),\n )\n\n fig = dict(\n data=data,\n layout=layout\n )\n\n if return_base64:\n img = pio.to_image(\n fig,\n format='svg',\n )\n data = \"data:image/svg;base64,\" + base64.b64encode(img).decode('utf8')\n else:\n # Generate the HTML\n data = plotly.offline.plot(\n fig,\n output_type='div',\n show_link=False,\n config=dict(\n displaylogo=False,\n modeBarButtonsToRemove=['sendDataToCloud']\n )\n )\n\n return mark_safe(data)\n","sub_path":"ncr_website/integrations/google/analytics/utils/charts/world_map.py","file_name":"world_map.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532164076","text":"# Calculates the correlation matrix\r\n# Author: Roberto Sanchez\r\n# Personal library.\r\n# 1. compute the correlation matrix\r\n# 2. Save the factors that are bigger than the correlation factor (i.e. abs(correlation) > fc)\r\n# 16/11/2016\r\n##\r\nfrom pymongo import MongoClient\r\nimport pandas as pd\r\nimport numpy as np\r\nimport time\r\nimport rs_common_framework_v4 as rs\r\n\r\n# TODO: change the name of the collection:---------\r\nMONGODB_HOST = 'localhost' #'192.168.6.132'\r\nMONGODB_PORT = 27017\r\nMONGODB_DB = 'project_db'\r\ncollection_series = 'filtered_time_series'\r\ncollection_metadata = 'metadata'\r\ncollection_correlation_matrix = 'correlation_matrix'\r\ntime_query = {}\r\nday_type = 'working_day'\r\nprojection = {'timestamp': 1}\r\nfc = 0.1 # this allows to save only correlations that are bigger tha fc\r\n# -------------------------------------------------\r\n\r\n\r\nconnection = MongoClient(MONGODB_HOST, MONGODB_PORT)\r\ncollection_series = connection[MONGODB_DB][collection_series]\r\ncollection_metadata = connection[MONGODB_DB][collection_metadata]\r\ncollection_correlation_matrix = connection[MONGODB_DB][collection_correlation_matrix]\r\n\r\n\r\n##############################################################\r\n\r\n\r\ndef main():\r\n # TODO: Put here the function to run\r\n compute_by_subcategory()\r\n return \"End of this script\"\r\n\r\n\r\n###############################################################\r\n\r\n\r\ndef compute_by_subcategory():\r\n query_dict = {}\r\n field = 'orientation'\r\n projection_dict = {field: True, '_id': False}\r\n subcategory_list = list(collection_metadata.find(query_dict, projection_dict))\r\n subcategory_list = [x[field] for x in subcategory_list]\r\n subcategory_list = list(set(subcategory_list))\r\n\r\n for sub_category in subcategory_list:\r\n tag_query = get_list_subcategory(sub_category, field)\r\n compute_matrix_correlation(tag_query)\r\n\r\n\r\ndef get_list_subcategory(sub_category, field):\r\n r_query = {}\r\n if sub_category == 'NE':\r\n r_query[field] = {'$in': ['NE', 'N', 'E', 'N/A']}\r\n return r_query\r\n if sub_category == 'SW':\r\n r_query[field] = {'$in': ['SW', 'S', 'W', 'N/A']}\r\n return r_query\r\n\r\n r_query[field] = {'$in': ['N/A', sub_category]}\r\n return r_query\r\n\r\n\r\ndef compute_matrix_correlation(tag_query):\r\n\r\n tag_list = rs.get_tag_names(collection_metadata, tag_query)\r\n id_tags = range(len(tag_list))\r\n timestamp = rs.get_tag_values(collection_series, time_query, projection, series_format='DF_t')\r\n offset = pd.DateOffset(days=7)\r\n offset_1h = pd.DateOffset(hours=1)\r\n start_time = pd.Timestamp(pd.Timestamp(rs.get_next_weekday(timestamp.index[0], \"Monday\"))._date_repr)\r\n end_time = timestamp.index[-1]\r\n 
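# The block below scores correlations week by week: a 7-day grid is laid over the series\r\n    # and each window's working-day values go through np.corrcoef. A minimal standalone\r\n    # sketch of the same windowing idea (hypothetical names, not part of this module):\r\n    #   edges = pd.date_range('2016-01-04', periods=5, freq='7D')\r\n    #   for lo, hi in zip(edges[:-1], edges[1:]):\r\n    #       week = df[(df.index >= lo) & (df.index < hi)]\r\n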
time_line = pd.date_range(start_time, end_time + offset, freq='7D')\r\n\r\n for end_time in time_line[1:]:\r\n\r\n print('-- processing: ', start_time, 'to', end_time - offset_1h)\r\n\r\n date_query = rs.dict_range_time(start_time, end_time - offset_1h)\r\n date_query['day_type'] = day_type\r\n df = rs.get_tag_values(collection_series, date_query, tag_list, series_format='DF_t')\r\n tag_list = list(df.columns)\r\n for tag in tag_list:\r\n df[tag] = pd.to_numeric(df[tag], errors='coerce')\r\n df.dropna(inplace=True)  # drop incomplete rows BEFORE extracting values, so corrcoef sees clean data\r\n values = df.T.values\r\n print('VALUES:', values.shape)\r\n matrix = np.corrcoef(values)\r\n\r\n for idx in id_tags:\r\n query_filter = {'tagname': tag_list[idx], 'timestamp': start_time._date_repr,\r\n 'epoch': time.mktime(time.strptime(str(start_time._date_repr), \"%Y-%m-%d\"))\r\n }\r\n aux = collection_correlation_matrix.find_one(query_filter)\r\n if aux is None:\r\n register = query_filter\r\n correlation_list = []\r\n else:\r\n register = aux\r\n correlation_list = register['correlation_list']\r\n for idy in id_tags:\r\n if idy != idx:\r\n if not np.isnan(matrix[idx][idy]):  # '!= np.nan' is always True; NaN never compares equal\r\n if abs(matrix[idx][idy]) > fc:\r\n register[tag_list[idy]] = matrix[idx][idy]\r\n correlation_list.append(tag_list[idy])\r\n\r\n register['correlation_list'] = list(set(correlation_list))\r\n collection_correlation_matrix.find_one_and_replace(\r\n filter=query_filter,\r\n replacement=register,\r\n upsert=True\r\n )\r\n\r\n print('matrix done')\r\n start_time = end_time\r\n\r\n\r\n###################################\r\n# TO RUN IN THIS APPLICATION\r\nif __name__ == \"__main__\":\r\n main()\r\n print('end of the script')\r\n","sub_path":"lib/bck/correlation_matrix_v1.py","file_name":"correlation_matrix_v1.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613557574","text":"import subprocess\nimport os\nimport sys\nimport torch\nfrom itertools import cycle\nimport numpy as np\n\nalgorithms = ['er_compositional', 'ewc_compositional', 'van_compositional']\nalgorithms += ['er_joint', 'ewc_joint', 'van_joint']\nalgorithms += ['er_nocomponents', 'ewc_nocomponents', 'van_nocomponents']\nalgorithms += ['er_dynamic', 'ewc_dynamic', 'van_dynamic']\nalgorithms += ['fm_compositional', 'fm_dynamic']\ndatasets = ['CIFAR', 'Omniglot']\ndatasets += ['MNIST', 'Fashion', 'CUB']\nnum_epochs = 100\nmini_batch = 32\nupdate_frequency = 100\ninit_mode = 'random'\nresults_root = 'results/gated'\n\nnum_gpus = torch.cuda.device_count()\n\ngpu_use_total = np.zeros(num_gpus)\ncuda_device_dict = {}\ncounter = 0\nprocess_gpu_use = {}\ndid_not_start = 0\ndid_not_finish = 0\nfinished = 0\nfor i in range(10):\n for d in datasets:\n if d == 'MNIST':\n num_tasks = 10\n size = 64\n num_layers = 4\n init_tasks = 4\n architecture = 'mlp_gated'\n gpu_use = 20\n elif d == 'Fashion':\n num_tasks = 10\n size = 64\n num_layers = 4\n init_tasks = 4\n architecture = 'mlp_gated'\n gpu_use = 20\n elif d == 'CIFAR':\n num_tasks = 20\n size = 50\n num_layers = 4\n init_tasks = 4\n architecture = 'cnn_gated'\n gpu_use = 25\n elif d == 'CUB':\n num_tasks = 20\n size = 256\n num_layers = 4\n init_tasks = 4\n architecture = 'mlp_gated'\n gpu_use = 20\n elif d == 'Omniglot':\n num_tasks = 50\n size = 53\n num_layers = 4\n init_tasks = 4\n architecture = 'cnn_gated'\n gpu_use = 25\n \n for a in algorithms:\n ewc_lambda = 1e-3\n cuda_device = counter % num_gpus\n\n while np.all(gpu_use_total + gpu_use > 100):\n for p in cycle(process_gpu_use):\n try:\n
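# poll this running job for up to 1 s; once it exits, its GPU share is subtracted below and the scan stops\n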
p.wait(1)\n gpu_use_remove = process_gpu_use[p]\n gpu_use_total[cuda_device_dict[p]] -= gpu_use_remove\n del process_gpu_use[p]\n del cuda_device_dict[p]\n break\n except subprocess.TimeoutExpired:\n pass\n \n cuda_device = np.argmin(gpu_use_total)\n results_path = os.path.join(results_root, d, a, 'seed_{}'.format(i))\n print(results_path + ': ', end='')\n if not os.path.isdir(results_path):\n print('Did not start')\n did_not_start += 1\n else:\n completed_tasks = len([name for name in os.listdir(results_path) if os.path.isdir(os.path.join(results_path, name))])\n if completed_tasks != num_tasks:\n print('Did not finish', end='')\n did_not_finish += 1\n else:\n print('Finished')\n finished += 1\n continue\n my_env = os.environ.copy()\n my_env['CUDA_VISIBLE_DEVICES'] = str(cuda_device)\n args = ['python', 'lifelong_experiment.py',\n '-T', str(num_tasks), \n '-d', d,\n '-e', str(num_epochs),\n '-b', str(mini_batch),\n '-f', str(update_frequency),\n '--lambda', str(ewc_lambda),\n '-s', str(size),\n '-l', str(num_layers),\n '-k', str(init_tasks),\n '-i', init_mode,\n '-arc', architecture,\n '-alg', a,\n '-n', str(1),\n '-r', results_root,\n '--initial_seed', str(i)]\n p = subprocess.Popen(args, env=my_env)\n process_gpu_use[p] = gpu_use\n gpu_use_total[cuda_device] += gpu_use\n counter += 1\n cuda_device_dict[p] = cuda_device\n print(cuda_device)\n\nprint(did_not_start, did_not_finish, finished)\n\n\n","sub_path":"experiment_script_gated.py","file_name":"experiment_script_gated.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238354935","text":"#%%\nfrom inheriting_contacts import Contact\nclass Friend(Contact):\n def __init__(self, name, email, phone):\n super().__init__(name,email)\n self.phone = phone\n\nf = Friend(\"Harry Potter\",\"hp@gmail.com\",123123)\nprint(f.all_contacts)\nprint(f.name,f.email,f.phone)\n\n\n# %%\n","sub_path":"4 Pillars of OOP/overriding_friends.py","file_name":"overriding_friends.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"184419068","text":"import subprocess\nfrom multiprocessing import Process\n\nfrom sqlalchemy.exc import IntegrityError\nfrom whereami.learn import learn\nfrom whereami.predict import locations, predict\nfrom flask import (\n render_template,\n request,\n)\n\nfrom . 
import (\n app,\n sockets,\n)\nfrom .models import *\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n areas = Area.query.order_by(Area.date_posted.asc()).all()\n areas = [Area(\"coding_spo\"), Area(\"back\"), Area(\"stage\"), Area(\"sleeping_area\")]\n return render_template(\"index.html\", areas=areas, config=app.config[\"PORT\"])\n\n# @app.route(\"/room\")\n# def api_whereami_predict():\n# try:\n# name = predict()\n# area = Area(name)\n# tick = AreaTick(area.id)\n# db.session.add(tick)\n# db.session.commit()\n# return name\n# except LearnLocation:\n# return \"None\"\n\n\n@app.route(\"/learn\", methods=[\"GET\", \"POST\"])\ndef api_learn():\n name = request.form[\"areaName\"]\n def func():\n try:\n Area.learn(name)\n area = Area(name)\n # db.session.add(area)\n # db.session.commit()\n except LearnLocation:\n pass\n except IntegrityError:\n pass\n p = Process(target=func)\n p.start()\n return index()\n\n\n@app.route(\"/room\")\ndef whereami_predict():\n prediction = predict()\n talk('the current room is the '+fake_name_to_real_name(prediction))\n return prediction\n\n\ndef talk(sentence):\n subprocess.call(['./bin/say', sentence])\n\n\ndef fake_name_to_real_name(fake):\n return {\n 'coding_spo': 'bedroom',\n 'back': 'bathroom',\n 'stage': 'livingroom',\n 'sleeping_area': 'frontdoor',\n }[fake]\n","sub_path":"server/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"348860636","text":"from pwn import *\nimport string\n\ndef connect(guess):\n r = remote('60.250.197.227', 11001)\n r.sendline(guess)\n res = int(r.recvall(1).split(' ')[5])\n return res\n\nnow = 341\nflag = 'AIS3{A1r1ght_U_4r3_my_3n3'\nwhile True:\n for s in string.printable:\n print('guess {}'.format(s))\n #print(flag)\n res = connect(flag + s)\n if res - now >= 10:\n max_ = 0\n min_ = 99999999999999999999\n for i in range(40):\n res = connect(flag + s)\n if max_ < res: \n max_ = res\n if min_ > res:\n min_ = res\n avg = (max_ + min_) / 2\n \n if avg < now + 10:\n continue\n\n now = avg\n flag += s\n print('avg {}'.format(now))\n print(flag)\n open('flag', 'a').write(flag + '\\n')\n break\n\n","sub_path":"Games/2020AIS3PreExam/Saburo/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341118997","text":"\"\"\" Functions for parsing an ant world file. 
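Maps are whitespace-separated token grids ('#' rock, '.' clear, '+'/'-' red/black anthills, digits food), preceded by two size lines.\n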
\"\"\"\nfrom world import Cell\nfrom ant import Color, Ant\n\ndef parse_cell(tok, ant_id, pos):\n \"\"\" Parse an individual cell charecter and return a new cell of the correct\n type\n\n :param tok: The character that represents a cell\n :param and_id: If the cell has an ant on it, use this as the ant id.\n :param pos: The position of the cell.\n :return: A new cell that represents the token on the map.\n \"\"\"\n if tok == \"#\":\n return Cell(rocky=True), ant_id\n elif tok == \".\":\n return Cell(), ant_id\n elif tok == \"+\":\n return Cell(anthill=Color.RED, ant=Ant(ant_id, Color.RED, 0, 0, 0, False, pos)), ant_id + 1\n elif tok == \"-\":\n return Cell(anthill=Color.BLACK, ant=Ant(ant_id, Color.BLACK, 0, 0, 0, False, pos)), ant_id + 1\n else:\n return Cell(food=int(tok)), ant_id\n\ndef parse_world(inp):\n \"\"\" Parse a string that represnts an world and turn it into a coordinate\n cell map\n\n :param inp: The string that represents an ant world\n :return: A dict of cell coords to Cell objects\n \"\"\"\n world = {}\n lines = inp.splitlines()\n x_size = lines.pop(0)\n y_size = lines.pop(0)\n # Check for size....\n ant_id = 0\n for y, line in enumerate(lines):\n for x, tok in enumerate(line.split()):\n world[(x, y)], ant_id = parse_cell(tok, ant_id, (x, y))\n return world\n\ndef load_world(path):\n \"\"\" Load a string from a file and turn it into an ant world cell map.\n\n :param path: path of an ant world file\n :return: an ant world cell map\n \"\"\"\n with open(path, \"r\") as f:\n data = f.read()\n return parse_world(data)\n","sub_path":"sub/ant-game/parse_world.py","file_name":"parse_world.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"78571357","text":"from scipy import interpolate\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\nfrom src.SoccerVisualizer import SoccerVisualizer\n\n\ndef interpolate_3d(points, steps):\n tck, u = interpolate.splprep(points, s=1/len(points))\n knots = interpolate.splev(tck[0], tck)\n u_fine = np.linspace(0,1,steps)\n points_fine = interpolate.splev(u_fine, tck)\n return points_fine\n\n\nif __name__=='__main__':\n num_true_pts = 20\n s_true = np.linspace(0, 10, num_true_pts)\n x_true = np.cos(s_true)\n y_true = np.sin(s_true)\n z_true = s_true / 3\n\n points = np.asfarray([x_true, y_true, z_true])\n\n points_fine = interpolate_3d(points, 200)\n\n # fig = plt.figure()\n # ax3d = fig.add_subplot(111, projection='3d')\n # ax3d.plot(x_true, y_true, z_true, 'b*')\n # ax3d.plot(points_fine[0], points_fine[1], points_fine[2], 'g')\n # fig.show()\n # plt.show()\n\n visualizer = SoccerVisualizer()\n visualizer.draw_3d_line(points_fine, 'red')\n visualizer.draw_3d_line(points, 'blue')\n print('The End.')\n\n\n","sub_path":"src/TrajectoryEstimation.py","file_name":"TrajectoryEstimation.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591551120","text":"str = 'asd dsag'\nother = str.replace(' ', '')\nstr2 = other[::-1]\nprint(other == str2)\nprint(str2)\n\nsent = 'my school gjgjjgjgjgjjgjgjg ggg fdxsdf hjj hh'\ntemp = ''\nlist = sent.split(' ')\nfor i in list:\n if len(i) > len(temp):\n temp = i\nprint(temp)\n\nmy_string = 'fhhfv nbnb ldld'\nlist_s = my_string.split(' ')\nx = '.'.join(list_s)\nprint(x)\n\nmy_date = '05.06.1989'\nlist_ss = my_date.split('.')\nprint(list_ss)\ntemp_l = list_ss[0]\nlist_ss[0] = 
list_ss[1]\nlist_ss[1] = temp_l\nprint('/'.join(list_ss))\n","sub_path":"src/string.py","file_name":"string.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"234877458","text":"import requests\nfrom restCountries import *\n\ndef skyScannerAirportFinder(country):\n url = \"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/autosuggest/v1.0/UK/GBP/gr-GR/\"\n capital = countryNameInfo(country)[1]\n #print(capital)\n querystring = {\"query\":capital}\n headers = {\n 'x-rapidapi-host': \"skyscanner-skyscanner-flight-search-v1.p.rapidapi.com\",\n 'x-rapidapi-key': \"df61d8e863msh47948b951144384p16523djsn4b480d122f9d\"\n }\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response_data = response.json()\n PlaceId = response_data['Places'][0]['PlaceId']\n PlaceName = response_data['Places'][0]['PlaceName']\n return[PlaceId, PlaceName]\n\n\n\ndef skyScannerAirportFinderCity(city):\n url = \"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/autosuggest/v1.0/UK/GBP/gr-GR/\"\n querystring = {\"query\":city}\n headers = {\n 'x-rapidapi-host': \"skyscanner-skyscanner-flight-search-v1.p.rapidapi.com\",\n 'x-rapidapi-key': \"df61d8e863msh47948b951144384p16523djsn4b480d122f9d\"\n }\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response_data = response.json()\n #print(response_data)\n PlaceId = response_data['Places'][0]['PlaceId']\n PlaceName = response_data['Places'][0]['PlaceName']\n #print(\"passed city\",city)\n return[PlaceId, PlaceName]\n\n\n\ndef skyScannerNeighbors(country):\n airport1 = skyScannerAirportFinder(country)\n #print(airport1)\n neighbors = countryNameInfo(country)[4]\n #print(neighbors)\n limit1 = range(len(neighbors))\n airport2 = []\n for x in limit1:\n airport2.append(skyScannerAirportFinderCity(neighbors[x]))\n #print(airport2[0])\n if ['BRN-sky', 'Bern'] in airport2: airport2.remove(['BRN-sky', 'Bern'])\n limit = range(len(airport2))\n return [airport1,airport2,limit]\n\ndef getFlights(airport1,airport2):\n url = \"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/browsequotes/v1.0/US/USD/en-US/{}/{}/anytime\".format(airport1,airport2)\n headers = {\n 'x-rapidapi-host': \"skyscanner-skyscanner-flight-search-v1.p.rapidapi.com\",\n 'x-rapidapi-key': \"df61d8e863msh47948b951144384p16523djsn4b480d122f9d\"\n }\n response = requests.request(\"GET\", url, headers=headers)\n response_data = response.json()\n results = response_data['Quotes']\n limit = range(2,len(results))\n #print(results)\n try:\n ticket1 = results[1]['MinPrice']\n try:\n ticket2 = results[2]['MinPrice']\n quote1 = 0\n quote2 = 0\n for x in limit:\n if results[x]['MinPrice'] < ticket1:\n ticket1 = results[x]['MinPrice']\n quote1 = x+1\n if ticket2 > results[x]['MinPrice']:\n ticket2 = results[x]['MinPrice']\n quote2 = x+1\n #print(\"Incoming flight ticket prices:\",quote1,ticket1,\"Outgoing flight ticket prices\",quote2,ticket2,\"Euros\")\n fullprice = ticket1+ticket2\n flights = []\n flights.append(results[quote1-1])\n flights.append(results[quote2-1])\n departuredate = flights[0]['OutboundLeg']['DepartureDate']\n except:\n fullprice = 2*ticket1\n departuredate = results[0]['OutboundLeg']['DepartureDate']\n except:\n print(\"No flights available between\",airport1,\"and\",airport2)\n fullprice = \"0\"\n departuredate = \"0\"\n return [airport1,airport2,float(fullprice),departuredate]\n\ndef getNeighborFlights(country):\n skyScannerNeighbors1 =
skyScannerNeighbors(country)\n airport1 = skyScannerNeighbors1[0]\n targetairports = skyScannerNeighbors1[1]\n limit = skyScannerNeighbors1[2]\n #print(skyScannerNeighbors1)\n results = []\n targetairportsfinal = []\n for x in limit:\n #print(\"flights from\",skyScannerNeighbors1[0][1] ,\"to\",targetairports[x][1] ,\"are:\",getFlights(airport1[0],targetairports[x][0]))\n results.append(getFlights(airport1[0],targetairports[x][0]))\n targetairportsfinal.append(targetairports[x][1])\n getnflights = skyScannerNeighbors1[0][1],results,targetairportsfinal,limit\n #print(getnflights)\n limit = getnflights[3]\n fromairport = getnflights[1][0][0]\n outairports = []\n dates = []\n urls = []\n for x in limit:\n currOutAirport = (getnflights[1][x][1])[:-4]\n outairports.append((getnflights[1][x][1])[:-4])\n dates.append(\"{}{}{}\".format(getnflights[1][x][3][2:4],getnflights[1][x][3][5:7],getnflights[1][x][3][8:10]))\n urls.append(\"https://gr.skyscanner.com/transport/flights/{}/{}/{}/?adults=1&children=0&adultsv2=1&childrenv2=&infants=0&cabinclass=economy&rtn=0&preferdirects=false&outboundaltsenabled=false&inboundaltsenabled=false&ref=home\".format(fromairport[:-4],currOutAirport,dates[x]))\n for x in limit:\n try:\n if results[x][2] == '0':\n del results[x]\n del targetairportsfinal[x]\n del urls[x]\n except:\n limit = range(len(results))\n limit = range(len(results)) \n return [skyScannerNeighbors1[0][1],results,targetairportsfinal,limit,urls]","sub_path":"Tkinter/skyPalio.py","file_name":"skyPalio.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127413120","text":"#Defining a funtion to write to files\r\n#Gave two parameters to the function\r\n#one for data that has to be put into the file\r\n#one for the name of the file\r\n\r\ndef writeToFiles(textInput, fileName):\r\n\tf = open(fileName, mode = \"w\")\r\n\tf.write(textInput)\r\n\treturn\r\n#Calling the function to write a file named hello\r\n#and put the text \"Hello World!\" in the text file\r\n\r\nwriteToFiles(\"Hello World!\", \"C:\\\\Users\\\\devipriya.patel\\\\Desktop\\Python\\\\hello.txt\")\r\n\r\n","sub_path":"writeToFiles.py","file_name":"writeToFiles.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"198531240","text":"# -*- coding: utf-8 -*-\n'''Prebuilt pipeline for visualizing a dual flouroscope scene.'''\n\nfrom .base_pipeline import BasePipeline\nimport vtk\nfrom StereoFlouroscopyRegistration.util.vtk_helpers import create_vtkMatrix4x4, GetRGBColor\n\nclass DualFlouroSceneVisualizer(BasePipeline):\n '''Prebuilt pipeline for visualizing a dual flouroscope scene.\n\n To solve the orientation problem, the user must supply a transform in homogeneous\n coordinates. Internally, the orientation of the image data is dropped. 
This way,\n we visualize the data with proper orientation with respect to each other.\n '''\n\n def __init__(self):\n super(DualFlouroSceneVisualizer, self).__init__()\n\n # Remove origin information\n self.xray_changer_1 = vtk.vtkImageChangeInformation()\n self.xray_changer_1.SetOutputOrigin(0, 0, 0)\n self.xray_changer_2 = vtk.vtkImageChangeInformation()\n self.xray_changer_2.SetOutputOrigin(0, 0, 0)\n self.ct_changer = vtk.vtkImageChangeInformation()\n self.ct_changer.SetOutputOrigin(0, 0, 0)\n\n # Setup mapper and actor for x-ray images\n self.xray_mapper_1 = vtk.vtkImageSliceMapper()\n self.xray_mapper_1.SetInputConnection(self.xray_changer_1.GetOutputPort())\n self.xray_mapper_2 = vtk.vtkImageSliceMapper()\n self.xray_mapper_2.SetInputConnection(self.xray_changer_2.GetOutputPort())\n\n self.xray_property = vtk.vtkImageProperty()\n self.xray_property.SetInterpolationTypeToNearest()\n\n self.xray_slice_1 = vtk.vtkImageSlice()\n self.xray_slice_1.SetMapper(self.xray_mapper_1)\n self.xray_slice_1.SetProperty(self.xray_property)\n\n self.xray_slice_2 = vtk.vtkImageSlice()\n self.xray_slice_2.SetMapper(self.xray_mapper_2)\n self.xray_slice_2.SetProperty(self.xray_property)\n\n self.marchingCubes = vtk.vtkImageMarchingCubes()\n self.marchingCubes.SetInputConnection(self.ct_changer.GetOutputPort())\n self.marchingCubes.ComputeGradientsOn()\n self.marchingCubes.ComputeNormalsOn()\n self.marchingCubes.ComputeScalarsOn()\n self.marchingCubes.SetNumberOfContours(1)\n self.marchingCubes.SetValue(0, 0)\n\n self.ct_mapper = vtk.vtkPolyDataMapper()\n self.ct_mapper.SetInputConnection(self.marchingCubes.GetOutputPort())\n self.ct_mapper.ScalarVisibilityOff()\n self.ct_actor = vtk.vtkActor()\n self.ct_actor.SetMapper(self.ct_mapper)\n self.ct_actor.GetProperty().SetInterpolationToGouraud()\n self.ct_actor.GetProperty().SetColor(GetRGBColor('antique_white'))\n\n self.renderer = vtk.vtkRenderer()\n self.renderer.AddViewProp(self.ct_actor)\n self.renderer.AddViewProp(self.xray_slice_1)\n self.renderer.AddViewProp(self.xray_slice_2)\n self.renderer.SetBackground(0.1, 0.2, 0.3)\n\n self.interactor = vtk.vtkRenderWindowInteractor()\n self.interactor_style = vtk.vtkInteractorStyleTrackballCamera()\n self.interactor.SetInteractorStyle(self.interactor_style)\n\n def SetCTInputConnection(self, port):\n '''Set the input port for the CT volume.\n\n No tests are performed to validate the input.\n\n Args:\n port (int): The input connection port\n\n Returns:\n None\n '''\n self.ct_changer.SetInputConnection(port)\n self.marchingCubes.SetUpdateExtentToWholeExtent() # DO NOT REMOVE!\n\n def SetCam1InputConnection(self, port):\n '''Set the input port for camera 1.\n\n No tests are performed to validate the input.\n\n Args:\n port (int): The input connection port\n\n Returns:\n None\n '''\n self.xray_changer_1.SetInputConnection(port)\n\n def SetCam2InputConnection(self, port):\n '''Set the input port for camera 2.\n\n No tests are performed to validate the input.\n\n Args:\n port (int): The input connection port\n\n Returns:\n None\n '''\n self.xray_changer_2.SetInputConnection(port)\n\n def SetCTOrientationMatrix(self, matrix):\n '''Set the display matrix for the CT volume.\n\n No tests are performed to validate the input.\n\n Args:\n matrix (np.array): The rotation and translation in homogeneous coordiantes\n\n Returns:\n None\n '''\n self.ct_actor.PokeMatrix(create_vtkMatrix4x4(matrix))\n self.marchingCubes.SetUpdateExtentToWholeExtent() # DO NOT REMOVE!\n\n def SetCam1OrientationMatrix(self, matrix):\n '''Set the 
display matrix for camera 1.\n\n No tests are performed to validate the input.\n\n Args:\n matrix (np.array): The rotation and translation in homogeneous coordiantes\n\n Returns:\n None\n '''\n self.xray_slice_1.PokeMatrix(create_vtkMatrix4x4(matrix))\n\n def SetCam2OrientationMatrix(self, matrix):\n '''Set the display matrix for camera 2.\n\n No tests are performed to validate the input.\n\n Args:\n matrix (np.array): The rotation and translation in homogeneous coordiantes\n\n Returns:\n None\n '''\n self.xray_slice_2.PokeMatrix(create_vtkMatrix4x4(matrix))\n\n def SetMarchingCubesValue(self, value):\n '''Set the value for marching cubes.\n\n A contour is computed through the image data at this value. See\n vtkImageMarchingCubes for more information.\n\n Args:\n value (float): The contour value\n\n Returns:\n None\n '''\n self.marchingCubes.SetValue(0, value)\n self.marchingCubes.SetUpdateExtentToWholeExtent() # DO NOT REMOVE!\n\n def SetCamWindow(self, window):\n '''Set the window for both camera 1 and 2.\n\n The camera 1 and 2 have the same window and level.\n\n Args:\n window (float): The window value\n\n Returns:\n None\n '''\n self.xray_property.SetColorWindow(window)\n\n def SetCamLevel(self, level):\n '''Set the level for both camera 1 and 2.\n\n The camera 1 and 2 have the same window and level.\n\n Args:\n level (float): The level value\n\n Returns:\n None\n '''\n self.xray_property.SetColorLevel(level)\n\n def set_render_window(self, render_window):\n '''Setup the render window.\n\n Args:\n render_window (vtk.vtkRenderWindow): The render window created by the holding class.\n\n Returns:\n vtk.vtkRenderWindowInteractor: The interactor created by the class with style ImageSlicing\n '''\n\n # Add renderer to render window\n render_window.AddRenderer(self.renderer)\n self.interactor.SetRenderWindow(render_window)\n self.renderer.ResetCamera()\n\n return self.interactor\n","sub_path":"StereoFlouroscopyRegistration/pipelines/df_scene_visualizer_pipeline.py","file_name":"df_scene_visualizer_pipeline.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"323672478","text":"#Leon Oram\r\n#17-12-2014\r\n#Functions 2\r\nimport pdb\r\n\r\nvalues = [float(8.17),float(1.49),float(2.78),float(4.25),float(12.70),float(1.23),float(2.02),float(6.09),float(0.15),float(0.77),float(4.02),float(6.75),float(7.51),float(1.93),float(0.09),float(5.99),float(6.33),float(9.06),float(2.76),float(0.98),float(2.36),float(0.15),float(1.97),float(0.07)]\r\n\r\ndef get_input():\r\n cyp = input(\"Please enter your encrypted message: \\n\" )\r\n cyp=cyp.lower()\r\n words = cyp.split()\r\n return words\r\n#try each interation of ceaser and add value\r\n #turn all letters in word to unicode\r\n #words = [[h,e,l,l,o][w,o,r,l,d]]]\r\n #values = [123,123]\r\n\r\ndef get_letter(word_list):\r\n letter=[]\r\n word_unicode=[]\r\n for word_num,word in enumerate(word_list):\r\n for letter_num,letters in enumerate(word):\r\n letter.append(letters) #e.g [3,4,5,6]\r\n word_unicode.append(convert(letter))\r\n letter=[]\r\n return word_unicode\r\n \r\ndef convert(letters):\r\n letters_uni=[]\r\n for letter in letters:\r\n letters_uni.append(ord(letter))\r\n return letters_uni\r\n \r\n\r\nword_list = get_input()\r\nword_unicode = get_letter(word_list)\r\nword_total=0\r\nword_total_value=[]\r\n\r\nfor word_number,current_word in enumerate(word_unicode):\r\n for count in range(0,26):\r\n for current_letter in current_word:\r\n 
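# score this Caesar shift: wrap the shifted code point back into range (the script uses 120\r\n            # as its cutoff) and add the English letter-frequency weight values[shifted - 97]\r\n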
#print(current_letter+count)\r\n if current_letter + count > 120:\r\n current_letter = current_letter - 26\r\n #print(current_letter+count-97)\r\n word_total = word_total + values[current_letter+count-97]\r\n word_total_value.append(word_total)\r\n word_total = 0\r\n print(word_total_value)\r\n","sub_path":"Xmas.py","file_name":"Xmas.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418584878","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2019/10/10\n# @Author : Edrain\n\"\"\"\nIf we don't want a \\ inside a string to be treated as an escape, we can mark the string by prefixing it with the letter r\n\"\"\"\n\ns1 = r'\\'hello, world!\\''\ns2 = r'\\n\\\\hello, world!\\\\\\n'\nprint(s1, s2, end='')\n","sub_path":"code_ed/Day01-15/code/Day07/str04.py","file_name":"str04.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"651845053","text":"import string\n\nwith open('fdf.txt', 'rt') as f:\n text = f.read()\nfor symbol in string.punctuation:\n text = text.replace(symbol, \" \")\nwords = text.split()\nfr_dict = {}\nfor word in words:\n if word.lower() in fr_dict:\n fr_dict[word.lower()] += 1\n else:\n fr_dict[word.lower()] = 1\nword_freq = [(counter, word) for word, counter in fr_dict.items()]\nword_freq.sort()\nprint(word_freq)\n","sub_path":"notes_20160422.py","file_name":"notes_20160422.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"58807296","text":"# # Given two arrays, write a function to compute their intersection.\n# #\n# # Example 1:\n# #\n# # Input: nums1 = [1,2,2,1], nums2 = [2,2]\n# # Output: [2,2]\n#\n# Example 2:\n#\n# Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4,0]\n# Output: [4,9]\nfrom typing import List\n\n\nclass Solution:\n\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n smap = {}\n for x in nums1:\n if x not in smap:\n smap[x] = 1\n else:\n smap[x] += 1\n res = []\n for i in nums2:\n if smap.__contains__(i) and smap.get(i) > 0:\n res.append(i)\n smap[i] -= 1\n return res\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.intersect([4, 9, 5], [9, 4, 9, 8, 4]))\n","sub_path":"lc350.py","file_name":"lc350.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"383999980","text":"# A node structure\nfrom collections import deque\n\n\nclass Node:\n # A utility function to create a new node\n def __init__(self, key):\n self.val = key\n self.left = None\n self.right = None\n\n @staticmethod\n def max_depth(root):\n if root is None:\n return 0\n q = deque([root])\n level = 0\n while q:\n level += 1\n for _ in range(len(q)):\n node = q.popleft()\n if node.left is not None:\n q.append(node.left)\n if node.right is not None:\n q.append(node.right)\n return level\n\n\n# Driver Code\nif __name__ == '__main__':\n t_root = Node(3)\n t_root.left = Node(9)\n t_root.right = Node(20)\n t_root.right.left = Node(15)\n t_root.right.right = Node(7)\n\n print(\"max_depth\")\n\n print(t_root.max_depth(t_root))\n","sub_path":"_Trees/bfs/leetcode/9_104.py","file_name":"9_104.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559595153","text":"from django.conf.urls import patterns, url\n\nurlpatterns =
patterns(\n\t'scheduling.apps.homepage.views',\n\turl(r'^$', 'index', name=\"homepageindex\"),\n\turl(r'^sobrenosotros/$', 'aboutus', name=\"homepageabout\"),\n\turl(r'^asprova/$', 'asprova', name=\"homepageasprova\"),\n\turl(r'^contacto/$', 'contact', name=\"homepagecontact\"),\n\turl(r'^sector_industrial/$', 'sector', name=\"homepagesector\"),\n\turl(r'^sector_industrial/(?P[-\\w,]+)/$', 'sectordetalle', name=\"sectordetalle\"),\n)\n","sub_path":"scheduling/scheduling/apps/homepage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"555383678","text":"from tkinter import *\nfrom tkinter import ttk\nimport tkinter as tk\nimport cv2\nimport PIL.Image, PIL.ImageTk\nimport time\nimport datetime as dt\nimport argparse\nimport imutils\nfrom imutils import face_utils\nimport dlib\nimport numpy as np\nfrom numpy import newaxis\nimport matplotlib.pyplot as plt\nimport Audio_Processing\nimport Face_Processing\nimport G_Code_Generator\nimport serial\nimport serial.tools.list_ports\nimport os\n\nclass App:\n def __init__(self, window, window_title, video_source=0):\n#OpenCV/Dlib Initializations:\n self.video_source = video_source\n self.face_cascade = cv2.CascadeClassifier(os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,'Machine_Learning_Files','haarcascade_frontalface_default.xml')))\n self.landmark_model = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,'Machine_Learning_Files','shape_predictor_68_face_landmarks.dat'))\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor(self.landmark_model)\n self.vid = VideoCapture( self.video_source) #Open Videosource - Default is webcam\n#Tkinter Initializations:\n self.window = window\n self.window.title(window_title)\n self.G_Code = tk.StringVar()\n self.frame = tk.Frame(self.window)\n self.canvas = tk.Canvas(window, width = self.vid.width, height = self.vid.height) # Create a canvas that can fit the above video source size\n self.canvas.pack()\n#~~Buttons~~\n #Draw Button\n self.btn_capture=tk.Button(self.frame, text=\"Draw Me!\", bg = \"green\",fg = \"white\",width = 25,height = 2, command=self.capture)\n self.btn_capture.pack(side=tk.LEFT)\n #Record Audio Button\n self.btn_record=tk.Button(self.frame, text=\"Listen\",bg = \"blue\",fg = \"white\",width = 25, height = 2, command = self.audio_recording)\n self.btn_record.pack(side=tk.LEFT)\n self.frame.pack()\n #Quit button\n self.btn_quit=tk.Button(self.frame, text='QUIT',bg = \"red\",fg = \"white\",width = 25,height = 2, command=self.close)\n self.btn_quit.pack(side=tk.RIGHT)\n #Add another frame to pack additional buttons\n self.frame2 = tk.Frame(self.window)\n self.frame2.pack()\n #Label\n self.label1 = tk.Label(self.frame2,text=\"Manual G-Code: \")\n self.label1.pack(side = tk.LEFT)\n #Text Entry\n self.text_entry=ttk.Entry(self.frame2,textvariable = self.G_Code)\n self.text_entry.pack(side=tk.LEFT)\n #Demo Button\n self.btn_demo = tk.Button(self.frame2, text = 'DEMO', bg = \"green\", fg = \"white\", width = 25, height = 2, command = self.demo)\n self.btn_demo.pack(side = tk.RIGHT)\n #Send Button\n self.btn_send = tk.Button(self.frame2, text = 'SEND',bg = \"purple\",fg = 'white', width = 25, height = 2, command = self.manual)\n self.btn_send.pack(side = tk.RIGHT)\n\n\n# Serial Communication\n#~~~~~~~~~~~~~~\n# self.ser = serial.Serial('COM3', baudrate = 115200, timeout = 1)\n# time.sleep(3)\n# 
self.ser.reset_input_buffer()\n # ser.write(b'%')\n # time.sleep(1)\n# arduinoData = self.ser.readline().decode('ascii')\n# print(arduinoData)\n#~~~~~~~~~~~~~~\n\n # After it is called once, the update method will be automatically called every delay milliseconds\n self.delay=10\n self.update()\n\n self.window.mainloop()\n\n\n def close(self):\n self.ser.close()\n tk.quit()\n\n def connect_arduino(self):\n ports = serial.tools.list_ports.comports()\n commPort = 'None'\n numConnection = len(ports)\n\n for i in range(0,numConnection):\n port = ports[i]\n strPort = str(port)\n\n if 'Arduino' in strPort:\n splitPort = strPort.split(' ')\n commPort = (splitPort[0])\n\n if commPort != 'None':\n self.ser = serial.Serial(commPort,baudrate = 115200, timeout=1)\n print('Connected to ' + commPort)\n\n else:\n print('Connection Issue!')\n\n def send_code(self,g_code = None):\n \"\"\"Send g-code line by line over serial port\"\"\"\n for i in g_code:\n self.ser.write((i + '\\n').encode('utf-8'))\n time.sleep(0.5)\n while self.ser.in_waiting:\n print(self.ser.readline())\n return\n\n def demo(self):\n g_code = G_Code_Generator.main('demo')\n self.send_code(g_code)\n\n def audio_recording(self):\n my_audio = Audio_Processing.audio_main()\n g_code = G_Code_Generator.main('sound',my_audio)\n self.send_code(g_code)\n\n def clear_entry(self):\n self.text_entry.delete(0,'end')\n\n def manual(self):\n text = self.G_Code.get()\n print(text)\n text_array = [text]\n self.send_code(text_array)\n self.clear_entry()\n\n\n def show_info(self,msg):\n popup = tk.Tk()\n popup.wm_title(\"!\")\n label = tk.Label(popup, text=msg)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n B1 = tk.Button(popup, text=\"Okay\", command = popup.destroy)\n B1.pack()\n print(\"Should be throwing error message\")\n popup.mainloop()\n\n def find_rect(self,rect):\n x = rect.left()\n y = rect.top()\n w = rect.right() - x\n h = rect.bottom() - y\n return(x,y,w,h)\n\n def shape_to_np_jaw(self,shape,x1,y1,w,h,dtype = np.int32):\n coords = np.zeros((18,2), dtype = dtype)\n for i in range(0,18):\n if i == 0:\n coords[i][0] = x1\n coords[i][1] = y1\n elif i == 17:\n coords[i][0] = x1+w\n coords[i][1] = y1\n else:\n coords[i][0] = shape.part(i).x\n coords[i][1] = shape.part(i).y\n new_coords = coords.reshape(1,18,2)\n\n return new_coords\n\n def dodge(self,front,back):\n return cv2.divide(front, 255-back,scale=256)\n\n\n def sort_x(self, array):\n x_array = []\n for coord in range(len(array)):\n for j in range(len(array[coord])):\n if(j==0):\n x_array = np.append(x_array,array[coord][j])\n return x_array\n\n def sort_y(self, array):\n y_array = []\n for coord in range(len(array)):\n for j in range(len(array[coord])):\n if(j==1):\n y_array = np.append(y_array,array[coord][j])\n return y_array\n\n def thin_array(self, array):\n array[1::5]\n return array\n\n\n\n def capture(self):\n # Get a frame from the video source\n ret,frame=self.vid.get_frame()\n\n if ret:\n rects = self.detector(frame, 0)\n print(\"Rects: \",rects)\n if len(rects) == 0:\n self.show_info(\"No faces detected!\")\n else:\n coords = Face_Processing.face_to_dotmap(frame,rects)\n x_array = self.sort_x(coords)\n y_array = self.sort_y(coords)\n\n x_array = [arr for i,arr in enumerate(x_array) if i % 5 == 0]\n y_array = [arr for i,arr in enumerate(y_array) if i % 5 == 0]\n coord_array = np.column_stack((x_array,y_array))\n\n plt.plot(x_array,y_array,\"ro\")\n plt.show()\n g_code = G_Code_Generator.main('face',coord_array)\n self.send_code(g_code)\n\n\n def update(self):\n\n # Get a frame 
from the video source\n ret, frame = self.vid.get_frame()\n\n if ret:\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n faces = self.face_cascade.detectMultiScale(gray, 1.1, 4)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame))\n self.canvas.create_image(0, 0, image = self.photo, anchor = tk.NW)\n self.window.after(self.delay,self.update)\n\n\nclass VideoCapture:\n def __init__(self, video_source=0):\n # Open the video source\n self.vid = cv2.VideoCapture(video_source)\n if not self.vid.isOpened():\n raise ValueError(\"Unable to open video source\", video_source)\n# check, frame = video.read()\n\n # Command Line Parser\n args=CommandLineParser().args\n\n\n #create videowriter\n\n # 1. Video Type\n VIDEO_TYPE = {\n 'avi': cv2.VideoWriter_fourcc(*'XVID'),\n #'mp4': cv2.VideoWriter_fourcc(*'H264'),\n 'mp4': cv2.VideoWriter_fourcc(*'XVID'),\n }\n\n # 2. Video Dimension\n STD_DIMENSIONS = {\n '480p': (640, 480),\n '720p': (1280, 720),\n '1080p': (1920, 1080),\n '4k': (3840, 2160),\n }\n res=STD_DIMENSIONS[args.res[0]]\n\n\n #set video sourec width and height\n self.vid.set(3,res[0])\n self.vid.set(4,res[1])\n\n # Get video source width and height\n self.width,self.height=res\n\n\n # To get frames\n def get_frame(self):\n if self.vid.isOpened():\n ret, frame = self.vid.read()\n if ret:\n # Return a boolean success flag and the current frame converted to BGR\n return (ret, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n else:\n return (ret, None)\n else:\n return (ret, None)\n\n def __del__(self):\n if self.vid.isOpened():\n self.vid.release()\n\n cv2.destroyAllWindows()\n\n\n\nclass CommandLineParser:\n\n def __init__(self):\n\n parser=argparse.ArgumentParser(description='Script to record videos')\n\n\n parser.add_argument('--res', nargs=1, default=['480p'], type=str, help='Resolution of the video output: for now we have 480p, 720p, 1080p & 4k')\n\n self.args = parser.parse_args()\n\n\n\ndef main():\n\n App(tk.Tk(),'Canny GUI')\n\nmain()\n","sub_path":"Python/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":9886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507272899","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nfrom keras import backend as K\nimport numpy as np\nfrom keras.models import Model, Input\nfrom keras.layers import LSTM, Dense, Bidirectional, Lambda\n\n\nclass Predictor:\n\n def __init__(self):\n sess = tf.Session()\n K.set_session(sess)\n self.elmo_model = hub.Module(\"https://tfhub.dev/google/elmo/2\",\n trainable=True)\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n print(\"Done\")\n tags = ['1', '2']\n self.tags2labels = {'1':'Negative', '2':'Positive'}\n self.tag2idx = {t: i for i, t in enumerate(tags)}\n self.n_tags = len(tags)\n self.max_len = 50\n self.batch_size = 25\n self.load_model()\n\n def ElmoEmbedding(self, x):\n return self.elmo_model(inputs={\n \"tokens\": tf.squeeze(tf.cast(x, tf.string)),\n \"sequence_len\": tf.constant(self.batch_size * [self.max_len])\n },\n signature=\"tokens\",\n as_dict=True)[\"elmo\"]\n\n def load_model(self):\n input_text = Input(shape=(self.max_len,), dtype=\"string\")\n embedding = Lambda(self.ElmoEmbedding, output_shape=(self.max_len, 1024))(input_text)\n lstm = Bidirectional(LSTM(512))(embedding)\n l_dense = Dense(100, activation='relu')(lstm)\n out = Dense(self.n_tags, activation='softmax')(l_dense)\n 
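# assembled network: ELMo token embeddings (1024-d) -> BiLSTM(512) -> Dense(100, relu) -> 2-way softmax;\n        # the .h5 path below is the author's local checkpoint\n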
self.model = Model([input_text], out)\n self.model.load_weights('D:\\\\sentiment\\\\experiments_eng\\\\weights0.h5')\n\n def vectorize_text(self, text):\n X_vector = text.split(' ')\n X = []\n new_seq = []\n for i in range(self.max_len):\n try:\n new_seq.append(X_vector[i])\n except:\n new_seq.append(\"__PAD__\")\n X.append(new_seq)\n for i in range(24):\n new_seq = []\n for y in range(self.max_len):\n new_seq.append(\"__PAD__\")\n X.append(new_seq)\n return np.array(X)\n\n def predict(self, text):\n vector = self.vectorize_text(text)\n predictions = self.model.predict(vector)[0]\n preds = [('Negative', predictions[0]), ('Positive', predictions[1])]\n return preds\n","sub_path":"sentiment_demo/sent_predictor_eng.py","file_name":"sent_predictor_eng.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588575836","text":"# -*-encoding=UTF-8 -*-\n# author:mahome\n\nfrom model.user import *\nfrom model.weibo import *\nfrom tools.lib import read_file_by_split_tag\nimport copy\n\nclass DataBase(object):\n \"\"\"\n\n \"\"\"\n __data = {}\n\n\n def init_data(self):\n self.__init_structure()\n self.__init_user()\n\n def __init_structure(self):\n self.__data[\"users\"] = {}\n self.__data[\"train_uid\"] = []\n self.__data[\"test_id\"] = []\n\n\n def __init_user(self):\n\n #inti user\n\n #trian\n train_labels = read_file_by_split_tag(\"../data/train/train_labels.txt\", tag=\"||\")\n #style: 2291075214||m||1994||湖北 武汉\n for label in train_labels:\n self.__data[\"users\"][label[0]] = User(label[0], sex=label[1], birth=label[2], region=label[3])\n self.__data[\"train_uid\"].append(label[0])\n #test\n test_labels = read_file_by_split_tag(\"../data/test/test_nolabels.txt\", tag=\"||\")\n #style: 2291075214\n for label in test_labels:\n self.__data[\"users\"][label[0]] = User(label[0])\n self.__data[\"test_uid\"].append(label[0])\n\n\n all_info = read_file_by_split_tag(\"../data/train/train_info.txt\", tag=\"||\")\n all_info.extend(read_file_by_split_tag(\"../data/test/test_info.txt\", tag=\"||\"))\n #style:1033009482||霏霏老爸||http://tp3.sinaimg.cn/1033009482/180/1277326853/1\n for info in all_info:\n if info[0] not in self.__data[\"users\"]: self.__data[\"users\"][info[0]] = User(info[0])\n self.__data[\"users\"][info[0]].name = info[1]\n self.__data[\"users\"][info[0]].photo_url = info[2]\n\n def get_user(self, id):\n \"\"\"\n use\n :param str userid:\n :return:\n \"\"\"\n pass\n\n def get_train_ids(self):\n \"\"\"\n\n :return:[uid] include all train id\n \"\"\"\n train_ids = copy.deepcopy(self.__data[\"train_uid\"])\n return train_ids\n\n\n def get_test_ids(self):\n \"\"\"\n\n :return:[uid] include all test id\n \"\"\"\n test_ids = copy.deepcopy(self.__data[\"test_id\"])\n return test_ids\n\nif __name__ == \"__main__\":\n test = DataBase()\n test.init_data()\n tmp = test.get_test_ids()\n pass\n","sub_path":"data_manage/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496438141","text":"\"\"\"\nДомашнее задание №1\nФункции и структуры данных\n\"\"\"\n\n\ndef power_numbers(*args):\n \"\"\"\n функция, которая принимает N целых чисел,\n и возвращает список квадратов этих чисел\n \"\"\"\n return list(map(lambda x: x ** 2, args))\n\n# filter types\nODD = \"odd\"\nEVEN = \"even\"\nPRIME = \"prime\"\n\n\ndef filter_numbers(list_numbers, filter_type=PRIME):\n \"\"\"\n функция, которая на 
вход принимает список из целых чисел,\n и возвращает только чётные/нечётные/простые числа\n (выбор производится передачей дополнительного аргумента)\n \"\"\"\n if filter_type == ODD:\n return list(filter(lambda x: x % 2 == 1, list_numbers))\n elif filter_type == EVEN:\n return list(filter(lambda x: x % 2 == 0, list_numbers))\n elif filter_type == PRIME:\n temp_list_number = list()\n for number in list_numbers:\n if number == 1:\n continue\n elif number > 1:\n it_prime = False\n for denominator in range(1,number):\n if number % denominator == 0 and denominator != 1 and denominator != number:\n #list_numbers.remove(number) - Так было бы лучше, но валится тест из за того, что список меняется на лету.\n it_prime = False\n break\n else:\n it_prime = True\n if it_prime == True:\n temp_list_number.append(number)\n return temp_list_number\n\ndef is_prime(x):\n return 2 in [x, 2 ** x % x]\n\n#print(filter_numbers([3, 5, 7, 9, 11], PRIME))\n\nprint(is_prime(4))","sub_path":"homework_01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116025771","text":"#coding:utf-8\nimport pandas as pd\nfrom pandas import DataFrame\nfrom pylab import mpl\nimport matplotlib.pyplot as plt\nmpl.rcParams['font.sans-serif']=['SimHei'] #设置字体\nmpl.rcParams['axes.unicode_minus']=False #编码\ndef dict_deal(dict_in):\n dict_out={}\n numAPP=0\n numMD=0\n numSJ=0\n numCS=0\n numDGS=0\n numDL=0\n for i in dict_in.keys():\n #print i,'+',a_read[i]\n\n if u'移动APP' in i:\n numAPP+=dict_in[i]\n if u'魔都' in i:\n numMD+=dict_in[i]\n if u'手机浏览器' in i:\n numSJ+=dict_in[i]\n if u'唱衰' in i:\n numCS+=dict_in[i]\n if u'大公司创新' in i:\n numDGS+=dict_in[i]\n if u'大佬' in i:\n numDL+=dict_in[i]\n dict_out['移动APP']=numAPP\n dict_out['魔都']=numMD\n dict_out['手机浏览器']=numSJ\n dict_out['唱衰']=numCS\n dict_out['大公司创新']=numDGS\n dict_out['大佬']=numDL\n return dict_out\ndf = pd.ExcelFile(\"wxgzhdata.xlsx\").parse(u\"3月\")\ndf2=df.fillna(method=\"pad\")#往后填充\nd1=df2.groupby(u'文章标题').sum()[[u'总阅读人数',u'初次打开阅读人数',u'分享次数',u'每日增粉人数']]\na=d1.to_dict(outtype='dict')#list\n#print a\na_read= a[u'总阅读人数']\nf_read=a[u'初次打开阅读人数']\nshare=a[u'分享次数']\nfans=a[u'每日增粉人数']\n\n#print a_read\nx={}\nout_a_read=dict_deal(a_read)#总阅读人数dict\nout_f_read= dict_deal(f_read)#初次打开阅读人数\nout_share=dict_deal(share)#分享次数\nout_fans=dict_deal(fans)#每日增粉人数\nx['总阅读人数']=out_a_read\nx['初次打开阅读人数']=out_f_read\nx['分享次数']=out_share\nx['增粉数']=out_fans\nxf=DataFrame(x)\n#print xf\nxf.plot( kind='barh')\nxf.T.plot( kind='barh')\nplt.show()\n","sub_path":"weixin.py","file_name":"weixin.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"268587456","text":"\"\"\"\r\nThis program group the lyrics data by decades\r\nOutput: txt files with all the lyrics for each decade\r\n\"\"\"\r\nimport sqlite3\r\n\r\nconn = sqlite3.connect('music.db')\r\ncur = conn.cursor()\r\n\r\n# Extract lyrics from database\r\ndef get_lyrics(start_year, end_year):\r\n q = 'SELECT song_lyrics, year FROM Songs'\r\n cur.execute(q)\r\n rows = cur.fetchall()\r\n result = []\r\n for row in rows:\r\n if (int(row[1]) < start_year or int(row[1]) > end_year) \\\r\n or row[0] == 'NA': # Ignore missing lyrics\r\n continue\r\n else:\r\n result.append(row[0])\r\n return result\r\n\r\ninterval = 10\r\nstart_year = 1959\r\nlyrics_dic = {}\r\nwhile True:\r\n lyrics = get_lyrics(start_year, start_year+interval)\r\n fname = 
'lyrics/lyrics_' + str(start_year + 1) + '.txt'\r\n f = open(fname, 'wb')\r\n for line in lyrics:\r\n f.write(line.replace('\\n\\n', '. ').replace('\\n', '. ').strip().encode('utf-8'))\r\n f.close()\r\n start_year += 10\r\n if start_year >= 2019:\r\n break\r\n","sub_path":"luhuanchen-part-b/lyrics_decades.py","file_name":"lyrics_decades.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"517895610","text":"import sys\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn.metrics import confusion_matrix\n\n\n\n\ndef readfile(filename):\n\t# data_info = np.fromstring(open(filename, 'r').readline().strip(), sep=' ').astype('uint')\n\tdata = np.loadtxt(filename, skiprows=1)\n\ttrainX = data[:,1:5]\n\ttrainY = data[:,5]\n\treturn(trainX, trainY)\n\ndef main():\n\n\ttrainfile=sys.argv[1]\n\ttestfile=sys.argv[2]\n\n\ttrainX, trainY = readfile(trainfile)\n\n\tclf = tree.DecisionTreeClassifier()\n\tclf = clf.fit(trainX,trainY)\n\n\ttestX, testY = readfile(testfile)\n\n\tx = np.shape(testX)[0]\n\n\n\tresult = clf.predict(testX)\n\n\n\tprint(testY)\n\tprint(result)\n\n\tconf_mat = confusion_matrix( testY, result)\n\n\tprint (conf_mat)\n\t\n\n\nmain()","sub_path":"GoodMovie.py","file_name":"GoodMovie.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645473662","text":"from flask import Flask, request, abort, redirect, json, jsonify\nfrom data_process.process import *\nfrom data_process.align import *\nfrom fairseqq.interactive_new import load_model, translate\n\napp = Flask(__name__)\n\n# preload model\ntask, align_dict, models, tgt_dict, translator, use_cuda, args=load_model()\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\n@app.route('/translate', methods=['POST'])\ndef login():\n if request.method == 'POST':\n # get input from request\n input = request.form['input']\n\n # preprocess ok\n pre_input = preprocess(input)\n\n # translate\n # trans_output = translate(my_model, pre_input)\n results = translate(task, align_dict, models, tgt_dict, translator, args, use_cuda, pre_input)\n pre_trans = results[0].hypos[0].split('\\t')[2]\n\n #print(pre_input)\n #print(pre_trans)\n # postprocess\n add(pre_input, pre_trans)\n nalign = results[0].alignments[0]\n #print(repr(results))\n salign()\n\n delete()\n return jsonify({\n 'status': \"ok\",\n 'code': 200,\n 'data': results[0].hypos,\n })\n else:\n return abort(403)\n\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"158571154","text":"import scrapy\nfrom image_parser.items import ImageParserItem\nfrom scrapy_redis.spiders import RedisSpider\nimport json\nimport redis\nfrom scrapy.http import Request\nfrom search_img.models import *\n\n\nclass YandexSpider(RedisSpider):\n \"\"\"\n Class to parse yandex.ua.\n\n Attributes:\n name: Spider name.\n allowed_domains: List of strings containing domains that this spider\n is allowed to crawl.\n start_urls: A list of URLs where the spider will begin to crawl\n from, when no particular URLs are specified.\n tag: Tag name.\n images_quantity: The number of images that need to parse.\n number: Record number counter.\n \"\"\"\n\n name = 'yandex_spider'\n allowed_domains = ['yandex.ua']\n start_urls = 
['https://yandex.ua/images/search?text=%s']\n # tag = None\n images_quantity = 5\n # number = 1\n\n def make_request_from_data(self, data):\n \"\"\"\n Make request from data.\n\n Args:\n data: Data.\n\n Returns:\n Transmits URL into the function make_requests_from_url.\n \"\"\"\n data = json.loads(data)\n if 'tag' in data and 'images_quantity' in data:\n url = self.start_urls[0] % data['tag']\n # self.tag = data['tag']\n self.images_quantity = int(data['images_quantity'])\n # return self.make_requests_from_url(url)\n return Request(url, dont_filter=True, meta={'tag': data['tag']})\n else:\n self.logger.error(\"Unexpected data from '%s': %r\", self.redis_key,\n data)\n\n def parse(self, response):\n \"\"\"\n This method is in charge of processing the response and\n returning scraped data and/or more URLs to follow.\n\n Args:\n response: The response to parse.\n \"\"\"\n quantity = response.meta.get('quantity', 0)\n images = response.xpath(\n '//div[contains(@class, \"serp-item_type_search\")]')\n for img in images:\n if quantity < self.images_quantity:\n item = ImageParserItem()\n item['image_url'] = 'https:' + \\\n img.xpath('.//a/img/@src').extract()[0]\n item['site'] = 'https://' + self.allowed_domains[0]\n item['tag'] = response.meta['tag']\n item['rank'] = quantity\n # item['images_quantity'] = self.images_quantity\n quantity += 1\n yield item\n else:\n # self.number = 1\n r = redis.StrictRedis(host='127.0.0.1', port=6379)\n Tag.objects.filter(name=response.meta['tag']).update(\n status_yandex='ready')\n r.publish('yandex', response.meta['tag'])\n return\n\n next_page = response.xpath(\n '//div[contains(@class, \"more_direction_next\")]/a/@href').extract()\n if next_page:\n url = response.urljoin(next_page[0])\n yield scrapy.Request(url, self.parse,\n meta={'tag': response.meta['tag'],\n 'quantity': quantity})\n","sub_path":"final_project/image_parser/image_parser/spiders/yandex_spider.py","file_name":"yandex_spider.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634143965","text":"from sqlalchemy.exc import ArgumentError\nfrom sqlalchemy.orm import class_mapper, object_mapper\nfrom sqlalchemy.orm.exc import UnmappedClassError, UnmappedInstanceError\n\n\ndef get_session(context):\n return context.get('session')\n\n\ndef get_query(model, context):\n query = getattr(model, 'query', None)\n if not query:\n session = get_session(context)\n if not session:\n raise Exception('A query in the model Base or a session in the schema is required for querying.\\n'\n 'Read more http://graphene-python.org/docs/sqlalchemy/tips/#querying')\n query = session.query(model)\n return query\n\n\ndef is_mapped_class(cls):\n try:\n class_mapper(cls)\n except (ArgumentError, UnmappedClassError):\n return False\n else:\n return True\n\n\ndef is_mapped_instance(cls):\n try:\n object_mapper(cls)\n except (ArgumentError, UnmappedInstanceError):\n return False\n else:\n return True\n","sub_path":"graphene_sqlalchemy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76204150","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os, sys, time, math\r\nfrom scipy.optimize import curve_fit\r\nfrom lmfit import Model, Parameters, Minimizer, minimize, printfuncs\r\nfrom lmfit.models import LinearModel\r\nfrom matplotlib.font_manager import FontProperties\r\nimport 
matplotlib.font_manager\r\n\r\nplt.rcParams['font.family']='Times New Roman'\r\n\r\n#Define the guassian in order to scale the data later\r\n \r\ndef gaussian(x, A, mu, sigma):\r\n return A*np.exp(-(x - mu)**2./(2.*sigma**2.))\r\n\r\n#Decide which bin width to look at, open the right file and use loadtxt to create the array\r\n \r\nbin = input(\"\\nBin: \")\r\n\r\nname = str(bin) + 'Combined'\r\n\r\nwith open(name, 'r') as f:\r\n lines = f.readlines()\r\n \r\nwith open('0Load', 'w') as f:\r\n for line in lines:\r\n f.write(line[1:])\r\n \r\nwith open('0Load', 'r') as f:\r\n x = np.loadtxt(f, comments = ')', dtype = {'names': ('Time', 'DM', 'SNR', 'Bin'), 'formats': ('f8', 'f8', 'f8', 'int')}, delimiter = ', ')\r\n\r\n#Create two copies of x; y and z. y has it's time changed to LST and has the SNR scaled, z only has its SNR scaled \r\n\r\ny = np.copy(x)\r\nz = np.copy(x)\r\n\r\nx = np.sort(x, order = 'Time')\r\nz = np.sort(z, order = 'Time')\r\ny = np.sort(y, order = 'Time')\r\n\r\n#Loop which looks at all the data and decides if it is relevant. A loop works out the MJD for a sidereal time of 9h53m\r\n \r\nrownum = 0\r\na1 = [0]\r\nfailures = 0\r\n\r\nfor row in x:\r\n MJD = x[rownum]['Time']\r\n\r\n MJD0 = math.floor(MJD)\r\n \r\n if MJD0 <= 57030: #Excluding these because it isn't the right MJD (16767)\r\n a1 = np.append(a1, rownum)\r\n rownum += 1\r\n continue\r\n\r\n H = 24*(MJD - MJD0)\r\n\r\n JD0 = MJD0 + 2400000.5\r\n\r\n D0 = JD0 - 2451545.0\r\n\r\n GMST = 6.697374558 + 0.06570982441908*D0 + 1.00273790935*H\r\n\r\n LST = GMST%24\r\n\r\n LSTHour = math.floor(LST)\r\n\r\n LSTMinutes = (LST - LSTHour)*60\r\n\r\n H = 27\r\n r = 0\r\n\r\n while (H/24) > 1.0 or (H/24) < 0.0:\r\n H = ((9.897859735-0.00005 + 24*r) - 6.697374558 - 0.06570982441908*D0)/1.00273790935 #was 9.895\r\n r += 1\r\n \r\n Decimal = H/24\r\n \r\n Centre = MJD0 + Decimal\r\n\r\n #Entries that do not correspond to the right time are removed\r\n \r\n if not -5/(60*24) < x[rownum]['Time'] - Centre < 5/(60*24):\r\n a1 = np.append(a1, rownum)\r\n \r\n if (0.9972695663 - 10/(60*24) < x[rownum]['Time'] - Centre < 0.9972695663 + 10/(60*24)) or (-0.9972695663 + 10/(60*24) < x[rownum]['Time'] - Centre < -0.9972695663 - 10/(60*24)):\r\n failures += 1\r\n \r\n rownum += 1\r\n \r\na1 = np.delete(a1, 0)\r\n\r\nx = np.delete(x, a1)\r\ny = np.delete(y, a1)\r\nz = np.delete(z, a1)\r\n\r\n#The times are converted from MJD to LST\r\n\r\nrownum = 0\r\n\r\nfor row in y:\r\n MJD = y[rownum]['Time']\r\n\r\n MJD0 = math.floor(MJD)\r\n \r\n H = 24*(MJD - MJD0)\r\n\r\n JD0 = MJD0 + 2400000.5\r\n\r\n D0 = JD0 - 2451545.0\r\n\r\n GMST = 6.697374558 + 0.06570982441908*D0 + 1.00273790935*H\r\n\r\n LST = GMST%24\r\n\r\n y[rownum]['Time'] = LST\r\n \r\n rownum += 1\r\n\r\n#Plot measured SNR vs LST\r\n \r\nplt.plot(y['Time'], z['SNR'], 'ro')\r\nplt.ylabel('SNR')\r\nplt.xlabel('LST')\r\nplt.title('SNR vs LST')\r\n\r\nplt.show()\r\n\r\n#Perform the scaling\r\n\r\nrownum = 0\r\nfor row in y:\r\n z[rownum]['SNR'] = y[rownum]['SNR']/gaussian(y[rownum]['Time'], 1, 9.897859735-0.00005, 0.09)\r\n y[rownum]['SNR'] = y[rownum]['SNR']/gaussian(y[rownum]['Time'], 1, 9.897859735-0.00005, 0.09)\r\n rownum += 1\r\n\r\nprint(10/gaussian(9.897859735-0.00005 + 5/60, 1, 9.897859735-0.00005, 0.09))\r\n\r\nnrows = rownum\r\n \r\n#Plot scaled SNR vs LST\r\n \r\n'''plt.plot(y['Time'], z['SNR'], 'ro')\r\nplt.ylabel('Normalized SNR')\r\nplt.xlabel('LST')\r\nplt.title('Normalized SNR vs LST')\r\n\r\nplt.show()'''\r\n\r\n#Plot scaled SNR vs MJD\r\n\r\nplt.plot(z['Time'], 
z['SNR'], 'ro')\r\n\r\nplt.show()\r\n\r\n#Count the number of pulses in individual pulse windows\r\n\r\nnumpul = 1\r\nrownum = 0\r\n\r\nwhile rownum < nrows - 1:\r\n if not z[rownum + 1]['Time'] - z[rownum]['Time'] < 0.25306/(2*60*60*24):\r\n numpul += 1\r\n rownum += 1\r\n \r\nprint(rownum+1)\r\nprint(numpul)\r\n\r\n'''This section deals with the integrated pulse intensities'''\r\n\r\n#Create an array to store the data for the integrated pulse intensities\r\n\r\nintsnr = np.zeros(numpul, dtype = {'names': ('Pulse', 'Time', 'Int SNR'), 'formats': ('int', 'f8', 'f8')})\r\n\r\n#Loop over the array to fill up the pulse numbers\r\n\r\npulse = 0\r\n\r\nwhile pulse < numpul:\r\n intsnr[pulse]['Pulse'] = pulse + 1\r\n pulse += 1\r\n\r\n#Work out the integrated SNRs of pulses\r\n \r\nrownum = 0\r\npulse = 0\r\n\r\nintsnr[0]['Time'] = z[0]['Time']\r\nintsnr[0]['Int SNR'] = z[0]['SNR']\r\n\r\nwhile rownum < nrows - 1:\r\n if not z[rownum + 1]['Time'] - z[rownum]['Time'] <= 0.253/(2*60*60*24):\r\n pulse += 1\r\n intsnr[pulse]['Time'] = z[rownum + 1]['Time']\r\n intsnr[pulse]['Int SNR'] = intsnr[pulse]['Int SNR'] + z[rownum + 1]['SNR']\r\n else:\r\n intsnr[pulse]['Int SNR'] = intsnr[pulse]['Int SNR'] + z[rownum + 1]['SNR']\r\n rownum += 1\r\n \r\nprint(np.sum(z['SNR']))\r\nprint(np.sum(intsnr['Int SNR']))\r\n\r\n'''plt.plot(intsnr['Time'], intsnr['Int SNR'], 'ro')\r\nplt.ylabel('SNR')\r\nplt.xlabel('Number of pulse')\r\nplt.show()'''\r\n \r\n#Choose the number of bins and size of bins and generate an array of appropriate size\r\n\r\nnumbin = eval(input('Choose the number of points (default 30): '))\r\nbinsize = eval(input('Choose the length between points (default 3): '))\r\n\r\n#numbin = 30\r\n#binsize = 3\r\n\r\nbinarray = np.zeros(numbin, dtype = np.int)\r\n\r\n#Loop which counts number of pulses in each bin\r\n\r\nrownum = 0\r\nwhile rownum < numpul:\r\n bin = 0\r\n while bin < numbin:\r\n if 10 + binsize*bin <= intsnr[rownum]['Int SNR']:\r\n binarray[bin] += 1\r\n bin += 1\r\n rownum += 1\r\n\r\nbinx = np.zeros(numbin)\r\n\r\nbin = 0\r\nwhile bin < numbin:\r\n binx[bin] = 10 + binsize*bin\r\n bin += 1\r\n \r\n#Plot the histogram\r\n\r\n'''plt.bar(binx, binarray, width = binsize)\r\nplt.ylabel('Number of pulses')\r\nplt.xlabel('Scaled SNR')\r\nplt.title('Scaled SNR of pulses')\r\nplt.xticks(np.arange(10, binsize*numbin, binsize))\r\n\r\nplt.show()'''\r\n\r\n#Remove infinite values\r\n\r\nlogbin = np.log10(binarray)\r\nlogx = np.log10(binx)\r\nrowdel = [0]\r\nr = 0\r\n\r\nfor row in logbin:\r\n if math.isfinite(logbin[r]) == False:\r\n rowdel = np.append(rowdel, r)\r\n r += 1\r\n \r\nrowdel = np.delete(rowdel, 0) \r\nlogbin = np.delete(logbin, rowdel)\r\nlogx = np.delete(logx, rowdel)\r\nerx = np.delete(binx, rowdel)\r\n\r\n#Plot the log-log plot\r\n\r\nplt.plot(logx, logbin, 'ro')\r\nplt.ylabel('log(Number of pulses)')\r\nplt.xlabel('log(Scaled SNR)')\r\nplt.title('Log-log plot of Scaled SNR of pulses')\r\n\r\nplt.show()\r\n\r\n#Plot the log-log plot with a line of best fit\r\n\r\ndecision = eval(input('\\nEnter 1 for a single power law exponent or 2 for a piecewise power law: '))\r\n\r\nwidth = eval(input('\\nEnter the thickness of the lines on the plot: '))\r\n\r\nif decision == 1:\r\n\r\n fitlogbin = np.copy(logbin)\r\n fitlogx = np.copy(logx)\r\n\r\n rowdel = [0]\r\n r = 0\r\n\r\n for row in logbin:\r\n if fitlogx[r] <= np.log10(10/gaussian(9.897859735-0.00005 + 5/60, 1, 9.897859735-0.00005, 0.09)) or fitlogbin[r] < 1: \r\n rowdel = np.append(rowdel, r)\r\n r += 1\r\n \r\n rowdel = 
np.delete(rowdel, 0)\r\n fitlogx = np.delete(fitlogx, rowdel)\r\n fitlogbin = np.delete(fitlogbin, rowdel)\r\n fiterx1 = np.delete(erx, rowdel)\r\n \r\n fitval = np.polyfit(fitlogx, fitlogbin, 1)\r\n a = np.linspace(fitlogx[0] - 0.02, fitlogx[-1] + 0.02, 100)\r\n\r\n #Use lmfit instead; for the plot, the program has been run twice and the values found by the fitting function entered manually\r\n\r\n mod = LinearModel()\r\n result = mod.fit(fitlogbin, x = fitlogx, slope = -3, intercept = 10)\r\n print(result.fit_report())\r\n\r\n '''plt.plot(logx, logbin, 'ro', b, b*-3.62212153 + 6.96592205, 'k-', a, a*-6.39557577 + 10.6634723, 'k-')\r\n plt.errorbar(logx, logbin, xerr = 1/erx, yerr = 0, fmt = 'ro')\r\n plt.ylabel('log(Cumulative Number of pulses > Scaled SNR)')\r\n plt.xlabel('log(Scaled SNR)')\r\n\r\n plt.show()'''\r\n\r\n print(fitval[0])\r\n print(fitval[1])\r\n\r\n #Try writing the function\r\n\r\n x = fitlogbin\r\n\r\n data = fitlogx\r\n\r\n error = 1/fiterx1\r\n\r\n def residual(pars, x, data=None):\r\n a=pars['intercept'].value\r\n b=pars['slope'].value\r\n model = -a/b + ((1/b)*x)\r\n resids = model - data\r\n weighted = np.sqrt(resids ** 2 / error ** 2)\r\n return weighted\r\n\r\n params=Parameters()\r\n params.add('intercept', value=10.0)\r\n params.add('slope', value=-3.0)\r\n\r\n mi=minimize(residual, params, args=(x, data))\r\n \r\n print(printfuncs.report_fit(mi.params, min_correl=0.5))\r\n\r\n slope = eval(input(\"\\nSlope: \"))\r\n sloperr = eval(input(\"Slope error: \"))\r\n intercept = eval(input(\"Intercept: \"))\r\n\r\n plt.plot(logx, logbin, 'ro')\r\n plt.plot(a, a*slope + intercept, 'k-', linewidth = width, label = r'$ \\alpha = %.2f \\pm %.2f$' %(slope, sloperr))\r\n plt.errorbar(logx, logbin, xerr = 1/erx, yerr = 0, fmt = 'ro')\r\n plt.ylabel('log(Cumulative number of pulses > SNR)', fontsize = 18)\r\n plt.xlabel('log(SNR)', fontsize = 18)\r\n plt.xticks(fontsize = 15)\r\n plt.yticks(fontsize = 15)\r\n plt.legend(frameon = False, fontsize = 18)\r\n\r\n plt.show()\r\nelif decision == 2:\r\n\r\n fitlogbin = np.copy(logbin)\r\n fitlogx = np.copy(logx)\r\n fitlogx1 = np.copy(logx)\r\n fit2 = np.copy(fitlogbin)\r\n \r\n #Choose the cutoff point for the piecewise power law\r\n \r\n cutoff = eval(input('\\nChoose a point on the y axis to use to differentiate between the regimes (default 2.3): '))\r\n\r\n rowdel = [0]\r\n r = 0\r\n\r\n for row in logbin:\r\n if fitlogx[r] <= np.log10(10/gaussian(9.897859735-0.00005 + 5/60, 1, 9.897859735-0.00005, 0.09)) or fitlogbin[r] > cutoff or fitlogbin[r] < 1: \r\n rowdel = np.append(rowdel, r)\r\n r += 1\r\n \r\n rowdel = np.delete(rowdel, 0)\r\n fitlogx = np.delete(fitlogx, rowdel)\r\n fitlogbin = np.delete(fitlogbin, rowdel)\r\n fiterx1 = np.delete(erx, rowdel)\r\n \r\n fitval = np.polyfit(fitlogx, fitlogbin, 1)\r\n a = np.linspace(fitlogx[0] - 0.02, fitlogx[-1] + 0.02, 100)\r\n\r\n rowdel = [0]\r\n r = 0\r\n\r\n for row in logbin:\r\n if fitlogx1[r] <= np.log10(10/gaussian(9.897859735-0.00005 + 5/60, 1, 9.897859735-0.00005, 0.09)) or fit2[r] < cutoff: \r\n rowdel = np.append(rowdel, r)\r\n r += 1\r\n \r\n rowdel = np.delete(rowdel, 0)\r\n fitlogx1 = np.delete(fitlogx1, rowdel)\r\n fit2 = np.delete(fit2, rowdel)\r\n fiterx2 = np.delete(erx, rowdel)\r\n \r\n fitval1 = np.polyfit(fitlogx1, fit2, 1)\r\n b = np.linspace(fitlogx1[0] - 0.02, fitlogx1[-1] + 0.02, 100)\r\n\r\n #Use lmfit instead; for the plot, the program has been run twice and the values found by the fitting function entered manually\r\n\r\n 
mod = LinearModel()\r\n result = mod.fit(fitlogbin, x = fitlogx, slope = -3, intercept = 10)\r\n #print(result.fit_report())\r\n\r\n result2 = mod.fit(fit2, x = fitlogx1, slope = -3, intercept = 10)\r\n #print(result2.fit_report())\r\n\r\n '''plt.plot(logx, logbin, 'ro', b, b*-3.62212153 + 6.96592205, 'k-', a, a*-6.39557577 + 10.6634723, 'k-')\r\n plt.errorbar(logx, logbin, xerr = 1/erx, yerr = 0, fmt = 'ro')\r\n plt.ylabel('log(Cumulative Number of pulses > Scaled SNR)')\r\n plt.xlabel('log(Scaled SNR)')\r\n\r\n plt.show()\r\n\r\n print(fitval[0])\r\n print(fitval[1])'''\r\n\r\n #Try writing the function\r\n\r\n x = fitlogbin\r\n\r\n data = fitlogx\r\n\r\n error = 1/fiterx1\r\n\r\n def residual(pars, x, data=None):\r\n a=pars['intercept'].value\r\n b=pars['slope'].value\r\n model = -a/b + ((1/b)*x)\r\n resids = model - data\r\n weighted = np.sqrt(resids ** 2 / error ** 2)\r\n return weighted\r\n\r\n params=Parameters()\r\n params.add('intercept', value=10.0)\r\n params.add('slope', value=-3.0)\r\n\r\n mi=minimize(residual, params, args=(x, data))\r\n \r\n print(printfuncs.report_fit(mi.params, min_correl=0.5))\r\n\r\n x = fit2\r\n\r\n data = fitlogx1\r\n\r\n error = 1/fiterx2\r\n\r\n def residual(pars, x, data=None):\r\n a=pars['intercept'].value\r\n b=pars['slope'].value\r\n model = -a/b + ((1/b)*x)\r\n resids = model - data\r\n weighted = np.sqrt(resids ** 2 / error ** 2)\r\n return weighted\r\n\r\n params=Parameters()\r\n params.add('intercept', value=10.0)\r\n params.add('slope', value=-3.0)\r\n\r\n mi=minimize(residual, params, args=(x, data))\r\n \r\n print(printfuncs.report_fit(mi.params, min_correl=0.5))\r\n\r\n stslope = eval(input(\"\\nSteep slope: \"))\r\n stsloperr = eval(input(\"Steep slope error: \"))\r\n stint = eval(input(\"Steep intercept: \"))\r\n shslope = eval(input(\"\\nShallow slope: \"))\r\n shsloperr = eval(input(\"Shallow slope error: \"))\r\n shint = eval(input(\"Shallow intercept: \"))\r\n\r\n plt.plot(logx, logbin, 'ro')\r\n plt.plot(b, b*shslope + shint, 'k-', linewidth = width, label = r'$ \\alpha = %.2f \\pm %.2f$' %(shslope, shsloperr))\r\n plt.plot(a, a*stslope + stint, 'k--', linewidth = width, label = r'$ \\alpha = %.2f \\pm %.2f$' %(stslope, stsloperr))\r\n plt.errorbar(logx, logbin, xerr = 1/erx, yerr = 0, fmt = 'ro')\r\n plt.ylabel('log(Cumulative number of pulses > SNR)', fontsize = 18)\r\n plt.xlabel('log(SNR)', fontsize = 18)\r\n plt.xticks(fontsize = 15)\r\n plt.yticks(fontsize = 15)\r\n plt.legend(frameon = False, fontsize = 18)\r\n\r\n plt.show()\r\nelse:\r\n print('\\nIncorrect entry.')","sub_path":"intsnr.py","file_name":"intsnr.py","file_ext":"py","file_size_in_byte":13571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530802724","text":"# coding=utf-8\n\"\"\"\nAuthor: vision\ndate: 2019/4/7 14:44\n\"\"\"\nfrom DateBase.connect_db import session\nfrom DateBase.creat import ConcreteMix, CementAttributeDatum\n\n# query_mix usage\n# mix1 = query_mix('普通砼', '15', None, None)\n# mix1 = query_mix('泵送砼', '30', 'P6', '0.08')\n\n\ndef query_mix(ConcreteName, StrengthLevel, ImperLevel, SwellLevel):\n mix = session.query(ConcreteMix).filter(\n ConcreteMix.ConcreteName == ConcreteName,\n ConcreteMix.StrengthLevel == StrengthLevel,\n ConcreteMix.ImperLevel == ImperLevel,\n ConcreteMix.SwellLevel == SwellLevel,\n )\n return mix[0]\n\n\nif __name__ == \"__main__\":\n mix1 = query_mix('普通砼', '15', None, None)\n mix1 = query_mix('泵送砼', '30', 'P6', None)\n 
print(mix1.MixRatioID)\n","sub_path":"Util/query_database.py","file_name":"query_database.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589517939","text":"import scipy.io as scp # used to load .mat file\nimport numpy as np # used for multi-dimensional array\nimport soundfile as sf # used to read the .wav file\nimport sys\n\ndef hrtf(fileName, aIndex, eIndex):\n \"\"\"\n This function will save a 3D audio version of the fileName by computing\n the head-related transfer function based off the passed-in azimuth\n and elevation indices\n\n Arguments:\n fileName - a string of the filename which the user wants 3D audio playback of\n aIndex - the index of the azimuth list that returns the desired azimuth\n eIndex - the index of the elevation list that returns the desired elevation\n \"\"\"\n # Reads in a .wav file into a Wave object with frame rate of 44.1k Hz\n try:\n data, fs = sf.read(fileName, dtype='float32')\n except RuntimeError:\n print(\"Error: Audio file cannot be played, or it doesn't exist.\")\n sys.exit(0)\n\n # Reads in the HRTF database data file\n try:\n hrtf_db = scp.loadmat('CIPIC_58_HRTF.mat')\n except RuntimeError:\n print(\"Error: Audio file cannot be played, or it doesn't exist.\")\n sys.exit(0)\n\n # Convert stereo audio file to mono by grabbing the left channel only\n if len(data.shape) == 2:\n data = data[:, 0]\n\n # Extract hrir's from the HRTF database\n hrir_l = hrtf_db['hrir_l']\n hrir_r = hrtf_db['hrir_r']\n\n # Get head-related impulse samples\n rgt = list(np.squeeze(hrir_r[aIndex][eIndex][:]))\n lft = list(np.squeeze(hrir_l[aIndex][eIndex][:]))\n\n # Generate ITD (Interaural Time Difference); indices are 0-based here, unlike MATLAB\n delay = hrtf_db['ITD'][aIndex - 1][eIndex - 1]\n zeros_delay = [0] * abs(int(delay))\n\n # Include the delay in left and right stereo channels\n if aIndex < 13:\n lft = lft + zeros_delay\n rgt = zeros_delay + rgt\n else:\n lft = zeros_delay + lft\n rgt = rgt + zeros_delay\n\n # Perform convolution\n left_convolved = list(np.convolve(data, lft))\n right_convolved = list(np.convolve(data, rgt))\n zeros_samplerate = [0] * int(fs * 0.2)\n\n # Include convolved audio in left and right stereo channels\n wav_left = left_convolved + zeros_samplerate\n wav_right = right_convolved + zeros_samplerate\n\n # Create np array soundToPlay with wav_left and wav_right\n soundToPlay = np.array([wav_left, wav_right], dtype='float32')\n\n # Find max between left and right stereo channels\n maxlft = max(soundToPlay[0])\n maxrgt = max(soundToPlay[1])\n if (maxlft > maxrgt):\n maximum = maxlft\n else:\n maximum = maxrgt\n\n # Scale the wave to -1 and 1\n soundToPlay = soundToPlay / float(maximum)\n\n # Transpose multi-dimensional array into the proper shape\n soundToPlay = soundToPlay.T\n\n return soundToPlay\n\n# start with 5 ft as the cutoff for the sounds\n# to get multiple sounds, just add the left channels with one another and\n# add the right channels with one another and output that\n","sub_path":"hrtf.py","file_name":"hrtf.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121725774","text":"import tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras import datasets, layers, models\n\n\nBATCH_SIZE = 100\n\n\ndef load_image(img_path, size=(32, 32)):\n label = tf.constant(1, tf.int8) if tf.strings.regex_full_match(img_path, '.*/automobile/.*') \\\n else tf.constant(0, tf.int8)\n img = 
tf.io.read_file(img_path)\n img = tf.image.decode_jpeg(img)\n img = tf.image.resize(img, size) / 255.0\n return img, label\n\n\ndef main():\n ds_train = tf.data.Dataset.list_files('../data/cifar2/train/*/*.jpg').map(\n load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE\n ).shuffle(buffer_size=1000).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)\n\n ds_test = tf.data.Dataset.list_files('../data/cifar2/test/*/*.jpg').map(\n load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE\n ).shuffle(buffer_size=1000).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)\n\n # plt.figure(figsize=(9, 9))\n # for i, (img, label) in enumerate(ds_train.unbatch().take(9)):\n # ax = plt.subplot(3, 3, i+1)\n # ax.imshow(img.numpy())\n # ax.set_title('label={}'.format(label))\n # ax.set_xticks([])\n # ax.set_yticks([])\n # plt.show()\n\n for x, y in ds_train.take(1):\n print(x.shape, y.shape)\n\n tf.keras.backend.clear_session()\n\n inputs = layers.Input(shape=(32, 32, 3))\n x = layers.Conv2D(32, kernel_size=(3, 3), activation='relu')(inputs)\n x = layers.MaxPool2D()(x)\n x = layers.Conv2D(64, kernel_size=(5, 5), activation='relu')(x)\n x = layers.MaxPool2D()(x)\n x = layers.Dropout(rate=0.1)(x)\n x = layers.Flatten()(x)\n x = layers.Dense(32, activation='relu')(x)\n outputs = layers.Dense(1, activation='sigmoid')(x)\n\n model = models.Model(inputs=inputs, outputs=outputs)\n model.summary()\n\n # training\n from datetime import datetime\n log_dir = './log/1-2/{}'.format(datetime.now().strftime('%Y%m%d-%H%M%S'))\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir, histogram_freq=1)\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n loss=tf.keras.losses.binary_crossentropy,\n metrics=['accuracy'])\n history = model.fit(ds_train, epochs=10, validation_data=ds_test,\n callbacks=[tensorboard_callback], workers=4)\n\n import pandas as pd\n df_history = pd.DataFrame(history.history)\n df_history.index = range(1, len(df_history)+1)\n df_history.index.name = 'epoch'\n\n print(df_history.head(10))\n\n def plot_metric(history, metric):\n \"\"\"\n Line plot of the loss/metric curves over training\n :param history:\n :param metric:\n :return:\n \"\"\"\n train_metrics = history.history[metric]\n val_metrics = history.history['val_' + metric]\n epochs = range(1, len(train_metrics) + 1)\n plt.plot(epochs, train_metrics, 'bo--')\n plt.plot(epochs, val_metrics, 'ro-')\n plt.title('Training and validation ' + metric)\n plt.xlabel('Epochs')\n plt.ylabel(metric)\n plt.legend(['train_' + metric, 'val_' + metric])\n plt.show()\n\n # plot_metric(history, 'accuracy')\n # plot_metric(history, 'loss')\n\n val_loss, val_accuracy = model.evaluate(ds_test, workers=4)\n print(val_loss, val_accuracy)\n\n prediction = model.predict(ds_test)\n print(prediction)\n\n for x, y in ds_test.take(1):\n print(model.predict_on_batch(x[:20]))\n\n model.save_weights('./1-2_checkpoints/weights.ckpt', save_format='tf')\n model.save('./1-2_checkpoints/saved_model_weights', save_format='tf')\n\n model_loaded = tf.keras.models.load_model('./1-2_checkpoints/saved_model_weights')\n prediction_ = model_loaded.evaluate(ds_test)\n print(prediction_)\n \n\nif __name__ == '__main__':\n main()\n #\n # from tensorboard import notebook\n #\n # notebook.list()\n #\n # notebook.start('--logdir ./log/1-2')\n\n","sub_path":"scripts/1-2.py","file_name":"1-2.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49388419","text":"# -*- coding: utf8 - 
*-\n\"\"\"Command line tool for managing tmux workspaces and tmuxp configurations.\n\ntmuxp.cli\n~~~~~~~~~\n\n:copyright: Copyright 2013 Tony Narlock.\n:license: BSD, see LICENSE for details\n\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport argcomplete\nimport logging\nimport kaptan\nfrom . import log, util, exc, WorkspaceBuilder, Server, config\nfrom .util import ascii_lowercase, input\nfrom .workspacebuilder import freeze\nfrom distutils.util import strtobool\n\n\nimport re\nVERSIONFILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), '__init__.py')\nverstrline = open(VERSIONFILE, \"rt\").read()\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(VSRE, verstrline, re.M)\nif mo:\n __version__ = mo.group(1)\n#import pkg_resources\n#__version__ = pkg_resources.require(\"tmuxp\")[0].version\n\nlogger = logging.getLogger(__name__)\n\nconfig_dir = os.path.expanduser('~/.tmuxp/')\ncwd_dir = os.getcwd() + '/'\ntmuxinator_config_dir = os.path.expanduser('~/.tmuxinator/')\nteamocil_config_dir = os.path.expanduser('~/.teamocil/')\n\n\ndef prompt(name, default=None):\n \"\"\"Return user input from command line.\n\n :meth:`~prompt`, :meth:`~prompt_bool` and :meth:`prompt_choices` are from\n `flask-script`_. See the `flask-script license`_.\n\n .. _flask-script: https://github.com/techniq/flask-script\n .. _flask-script license:\n https://github.com/techniq/flask-script/blob/master/LICENSE\n\n :param name: prompt text\n :param default: default value if no input provided.\n :rtype: string\n\n \"\"\"\n\n prompt = name + (default and ' [%s]' % default or '')\n prompt += name.endswith('?') and ' ' or ': '\n while True:\n rv = input(prompt)\n if rv:\n return rv\n if default is not None:\n return default\n\n\ndef prompt_bool(name, default=False, yes_choices=None, no_choices=None):\n \"\"\"Return user input from command line and converts to boolean value.\n\n :param name: prompt text\n :param default: default value if no input provided.\n :param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'\n :param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'\n :rtype: bool\n\n \"\"\"\n\n yes_choices = yes_choices or ('y', 'yes', '1', 'on', 'true', 't')\n no_choices = no_choices or ('n', 'no', '0', 'off', 'false', 'f')\n\n if default is None:\n prompt_choice = 'y/n'\n elif default is True:\n prompt_choice = 'Y/n'\n else:\n prompt_choice = 'y/N'\n\n prompt = name + ' [%s]' % prompt_choice\n prompt += name.endswith('?') and ' ' or ': '\n\n while True:\n rv = input(prompt)\n if not rv:\n return default\n if rv.lower() in yes_choices:\n return True\n elif rv.lower() in no_choices:\n return False\n\n\ndef prompt_yes_no(name, default=True):\n \"\"\":meth:`prompt_bool()` returning yes by default.\"\"\"\n return prompt_bool(name, default=default)\n\n\ndef prompt_choices(name, choices, default=None, resolve=ascii_lowercase,\n no_choice=('none',)):\n \"\"\"Return user input from command line from set of provided choices.\n\n :param name: prompt text\n :param choices: list or tuple of available choices. 
Choices may be\n single strings or (key, value) tuples.\n :param default: default value if no input provided.\n :param no_choice: acceptable list of strings for \"null choice\"\n :rtype: str\n\n \"\"\"\n\n _choices = []\n options = []\n\n for choice in choices:\n if isinstance(choice, basestring):\n options.append(choice)\n else:\n options.append(\"%s [%s]\" % (choice[1], choice[0]))\n choice = choice[0]\n _choices.append(choice)\n\n while True:\n rv = prompt(name + ' - (%s)' % ', '.join(options), default)\n if not rv:\n return default\n rv = resolve(rv)\n if rv in no_choice:\n return None\n if rv in _choices:\n return rv\n\n\nclass ConfigFileCompleter(argcomplete.completers.FilesCompleter):\n\n \"\"\"argcomplete completer for tmuxp files.\"\"\"\n\n def __call__(self, prefix, **kwargs):\n\n completion = argcomplete.completers.FilesCompleter.__call__(\n self, prefix, **kwargs\n )\n\n completion += [os.path.join(config_dir, c)\n for c in config.in_dir(config_dir)]\n\n return completion\n\n\nclass TmuxinatorCompleter(argcomplete.completers.FilesCompleter):\n\n \"\"\"argcomplete completer for Tmuxinator files.\"\"\"\n\n def __call__(self, prefix, **kwargs):\n completion = argcomplete.completers.FilesCompleter.__call__(\n self, prefix, **kwargs\n )\n\n tmuxinator_configs = config.in_dir(\n tmuxinator_config_dir, extensions='yml')\n completion += [os.path.join(tmuxinator_config_dir, f)\n for f in tmuxinator_configs]\n\n return completion\n\n\nclass TeamocilCompleter(argcomplete.completers.FilesCompleter):\n\n \"\"\"argcomplete completer for Teamocil files.\"\"\"\n\n def __call__(self, prefix, **kwargs):\n completion = argcomplete.completers.FilesCompleter.__call__(\n self, prefix, **kwargs\n )\n\n teamocil_configs = config.in_dir(teamocil_config_dir, extensions='yml')\n completion += [os.path.join(teamocil_config_dir, f)\n for f in teamocil_configs]\n\n return completion\n\n\ndef SessionCompleter(prefix, parsed_args, **kwargs):\n \"\"\"Return list of session names for argcomplete completer.\"\"\"\n\n t = Server(\n socket_name=parsed_args.socket_name,\n socket_path=parsed_args.socket_path\n )\n\n return [s.get('session_name') for s in t._sessions\n if s.get('session_name').startswith(prefix)]\n\n\ndef setup_logger(logger=None, level='INFO'):\n \"\"\"Setup logging for CLI use.\n\n :param logger: instance of logger\n :type logger: :py:class:`Logger`\n\n \"\"\"\n if not logger:\n logger = logging.getLogger()\n if not logger.handlers:\n channel = logging.StreamHandler()\n channel.setFormatter(log.DebugLogFormatter())\n\n # channel.setFormatter(log.LogFormatter())\n logger.setLevel(level)\n logger.addHandler(channel)\n\n\ndef startup(config_dir):\n \"\"\"Initialize CLI.\n\n :param config_dir: Config directory to search\n :type config_dir: string\n\n \"\"\"\n\n if not os.path.exists(config_dir):\n os.makedirs(config_dir)\n\n\ndef load_workspace(config_file, args):\n \"\"\"Build config workspace.\n\n :param config_file: full path to config file\n :param type: string\n\n \"\"\"\n logger.info('Loading %s.' 
% config_file)\n\n sconfig = kaptan.Kaptan()\n sconfig = sconfig.import_config(config_file).get()\n sconfig = config.expand(sconfig, os.path.dirname(config_file))\n sconfig = config.trickle(sconfig)\n\n t = Server(\n socket_name=args.socket_name,\n socket_path=args.socket_path,\n colors=args.colors\n )\n\n try:\n builder = WorkspaceBuilder(sconf=sconfig, server=t)\n except exc.EmptyConfigException:\n logger.error('%s is empty or parsed no config data' % config_file)\n return\n\n tmux_bin = util.which('tmux')\n\n try:\n builder.build()\n\n if 'TMUX' in os.environ:\n if prompt_yes_no('Already inside TMUX, switch to session?'):\n tmux_env = os.environ.pop('TMUX')\n builder.session.switch_client()\n\n os.environ['TMUX'] = tmux_env\n return\n else:\n sys.exit('Session created in detached state.')\n\n builder.session.attach_session()\n except exc.TmuxSessionExists as e:\n if prompt_yes_no('%s Attach?' % e):\n if 'TMUX' in os.environ:\n builder.session.switch_client()\n\n else:\n builder.session.attach_session()\n return\n except exc.TmuxpException as e:\n import traceback\n\n print(traceback.format_exc())\n logger.error(e)\n\n choice = prompt_choices(\n 'Error loading workspace. (k)ill, (a)ttach, (d)etach?',\n choices=['k', 'a', 'd'],\n default='k'\n )\n\n if choice == 'k':\n builder.session.kill_session()\n print('Session killed.')\n elif choice == 'a':\n if 'TMUX' in os.environ:\n builder.session.switch_client()\n else:\n builder.session.attach_session()\n else:\n sys.exit()\n\n\ndef command_freeze(args):\n \"\"\"Import teamocil config to tmuxp format.\"\"\"\n\n ctext = ' '.join(args.session_name)\n\n t = Server(\n socket_name=args.socket_name,\n socket_path=args.socket_path,\n colors=args.colors\n )\n\n session = t.findWhere({\n 'session_name': ctext\n })\n\n sconf = freeze(session)\n configparser = kaptan.Kaptan()\n newconfig = config.inline(sconf)\n configparser.import_config(newconfig)\n config_format = prompt_choices('Convert to', choices=[\n 'yaml', 'json'], default='yaml')\n\n if config_format == 'yaml':\n newconfig = configparser.export(\n 'yaml', indent=2, default_flow_style=False, safe=True\n )\n elif config_format == 'json':\n newconfig = configparser.export('json', indent=2)\n else:\n sys.exit('Unknown config format.')\n\n print(newconfig)\n print(\n '---------------------------------------------------------------')\n print(\n 'Configuration import does its best to convert teamocil files.\\n')\n if prompt_yes_no(\n 'The new config *WILL* require adjusting afterwards. Save config?'\n ):\n dest = None\n while not dest:\n dest_prompt = prompt('Save to: ', os.path.abspath(\n os.path.join(config_dir, '%s.%s' % (sconf.get('session_name'), config_format))))\n if os.path.exists(dest_prompt):\n print('%s exists. Pick a new filename.' % dest_prompt)\n continue\n\n dest = dest_prompt\n\n dest = os.path.abspath(os.path.relpath(os.path.expanduser(dest)))\n if prompt_yes_no('Write to %s?' % dest):\n buf = open(dest, 'w')\n buf.write(newconfig)\n buf.close()\n\n print('Saved to %s.' 
% dest)\n else:\n print(\n 'tmuxp has examples in JSON and YAML format at \\n'\n 'View tmuxp docs at '\n )\n sys.exit()\n\n\ndef command_load(args):\n \"\"\"Load a session from a tmuxp session file.\"\"\"\n if args.list:\n startup(config_dir)\n configs_in_user = config.in_dir(config_dir)\n configs_in_cwd = config.in_cwd()\n\n sys.exit()\n\n output = ''\n\n if not configs_in_user:\n output += '# %s: \\n\\tNone found.\\n' % config_dir\n else:\n output += '# %s: \\n\\t%s\\n' % (\n config_dir, ', '.join(configs_in_user)\n )\n\n if configs_in_cwd:\n output += '# current directory:\\n\\t%s' % (\n ', '.join(configs_in_cwd)\n )\n\n print(output)\n return\n\n elif args.config:\n if '.' == args.config:\n if config.in_cwd():\n configfile = config.in_cwd()[0]\n print(configfile)\n else:\n sys.exit('No tmuxp configs found in current directory.')\n else:\n configfile = args.config\n file_user = os.path.join(config_dir, configfile)\n file_cwd = os.path.join(cwd_dir, configfile)\n\n if os.path.exists(file_cwd) and os.path.isfile(file_cwd):\n print('load %s' % file_cwd)\n load_workspace(file_cwd, args)\n elif os.path.exists(file_user) and os.path.isfile(file_user):\n load_workspace(file_user, args)\n else:\n logger.error('%s not found.' % configfile)\n\n\ndef command_import_teamocil(args):\n \"\"\"Import teamocil config to tmuxp format.\"\"\"\n\n if args.list:\n try:\n configs_in_user = config.in_dir(\n teamocil_config_dir, extensions='yml')\n except OSError:\n configs_in_user = []\n configs_in_cwd = config.in_dir(\n config_dir=cwd_dir, extensions='yml')\n\n output = ''\n\n if not os.path.exists(teamocil_config_dir):\n output += '# %s: \\n\\tDirectory doesn\\'t exist.\\n' % teamocil_config_dir\n elif not configs_in_user:\n output += '# %s: \\n\\tNone found.\\n' % teamocil_config_dir\n else:\n output += '# %s: \\n\\t%s\\n' % (\n config_dir, ', '.join(configs_in_user)\n )\n\n if configs_in_cwd:\n output += '# current directory:\\n\\t%s' % (\n ', '.join(configs_in_cwd)\n )\n\n print(output)\n elif args.config:\n configfile = os.path.abspath(os.path.relpath(\n os.path.expanduser(args.config)))\n configparser = kaptan.Kaptan(handler='yaml')\n\n if os.path.exists(configfile):\n print(configfile)\n configparser.import_config(configfile)\n newconfig = config.import_teamocil(configparser.get())\n configparser.import_config(newconfig)\n else:\n sys.exit('File not found: %s' % configfile)\n\n config_format = prompt_choices('Convert to', choices=[\n 'yaml', 'json'], default='yaml')\n\n if config_format == 'yaml':\n newconfig = configparser.export(\n 'yaml', indent=2, default_flow_style=False\n )\n elif config_format == 'json':\n newconfig = configparser.export('json', indent=2)\n else:\n sys.exit('Unknown config format.')\n\n print(newconfig)\n print(\n '---------------------------------------------------------------')\n print(\n 'Configuration import does its best to convert teamocil files.\\n')\n if prompt_yes_no(\n 'The new config *WILL* require adjusting afterwards. Save config?'\n ):\n dest = None\n while not dest:\n dest_prompt = prompt('Save to: ', os.path.abspath(\n os.path.join(config_dir, 'myimport.%s' % config_format)))\n if os.path.exists(dest_prompt):\n print('%s exists. Pick a new filename.' % dest_prompt)\n continue\n\n dest = dest_prompt\n\n dest = os.path.abspath(os.path.relpath(os.path.expanduser(dest)))\n if prompt_yes_no('Write to %s?' % dest):\n buf = open(dest, 'w')\n buf.write(newconfig)\n buf.close()\n\n print('Saved to %s.' 
% dest)\n else:\n print(\n 'tmuxp has examples in JSON and YAML format at \\n'\n 'View tmuxp docs at '\n )\n sys.exit()\n\n\ndef command_import_tmuxinator(args):\n \"\"\"Import tmuxinator config to tmuxp format.\"\"\"\n if args.list:\n try:\n configs_in_user = config.in_dir(\n tmuxinator_config_dir, extensions='yml')\n except OSError:\n configs_in_user = []\n configs_in_cwd = config.in_dir(\n config_dir=cwd_dir, extensions='yml')\n\n output = ''\n\n if not os.path.exists(tmuxinator_config_dir):\n output += '# %s: \\n\\tDirectory doesn\\'t exist.\\n' % tmuxinator_config_dir\n elif not configs_in_user:\n output += '# %s: \\n\\tNone found.\\n' % tmuxinator_config_dir\n else:\n output += '# %s: \\n\\t%s\\n' % (\n config_dir, ', '.join(configs_in_user)\n )\n\n if configs_in_cwd:\n output += '# current directory:\\n\\t%s' % (\n ', '.join(configs_in_cwd)\n )\n\n print(output)\n\n if args.config:\n configfile = os.path.abspath(os.path.relpath(\n os.path.expanduser(args.config)))\n configparser = kaptan.Kaptan(handler='yaml')\n\n if os.path.exists(configfile):\n print(configfile)\n configparser.import_config(configfile)\n newconfig = config.import_tmuxinator(configparser.get())\n configparser.import_config(newconfig)\n else:\n sys.exit('File not found: %s' % configfile)\n\n config_format = prompt_choices('Convert to', choices=[\n 'yaml', 'json'], default='yaml')\n\n if config_format == 'yaml':\n newconfig = configparser.export(\n 'yaml', indent=2, default_flow_style=False\n )\n elif config_format == 'json':\n newconfig = configparser.export('json', indent=2)\n else:\n sys.exit('Unknown config format.')\n\n print(newconfig)\n print(\n '---------------------------------------------------------------')\n print(\n 'Configuration import does its best to convert tmuxinator files.\\n')\n if prompt_yes_no(\n 'The new config *WILL* require adjusting afterwards. Save config?'\n ):\n dest = None\n while not dest:\n dest_prompt = prompt('Save to: ', os.path.abspath(\n os.path.join(config_dir, 'myimport.%s' % config_format)))\n if os.path.exists(dest_prompt):\n print('%s exists. Pick a new filename.' % dest_prompt)\n continue\n\n dest = dest_prompt\n\n dest = os.path.abspath(os.path.relpath(os.path.expanduser(dest)))\n if prompt_yes_no('Write to %s?' % dest):\n buf = open(dest, 'w')\n buf.write(newconfig)\n buf.close()\n\n print('Saved to %s.' % dest)\n else:\n print(\n 'tmuxp has examples in JSON and YAML format at \\n'\n 'View tmuxp docs at '\n )\n sys.exit()\n\n\ndef command_convert(args):\n \"\"\"Convert tmuxp config to and from JSON and YAML.\"\"\"\n\n try:\n configfile = args.config\n except exc.TmuxpException:\n print('Please enter a config')\n\n file_user = os.path.join(config_dir, configfile)\n file_cwd = os.path.join(cwd_dir, configfile)\n if os.path.exists(file_cwd) and os.path.isfile(file_cwd):\n fullfile = os.path.normpath(file_cwd)\n filename, ext = os.path.splitext(file_cwd)\n elif os.path.exists(file_user) and os.path.isfile(file_user):\n\n fullfile = os.path.normpath(file_user)\n filename, ext = os.path.splitext(file_user)\n else:\n logger.error('%s not found.' % configfile)\n return\n\n if 'json' in ext:\n if prompt_yes_no('convert to <%s> to yaml?' % (fullfile)):\n configparser = kaptan.Kaptan()\n configparser.import_config(configfile)\n newfile = fullfile.replace(ext, '.yaml')\n newconfig = configparser.export(\n 'yaml', indent=2, default_flow_style=False\n )\n if prompt_yes_no('write config to %s?' 
% (newfile)):\n buf = open(newfile, 'w')\n buf.write(newconfig)\n buf.close()\n print('written new config to %s' % (newfile))\n elif 'yaml' in ext:\n if prompt_yes_no('convert to <%s> to json?' % (fullfile)):\n configparser = kaptan.Kaptan()\n configparser.import_config(configfile)\n newfile = fullfile.replace(ext, '.json')\n newconfig = configparser.export('json', indent=2)\n print(newconfig)\n if prompt_yes_no('write config to <%s>?' % (newfile)):\n buf = open(newfile, 'w')\n buf.write(newconfig)\n buf.close()\n print('written new config to <%s>.' % (newfile))\n\n\ndef command_attach_session(args):\n \"\"\"Command to attach / switch client to a tmux session.\"\"\"\n commands = []\n ctext = ' '.join(args.session_name)\n\n t = Server(\n socket_name=args.socket_name,\n socket_path=args.socket_path,\n colors=args.colors\n )\n\n try:\n session = next((s for s in t.sessions if s.get(\n 'session_name') == ctext), None)\n if not session:\n raise exc.TmuxpException('Session not found.')\n except exc.TmuxpException as e:\n print(e)\n return\n\n if 'TMUX' in os.environ:\n del os.environ['TMUX']\n session.switch_client()\n print('Inside tmux client, switching client.')\n else:\n session.attach_session()\n print('Attaching client.')\n\n\ndef command_kill_session(args):\n \"\"\"Command to kill a tmux session.\"\"\"\n commands = []\n ctext = ' '.join(args.session_name)\n\n t = Server(\n socket_name=args.socket_name or None,\n socket_path=args.socket_path or None\n )\n\n try:\n session = next((s for s in t.sessions if s.get(\n 'session_name') == ctext), None)\n if not session:\n raise exc.TmuxpException('Session not found.')\n except exc.TmuxpException as e:\n print(e)\n return\n\n try:\n session.kill_session()\n print(\"Killed session %s.\" % ctext)\n except exc.TmuxpException as e:\n logger.error(e)\n\ndef get_parser():\n \"\"\"Return :py:class:`argparse.ArgumentParser` instance for CLI.\"\"\"\n\n server_parser = argparse.ArgumentParser(add_help=False)\n\n # server_parser.add_argument(\n # '--log-level',\n # dest='log_level',\n # default='INFO',\n # metavar='log-level',\n # help='Log level e.g. INFO, DEBUG, ERROR'\n # )\n\n server_parser.add_argument(\n '-L', dest='socket_name',\n default=None,\n help='socket name of tmux server. Same as tmux.',\n metavar='socket-name'\n )\n\n server_parser.add_argument(\n '-S',\n dest='socket_path',\n default=None,\n help='socket path of tmux server. Same as tmux.',\n metavar='socket-path'\n )\n\n parser = argparse.ArgumentParser(\n description='Launch tmux workspace. 
'\n 'Help documentation: .',\n parents=[server_parser]\n )\n\n client_parser = argparse.ArgumentParser(add_help=False)\n colorsgroup = client_parser.add_mutually_exclusive_group()\n\n colorsgroup.add_argument(\n '-2',\n dest='colors',\n action='store_const',\n const=256,\n help='Force tmux to assume the terminal supports 256 colours.',\n )\n\n colorsgroup.add_argument(\n '-8',\n dest='colors',\n action='store_const',\n const=88,\n help='Like -2, but indicates that the terminal supports 88 colours.',\n )\n\n parser.set_defaults(colors=None)\n\n subparsers = parser.add_subparsers(\n title='commands',\n description='valid commands',\n )\n\n kill_session = subparsers.add_parser(\n 'kill-session',\n parents=[server_parser],\n help='Kill tmux session by name.'\n )\n kill_session.set_defaults(callback=command_kill_session)\n\n kill_session.add_argument(\n dest='session_name',\n type=str,\n nargs='+',\n default=None,\n help='Name of session',\n ).completer = SessionCompleter\n\n attach_session = subparsers.add_parser(\n 'attach-session',\n parents=[server_parser, client_parser],\n help='If run from outside tmux, create a new client in the current '\n 'terminal and attach it. If used from inside, switch the current '\n 'client.'\n )\n attach_session.set_defaults(callback=command_attach_session)\n\n attach_session.add_argument(\n dest='session_name',\n nargs='+',\n type=str,\n help='Name of session',\n ).completer = SessionCompleter\n\n freeze = subparsers.add_parser(\n 'freeze',\n parents=[server_parser],\n help='Create a snapshot of a tmux session and save it to JSON or YAML.'\n )\n freeze.set_defaults(callback=command_freeze)\n\n freeze.add_argument(\n dest='session_name',\n type=str,\n nargs='+',\n help='Name of session',\n ).completer = SessionCompleter\n\n load = subparsers.add_parser(\n 'load',\n parents=[server_parser, client_parser],\n help='Load a configuration from file. Attach the session. 
If session '\n 'already exists, offer to attach instead.'\n )\n\n loadgroup = load.add_mutually_exclusive_group(required=True)\n loadgroup.add_argument(\n '--list', dest='list', action='store_true',\n help='List config files available',\n )\n\n loadgroup.add_argument(\n dest='config',\n type=str,\n nargs='?',\n help='List config available in working directory and config folder.'\n ).completer = ConfigFileCompleter(allowednames=('.yaml', '.json'), directories=False)\n load.set_defaults(callback=command_load)\n\n convert = subparsers.add_parser(\n 'convert',\n help='Convert tmuxp config between YAML and JSON format.'\n )\n\n convert.add_argument(\n dest='config',\n type=str,\n default=None,\n help='Absolute or relative path to config file.'\n ).completer = ConfigFileCompleter(allowednames=('.yaml', '.json'), directories=False)\n\n convert.set_defaults(callback=command_convert)\n\n importparser = subparsers.add_parser(\n 'import',\n help='Import configurations from teamocil and tmuxinator.'\n )\n importsubparser = importparser.add_subparsers(\n title='commands',\n description='valid commands',\n help='additional help'\n )\n\n import_teamocil = importsubparser.add_parser(\n 'teamocil',\n help=\"Parse teamocil configurations into tmuxp format\"\n )\n\n import_teamocilgroup = import_teamocil.add_mutually_exclusive_group(\n required=True\n )\n import_teamocilgroup.add_argument(\n '--list', dest='list', action='store_true',\n help='List configs in ~/.teamocil and current working directory.'\n )\n\n import_teamocilgroup.add_argument(\n dest='config',\n type=str,\n nargs='?',\n help='''\\\n Checks current ~/.teamocil and current directory for yaml files.\n '''\n ).completer = TeamocilCompleter(allowednames=('.yml'), directories=False)\n import_teamocil.set_defaults(callback=command_import_teamocil)\n\n import_tmuxinator = importsubparser.add_parser(\n 'tmuxinator',\n help=\"Parse teamocil configurations into tmuxp format\"\n )\n\n import_tmuxinatorgroup = import_tmuxinator.add_mutually_exclusive_group(\n required=True)\n import_tmuxinatorgroup.add_argument(\n '--list', dest='list', action='store_true',\n help='List yaml configs in ~/.tmuxinator and current working directory.'\n )\n\n import_tmuxinatorgroup.add_argument(\n dest='config',\n type=str,\n nargs='?',\n help='''\\\n Checks current ~/.tmuxinator and current directory for yaml files.\n '''\n ).completer = TmuxinatorCompleter(allowednames=('.yml'), directories=False)\n\n import_tmuxinator.set_defaults(callback=command_import_tmuxinator)\n\n # http://stackoverflow.com/questions/8521612/argparse-optional-subparser\n parser.add_argument(\n '-v', '--version', action='version',\n version='tmuxp %s' % __version__,\n help='Prints the tmuxp version',\n )\n\n return parser\n\n\ndef main():\n\n parser = get_parser()\n\n argcomplete.autocomplete(parser, always_complete_options=False)\n\n args = parser.parse_args()\n\n setup_logger(level=args.log_level.upper() if 'log_level' in args else 'INFO')\n\n try:\n util.has_required_tmux_version()\n except exc.TmuxpException as e:\n logger.error(e)\n sys.exit()\n\n util.oh_my_zsh_auto_title()\n\n t = Server(\n socket_name=args.socket_name,\n socket_path=args.socket_path,\n colors=args.colors\n )\n\n if args.callback is command_load:\n command_load(args)\n elif args.callback is command_convert:\n command_convert(args)\n elif args.callback is command_import_teamocil:\n command_import_teamocil(args)\n elif args.callback is command_import_tmuxinator:\n command_import_tmuxinator(args)\n elif args.callback is 
command_freeze:\n command_freeze(args)\n elif args.callback is command_attach_session:\n command_attach_session(args)\n elif args.callback is command_kill_session:\n command_kill_session(args)\n else:\n parser.print_help()\n","sub_path":"tmuxp/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":28734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146067244","text":"\"\"\"Database module to handle all database functions of podcatcher\n\"\"\"\n\nimport sqlite3\n\nDB_PATH = \"C:/Daten/Projekte/Python-Projekte/podcatcher/src/database.sq3\"\n\nST_UPDATE_DAILY = 0\nST_UPDATE_WEEKLY = 1\nST_NO_UPDATE = 2\n\nclass DB(object):\n \"\"\"simple handler of sqlite3 queries.\n \"\"\"\n def __init__(self,filepath=DB_PATH):\n self.conn = sqlite3.connect(filepath)\n self.cursor = self.conn.cursor()\n\n def __enter__(self):\n \"\"\"for using pythons with-statement.\n \"\"\"\n return self\n\n def __exit__(self, type, value, traceback):\n \"\"\"called after with-statement block.\n \"\"\"\n self.conn.close()\n\n def getLastId(self):\n return self.cursor.lastrowid\n\n def sql(self, sql, parameters=()):\n \"\"\"execute query and return result if present.\n \"\"\"\n self.cursor.execute(sql,parameters)\n self.conn.commit()\n return self.cursor.fetchall()\n\n# def createTableCasts():\n# \"\"\"database-init: table casts\n# \"\"\"\n# with DB() as dbHandler:\n# dbHandler.sql(\n# \"CREATE TABLE casts (id INTEGER PRIMARY KEY \\\n# AUTOINCREMENT, title TEXT, url TEXT,\\\n# last_updated TEXT, short_title TEXT, status INT)\"\n# )\n\n# def createTableShows():\n# \"\"\"database-init: table shows\n# \"\"\"\n# with DB() as dbHandler:\n# dbHandler.sql(\n# \"CREATE TABLE shows (id INTEGER PRIMARY KEY AUTOINCREMENT, \\\n# feed_id TEXT, title TEXT, subtitle TEXT, author TEXT, \\\n# media_link TEXT, published TEXT, status INT, hash TEXT)\"\n# )\n\ndef get_cast_data(cast_id):\n with DB() as db_handler:\n result = db_handler.sql(\n 'SELECT title, short_title, url, last_updated \\\n FROM casts WHERE id=?',\n (cast_id,) # parameters must be a sequence; a bare string binds one parameter per character\n )\n if result:\n data = {\n 'title': result[0][0],\n 'short_title': result[0][1],\n 'url': result[0][2],\n 'last_updated': result[0][3]\n }\n return data\n else:\n raise KeyError(\"Cast with id %d doesn't exist.\" % cast_id)\n\ndef get_ids_for_update(status):\n sql_add = ''\n if status == ST_UPDATE_WEEKLY:\n pass\n\n with DB() as db_handler:\n result = db_handler.sql(\n 'SELECT id from casts WHERE status = ?',\n (status,))\n return [row[0] for row in result]\n\ndef change_feed_url(cast_id, new_url):\n with DB() as db_handler:\n result = db_handler.sql(\n 'UPDATE casts SET url=? 
WHERE id=?',\n (new_url, cast_id)\n )\n\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"288130633","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom faker import Faker\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\n\nimport os\nimport datetime as dt\nfrom tqdm import tqdm\n\n\n# # Setup Faker and Random Seeds\n\n# In[2]:\n\n\nfake = Faker('en_US')\nFaker.seed(42)\nnp.random.seed(43)\n\n\n# # Tweak Parent Distributions\n\n# In[3]:\n\n\np_null_end_date = 0.1\n\n\n# In[4]:\n\n\np_n_visits = 0.5\nsns.distplot(np.random.geometric(p=p_n_visits, size=10000), kde=False).set_title('N(Visits)');\n\n\n# In[5]:\n\n\np_n_procedures = 0.33\nmean_n_procedures = 1/p_n_procedures # for geometric distribution\nsns.distplot(np.random.geometric(p=p_n_procedures, size=10000), kde=False).set_title('N)Procedures)');\n\n\n# In[6]:\n\n\np_procedure_start_shift = 0.55\nsns.distplot(np.random.geometric(p=p_procedure_start_shift, size=10000)-1, kde=False).set_title('Procedure Start Shift');\n\n\n# In[7]:\n\n\np_procedure_duration = 0.22\nsns.distplot(np.random.geometric(p=p_procedure_duration, size=10000)-1).set_title('Procedure Duration');\n\n\n# # Generate Fake Data\n\n# In[8]:\n\n\nn_patients = 10000\nrows = []\n\nfor i_patient in tqdm(range(n_patients), desc='Patients'):\n patient_key = fake.md5()[0:10]\n for i_visit in range(np.random.geometric(p=p_n_visits)):\n facility_state = fake.state_abbr()\n dt_visit_start = fake.date_between(start_date=dt.date(2018, 1,1), end_date='today')\n n_procedures = np.random.geometric(p=p_n_procedures)\n for i_procedure in range(n_procedures):\n procedure = fake.pystr_format(string_format='{{random_letter}}', letters='ABCDEFGH').upper() # IJKLMNOPQRSTUVWXYZ\n dt_start = dt_visit_start + dt.timedelta(days=np.random.geometric(p=p_procedure_start_shift)-1)\n\n if np.random.rand() < p_null_end_date:\n dt_end = None\n else:\n # correlate p_procedure_duration by the number of procedures\n # if this visit only has 1 procedure it should be more likely to be a 1 day duration to be realistic (ie outpatient vs multiday inpatient treatment)\n\n this_p_procedure_duration = p_procedure_duration*( mean_n_procedures / n_procedures )\n\n # hard code limits on this_p_procedure_duration so it is still valid\n if this_p_procedure_duration < 0.9*p_procedure_duration:\n this_p_procedure_duration = 0.9*p_procedure_duration\n elif 0.8 < this_p_procedure_duration:\n this_p_procedure_duration = 0.8\n\n dt_end = dt_visit_start + dt.timedelta(days=np.random.geometric(p=this_p_procedure_duration)-1)\n\n rows.append({\n 'patient_key': patient_key,\n 'facility_state': facility_state,\n 'procedure': procedure,\n 'service_from': dt_start,\n 'service_to': dt_end,\n })\n\n\n# In[9]:\n\n\ndfp = pd.DataFrame(rows)\n\n\n# In[10]:\n\n\nlen(dfp.index)\n\n\n# In[11]:\n\n\ndfp.head(20)\n\n\n# # Shuffle and Save to CSV\n\n# In[12]:\n\n\ndfp = dfp.sample(frac=1).reset_index(drop=True)\n\n\n# In[13]:\n\n\noutput_path = '../output'\nos.makedirs(output_path, exist_ok=True)\n\ndfp.to_csv(f'{output_path}/randomly_generated_service_lines.csv', index=False, na_rep='nan')\n\n","sub_path":"notebooks/fake_health_claims_data.py","file_name":"fake_health_claims_data.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636159916","text":"\"\"\"Test SMTP over 
SSL/TLS.\"\"\"\n\nimport ssl\nimport socket\nimport unittest\nimport pkg_resources\n\nfrom aiosmtpd.controller import Controller as BaseController\nfrom aiosmtpd.smtp import SMTP as SMTPProtocol\nfrom contextlib import ExitStack\nfrom aiosmtpd.testing.helpers import (\n ReceivingHandler,\n get_server_context\n)\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP_SSL\nfrom unittest.mock import patch\n\n\nModuleResources = ExitStack()\n\n\ndef setUpModule():\n # Needed especially on FreeBSD because socket.getfqdn() is slow on that OS,\n # and oftentimes (not always, though) leads to Error\n ModuleResources.enter_context(patch(\"socket.getfqdn\", return_value=\"localhost\"))\n\n\ndef tearDownModule():\n ModuleResources.close()\n\n\nclass Controller(BaseController):\n def factory(self):\n return SMTPProtocol(self.handler)\n\n\ndef get_client_context():\n context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)\n context.check_hostname = False\n context.load_verify_locations(\n cafile=pkg_resources.resource_filename(\n 'aiosmtpd.tests.certs', 'server.crt'))\n return context\n\n\nclass TestSMTPS(unittest.TestCase):\n def setUp(self):\n self.handler = ReceivingHandler()\n controller = Controller(self.handler, ssl_context=get_server_context())\n controller.start()\n self.addCleanup(controller.stop)\n self.address = (controller.hostname, controller.port)\n\n def test_smtps(self):\n with SMTP_SSL(*self.address, context=get_client_context()) as client:\n code, response = client.helo('example.com')\n self.assertEqual(code, 250)\n self.assertEqual(response, socket.getfqdn().encode('utf-8'))\n client.send_message(\n MIMEText('hi'), 'sender@example.com', 'rcpt1@example.com')\n self.assertEqual(len(self.handler.box), 1)\n envelope = self.handler.box[0]\n self.assertEqual(envelope.mail_from, 'sender@example.com')\n","sub_path":"aiosmtpd/tests/test_smtps.py","file_name":"test_smtps.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"448167484","text":"import sys\nimport regex as re\nimport os\n\n\ndef getFiles(dir, suffix):\n files = []\n for file in os.listdir(dir):\n if(file.endswith(suffix)):\n files.append(os.path.join(dir, file)) # keep the directory prefix so the files can be opened later\n return files\n\ndef tokenize(text):\n words = re.split('\\P{L}+', text)\n words = [w for w in words if w] # drop any empty tokens produced by the split\n words.sort()\n return words\n\ndef count(files):\n # accumulate word frequencies and the total word count over all files\n dick = {}\n counter = 0\n for f in files:\n text = open(f).read().lower()\n words = tokenize(text)\n for w in words:\n counter += 1\n dick[w] = dick.get(w, 0) + 1\n return dick, counter\n\n\ndef main(arg):\n files = getFiles(arg, 'txt')\n# text = open('file.txt').read()\n# words = tokenize(text)\n dick, nbrOfWords = count(files)\n for d in dick.keys():\n print(d, dick[d]/nbrOfWords)\n\n \nif __name__ == '__main__':\n for arg in sys.argv[1:]:\n main(arg)\n \n","sub_path":"TentaPlugg/counter2.py","file_name":"counter2.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"67206611","text":"import csv\nimport os\nfrom pathlib import Path\n\n\nclass GameWriter:\n \"\"\"\n Writes data for games to output csv as series of features\n \"\"\"\n\n def __init__(self, output_path, games_list, append=False):\n \"\"\"\n :param output_path: specify the output path for the csv file\n :param games_list: 
list of games to write (each game gets its own row)\n :param append: open in append mode\n \"\"\"\n self.output_path = Path(output_path)\n self.games_list = games_list\n self.should_append_to_file = append\n\n def write(self):\n \"\"\"\n Writes all games in self.games_list to the csv\n\n :return: None\n \"\"\"\n headers = self.games_list[0].keys() # get the list of csv headers\n file_exists = self.output_path.exists()\n if file_exists and not self.should_append_to_file: # remove file if exists and not append\n os.remove(self.output_path)\n # writes games (which are dictionaries) to the csv using csv.DictWriter\n with self.output_path.open(\"a\") as games_csv:\n csv_writer = csv.DictWriter(games_csv, fieldnames=headers, lineterminator='\\n')\n if not file_exists:\n csv_writer.writeheader() # only add the headers once\n for game in self.games_list:\n csv_writer.writerow(game)\n","sub_path":"score_writer/game_writer.py","file_name":"game_writer.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403592382","text":"import wave\nimport os\n\nimport pyaudio\n\nfrom modules.utils import load\n\nFSAMP = 48000 # Sampling frequency in Hz\nSRC_DIR = 'exp_data/20200208'\nDST_DIR = 'processed/20200208_wav'\n\np = pyaudio.PyAudio()\n\ndef pkl2wav(pkl_fp, wav_fp):\n pkl = load(pkl_fp)\n with wave.open(wav_fp, 'wb') as f:\n f.setnchannels(1)\n f.setsampwidth(p.get_sample_size(pyaudio.paInt16))\n f.setframerate(FSAMP)\n f.writeframes(b\"\".join(pkl))\n\nif __name__ == '__main__':\n names = os.listdir(SRC_DIR)\n src_fps = [f'{SRC_DIR}/{x}' for x in names]\n dst_fps = [f'{DST_DIR}/{x.split(\".\")[0]}.wav' for x in names]\n os.makedirs(DST_DIR)\n for src_fp, dst_fp in zip(src_fps, dst_fps):\n pkl2wav(src_fp, dst_fp)\n print(dst_fp)\n","sub_path":"useful_scripts/convert_pkl2wav.py","file_name":"convert_pkl2wav.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135994862","text":"import bpy\nfrom bpy.app.handlers import persistent\nfrom photogrammetry_importer.point import Point\nfrom photogrammetry_importer.opengl.draw_manager import DrawManager\n\nfrom photogrammetry_importer.utils.blender_utils import add_empty\nfrom photogrammetry_importer.utils.blender_point_utils import compute_particle_coord_texture\nfrom photogrammetry_importer.utils.blender_point_utils import compute_particle_color_texture\nfrom photogrammetry_importer.blender_logging import log_report\n\ndef draw_points(op, points, add_points_to_point_cloud_handle, reconstruction_collection=None):\n\n log_report('INFO', 'Add particle draw handlers', op)\n\n coords, colors = Point.split_points(points)\n object_anchor_handle = add_empty(\n \"OpenGL Point Cloud\", reconstruction_collection)\n if add_points_to_point_cloud_handle:\n object_anchor_handle['particle_coords'] = coords\n object_anchor_handle['particle_colors'] = colors\n bpy.context.scene['contains_opengl_point_clouds'] = True\n\n draw_manager = DrawManager.get_singleton()\n draw_manager.register_points_draw_callback(\n object_anchor_handle, coords, colors)\n\n\n@persistent\ndef redraw_points(dummy):\n\n # This test is very cheap, so it will not cause \n # huge overheads for scenes without point clouds\n if 'contains_opengl_point_clouds' in bpy.context.scene:\n\n log_report('INFO', 'Checking scene for missing point cloud draw handlers', dummy)\n for obj in bpy.data.objects:\n if 'particle_coords' in 
obj and 'particle_colors' in obj:\n coords = obj['particle_coords']\n colors = obj['particle_colors']\n\n draw_manager = DrawManager.get_singleton()\n draw_manager.register_points_draw_callback(\n obj, coords, colors)\n viz_point_size = bpy.context.scene.opengl_panel_viz_settings.viz_point_size\n draw_manager.set_point_size(viz_point_size)\n\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n area.tag_redraw()\n break\n","sub_path":"photogrammetry_importer/opengl/visualization_utils.py","file_name":"visualization_utils.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"92640196","text":"# -*-coding:utf-8 -*-\n\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine\nfrom dateutil.parser import parse\nimport time\nimport csv, os\n\n\nclass PDFPageDetailedAggregator(PDFPageAggregator):\n def __init__(self, rsrcmgr, pageno=1, laparams=None):\n PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)\n self.rows = []\n self.page_number = 0\n\n def receive_layout(self, ltpage):\n def render(item, page_number):\n if isinstance(item, LTPage) or isinstance(item, LTTextBox):\n for child in item:\n render(child, page_number)\n elif isinstance(item, LTTextLine):\n child_str = ''\n for child in item:\n if isinstance(child, (LTChar, LTAnno)):\n child_str += child.get_text()\n child_str = ' '.join(child_str.split()).strip()\n if child_str:\n row = (page_number, item.bbox[0], item.bbox[1], item.bbox[2], item.bbox[3],\n child_str) # bbox == (x1, y1, x2, y2)\n self.rows.append(row)\n for child in item:\n render(child, page_number)\n return\n\n render(ltpage, self.page_number)\n self.page_number += 1\n self.rows = sorted(self.rows, key=lambda x: (x[0], -x[2]))\n self.result = ltpage\n\n\nclass PdfExtractor:\n def __init__(self):\n self.a=1\n\n def get_pdf_text(self,pdf_path):\n open_file = open(pdf_path, 'rb')\n parser = PDFParser(open_file)\n doc = PDFDocument(parser)\n rsrcmgr = PDFResourceManager()\n laparams = LAParams()\n device = PDFPageDetailedAggregator(rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.create_pages(doc):\n interpreter.process_page(page)\n # receive the LTPage object for this page\n device.get_result()\n device_data_list = device.rows\n # print(device_data_list)\n device_data_list = ' '.join([i[-1] for i in device_data_list])\n # print(device_data_list)\n open_file.close()\n return device_data_list\n\n\nif __name__ == \"__main__\":\n obj = PdfExtractor()\n data_list = obj.get_pdf_text(\"doc_file/90378400.pdf\")\n print(data_list)","sub_path":"src/pdf_text_extraction.py","file_name":"pdf_text_extraction.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"417087464","text":"import os, time, json, tarfile, gc, pandas as pd, sys\nfrom itertools import islice\n\n\"\"\"Task Modules\"\"\"\nfrom etl.utils.commons import module_format, check_fobj_exists, remove_file\nfrom etl.modules import CLEANED_FILE_NAME_TEMPLATE, AGGREGATED_INFO, USER_FILE\n\n\nclass IO():\n\n def __init__(self, definition):\n # Module Definition\n self.definition = 
definition\n # Tar Filename\n assert 'filename' in self.definition, \"Tar file path not found!\"\n # Pretty Print Module Name\n module_format(self.definition['name'])\n\n def format_input(self, files_to_process):\n \"\"\" Add more info to file process procedure \"\"\"\n return [(file, self.definition) for file in files_to_process]\n\n def _delete_file_if_exists(self):\n \"\"\" Remove files when we run it the next time, so that results are refreshed \"\"\"\n if check_fobj_exists(AGGREGATED_INFO):\n remove_file(AGGREGATED_INFO)\n\n def _check_file_existence(self, file_to_check):\n \"\"\" Remove files when we run it the next time, so that results are refreshed \"\"\"\n if file_to_check == USER_FILE and check_fobj_exists(AGGREGATED_INFO):\n remove_file(AGGREGATED_INFO)\n\n def read_tar_file(self, filename):\n assert check_fobj_exists(filename), \"Tar File not found : \" + filename\n return tarfile.open(filename, \"r\")\n\n def process_file(self, inputs_to_process):\n file_to_process, definition = inputs_to_process\n print(\"Processing for file : \", file_to_process)\n # File to record total records in processed files\n line_count = 0\n file_to_write = CLEANED_FILE_NAME_TEMPLATE + file_to_process\n # If file already exists, do not decompress and clean\n if not os.path.isfile(file_to_write):\n # When decompressing user file, re-write aggregate file\n self._check_file_existence(file_to_process)\n # open files to read and write\n aggregated_info = open(AGGREGATED_INFO, 'a+')\n tar = self.read_tar_file(definition['filename'])\n write_file_obj = open(file_to_write, 'w', buffering=100 * (1024 ** 2))\n # Extract only required files from tar\n for member in tar:\n if member.name == file_to_process:\n # Process file object line by line\n fobj = tar.extractfile(member)\n for line in fobj:\n line_count += 1\n # Decode bytes to JSON\n jsonstr = line.decode('utf8').replace(\"','\", '\",\"')\n try:\n # Extract specific fields from JSON\n json_data = json.loads(jsonstr)\n updated_data = dict(\n (k, json_data[k]) for k in definition['to_extract_files'][file_to_process])\n # Write NEWLINE delimited JSON along with new lines at the end\n write_file_obj.write(json.dumps(updated_data))\n write_file_obj.write('\\n')\n except ValueError:\n print(jsonstr)\n print('Decoding JSON has failed. This record will not be loaded.\\n')\n break\n # Recoup tar members to gain memory and collect garbage\n tar.members = []\n gc.collect()\n # Close all opened files\n write_file_obj.close()\n tar.close()\n # Push total rows count to the info file\n aggregated_info.write(json.dumps({file_to_process: line_count}))\n aggregated_info.write(\"\\n\")\n aggregated_info.close()\n else:\n print(file_to_write + \" Exists! 
Hence skipping !!\")\n\n def run(self):\n start_time = time.time()\n files_to_process = self.definition['to_extract_files'].keys()\n for files in self.format_input(files_to_process):\n self.process_file(files)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n module_format(self.definition['name'], type=1)\n\n","sub_path":"container_folder/newyorker_task/etl/modules/decompress_and_clean.py","file_name":"decompress_and_clean.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"513256365","text":"# Author : Andrzej Wojciechowski (AAWO)\n# Copyright : Andrzej Wojciechowski (AAWO)\n# --------------------------------------------\nfrom sys import argv, stdout\nfrom random import randrange\n\nif len(argv) == 3:\n stdout.write(str(randrange(int(argv[1]), int(argv[2])+1)))\nelif len(argv) == 4:\n stdout.write(str(randrange(int(argv[1]), int(argv[2])+1, int(argv[3]))))\nelse:\n argv_num = (len(argv)-1)\n raise TypeError(\"Wrong number of arguments. Expected 2 or 3 - received %d\" % argv_num)\n","sub_path":"run/randInt.py","file_name":"randInt.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"136985351","text":"import numpy as np\r\nimport frontend as ph\r\nfrom frontend.library.svm import SVMModel\r\n\r\nph.env.pyhusky_start()\r\n\r\ndef line_parse(line):\r\n data = line.split()\r\n return ( np.array(data[:-1], dtype=float), float(data[-1]) )\r\n\r\ndef svm_hdfs():\r\n SVM_model = SVMModel()\r\n # Data can be loaded from hdfs directly\r\n # By providing hdfs url\r\n SVM_model.load_hdfs(\"hdfs:///datasets/classification/a9t\")\r\n # Train the model\r\n SVM_model.train(n_iter = 10, alpha = 0.1)\r\n\r\n # Show the parameter\r\n # print \"Vector of Parameters:\"\r\n # print LR_model.get_param()\r\n # print \"intercpet term: \" + str(LR_model.get_intercept())\r\n\r\nsvm_hdfs()\r\n","sub_path":"examples/pyhusky_svm.py","file_name":"pyhusky_svm.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9356013","text":"from tensorflow.keras import layers, Model, Sequential\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\nfrom tensorflow.keras.layers import SeparableConv2D, DepthwiseConv2D\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Dense, Activation, Permute, Dropout\nfrom tensorflow.keras.constraints import max_norm\nfrom tensorflow.keras.layers import Input, Flatten\n\n\nclass ConvBNReLU(layers.Layer):\n def __init__(self, out_channel, kernel_size=3, stride=1, **kwargs):\n super(ConvBNReLU, self).__init__(**kwargs)\n self.conv = layers.Conv2D(filters=out_channel, kernel_size=kernel_size,\n strides=stride, padding='SAME', use_bias=False, name='Conv2d')\n self.bn = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name='BatchNorm')\n self.activation = layers.ReLU(max_value=6.0)\n\n def call(self, inputs, training=False):\n x = self.conv(inputs)\n x = self.bn(x, training=training)\n x = self.activation(x)\n return x\n\n\nclass InvertedResidual(layers.Layer):\n def __init__(self, in_channel, out_channel, stride, expand_ratio, **kwargs):\n super(InvertedResidual, self).__init__(**kwargs)\n self.hidden_channel = in_channel * expand_ratio\n self.use_shortcut = stride == 1 and in_channel == 
 out_channel\n\n layer_list = []\n if expand_ratio != 1:\n # 1x1 pointwise conv\n layer_list.append(ConvBNReLU(out_channel=self.hidden_channel, kernel_size=1, name='expand'))\n\n layer_list.extend([\n # 3x3 depthwise conv\n layers.DepthwiseConv2D(kernel_size=3, padding='SAME', strides=stride,\n use_bias=False, name='depthwise'),\n layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name='depthwise/BatchNorm'),\n layers.ReLU(max_value=6.0),\n # 1x1 pointwise conv(linear)\n layers.Conv2D(filters=out_channel, kernel_size=1, strides=1,\n padding='SAME', use_bias=False, name='project'),\n layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name='project/BatchNorm')\n ])\n self.main_branch = Sequential(layer_list, name='expanded_conv')\n\n def call(self, inputs, training=False, **kwargs):\n if self.use_shortcut:\n return inputs + self.main_branch(inputs, training=training)\n else:\n return self.main_branch(inputs, training=training)\n\n\ndef MobileNetV2(Chans=64,\n Samples=128,\n num_classes=3,\n alpha=1.0,\n round_nearest=8,\n include_top=True, dropoutRate=0.5):\n block = InvertedResidual\n input_main = Input((1, Chans, Samples))\n block1 = Conv2D(10, (1, 5), padding=\"same\",\n input_shape=(1, Chans, Samples), use_bias=False,\n kernel_constraint=max_norm(2., axis=(0, 1, 2)))(input_main)\n # block1 = BatchNormalization(axis=1)(block1)\n block1 = Conv2D(10, (Chans, 1), use_bias=False,\n kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block1)\n block1 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1) \\\n (block1) # axis=1:channel_first,data_format=\"batch_shape + (rows, cols, channels)\"\n block1 = Activation('elu')(block1)\n block1 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block1)\n block1 = Dropout(dropoutRate)(block1)\n\n block2 = block(in_channel=10, out_channel=16, stride=1, expand_ratio=4)(block1)\n\n # input_channel = _make_divisible(32 * alpha, round_nearest)\n # last_channel = _make_divisible(1280 * alpha, round_nearest)\n\n # input_image = layers.Input(shape=(im_height, im_width, 3), dtype='float32')\n # conv1\n # x = ConvBNReLU(input_channel, stride=2, name='Conv')(input_image)\n # building inverted residual residual blockes\n # for idx, (t, c, n, s) in enumerate(inverted_residual_setting):\n # output_channel = _make_divisible(c * alpha, round_nearest)\n # for i in range(n):\n # stride = s if i == 0 else 1\n # x = block(x.shape[-1],\n # output_channel,\n # stride,\n # expand_ratio=t)(x)\n # # building last several layers\n # NOTE (assumption): the original line referenced names left undefined by the commented-out block (x, last_channel); build the head from block2 with an assumed 64-filter 1x1 conv\n x = ConvBNReLU(out_channel=64, kernel_size=1, name='Conv_1')(block2)\n\n if include_top is True:\n # building classifier\n x = layers.GlobalAveragePooling2D()(x) # pool + flatten\n x = layers.Dropout(0.2)(x)\n output = layers.Dense(num_classes, name='Logits')(x)\n else:\n output = x\n\n model = Model(inputs=input_main, outputs=output) # was input_image, which is undefined; the graph starts at input_main\n return model\n","sub_path":"Attention_CNN/Models.py","file_name":"Models.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"8450162","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, sys, shutil, argparse\n\ndef removeBom(fileFrom):\n # BOM(Byte Order Mark)を除去\n with open(fileFrom, \"r\", encoding = 'utf_8_sig') as in_f:\n with open(\"tmp_file\", \"w\", encoding = 'UTF-8') as out_f:\n for str in in_f:\n out_f.write(str)\n\n # 元ファイルを変換後の内容で上書き\n shutil.move(\"tmp_file\", fileFrom)\n\nif __name__ == '__main__':\n try:\n # コマンドライン引数設定\n parser = argparse.ArgumentParser()\n parser.add_argument('-i',
'--input')\n\n # コマンドライン引数受け取り\n args = parser.parse_args()\n arg_input = str(args.input)\n\n # ファイル単品の場合\n in_file = arg_input\n if os.path.isfile(in_file):\n # BOM除去実行\n removeBom(in_file)\n sys.exit()\n else:\n # 変換対象ディレクトリの存在チェック\n in_dir = arg_input\n if not os.path.exists(in_dir):\n print(\"変換元が存在しません\")\n sys.exit()\n\n for dirpath, dirnames, filenames in os.walk(in_dir):\n for filename in filenames:\n print(os.path.join(dirpath, filename))\n in_file = os.path.join(dirpath, filename)\n\n if os.path.isfile(in_file):\n # BOM除去実行\n removeBom(in_file)\n except Exception as e:\n print(e)\n","sub_path":"02_ファイル操作系/BOM除去/removeBOM.py","file_name":"removeBOM.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45931766","text":"def slope(n0,n1):\n return((int(H[n0-1])-int(H[n1-1]))/(n0-n1))\n\ndef maxGlide2(start, stop, flag):\n global H2\n global ans\n global maxi\n if(start == stop):\n y = sum(H2)\n if(y > maxi):\n maxi = y\n ans.append(list(H2))\n elif(start < stop):\n maxSlope=-10e10\n if(flag == 1):\n H2 = []\n maxi = 0\n ans = []\n H2.append(int(A[start-1]))\n for i in range(start+1, stop+1):\n s2=slope(start,i)\n if(int(H[start-1]) > int(H[i-1])):\n if(s2 >=maxSlope):\n maxSlope=s2\n H2.append(int(A[i-1]))\n maxGlide2(i, stop, 0)\n H2.pop()\n else:\n break\n if(flag == 1):\n H2.pop()\n else:\n minSlope=10e10\n if(flag == 1):\n H2 = []\n maxi = 0\n ans = []\n H2.append(int(A[start-1]))\n for i in range(start-1, stop-1, -1):\n s2=slope(start,i)\n if(int(H[start-1]) > int(H[i-1])):\n if(s2<=minSlope):\n minSlope=s2\n H2.append(int(A[i-1]))\n maxGlide2(i, stop, 0)\n H2.pop()\n else:\n break\n if(flag == 1):\n H2.pop()\n\n\ndef updater(b, c):\n A[b-1] = str(c)\n\n\nH2 = []\nmaxi = 0\nans = []\nw = input().split(\" \")\nN = int(w[0])\nQ = int(w[1])\nH = input().split(\" \")\nA = input().split(\" \")\nfor i in range(Q):\n X = input().split(\" \")\n if(X[0] == \"1\"):\n updater(int(X[1]), int(X[2]))\n else:\n maxGlide2(int(X[1]), int(X[2]), 1)\n if(maxi != 0):\n print(maxi)\n else:\n print(-1)\n","sub_path":"chef_and_dragon_dens.py","file_name":"chef_and_dragon_dens.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"452421110","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n编写创建数据库的类,并构建connectMysql方法\nauthor:王诚坤\ndate:2018/10/16\n\"\"\"\n\nimport pymysql\n\n\nclass MySQLCommand(object):\n # 初始化类\n def __init__(self):\n # 数据库地址\n self.host = '192.168.1.181'\n # 端口号\n self.port = 3306\n # 用户名\n self.user = 'root'\n # 密码\n self.password = 'sim509'\n # 数据库名\n self.db = 'tencent_word_vec'\n # 数据库表名\n self.table = 'completed'\n\n def connectMysql(self):\n \"\"\"\n 建立数据库连接\n :return:\n \"\"\"\n try:\n self.conn = pymysql.connect(host=self.host, port=self.port, user=self.user,\n passwd=self.password, db=self.db, charset='utf8')\n self.cursor = self.conn.cursor()\n print(\"数据库已连接!\")\n except pymysql.Error as e:\n print('连接数据库失败!')\n print(e)\n\n def insertData(self, data_dict, primary_key='word'):\n '''\n 将数据插入数据库,首先检查数据是否已经存在,如果存在则不插入\n :param data_dict: 要插入的数据字典\n :param primary_key: 主键\n :return:\n '''\n\n # 检测数据是否存在\n sqlExit = 'SELECT ' + primary_key + ' FROM tc_word_vec WHERE ' + primary_key + \" = %s \" % (\n data_dict[primary_key])\n # 执行查找语句\n res = self.cursor.execute(sqlExit)\n if res:\n print('数据已经存入数据库', res)\n return 0\n # 数据不存在,则执行插入操作\n try:\n # 拼接属性名\n cols = ','.join(data_dict.keys())\n # 
拼接属性名对应的值\n values = '\",\"'.join(data_dict.values())\n # 插入语句\n sql = \"INSERT INTO tc_word_vec (%s) VALUES (%s)\" % (cols, '\"' + values + '\"')\n\n try:\n # 执行插入操作\n result = self.cursor.execute(sql)\n insert_id = self.conn.insert_id()\n self.conn.commit()\n\n if result:\n print('插入成功', insert_id)\n return insert_id + 1\n except pymysql.Error as e:\n # 如果出现异常,执行回滚操作\n self.conn.rollback()\n if \"key 'PRIMARY'\" in e.args[1]:\n print('数据已存在,未再次插入!')\n else:\n print(\"插入数据失败,原因 %d: %s\" % (e.args[0], e.args[1]))\n except pymysql.Error as e:\n print(\"数据库错误,原因 %d: %s\" % (e.args[0], e.args[1]))\n\n def select_word(self, word):\n sql = \"SELECT * FROM tc_word_vec WHERE word = '%s'\" % word\n res = self.cursor.execute(sql)\n if res:\n result = self.cursor.fetchone()\n return result\n else:\n raise Exception(\"数据库中没有找到该'%s'!\" % word)\n\n def closeMysql(self):\n \"\"\"\n 关闭数据库连接\n :return:\n \"\"\"\n self.cursor.close()\n self.conn.close()\n print('数据库连接已关闭!')\n\n\nif __name__ == '__main__':\n # 初始化并建立数据库连接\n conn = MySQLCommand()\n conn.connectMysql()\n # 查找‘机器学习’\n result = conn.select_word('真好')\n # 获取词语\n print(result[0])\n # 获取词语对应的向量\n print(result[1:])\n # 关闭数据库连接\n conn.closeMysql()\n","sub_path":"tecent_word_vec/ConnectDatabase.py","file_name":"ConnectDatabase.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482172503","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv(\"tweets_key2.csv\")\ndf.head()\n\ny2=[]\nx=len(df[df['Sentiment']==-1])\ny=len(df[df['Sentiment']==0])\nz=len(df[df['Sentiment']==1])\n# print(x,y,z)\n# print(len(df[df['Sentiment']==-1]))\ny2.append(x)\ny2.append(y)\ny2.append(z)\nx2=[1,2,3]\nfig, ax = plt.subplots()\n# ind = np.arange(1, 4)\n#\n# show the figure, but do not block\n# plt.show(block=False)\n\n\npm, pc, pn = plt.bar(x2, y2)\npm.set_color('r')\npc.set_color('b')\npn.set_color('g')\nax.set_xticks(x2)\nax.set_xticklabels(['Negative', 'Neutral', 'Positive'])\nmx=max(x,y,z)+5\nax.set_ylim([0, mx])\nax.set_ylabel('Count')\nax.set_title('Sentiment Analysis')\n\nplt.show(block=True)\n# plt.bar(x,y2)\n\n\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258200248","text":"from .client import BaseClient\nfrom .utils import main_method\nfrom .executor import TapExecutor\nfrom .users import UsersStream\n\nfrom .utils import (\n stream_is_selected, transform_write_and_count, safe_to_iso8601,\n format_last_updated_for_request, get_res_data\n)\n\nimport sys\nimport json\n\nimport singer\nimport base64\n\nfrom singer.catalog import Catalog, CatalogEntry, Schema\n\nLOGGER = singer.get_logger()\n\n\nSTREAMS = [\n UsersStream,\n]\n\n\nREQUIRED_CONFIG_KEYS = ['api_secret', 'api_key', 'user_id']\n\n\nclass GigyaTap(TapExecutor):\n url = 'https://accounts.eu1.gigya.com/'\n pagination_type = 'next'\n auth_type = 'basic_key'\n replication_key_format = 'timestamp'\n\n def build_params(self, stream, last_updated):\n query = \"\"\"\n select created,\n lastUpdatedTimestamp,\n UID,\n token,\n profile.email,\n profile.username,\n profile.firstName,\n profile.lastName,\n profile.gender,\n profile.birthDay,\n profile.birthMonth,\n profile.birthYear,\n profile.city,\n profile.zip,\n data.country,\n data.registerInstance,\n data.lang,\n data.lastVisitWebDate,\n data.registeredDate,\n data.registerVariant,\n 
data.registerSource,\n data.lastDownload.ip,\n data.lastDownload.browserName,\n subscriptions.softonicNewsletter.email.doubleOptin.status,\n subscriptions.softonicNewsletter.email.isSubscribed,\n subscriptions.softonicNewsletter.email.lastUpdatedSubscriptionState\n subscriptions.solutionsNewsletter.email.doubleOptin.status,\n subscriptions.solutionsNewsletter.email.isSubscribed,\n subscriptions.solutionsNewsletter.email.lastUpdatedSubscriptionState\n from emailAccounts\n where lastUpdatedTimestamp > {}\n order by lastUpdatedTimestamp\n limit 10000\"\"\".format(last_updated)\n LOGGER.info('\\nQuery running is:\\n {}'.format(query))\n return {\n 'query': query,\n 'secret': stream.config['api_secret'],\n 'apiKey': stream.config['api_key'],\n 'UID': stream.config['user_id'],\n 'format': 'json',\n 'openCursor': True,\n 'httpStatusCodes': True\n }\n\n def call_incremental_stream(self, stream):\n \"\"\"\n Method to call all incremental synced streams\n \"\"\"\n\n last_updated = stream.update_and_return_bookmark()\n\n \n request_config = {\n 'url': self.generate_api_url(stream),\n 'headers': self.build_headers(),\n 'params': self.build_params(stream, last_updated=last_updated),\n 'run': True\n }\n\n LOGGER.info(\"Extracting %s since %s\" % (stream, last_updated))\n\n total_contacts_pulled = 0\n\n \n\n while request_config['run']:\n\n res = self.client.make_request(request_config)\n \n records = res.json()['results']\n\n transform_write_and_count(stream, records)\n\n total_contacts_pulled += res.json()['objectsCount']\n total_count = res.json()['totalCount']\n\n last_updated = self.get_max_last_updated(last_updated, records)\n stream.update_bookmark(str(last_updated))\n \n request_config = self.update_for_next_call(\n res,\n request_config,\n last_updated=last_updated\n )\n\n LOGGER.info(\"Pulled %s objects out of %s\" % (total_contacts_pulled, total_count))\n \n LOGGER.info('MAX UPDATED: {}'.format(last_updated))\n return str(last_updated)\n \n def get_max_last_updated(self, last_updated, records):\n for r in records:\n last_updated = max(int(last_updated), r['lastUpdatedTimestamp'])\n return last_updated\n\n def update_for_next_call(self, res, request_config, last_updated=None):\n\n if 'nextCursorId' not in res.json():\n LOGGER.info('Ending now, last response json is:')\n LOGGER.info(res.json())\n request_config['run'] = False\n return request_config\n\n nextCursorId = res.json()['nextCursorId']\n\n if 'query' in request_config['params']:\n del request_config['params']['query']\n \n if 'openCursor' in request_config['params']:\n del request_config['params']['openCursor']\n\n request_config['params']['cursorId'] = nextCursorId\n\n return request_config\n\n\n\ndef main():\n main_method(\n REQUIRED_CONFIG_KEYS,\n GigyaTap,\n BaseClient,\n STREAMS\n )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tap_gigya/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364112005","text":"import os,time,signal\n\ndef handler(s,f):\n print(\"Soy el proceso PID %d recibi la señal %s de mi padre %d\" % (os.getpid(), s, os.getppid()))\n\ndef hijo1():\n print(\"Soy el proceso Hijo %d \" % os.getppid())\nsignal.signal(signal.SIGUSR1,handler)\npid=os.fork()\nif pid==0:\n signal.pause()\n os._exit(0)\npid1=os.fork()\nif pid1==0:\n signal.pause()\n os._exit(0)\npid2=os.fork()\nif pid2==0:\n signal.pause()\n 
os._exit(0)\nos.kill(pid,signal.SIGUSR1)\nos.kill(pid1,signal.SIGUSR1)\nos.kill(pid2,signal.SIGUSR1)\nsignal.signal(signal.SIGUSR1,handler)\n","sub_path":"GuiaPráctica-COMPUTACIONII/ejercicio9.py","file_name":"ejercicio9.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"332567855","text":"# n = 10\n# n+=1\n# print (\"zmienna n wynosi \", n)\n# # zadanie pk5\n# j=1\n# while j < 7:\n# \tprint(\"kolejna petla\")\n# \tj+=1\n# \tif j == 3:\n# \t\tbreak\n# # zadanie 6\n# n = 0\n# lst = [1]\n# while n < 9:\n# \tm = lst[n]/2\n# \tlst.append(m)\n# \tn+=1\n# print (lst)\n# # zadanie 19\n# for n in range (1,20):\n# \tif n%3 == 0 and n%5 != 0:\n# \t\tprint(\"bum\", end=\", \")\n# \tif n%3 != 0 and n%5 == 0:\n# \t\tprint (\"bęc\", end=\", \")\n# \tif n%3 ==0 and n%5 == 0:\n# \t\tprint (\"bingo\", end= \", \")\n# # zadanie 8\n# lt = [-3,-2,-1,0,1,2,3,4,5,6,7,8,9]\n# tot1 = 0\n# j=0\n# while j < len(lt):\n# \ttot1 += lt[j]\n# \tif lt[j]>0:\n# \t\tbreak\n# \tj+=1\n# print(tot1)\n# zadanie 9\nlst = [-9,-7,-5,-4,-3,-2,-1,3,4,5,6,9,10]\nsuma = 0\nj = len(lst)-1\nwhile (lst[j] > 0) and (j>=0):\n\tsuma += lst[j]\n\tj-=1\nprint(suma)\n# zadanie 10\nsuma = 0\nj = 0\nwhile True:\n\tsuma += lst[j]\n\tj += 1\n\tif lst[j] > 0:\n\t\tbreak\nprint (suma)\n# zadanie 18\nj =0\nwhile j<10:\n\tif j%3 ==0:\n\t\tprint(2**j, end=\", \")\n\tj += 1","sub_path":"inkrementacja.py","file_name":"inkrementacja.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1627548","text":"\"\"\"\r\nDefiniții\r\nUn student vrea, pentru ora de engleză de la FMI, să găsească cele mai expresive cuvinte. Pentru\r\naceasta a făcut o poză la astfel de cuvinte dintr-un dicționar, iar apoi a folosit un program ca să\r\nextragă un string cu textul din poze. Textul respecta regulile:\r\n• Este împărțit în paragrafe, fiecare paragraf terminându-se cu \"\\n\".\r\n• Fiecare paragraf corespunde unei intrări din dicționar, adică conține un cuvânt și definițiile\r\npentru acesta\r\n• Cuvântul unui paragraf este la începutul lui și este mereu urmat de \":\" și apoi de toate\r\ndefinițiile cuvântului\r\nAcum, studentul vrea sa decidă care sunt cele mai expresive cuvinte. 
Pentru asta, numără pentru\r\nfiecare cuvânt în câte expresii apare (Ex: run away, run over, run a company), în acesta mod:\r\n• Numără de câte ori apare cuvântul în paragraf\r\n• Numără de câte ori apare caracterul tilda (~) în paragraf\r\n• Și însumează aceste două numere\r\n- Se dă stringul complet, format din paragrafe și dat ca input pe un singur rând.\r\n- Se cere să se obțină o listă de tupluri formate dintr-un cuvânt aflat la început de paragraf și\r\nnumărul care reprezintă expresivitatea sa calculată în acel paragraf.\r\nDe exemplu, pentru următorul string:\r\n\"run: to go faster than a walk : to go steadily by springing steps : to take part into a contest - ~ a\r\nmarathon : to move at a fast gallop - he may occasionally run to and from work : flee, retreat,\r\nescape - drop the gun and run : to go without restraint : move freely about at will - let chickens ~\r\nloose : consort - we run with our group \\n\" +\r\n\"dog: canid wolves, foxes, and other dogs especially : a highly variable domestic mammal : a pet ~ :\r\nfellow, chap, a lazy person - you lucky dog \\n\" +\r\n\"break: break a/the record to do something better than the best known speed, time, number, etc.\r\npreviously achieved : to fail to keep a law, rule, or promise = ~ the law : These enzymes break\r\ndown food in the stomach (= cause food to separate into smaller pieces). I needed something to\r\nbreak the monotony of my typing job. The phone rang, as to break my concentration. To ~ (of a\r\nstorm) = to start suddenly: We arrived just as a storm was breaking. \\n\"\r\nSe obține:\r\n[(\"run\", 5), (\"dog\", 2), (\"break\", 6)]\r\n# de exemplu, pentru \"run\" numărăm de două ori pe ~ și de 3 ori pe run, deci expresivitatea este 5,\r\niar tuplul este (\"run\", 5)\r\n\"\"\"\r\n\r\n\r\n\r\n# vom considera, ptr frumusetea si simplitatea inputului ca paragrafele sunt scrise pe mai multe randuri\r\n# si ca o definitie se termina literalmente cu \"\\n\"\r\n# nu cu caracterul \"urmatoarea linie\" ci cu STRINGUL \"\\n\" (\"\\\" + \"n\")\r\n\r\n#note: a se ignora ghilimelele, nu sunt necesare in input\r\n\r\n\r\nfile = open(\"input.txt\",\"r\")\r\n\r\n\r\nl = [cuv for cuv in file.read().split()]\r\n\r\nd = {\r\n\r\n}\r\n\r\nlTup = []\r\n\r\n#print(l)\r\n\r\nlastChWasEL = True #last character was end line\r\n\r\nfor cuv in l :\r\n if lastChWasEL:\r\n if len(d) != 0:\r\n lTup.append((list(d.keys())[0],d[list(d.keys())[0]]+d[list(d.keys())[1]])) #adaugam tuplet\r\n d.clear() #curatam dictionarul\r\n d[cuv.replace(\":\",\"\")] = 0 #il initializam pentru urm definitie\r\n lastChWasEL = False\r\n elif cuv in d :\r\n d[cuv] += 1 #adaugam corespunzator cuvintele in dictionar\r\n elif cuv == \"~\":\r\n d[cuv] = 1\r\n elif cuv == \"\\\\n\":\r\n lastChWasEL = True\r\n\r\nlTup.append((list(d.keys())[0],d[list(d.keys())[0]]+d[list(d.keys())[1]]))\r\n\r\n\r\nprint(lTup)","sub_path":"Laborator PA/3/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626284774","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.10-x86_64/egg/boto/ec2/instancestatus.py\n# Compiled at: 2015-11-24 05:02:18\n# Size of source mod 2**32: 6854 bytes\n\n\nclass Details(dict):\n __doc__ = '\\n A dict object that contains name/value pairs which provide\\n more detailed information about the status of the 
system\\n or the instance.\\n '\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'name':\n self._name = value\n else:\n if name == 'status':\n self[self._name] = value\n else:\n setattr(self, name, value)\n\n\nclass Event(object):\n __doc__ = '\\n A status event for an instance.\\n\\n :ivar code: A string indicating the event type.\\n :ivar description: A string describing the reason for the event.\\n :ivar not_before: A datestring describing the earliest time for\\n the event.\\n :ivar not_after: A datestring describing the latest time for\\n the event.\\n '\n\n def __init__(self, code=None, description=None, not_before=None, not_after=None):\n self.code = code\n self.description = description\n self.not_before = not_before\n self.not_after = not_after\n\n def __repr__(self):\n return 'Event:%s' % self.code\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'code':\n self.code = value\n else:\n if name == 'description':\n self.description = value\n else:\n if name == 'notBefore':\n self.not_before = value\n else:\n if name == 'notAfter':\n self.not_after = value\n else:\n setattr(self, name, value)\n\n\nclass Status(object):\n __doc__ = '\\n A generic Status object used for system status and instance status.\\n\\n :ivar status: A string indicating overall status.\\n :ivar details: A dict containing name-value pairs which provide\\n more details about the current status.\\n '\n\n def __init__(self, status=None, details=None):\n self.status = status\n if not details:\n details = Details()\n self.details = details\n\n def __repr__(self):\n return 'Status:%s' % self.status\n\n def startElement(self, name, attrs, connection):\n if name == 'details':\n return self.details\n\n def endElement(self, name, value, connection):\n if name == 'status':\n self.status = value\n else:\n setattr(self, name, value)\n\n\nclass EventSet(list):\n\n def startElement(self, name, attrs, connection):\n if name == 'item':\n event = Event()\n self.append(event)\n return event\n else:\n return\n\n def endElement(self, name, value, connection):\n setattr(self, name, value)\n\n\nclass InstanceStatus(object):\n __doc__ = '\\n Represents an EC2 Instance status as reported by\\n DescribeInstanceStatus request.\\n\\n :ivar id: The instance identifier.\\n :ivar zone: The availability zone of the instance.\\n :ivar events: A list of events relevant to the instance.\\n :ivar state_code: An integer representing the current state\\n of the instance.\\n :ivar state_name: A string describing the current state\\n of the instance.\\n :ivar system_status: A Status object that reports impaired\\n functionality that stems from issues related to the systems\\n that support an instance, such as such as hardware failures\\n and network connectivity problems.\\n :ivar instance_status: A Status object that reports impaired\\n functionality that arises from problems internal to the instance.\\n '\n\n def __init__(self, id=None, zone=None, events=None, state_code=None, state_name=None):\n self.id = id\n self.zone = zone\n self.events = events\n self.state_code = state_code\n self.state_name = state_name\n self.system_status = Status()\n self.instance_status = Status()\n\n def __repr__(self):\n return 'InstanceStatus:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n if name == 'eventsSet':\n self.events = EventSet()\n return self.events\n else:\n if name == 'systemStatus':\n 
return self.system_status\n if name == 'instanceStatus':\n return self.instance_status\n return\n\n def endElement(self, name, value, connection):\n if name == 'instanceId':\n self.id = value\n else:\n if name == 'availabilityZone':\n self.zone = value\n else:\n if name == 'code':\n self.state_code = int(value)\n else:\n if name == 'name':\n self.state_name = value\n else:\n setattr(self, name, value)\n\n\nclass InstanceStatusSet(list):\n __doc__ = '\\n A list object that contains the results of a call to\\n DescribeInstanceStatus request. Each element of the\\n list will be an InstanceStatus object.\\n\\n :ivar next_token: If the response was truncated by\\n the EC2 service, the next_token attribute of the\\n object will contain the string that needs to be\\n passed in to the next request to retrieve the next\\n set of results.\\n '\n\n def __init__(self, connection=None):\n list.__init__(self)\n self.connection = connection\n self.next_token = None\n\n def startElement(self, name, attrs, connection):\n if name == 'item':\n status = InstanceStatus()\n self.append(status)\n return status\n else:\n return\n\n def endElement(self, name, value, connection):\n if name == 'nextToken':\n self.next_token = value\n setattr(self, name, value)","sub_path":"pycfiles/boto_patch-2.38.0-py3.4/instancestatus.cpython-34.py","file_name":"instancestatus.cpython-34.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"215942736","text":"\"\"\"\n802. Find Eventual Safe States\n\nWe start at some node in a directed graph, and every turn, we walk along a directed edge of the graph. If we reach a terminal node (that is, it has no outgoing directed edges), we stop.\n\nWe define a starting node to be safe if we must eventually walk to a terminal node. More specifically, there is a natural number k, so that we must have stopped at a terminal node in less than k steps for any choice of where to walk.\n\nReturn an array containing all the safe nodes of the graph. The answer should be sorted in ascending order.\n\nThe directed graph has n nodes with labels from 0 to n - 1, where n is the length of graph. 
The graph is given in the following form: graph[i] is a list of labels j such that (i, j) is a directed edge of the graph, going from node i to node j.\n\n \n\nExample 1:\n\nIllustration of graph\nInput: graph = [[1,2],[2,3],[5],[0],[5],[],[]]\nOutput: [2,4,5,6]\nExplanation: The given graph is shown above.\nExample 2:\n\nInput: graph = [[1,2,3,4],[1,2],[3,4],[0,4],[]]\nOutput: [4]\n \n\nConstraints:\n\nn == graph.length\n1 <= n <= 104\n0 <= graph[i].legnth <= n\ngraph[i] is sorted in a strictly increasing order.\nThe graph may contain self-loops.\nThe number of edges in the graph will be in the range [1, 4 * 10^4].\n\n\"\"\"\n\n\nclass EventualSafeNodes:\n\n def doit_dfs(self, graph: list) -> list:\n\n N = len(graph)\n res = [True if len(graph[i]) == 0 else False for i in range(N) ]\n nontermial = [False] * N\n\n def dfs(n, visited):\n if res[n]:\n return True\n\n if visited[n] or nontermial[n]:\n nontermial[n] = True\n return False\n\n visited[n] = True\n for c in graph[n]:\n if not dfs(c, visited):\n nontermial[c] = True\n break\n else:\n res[n] = True\n\n visited[n] = False\n return res[n]\n\n visited = [False] * N\n for i in range(N):\n dfs(i, visited)\n\n return [i for i in range(N) if res[i]]\n\n \"\"\"\n Approach #1: Reverse Edges [Accepted]\n Intuition\n\n The crux of the problem is whether you can reach a cycle from the node you start in. If you can, then there is a way to avoid stopping indefinitely; and if you can't, then after some finite number of steps you'll stop.\n\n Thinking about this property more, a node is eventually safe if all it's outgoing edges are to nodes that are eventually safe.\n\n This gives us the following idea: we start with nodes that have no outgoing edges - those are eventually safe. Now, we can update any nodes which only point to eventually safe nodes - those are also eventually safe. Then, we can update again, and so on.\n\n However, we'll need a good algorithm to make sure our updates are efficient.\n\n Algorithm\n\n We'll keep track of graph, a way to know for some node i, what the outgoing edges (i, j) are. We'll also keep track of rgraph, a way to know for some node j, what the incoming edges (i, j) are.\n\n Now for every node j which was declared eventually safe, we'll process them in a queue. We'll look at all parents i = rgraph[j] and remove the edge (i, j) from the graph (from graph). \n If this causes the graph to have no outgoing edges graph[i], then we'll declare it eventually safe and add it to our queue.\n\n Also, we'll keep track of everything we ever added to the queue, so we can read off the answer in sorted order later.\n\n\n Complexity Analysis\n\n Time Complexity: O(N + E), where NN is the number of nodes in the given graph, and EE is the total number of edges.\n\n Space Complexity: O(N) in additional space complexity.\n \"\"\"\n def doit_dfs_topsort(self, graph):\n import collections\n N = len(graph)\n safe = [False] * N\n\n graph = map(set, graph)\n rgraph = [set() for _ in range(N)]\n q = collections.deque()\n\n for i, js in enumerate(graph):\n if not js:\n q.append(i)\n for j in js:\n rgraph[j].add(i)\n\n while q:\n j = q.popleft()\n safe[j] = True\n for i in rgraph[j]:\n graph[i].remove(j)\n if len(graph[i]) == 0:\n q.append(i)\n\n return [i for i, v in enumerate(safe) if v]\n\n\n \"\"\"\n Approach #2: Depth-First Search [Accepted]\n Intuition\n\n As in Approach #1, the crux of the problem is whether you reach a cycle or not.\n\n Let us perform a \"brute force\": a cycle-finding DFS algorithm on each node individually. 
\n This is a classic \"white-gray-black\" DFS algorithm that would be part of any textbook on DFS. We mark a node gray on entry, and black on exit. If we see a gray node during our DFS, it must be part of a cycle. \n In a naive view, we'll clear the colors between each search.\n\n Algorithm\n\n We can improve this approach, by noticing that we don't need to clear the colors between each search.\n\n When we visit a node, the only possibilities are that we've marked the entire subtree black (which must be eventually safe), or it has a cycle and we have only marked the members of that cycle gray. \n So indeed, the invariant that gray nodes are always part of a cycle, and black nodes are always eventually safe is maintained.\n\n In order to exit our search quickly when we find a cycle (and not paint other nodes erroneously), we'll say the result of visiting a node is true if it is eventually safe, otherwise false. \n This allows information that we've reached a cycle to propagate up the call stack so that we can terminate our search early.\n\n\n Complexity Analysis\n\n Time Complexity: O(N + E), where NN is the number of nodes in the given graph, and EE is the total number of edges.\n\n Space Complexity: O(N) in additional space complexity.\n \"\"\"\n def doit_dfs_colors(self, graph):\n import collections\n WHITE, GRAY, BLACK = 0, 1, 2\n color = collections.defaultdict(int)\n\n # This is a classic \"white-gray-black\" DFS algorithm that would be part of any textbook on DFS. \n # We mark a node gray on entry, and black on exit. If we see a gray node during our DFS, it must be part of a cycle. \n # White never access, Black terminal, Gray, navigated, twice means not good.\n def dfs(node):\n if color[node] != WHITE:\n return color[node] == BLACK\n\n color[node] = GRAY\n for nei in graph[node]:\n if color[nei] == BLACK:\n continue\n if color[nei] == GRAY or not dfs(nei):\n return False\n color[node] = BLACK\n return True\n\n return filter(dfs, range(len(graph)))\n ","sub_path":"PythonLeetcode/leetcodeM/802_FindEventualSafeStates.py","file_name":"802_FindEventualSafeStates.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"22909464","text":"import numpy as np\n\nfrom pulseq.core.event_lib import EventLibrary\n\n\ndef read(self, path):\n \"\"\"\n Reads .seq file from path, and constructs a Sequence object from the file.\n\n Parameters\n ----------\n path : str\n Path of .seq file to be read.\n \"\"\"\n\n input_file = open(path, 'r')\n self.shape_library = EventLibrary()\n self.rf_library = EventLibrary()\n self.grad_library = EventLibrary()\n self.adc_library = EventLibrary()\n self.delay_library = EventLibrary()\n self.block_events = {}\n self.rf_raster_time = self.system.rf_raster_time\n self.grad_raster_time = self.system.grad_raster_time\n\n while True:\n section = skip_comments(input_file)\n if section == -1:\n break\n if section == '[BLOCKS]':\n self.block_events = read_blocks(input_file)\n elif section == '[RF]':\n self.rf_library = read_events(input_file, 1, None, None)\n elif section == '[GRAD]':\n self.grad_library = read_events(input_file, 1, 'g', self.grad_library)\n elif section == '[TRAP]':\n self.grad_library = read_events(input_file, [1, 1e-6, 1e-6, 1e-6], 't', self.grad_library)\n elif section == '[ADC]':\n self.adc_library = read_events(input_file, [1, 1e-9, 1e-6, 1, 1], None, None)\n elif section == '[DELAYS]':\n self.delay_library = read_events(input_file, 1e-6, None, None)\n elif 
section == '[SHAPES]':\n self.shape_library = read_shapes(input_file)\n\n\ndef read_blocks(input_file):\n \"\"\"\n Read Blocks from .seq file. Blocks are single lines under the '[BLOCKS]' header in the .seq file.\n\n Parameters\n ----------\n input_file : file\n .seq file to be read.\n\n Returns\n -------\n block_events : dict\n Key-value mapping of Block ID and Event ID.\n \"\"\"\n\n line = strip_line(input_file)\n for x in range(len(line)):\n line[x] = float(line[x])\n\n event_table = []\n while not (line == '\\n' or line[0] == '#'):\n event_row = []\n for c in line[1:]:\n event_row.append(float(c))\n event_table.append(event_row)\n\n line = strip_line(input_file)\n # Break here to avoid crash when the while loop condition is evaluated for line != '\\n'\n # Crash occurs because spaces have been eliminated\n if len(line) == 0:\n break\n\n block_events = {}\n for x in range(len(event_table)):\n block_events[x + 1] = np.array(event_table[x])\n\n return block_events\n\n\ndef read_events(input_file, scale, type, event_lib):\n scale = 1 if scale is None else scale\n event_library = event_lib if event_lib is not None else EventLibrary()\n\n line = strip_line(input_file)\n for x in range(len(line)):\n line[x] = float(line[x])\n\n while not (line == '\\n' or line[0] == '#'):\n event_id = line[0]\n data = np.multiply(line[1:], scale)\n event_library.insert(event_id, data, type)\n\n line = strip_line(input_file)\n if not line:\n break\n\n for x in range(len(line)):\n line[x] = float(line[x])\n\n return event_library\n\n\ndef read_shapes(input_file):\n shape_library = EventLibrary()\n\n strip_line(input_file)\n line = strip_line(input_file)\n\n while not (line == -1 or len(line) == 0 or line[0] != 'shape_id'):\n id = int(line[1])\n line = skip_comments(input_file)\n num_samples = int(line.split(' ')[1])\n data = []\n line = skip_comments(input_file)\n line = line.split(' ')\n while not (len(line) == 0 or line[0] == '#'):\n data.append(float(line[0]))\n line = strip_line(input_file)\n line = skip_comments(input_file)\n # line could be -1 since -1 is EOF marker, returned from skipComments(inputFile)\n line = line.split(' ') if line != -1 else line\n data.insert(0, num_samples)\n data = np.reshape(data, [1, len(data)])\n shape_library.insert(id, data, None)\n\n return shape_library\n\n\ndef skip_comments(input_file):\n \"\"\"\n Skip one '#' comment in .seq file.\n\n Parameters\n ----------\n input_file : file\n .seq file to be read.\n\n Returns\n -------\n line : str\n First line in input_file after skipping one '#' comment block.\n Note: File pointer is remembered, so successive calls work as expected.\n \"\"\"\n\n line = input_file.readline()\n if line == '':\n return -1\n while line == '\\n' or line[0] == '#':\n line = input_file.readline()\n if line == '':\n return -1\n line = line.strip()\n return line\n\n\ndef strip_line(input_file):\n \"\"\"\n Remove spaces, newline whitespace and return line.\n\n Parameters\n ----------\n input_file : file\n .seq file to be read.\n\n Returns\n -------\n line : str\n First line in input_file after removing spaces and newline whitespaces.\n Note: File pointer is remembered, so successive calls work as expected.\n \"\"\"\n line = input_file.readline()\n line = line.strip()\n line = line.split(' ')\n while '' in line:\n line.remove('')\n return line\n","sub_path":"pulseq/core/Sequence/read_seq.py","file_name":"read_seq.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"91970748","text":"import Weather_Realtime_Info_for_student\r\nimport Air_Pollution_Info\r\nimport Bus_stop_info\r\nimport Youtube_data_api\r\nimport Genie_music_crawling_re\r\nimport threading,time,ctypes,json\r\ng_Radiator = False\r\ng_Air_Conditioner = False\r\ng_Balcony_Windows = False\r\ng_Humidifier = False\r\ng_Dehumidifier = False\r\ng_AI_Mode = False\r\ng_Smul_Mode=False\r\n\r\ntemperature=0\r\nhumidity=0\r\njson_list_weather=[]\r\njson_list_air=[]\r\njson_weather = Weather_Realtime_Info_for_student\r\njson_air = Air_Pollution_Info\r\njson_Bus = Bus_stop_info\r\nplay_Youtube = Youtube_data_api\r\nsidoName = '동구'\r\n\r\ndef g_value_false():\r\n global g_Radiator,g_AI_Mode,g_Air_Conditioner,g_Balcony_Windows,g_Humidifier,g_Dehumidifier\r\n g_Radiator=True\r\n g_Air_Conditioner=True\r\n g_Humidifier=True\r\n g_Dehumidifier=True\r\n control_temperature('R')\r\n control_temperature('A')\r\n control_humidity('H')\r\n control_humidity('D')\r\n g_Balcony_Windows = False\r\n g_AI_Mode = False\r\ndef terminate_ai_mode(ai_scheduler):\r\n if not ai_scheduler.isAlive():\r\n return\r\n exc = ctypes.py_object(SystemExit)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(ai_scheduler.ident),exc)\r\n if res == 0:\r\n raise ValueError('nonexistent thread id')\r\n elif res>1:\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(ai_scheduler.ident,None)\r\n raise SystemError('PyhreadState_SetAsyncExc failed')\r\ndef print_main_menu():\r\n print('1. 기상정보 조회')\r\n print('2. 장비상태 확인')\r\n print('3. 장비제어')\r\n print('4. 스마트모드')\r\n print('5. TV 실행')\r\n print('6. 시뮬레이션 모드')\r\n print('7. 프로그램 종료')\r\ndef print_device_status(device_name, device_status):\r\n print('%s 상태: '%device_name,end='')\r\n if device_status == True:print('작동')\r\n else: print('정지')\r\ndef check_device_status():\r\n print_device_status('난방기',g_Radiator) # 기온 5도 이하,10도 이상\r\n print_device_status('에어컨',g_Air_Conditioner)# 기온 30도 이상, 18도 이하\r\n print_device_status('가습기',g_Humidifier) # 습도 40~60% 유지\r\n print_device_status('제습기',g_Dehumidifier)\r\n print_device_status('발코니(베란다) 창문',g_Balcony_Windows) # 비,미세먼지,난방기,에어컨\r\ndef print_device_menu():\r\n print('\\n상태 변경할 기기를 선택하세요.')\r\n print('1. 난방기')\r\n print('2. 에어컨')\r\n print('3. 가습기')\r\n print('4. 제습기')\r\n print('5. 
발코니(베란다) 창문')\r\ndef temperature_room():\r\n for data in json_list_weather:\r\n if data['category'] == 'T1H':\r\n return int(data['fcstValue'])\r\ndef temperature_change_up():\r\n global temperature,g_AI_Mode,g_Radiator\r\n while True:\r\n if g_Radiator == False:\r\n break\r\n else:\r\n time.sleep(2)\r\n # temperature = int(temperature)\r\n if g_Radiator==True:\r\n temperature+=1\r\n if g_AI_Mode==True:\r\n changed_status()\r\ndef temperature_change_down():\r\n global temperature,g_AI_Mode,g_Air_Conditioner\r\n while True:\r\n if g_Air_Conditioner == False:\r\n break\r\n else:\r\n time.sleep(1800)\r\n # temperature = int(temperature)\r\n if g_Air_Conditioner==True:\r\n temperature -= 1\r\n if g_AI_Mode==True:\r\n changed_status()\r\ndef control_temperature(RorA):\r\n global g_Radiator,g_Air_Conditioner\r\n if RorA=='R':\r\n g_Radiator = not g_Radiator\r\n g_Radiator_scheduler = threading.Thread(target=temperature_change_up)\r\n if g_Radiator==True:\r\n g_Radiator_scheduler.daemon=True\r\n g_Radiator_scheduler.start()\r\n else:\r\n while g_Radiator_scheduler.is_alive():\r\n try:\r\n terminate_ai_mode(g_Radiator_scheduler)\r\n except:\r\n pass\r\n elif RorA=='A':\r\n g_Air_Conditioner = not g_Air_Conditioner\r\n g_Air_Conditioner_scheduler = threading.Thread(target=temperature_change_down)\r\n if g_Air_Conditioner ==True:\r\n g_Air_Conditioner_scheduler .daemon=True\r\n g_Air_Conditioner_scheduler .start()\r\n else:\r\n while g_Air_Conditioner_scheduler.is_alive():\r\n try:\r\n terminate_ai_mode(g_Air_Conditioner_scheduler )\r\n except:\r\n pass\r\ndef humidity_room():\r\n for data in json_list_weather:\r\n if data['category'] == 'REH':\r\n return int(data['fcstValue'])\r\ndef humidity_change_up():\r\n global humidity, g_AI_Mode,g_Humidifier\r\n while True:\r\n if g_Humidifier == False:\r\n break\r\n else:\r\n time.sleep(1800)\r\n if g_Humidifier==True:\r\n humidity += 1\r\n if g_AI_Mode == True:\r\n changed_status()\r\ndef humidity_change_down():\r\n global humidity, g_AI_Mode,g_Dehumidifier\r\n while True:\r\n if g_Humidifier == False:\r\n break\r\n else:\r\n time.sleep(1800)\r\n if g_Dehumidifier==True:\r\n humidity -= 1\r\n if g_AI_Mode == True:\r\n changed_status()\r\ndef control_humidity(HorD):\r\n global g_Humidifier, g_Dehumidifier\r\n if HorD == 'H':\r\n g_Humidifier = not g_Humidifier\r\n g_Humidifier_scheduler = threading.Thread(target=humidity_change_up)\r\n if g_Humidifier == True:\r\n g_Humidifier_scheduler.daemon = True\r\n g_Humidifier_scheduler.start()\r\n else:\r\n while g_Humidifier_scheduler.is_alive():\r\n try:\r\n terminate_ai_mode(g_Humidifier_scheduler)\r\n except:\r\n pass\r\n elif HorD == 'D':\r\n g_Dehumidifier = not g_Dehumidifier\r\n g_Dehumidifier_scheduler = threading.Thread(target=humidity_change_down)\r\n if g_Dehumidifier == True:\r\n g_Dehumidifier_scheduler.daemon = True\r\n g_Dehumidifier_scheduler.start()\r\n else:\r\n while g_Dehumidifier_scheduler.is_alive():\r\n try:\r\n terminate_ai_mode(g_Dehumidifier_scheduler)\r\n except:\r\n pass\r\ndef control_device():\r\n global g_Radiator,g_Balcony_Windows,g_Humidifier,g_Air_Conditioner\r\n check_device_status()\r\n print_device_menu()\r\n menu_num = int(input('번호를 입력하세요: '))\r\n print()\r\n if menu_num ==1: control_temperature('R')\r\n elif menu_num ==2: control_temperature('A')\r\n elif menu_num ==3: control_humidity('H')\r\n elif menu_num ==4: control_humidity('D')\r\n elif menu_num ==5: g_Balcony_Windows = not g_Balcony_Windows\r\n check_device_status()\r\n if g_AI_Mode==True:\r\n changed_status()\r\ndef 
get_realtime_weather_info():\r\n global json_list_weather,json_list_air\r\n while True:\r\n if g_AI_Mode == False:\r\n continue\r\n else:\r\n time.sleep(3600)\r\n weather_time = json_weather.get_Realtime_Weather_Info()\r\n json_list_weather = json_weather.Make_Weather_Json(weather_time)\r\n json_weather.Make_Weather_CSV()\r\n json_list_air = json_air.Make_Air_json_csv()\r\n json_air.Make_Air_json_csv()\r\n changed_status()\r\ndef smart_mode():\r\n global g_AI_Mode\r\n print('1. 인공지능 모드 조회')\r\n print('2. 인공지능 모드 상태 변경')\r\n if g_Smul_Mode==False:\r\n print('3. 실시간 기상정보 Update')\r\n menu_num = int(input('메뉴를 선택하세요: '))\r\n if menu_num==1:\r\n print('현재 인공지능 모드:',end='')\r\n if g_AI_Mode == True: print('작동\\n')\r\n else: print('중지\\n')\r\n elif menu_num==2:\r\n ai_scheduler = threading.Thread(target=get_realtime_weather_info)\r\n g_AI_Mode = not g_AI_Mode\r\n print('현재 인공지능 모드:',end='')\r\n if g_AI_Mode == True:\r\n ai_scheduler.daemon = True\r\n ai_scheduler.start()\r\n print('인공지능 모드 작동\\n')\r\n changed_status()\r\n else:\r\n while ai_scheduler.is_alive():\r\n try:\r\n terminate_ai_mode(ai_scheduler)\r\n except:\r\n pass\r\n print('인공지능 모드 정지\\n')\r\n elif menu_num==3:\r\n get_weather_air_info()\r\ndef changed_status(): # 기준에 따라 기기 상태 바꾸는 함수\r\n last_time = json_list_weather[0]['fcstTime']\r\n global g_Radiator,g_Balcony_Windows,g_Air_Conditioner,temperature,humidity\r\n if temperature<=5 and g_Radiator==False:\r\n control_temperature('R')\r\n print('기온이 5℃ 이하 이므로 난방기를 작동합니다.')\r\n elif temperature >= 10 and g_Radiator == True: # 기온 10도 이상, 난방기 작동시 정지\r\n control_temperature('R')\r\n print('기온이 10℃도 이상이므로 난방기를 정지합니다.')\r\n if temperature>= 30 and g_Air_Conditioner == False: # 기온 30도 이상, 에어컨 정지시 작동\r\n control_temperature('A')\r\n print('기온이 30℃ 이상이므로 에어컨을 작동합니다.')\r\n elif temperature <= 18 and g_Air_Conditioner == True: # 기온 18도 이하, 에어컨 작동시 정지\r\n control_temperature('A')\r\n print('기온이 18℃ 이하이므로 에어컨을 정지합니다.')\r\n if humidity<30 and g_Humidifier==False:\r\n control_humidity('H')\r\n print('습도가 30% 미만이므로 가습기를 작동합니다.')\r\n elif humidity>50 and g_Humidifier==True:\r\n control_humidity('H')\r\n print('습도가 50% 초과이므로 가습기를 정지합니다.')\r\n if humidity>60 and g_Dehumidifier==False:\r\n control_humidity('D')\r\n print('습도가 60% 초과이므로 제습기를 작동합니다.')\r\n elif humidity<40 and g_Dehumidifier==True:\r\n control_humidity('D')\r\n print('습도가 40% 미만이므로 제습기를 정지합니다.')\r\n for result in json_list_weather:\r\n if g_Balcony_Windows==True:\r\n if result['fcstTime'] == last_time and result['category'] == 'RN1': # 창문\r\n if int(result['fcstValue'])>0: # 비 예보가 있으면 창문 닫음\r\n g_Balcony_Windows=False\r\n print('곧 비 예보가 있으므로 창문을 닫습니다.')\r\n elif g_Radiator==True: # 난방기가 켜진 경우 창문 닫음\r\n g_Balcony_Windows=False\r\n print('난방기가 작동 중 이므로 창문을 닫습니다.')\r\n elif g_Air_Conditioner==True: # 에어컨이 켜진 경우 창문 닫음\r\n g_Balcony_Windows=False\r\n print('에어컨이 작동 중 이므로 창문을 닫습니다.')\r\n elif 81<=int(json_list_air[0]['pm10Value']) <= 150 or 51<=int(json_list_air[0]['pm25Value']) <= 100:\r\n g_Balcony_Windows = False\r\n print('미세먼지 등급이 \"나쁨\" 이므로 창문을 닫습니다.')\r\n elif int(json_list_air[0]['pm10Value']) >= 151 or int(json_list_air[0]['pm25Value']) >= 101:\r\n g_Balcony_Windows = False\r\n print('미세먼지 등급이 \"매우나쁨\" 이므로 창문을 닫습니다.')\r\ndef TV_on():\r\n print('1. 버스 도착정보 조회\\n2. 유튜브 검색/재생\\n3. 
음악 차트 조회')\r\n menu = int(input('실행할 메뉴를 선택하세요: '))\r\n if menu==1:\r\n json_Bus.Print_arrive_time()\r\n elif menu==2:\r\n play_Youtube.play_video()\r\n elif menu==3:\r\n music_list = Genie_music_crawling_re.Print_rank()\r\n menu_music = int(input('\\n1.유튜브로 음악 재생\\n2.종료\\n메뉴를 선택하세요: '))\r\n if menu_music==1:\r\n choice_music = int(input('재생할 음악 순위를 선택하세요: '))\r\n rank,title,artist = music_list[choice_music].split(',')\r\n play_Youtube.play_music('%s %s'%(title,artist))\r\n return\r\n elif menu_music==2:\r\n return\r\ndef Simulation_mode():\r\n global json_list_weather,json_list_air,temperature,humidity,g_Smul_Mode\r\n g_Smul_Mode=True\r\n json_list_weather=[]\r\n json_list_air=[]\r\n with open('./시뮬레이션_초단기예보조회.json','r',encoding='UTF8') as weatherfile:\r\n json_object = json.load(weatherfile)\r\n json_string = json.dumps(json_object)\r\n json_list_weather = json.loads(json_string)\r\n with open('./시뮬레이션_미세먼지농도조회.json','r',encoding='UTF8') as airfile:\r\n json_object2 = json.load(airfile)\r\n json_string2 = json.dumps(json_object2)\r\n json_list_air = json.loads(json_string2)\r\n temperature = temperature_room()\r\n humidity = humidity_room()\r\ndef Simulation_save_CSV():\r\n global json_list_weather,json_list_air\r\n csv_Data_weather = ['baseDate,baseTime,category,fcstDate,fcstTime,fcstValue,nx,ny']\r\n csv_Data_air = ['도시명,dataTime,미세먼지농도,초미세먼지농도']\r\n for prn_data in json_list_weather:\r\n csv_Data_weather.append(str(prn_data.get('baseDate'))+','+str(prn_data.get('baseTime'))+','+\r\n prn_data.get('category')+','+str(prn_data.get('fcstDate'))+','+\r\n str(prn_data.get('fcstTime'))+','+str(prn_data.get('fcstValue'))+','+\r\n str(prn_data.get('nx'))+','+str(prn_data.get('ny')))\r\n f = open('./시뮬레이션_초단기예보조회.csv','w')\r\n f.write('\\n'.join(csv_Data_weather))\r\n f.close()\r\n\r\n for prn_data in json_list_air:\r\n csv_Data_air.append(prn_data['cityName'] + ',' + prn_data['dataTime']\r\n + ',' + prn_data['pm10Value'] + ',' + prn_data['pm25Value'])\r\n f = open('./시뮬레이션_미세먼지농도조회.csv','w')\r\n f.write('\\n'.join(csv_Data_air))\r\n f.close()\r\ndef get_weather_air_info(): #시뮬->기본으로 돌아갈때 다시 정보 받아오는 함수\r\n global json_list_weather,json_list_air,temperature,humidity\r\n json_weather.get_Realtime_Weather_Info()\r\n json_list_weather = json_weather.json_weather_result\r\n json_list_air = json_air.Make_Air_json_csv()\r\n temperature = temperature_room()\r\n humidity = humidity_room()\r\ndef print_weather_air_info():\r\n global json_list_air,json_list_weather,temperature,humidity\r\n last_time = json_list_weather[0]['fcstTime']\r\n for data in json_list_weather:\r\n if data['fcstTime'] == last_time:\r\n if data['category'] == 'T1H':\r\n print('- 외부 기온:%s(℃)'%data['fcstValue'])\r\n elif data['category'] == 'REH':\r\n print('- 외부 습도:%s(%%)'%data['fcstValue'])\r\n elif data['category'] =='RN1':\r\n print('- 강수량:%s(mm/h)'%data['fcstValue'])\r\n pm10 = int(json_list_air[0]['pm10Value'])\r\n pm25 = int(json_list_air[0]['pm25Value'])\r\n print('- 미세먼지 농도:%d(㎍/㎥)\\n- 초미세먼지 농도:%d(㎍/㎥)' % (pm10, pm25))\r\n if pm10 <= 30 or pm25 <= 15:\r\n print('- 미세먼지 예보 등급: 좋음')\r\n elif pm10 <= 80 or pm25 <= 50:\r\n print('- 미세먼지 예보 등급: 보통')\r\n elif pm10 <= 150 or pm25 <= 100:\r\n print('- 미세먼지 예보 등급: 나쁨')\r\n elif pm10 >= 151 or pm25 >= 101:\r\n print('- 미세먼지 예보 등급: 매우나쁨')\r\n print('- 내부 기온:%s(℃)'%temperature)\r\n print('- 내부 습도:%s(%%)\\n'%humidity)\r\nprint('<스마트 홈네트워크 시뮬레이션 프로그램 ver 1.0>')\r\nget_weather_air_info()\r\nwhile True:\r\n if not g_Smul_Mode:\r\n print_main_menu()\r\n try:\r\n menu_num = int(input('메뉴를 
선택하세요: '))\r\n print()\r\n except:\r\n pass\r\n if menu_num==1:\r\n print_weather_air_info()\r\n elif menu_num==2:\r\n check_device_status()\r\n elif menu_num==3:\r\n control_device()\r\n elif menu_num==4:\r\n smart_mode()\r\n elif menu_num==5:\r\n TV_on()\r\n elif menu_num==6:\r\n g_value_false()\r\n Simulation_mode()\r\n elif menu_num==7:\r\n break\r\n else:\r\n print('\\n<< 시뮬레이션 모드 작동 중 >>')\r\n menu_num = int(input('1. 장비제어\\n2. 스마트모드변경\\n3. 시뮬레이션 기상정보 조회\\n4. 시뮬레이션 종료\\n메뉴를 선택하세요: '))\r\n print()\r\n if menu_num==1:\r\n control_device()\r\n elif menu_num==2:\r\n smart_mode()\r\n elif menu_num==3:\r\n print_weather_air_info()\r\n elif menu_num==4:\r\n g_value_false()\r\n Simulation_save_CSV()\r\n get_weather_air_info()\r\n g_Smul_Mode = False","sub_path":"App/1.Smart_Home_Network_v1.py","file_name":"1.Smart_Home_Network_v1.py","file_ext":"py","file_size_in_byte":16969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121114564","text":"\"\"\"Convenient access to data stored in yaml files.\"\"\"\n\nimport os\n\nimport yaml\n\nfrom helpers import requests\n\n\nclass YamlData(object):\n \"\"\"Data stored in yaml, conveniently\"\"\"\n\n # A dict mapping filenames to the data we read from the file.\n _the_data = {}\n\n # Where we read repo-tools-data from.\n _data_dir = \"../repo-tools-data\"\n\n def __init__(self, data):\n self.data = data\n\n @classmethod\n def from_file(cls, f):\n \"\"\"Returns a YamlData object loaded from an open yaml file.\"\"\"\n return cls(yaml.safe_load(f))\n\n @classmethod\n def from_string(cls, s):\n \"\"\"Returns a YamlData object loaded from a yaml string.\"\"\"\n return cls(yaml.safe_load(s))\n\n @classmethod\n def the_data(cls, filename):\n \"\"\"\n Returns the data from a particular file name, either locally or remote.\n \"\"\"\n if filename not in cls._the_data:\n # Define REPO_TOOLS_LATEST_PEOPLE=1 in the environment to force code to\n # get the data from GitHub instead of the local copy.\n if int(os.environ.get('REPO_TOOLS_LATEST_PEOPLE', '0')):\n # Read from GitHub.\n resp = requests.get(\"https://raw.githubusercontent.com/edx/repo-tools-data/master/\" + filename)\n if not resp.ok:\n resp.raise_for_status()\n cls._the_data[filename] = cls.from_string(resp.text)\n else:\n # Read from a file.\n with open(os.path.join(cls._data_dir, filename)) as f:\n cls._the_data[filename] = cls.from_file(f)\n\n return cls._the_data[filename]\n","sub_path":"yamldata.py","file_name":"yamldata.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114648086","text":"\nimport os\nimport time\nimport string\nimport argparse\nimport re\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torch.nn.functional as F\nimport numpy as np\nfrom nltk.metrics.distance import edit_distance\n\nfrom utils import CTCLabelConverter, AttnLabelConverter, Averager,CTCLabelConverterForBaiduWarpctc\nfrom dataset import hierarchical_dataset, AlignCollate\nfrom model_guide import Model\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef benchmark_all_eval(model, criterion_ctc, criterion_attn, evaluation_loader, converter_ctc, converter_attn, opt, calculate_infer_time=False):\n \"\"\" evaluation with 10 benchmark evaluation datasets \"\"\"\n # The evaluation datasets, dataset order is same with Table 1 in our paper.\n # eval_data_list = ['IIIT5k_3000', 'SVT', 'IC03_860', 'IC03_867', 
'IC13_857',\n # 'IC13_1015', 'IC15_1811', 'IC15_2077', 'SVTP', 'CUTE80']\n\n # # To easily compute the total accuracy of our paper.\n eval_data_list = ['IIIT5k_3000', 'SVT', 'IC03_867', \n 'IC13_1015', 'IC15_2077', 'SVTP', 'CUTE80']\n\n if calculate_infer_time:\n evaluation_batch_size = 1 # batch_size should be 1 to calculate the GPU inference time per image.\n else:\n evaluation_batch_size = opt.batch_size\n\n list_accuracy = []\n list_accuracy_attn = []\n total_forward_time = 0\n total_evaluation_data_number = 0\n total_correct_number_attn = 0\n total_correct_number = 0\n log = open(f'./result/{opt.exp_name}/log_all_evaluation.txt', 'a')\n dashed_line = '-' * 80\n print(dashed_line)\n for eval_data in eval_data_list:\n eval_data_path = os.path.join(opt.eval_data, eval_data)\n AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n eval_data, eval_data_log = hierarchical_dataset(root=eval_data_path, opt=opt)\n evaluation_loader = torch.utils.data.DataLoader(\n eval_data, batch_size=evaluation_batch_size,\n shuffle=False,\n num_workers=int(opt.workers),\n collate_fn=AlignCollate_evaluation, pin_memory=True)\n\n _, accuracy_by_best_model_gtc, norm_ED_by_best_model_gtc, _, _, _, infer_time_gtc, length_of_data, _, acc_attn, norm_ED_by_best_model_attn, _, _ = validation_ctc_and_attn(\n model, criterion_ctc, criterion_attn, evaluation_loader, converter_ctc, converter_attn, opt)\n\n # _, accuracy_by_best_model, norm_ED_by_best_model, _, _, _, infer_time, length_of_data = validation(\n # model, criterion, evaluation_loader, converter, opt)\n list_accuracy.append(f'{accuracy_by_best_model_gtc:0.3f}')\n list_accuracy_attn.append(f'{acc_attn:0.3f}')\n total_forward_time += infer_time_gtc\n total_evaluation_data_number += len(eval_data)\n total_correct_number += accuracy_by_best_model_gtc * length_of_data\n total_correct_number_attn += acc_attn * length_of_data\n log.write(eval_data_log)\n print(f'Acc {accuracy_by_best_model_gtc:0.3f}\t normalized_ED {norm_ED_by_best_model_gtc:0.3f}')\n print(f'Acc {acc_attn:0.3f}\t normalized_ED {norm_ED_by_best_model_attn:0.3f}')\n log.write(f'Acc {accuracy_by_best_model_gtc:0.3f}\t normalized_ED {norm_ED_by_best_model_gtc:0.3f}\t')\n log.write(f'Acc {acc_attn:0.3f}\t normalized_ED {norm_ED_by_best_model_attn:0.3f}\t')\n print(dashed_line)\n\n averaged_forward_time = total_forward_time / total_evaluation_data_number * 1000\n total_accuracy = total_correct_number / total_evaluation_data_number\n total_accuracy_attn = total_correct_number_attn / total_evaluation_data_number\n # params_num = sum([np.prod(p.size()) for p in model.parameters()])\n\n evaluation_log = 'accuracy: '\n for name, accuracy in zip(eval_data_list, list_accuracy):\n evaluation_log += f'{name}: {accuracy}\t'\n evaluation_log += f'total_accuracy: {total_accuracy:0.3f}\t'\n evaluation_log_attn = 'accuracy attention: '\n for name, accuracy in zip(eval_data_list, list_accuracy_attn):\n evaluation_log_attn += f'{name}: {accuracy}\t'\n evaluation_log_attn += f'total_accuracy: {total_accuracy_attn:0.3f}\t'\n # evaluation_log += f'averaged_infer_time: {averaged_forward_time:0.3f}\t# parameters: {params_num/1e6:0.3f}'\n print(evaluation_log)\n print(evaluation_log_attn)\n print(averaged_forward_time)\n log.write(evaluation_log)\n log.write(evaluation_log_attn)\n log.close()\n\n return None\n\ndef get_res(labels, preds_str, preds_max_prob, opt, length_of_data, isattn = False):\n n_correct = 0\n confidence_score_list = []\n norm_ED = 0\n for gt, pred, pred_max_prob 
in zip(labels, preds_str, preds_max_prob):\n if isattn:\n # gt = gt[:gt.find('[s]')]\n pred_EOS = pred.find('[s]')\n pred = pred[:pred_EOS] # prune after \"end of sentence\" token ([s])\n pred_max_prob = pred_max_prob[:pred_EOS]\n\n # To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.\n if opt.sensitive and opt.data_filtering_off:\n pred = pred.lower()\n gt = gt.lower()\n alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'\n out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'\n pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)\n gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)\n\n if pred == gt:\n n_correct += 1\n # else :\n # print(isattn)\n # print(pred)\n # print(gt)\n\n\n # ICDAR2019 Normalized Edit Distance\n if len(gt) == 0 or len(pred) == 0:\n norm_ED += 0\n elif len(gt) > len(pred):\n norm_ED += 1 - edit_distance(pred, gt) / len(gt)\n else:\n norm_ED += 1 - edit_distance(pred, gt) / len(pred)\n\n # calculate confidence score (= multiply of pred_max_prob)\n try:\n confidence_score = pred_max_prob.cumprod(dim=0)[-1]\n except:\n confidence_score = 0 # for empty pred case, when prune after \"end of sentence\" token ([s])\n confidence_score_list.append(confidence_score)\n # print(n_correct)\n return n_correct, confidence_score_list, norm_ED\n # print(pred, gt, pred==gt, confidence_score)\n\n\ndef validation_ctc_and_attn(model, criterion_ctc, criterion_attn, evaluation_loader, converter_ctc, converter_attn, opt):\n \"\"\" validation or evaluation \"\"\"\n n_correct_all = 0\n n_correct_all_attn = 0\n norm_ED = 0\n length_of_data = 0\n infer_time = 0\n valid_loss_avg_ctc = Averager()\n valid_loss_avg_attn = Averager()\n\n for i, (image_tensors, labels) in enumerate(evaluation_loader):\n batch_size = image_tensors.size(0)\n length_of_data = length_of_data + batch_size\n image = image_tensors.to(device)\n # For max length prediction\n length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)\n text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)\n\n text_for_loss_ctc, length_for_loss_ctc = converter_ctc.encode(labels, batch_max_length=opt.batch_max_length)\n text_for_loss_attn, length_for_loss_attn = converter_attn.encode(labels, batch_max_length=opt.batch_max_length)\n\n start_time = time.time()\n # if 'CTC' in opt.Prediction:\n preds = model.module.inference(image, text_for_pred)\n forward_time = time.time() - start_time\n preds_ctc, preds_attn = model(image, text_for_loss_attn, is_train = False)\n preds_attn = preds_attn[:, :text_for_loss_attn.shape[1] - 1, :]\n target = text_for_loss_attn[:, 1:]\n cost_attn = criterion_attn(preds_attn.contiguous().view(-1, preds_attn.shape[-1]), target.contiguous().view(-1))\n _, preds_index_attn = preds_attn.max(2)\n preds_str_attn = converter_attn.decode(preds_index_attn, length_for_pred)\n labels_attn = converter_attn.decode(text_for_loss_attn[:, 1:], length_for_loss_attn)\n # Calculate evaluation loss for CTC deocder.\n preds_size = torch.IntTensor([preds_ctc.size(1)] * batch_size)\n # permute 'preds' to use CTCloss format\n if opt.baiduCTC:\n cost_ctc = criterion_ctc(preds_ctc.permute(1, 0, 2), text_for_loss_ctc, preds_size, length_for_loss_ctc) / batch_size\n else:\n cost_ctc = criterion_ctc(preds_ctc.log_softmax(2).permute(1, 0, 2), text_for_loss_ctc, preds_size, length_for_loss_ctc)\n\n # Select max probabilty (greedy decoding) then decode index to character\n if opt.baiduCTC:\n _, preds_index = 
preds_ctc.max(2)\n preds_index = preds_index.view(-1)\n else:\n _, preds_index = preds_ctc.max(2)\n preds_str = converter_ctc.decode(preds_index.data, preds_size.data)\n \n # else:\n # preds = model(image, text_for_pred, is_train=False)\n # forward_time = time.time() - start_time\n\n # preds = preds[:, :text_for_loss.shape[1] - 1, :]\n # target = text_for_loss[:, 1:] # without [GO] Symbol\n # cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))\n\n # # select max probabilty (greedy decoding) then decode index to character\n # _, preds_index = preds.max(2)\n # preds_str = converter.decode(preds_index, length_for_pred)\n # labels = converter.decode(text_for_loss[:, 1:], length_for_loss)\n\n infer_time += forward_time\n valid_loss_avg_ctc.add(cost_ctc)\n valid_loss_avg_attn.add(cost_attn)\n # calculate accuracy & confidence score\n preds_prob = F.softmax(preds_ctc, dim=2)\n preds_max_prob, _ = preds_prob.max(dim=2)\n preds_prob_attn = F.softmax(preds_attn, dim=2)\n preds_max_prob_attn, _ = preds_prob_attn.max(dim=2)\n # confidence_score_list = []\n\n n_correct, confidence_score_list, norm_ED = get_res(labels, preds_str, preds_max_prob, opt, length_of_data)\n n_correct_attn, confidence_score_list_attn, norm_ED_attn = get_res(labels, preds_str_attn, preds_max_prob_attn, opt, length_of_data, isattn = True)\n n_correct_all += n_correct\n n_correct_all_attn += n_correct_attn\n accuracy = n_correct_all / float(length_of_data) * 100\n norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance\n accuracy_attn = n_correct_all_attn / float(length_of_data) * 100\n norm_ED_attn = norm_ED_attn / float(length_of_data) \n print(n_correct)\n print(n_correct_attn)\n print(length_of_data)\n # print(infer_time / float(length_of_data) )\n return valid_loss_avg_ctc.val(), accuracy, norm_ED, preds_str, confidence_score_list, labels, infer_time, length_of_data, valid_loss_avg_attn.val(), accuracy_attn, norm_ED_attn, preds_str_attn, confidence_score_list_attn\n\n\n\ndef validation(model, criterion, evaluation_loader, converter, opt):\n \"\"\" validation or evaluation \"\"\"\n n_correct = 0\n norm_ED = 0\n length_of_data = 0\n infer_time = 0\n valid_loss_avg = Averager()\n\n for i, (image_tensors, labels) in enumerate(evaluation_loader):\n batch_size = image_tensors.size(0)\n length_of_data = length_of_data + batch_size\n image = image_tensors.to(device)\n # For max length prediction\n length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)\n text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)\n\n text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)\n\n start_time = time.time()\n # if 'CTC' in opt.Prediction:\n preds = model.module.inference(image, text_for_pred)\n forward_time = time.time() - start_time\n\n # Calculate evaluation loss for CTC deocder.\n preds_size = torch.IntTensor([preds.size(1)] * batch_size)\n # permute 'preds' to use CTCloss format\n if opt.baiduCTC:\n if opt.label_smooth :\n cost = criterion(preds.permute(1, 0, 2), text_for_loss, preds_size, length_for_loss,batch_size) \n else :\n cost = criterion(preds.permute(1, 0, 2), text_for_loss, preds_size, length_for_loss) / batch_size\n else:\n cost = criterion(preds.log_softmax(2).permute(1, 0, 2), text_for_loss, preds_size, length_for_loss)\n\n # Select max probabilty (greedy decoding) then decode index to character\n if opt.baiduCTC:\n _, preds_index = preds.max(2)\n preds_index = 
preds_index.view(-1)\n else:\n _, preds_index = preds.max(2)\n preds_str = converter.decode(preds_index.data, preds_size.data)\n \n # else:\n # preds = model(image, text_for_pred, is_train=False)\n # forward_time = time.time() - start_time\n\n # preds = preds[:, :text_for_loss.shape[1] - 1, :]\n # target = text_for_loss[:, 1:] # without [GO] Symbol\n # cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))\n\n # # select max probabilty (greedy decoding) then decode index to character\n # _, preds_index = preds.max(2)\n # preds_str = converter.decode(preds_index, length_for_pred)\n # labels = converter.decode(text_for_loss[:, 1:], length_for_loss)\n\n infer_time += forward_time\n valid_loss_avg.add(cost)\n\n # calculate accuracy & confidence score\n preds_prob = F.softmax(preds, dim=2)\n preds_max_prob, _ = preds_prob.max(dim=2)\n confidence_score_list = []\n for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):\n # if 'Attn' in opt.Prediction:\n # gt = gt[:gt.find('[s]')]\n # pred_EOS = pred.find('[s]')\n # pred = pred[:pred_EOS] # prune after \"end of sentence\" token ([s])\n # pred_max_prob = pred_max_prob[:pred_EOS]\n\n # To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.\n if opt.sensitive and opt.data_filtering_off:\n pred = pred.lower()\n gt = gt.lower()\n alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'\n out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'\n pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)\n gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)\n\n if pred == gt:\n n_correct += 1\n\n\n # ICDAR2019 Normalized Edit Distance\n if len(gt) == 0 or len(pred) == 0:\n norm_ED += 0\n elif len(gt) > len(pred):\n norm_ED += 1 - edit_distance(pred, gt) / len(gt)\n else:\n norm_ED += 1 - edit_distance(pred, gt) / len(pred)\n\n # calculate confidence score (= multiply of pred_max_prob)\n try:\n confidence_score = pred_max_prob.cumprod(dim=0)[-1]\n except:\n confidence_score = 0 # for empty pred case, when prune after \"end of sentence\" token ([s])\n confidence_score_list.append(confidence_score)\n # print(pred, gt, pred==gt, confidence_score)\n\n accuracy = n_correct / float(length_of_data) * 100\n norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance\n\n return valid_loss_avg.val(), accuracy, norm_ED, preds_str, confidence_score_list, labels, infer_time, length_of_data\n\n\ndef test(opt):\n \"\"\" model configuration \"\"\"\n # if 'CTC' in opt.Prediction:\n if opt.baiduCTC:\n converter_ctc = CTCLabelConverterForBaiduWarpctc(opt.character)\n else :\n converter_ctc = CTCLabelConverter(opt.character)\n# else:\n converter_attn = AttnLabelConverter(opt.character)\n opt.num_class_ctc = len(converter_ctc.character)\n opt.num_class_attn = len(converter_attn.character)\n\n if opt.rgb:\n opt.input_channel = 3\n model = Model(opt)\n # print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,\n # opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,\n # opt.SequenceModeling, opt.Prediction)\n model = torch.nn.DataParallel(model).to(device)\n\n # load model\n print('loading pretrained model from %s' % opt.saved_model)\n model.load_state_dict(torch.load(opt.saved_model, map_location=device), strict = False)\n opt.exp_name = '_'.join(opt.saved_model.split('/')[1:])\n # print(model)\n\n \"\"\" keep evaluation model and result logs 
\"\"\"\n os.makedirs(f'./result/{opt.exp_name}', exist_ok=True)\n os.system(f'cp {opt.saved_model} ./result/{opt.exp_name}/')\n\n \"\"\" setup loss \"\"\"\n # if 'CTC' in opt.Prediction:\n criterion_ctc = torch.nn.CTCLoss(zero_infinity=True).to(device)\n# else:\n criterion_attn = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0\n\n \"\"\" evaluation \"\"\"\n model.eval()\n with torch.no_grad():\n if opt.benchmark_all_eval: # evaluation with 10 benchmark evaluation datasets\n log = open(f'./result/{opt.exp_name}/log_evaluation.txt', 'a')\n AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n eval_data, eval_data_log = hierarchical_dataset(root=opt.eval_data, opt=opt)\n evaluation_loader = torch.utils.data.DataLoader(\n eval_data, batch_size=opt.batch_size,\n shuffle=False,\n num_workers=int(opt.workers),\n collate_fn=AlignCollate_evaluation, pin_memory=True)\n benchmark_all_eval(model, criterion_ctc, criterion_attn, evaluation_loader, converter_ctc, converter_attn, opt)\n else:\n log = open(f'./result/{opt.exp_name}/log_evaluation.txt', 'a')\n AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n eval_data, eval_data_log = hierarchical_dataset(root=opt.eval_data, opt=opt)\n evaluation_loader = torch.utils.data.DataLoader(\n eval_data, batch_size=opt.batch_size,\n shuffle=False,\n num_workers=int(opt.workers),\n collate_fn=AlignCollate_evaluation, pin_memory=True)\n _, accuracy_by_best_model, _, _, _, _, _, _, _, acc_attn, _, _, _ = validation_ctc_and_attn(\n model, criterion_ctc, criterion_attn, evaluation_loader, converter_ctc, converter_attn, opt)\n log.write(eval_data_log)\n print(f'{accuracy_by_best_model:0.3f}')\n print(f'{acc_attn:0.3f}')\n log.write(f'{accuracy_by_best_model:0.3f}\t')\n log.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--eval_data', required=True, help='path to evaluation dataset')\n parser.add_argument('--benchmark_all_eval', action='store_true', help='evaluate 10 benchmark evaluation datasets')\n parser.add_argument('--workers', type=int, help='number of data loading workers', default=12)\n parser.add_argument('--batch_size', type=int, default=192, help='input batch size')\n parser.add_argument('--saved_model', required=True, help=\"path to saved_model to evaluation\")\n \"\"\" Data processing \"\"\"\n parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\n parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')\n parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')\n parser.add_argument('--rgb', action='store_true', help='use rgb input')\n parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\n parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\n parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')\n parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')\n \"\"\" Model Architecture \"\"\"\n parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. 
None|TPS')\n parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet')\n parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')\n parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. CTC|Attn')\n parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\n parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')\n parser.add_argument('--output_channel', type=int, default=512,\n help='the number of output channel of Feature extractor')\n parser.add_argument('--output_channel_GCN', type=int, default=512,\n help='the number of output channel of GCN')\n parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n parser.add_argument('--guide_training', action='store_true', help='Whether to use guide_training (default not)')\n opt = parser.parse_args()\n\n \"\"\" vocab / character number configuration \"\"\"\n if opt.sensitive:\n opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n opt.num_gpu = torch.cuda.device_count()\n\n test(opt)\n","sub_path":"test_guide.py","file_name":"test_guide.py","file_ext":"py","file_size_in_byte":21937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"71742235","text":"\n\n# -*- coding: utf-8 -*-\n\nimport os, sys\nimport gevent\nfrom gevent import subprocess\n\nPOTREECONVERTOR = r'H:\\Program Files\\PotreeConverter_1.4RC2_windows_64bit\\PotreeConverter.exe'\nLASDIR = r'G:\\TDDOWNLOAD\\laz'\nTILEDIR = r'F:\\work\\python\\pi_server\\static\\resources\\pointclouds'\n\n\ndef run(name):\n cmd = [POTREECONVERTOR,\n '--overwrite', '--outdir',\n '{0}{1}{2}'.format(TILEDIR, os.path.sep, name),\n '{0}{1}{2}.las'.format(LASDIR, os.path.sep, name),\n ]\n subprocess.check_output(cmd)\n\ndef get_count(typename):\n return len( list(filter(lambda x:x.startswith(typename), os.listdir(LASDIR))))\n\ndef test():\n l = []\n towers = list(map(lambda x: 'shibo_tower{0}'.format(x), range(get_count('shibo_tower'))))\n trees = list(map(lambda x: 'shibo_tree{0}'.format(x), range(get_count('shibo_tree'))))\n builds = list(map(lambda x: 'shibo_build{0}'.format(x), range(get_count('shibo_build'))))\n l.extend(towers)\n l.extend(trees)\n l.extend(builds)\n print(l)\n for i in l:\n run(i)\n\n\ndef main():\n gevent.joinall([gevent.spawn(test)])\n\n\nif __name__ == '__main__':\n main()\n\n\n\n ","sub_path":"test/test_pointcloudtile.py","file_name":"test_pointcloudtile.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"163009987","text":"# -*- coding:utf-8 -*-\n\"\"\"\ncreated by wzp on 14-6-27下午2:05.\n\"\"\"\n\nfrom gfirefly.server.globalobject import remoteserviceHandle\nfrom app.proto_file import hero_request_pb2\nfrom app.proto_file import hero_response_pb2\nfrom app.proto_file.common_pb2 import CommonResponse\nfrom gfirefly.server.logobj import logger\nfrom shared.db_opear.configs_data import game_configs\nfrom app.game.core.item_group_helper \\\n import is_afford, consume, gain, get_return\nfrom shared.utils import log_action\nfrom app.game.core.pack.item import Item\n\n\n@remoteserviceHandle('gate')\ndef get_heros_101(pro_data, player):\n \"\"\"取得武将列表 \"\"\"\n 
response = hero_response_pb2.GetHerosResponse()\n for hero in player.hero_component.get_heros():\n hero_pb = response.heros.add()\n hero.update_pb(hero_pb)\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef hero_upgrade_with_item_103(data, player):\n \"\"\"武将升级,使用经验药水\"\"\"\n args = hero_request_pb2.HeroUpgradeWithItemRequest()\n args.ParseFromString(data)\n response = hero_response_pb2.HeroUpgradeResponse()\n hero_no = args.hero_no\n exp_item_no = args.exp_item_no\n exp_item_num = args.exp_item_num\n exp_item = player.item_package.get_item(exp_item_no)\n # 服务器验证\n if exp_item:\n if exp_item.num < exp_item_num:\n response.res.result = False\n response.res.result_no = 106\n response.res.message = u\"经验药水道具不足!\"\n return response.SerializeToString()\n else:\n logger.error('item package can not get item:%d' % exp_item_no)\n exp = game_configs.item_config.get(exp_item_no).get('funcArg1')\n hero = player.hero_component.get_hero(hero_no)\n hero.upgrade(exp * exp_item_num)\n player.item_package.consume_item(exp_item_no, exp_item_num)\n # 返回\n response.res.result = True\n response.level = hero.level\n response.exp = hero.exp\n return response.SerializeToString()\n\n\n@remoteserviceHandle('gate')\ndef hero_break_104(data, player):\n \"\"\"武将突破\"\"\"\n args = hero_request_pb2.HeroBreakRequest()\n args.ParseFromString(data)\n hero_no = args.hero_no\n hero = player.hero_component.get_hero(hero_no)\n response = hero_response_pb2.HeroBreakResponse()\n\n # 验证武将是否突破到上限\n if hero.break_level == game_configs.hero_config.get(hero_no).breakLimit:\n response.res.result = False\n response.res.result_no = 201\n return response.SerializeToString()\n\n _hero_breakup = game_configs.hero_breakup_config.get(hero.hero_no)\n item_group = _hero_breakup.get_consume(hero.break_level)\n # 判断是否足够\n result = is_afford(player, item_group) # 校验\n if not result.get('result'):\n response.res.result = False\n response.res.result_no = result.get('result_no')\n return response.SerializeToString()\n\n # 返回消耗\n return_data = consume(player, item_group)\n get_return(player, return_data, response.consume)\n\n hero.break_level += 1\n hero.save_data()\n # 3、返回\n response.res.result = True\n response.break_level = hero.break_level\n return response.SerializeToString()\n\n\n@remoteserviceHandle('gate')\ndef hero_sacrifice_105(data, player):\n \"\"\"武将献祭\"\"\"\n args = hero_request_pb2.HeroSacrificeRequest()\n args.ParseFromString(data)\n heros = player.hero_component.get_heros_by_nos(args.hero_nos)\n if len(heros) == 0:\n logger.error(\"hero %s is not exists.\" % str(args.hero_nos))\n response = hero_sacrifice_oper(heros, player)\n # remove hero\n player.hero_component.delete_heros_by_nos(args.hero_nos)\n return response.SerializeToString()\n\n\n@remoteserviceHandle('gate')\ndef hero_compose_106(data, player):\n \"\"\"武将合成\"\"\"\n args = hero_request_pb2.HeroComposeRequest()\n args.ParseFromString(data)\n hero_chip_no = args.hero_chip_no\n response = hero_response_pb2.HeroComposeResponse()\n hero_no = game_configs.chip_config.get(\"chips\").get(hero_chip_no).combineResult\n need_num = game_configs.chip_config.get(\"chips\").get(hero_chip_no).needNum\n if not hero_no or not need_num:\n logger.error(\"chip_config数据不全!\")\n hero_chip = player.hero_chip_component.get_chip(hero_chip_no)\n # 服务器校验\n if hero_chip.num < need_num:\n response.res.result = False\n response.res.message = u\"碎片不足,合成失败!\"\n return response.SerializeToString()\n if player.hero_component.contain_hero(hero_no):\n response.res.result = 
False\n response.res.result_no = 202\n response.res.message = u\"武将已存在,合成失败!\"\n return response.SerializeToString()\n hero = player.hero_component.add_hero(hero_no)\n hero_chip.consume_chip(need_num) # 消耗碎片\n\n # tlog\n log_action.hero_flow(player, hero.hero_no, 1, 1)\n log_action.chip_flow(player, hero_chip.chip_no, 1, 0,\n need_num, hero_chip.num, 1)\n # 3、返回\n response.res.result = True\n hero.update_pb(response.hero)\n return response.SerializeToString()\n\n\n@remoteserviceHandle('gate')\ndef hero_sell_107(data, player):\n \"\"\"武将出售\"\"\"\n args = hero_request_pb2.HeroSellRequest()\n args.ParseFromString(data)\n hero_nos = args.hero_nos\n\n response = hero_response_pb2.HeroSellResponse()\n for hero_no in hero_nos:\n sell_gain = game_configs.hero_config.get(hero_no).sellGain\n return_data = gain(player, sell_gain)\n get_return(player, return_data, response.gain)\n\n response.res.result = True\n return response.SerializeToString()\n\n\n@remoteserviceHandle('gate')\ndef hero_refine_118(data, player):\n request = hero_request_pb2.HeroRefineRequest()\n request.ParseFromString(data)\n response = CommonResponse()\n response.result = False\n\n hero = player.hero_component.get_hero(request.hero_no)\n _refine_item = game_configs.seal_config.get(request.refine)\n if not hero:\n logger.error('cant find hero:%s', request.hero_no)\n return response.SerializePartialToString()\n if not _refine_item:\n logger.error('cant find refine item:%s', request.refine)\n return response.SerializePartialToString()\n\n if not player.brew.consume(_refine_item.expend):\n logger.error('cant afford refine:%s:cur%s',\n _refine_item.expend,\n player.brew.nectar)\n return response.SerializePartialToString()\n\n response.result = True\n hero.refine = request.refine\n player.brew.save_data()\n hero.save_data()\n return response.SerializePartialToString()\n\n\ndef hero_sacrifice_oper(heros, player):\n \"\"\"\n 武将献祭,返回总武魂、经验药水\n :param heros: 被献祭的武将\n :return total_hero_soul:总武魂数量, exp_item_no:经验药水编号, exp_item_num:经验药水数量\n \"\"\"\n total_exp = 0\n exp_item_no = 0\n exp_item_num = 0\n\n response = hero_response_pb2.HeroSacrificeResponse()\n gain_response = response.gain\n for hero in heros:\n sacrifice_gain = game_configs.hero_config.get(hero.hero_no).sacrificeGain\n return_data = gain(player, sacrifice_gain)\n get_return(player, return_data, gain_response)\n # 经验\n exp = hero.get_all_exp()\n total_exp += exp\n\n # baseconfig {1000000: 'item_id'}\n exp_items = game_configs.base_config.get(\"sacrificeGainExp\")\n\n keys = []\n try:\n keys = sorted([int(item) for item in list(exp_items)], reverse=True)\n except Exception:\n logger.error(\"base_config sacrificeGainExp key must be int type:%s.\",\n str(exp_items))\n return\n\n for exp in keys:\n item_no = exp_items.get(exp)\n config = game_configs.item_config.get(item_no)\n exp = config.get(\"funcArg1\")\n if total_exp/exp > 0:\n exp_item_no = item_no\n exp_item_num = total_exp/exp\n break\n\n player.item_package.add_item(Item(exp_item_no, exp_item_num))\n player.item_package.save_data()\n item_pb = gain_response.items.add()\n item_pb.item_no = exp_item_no\n item_pb.item_num = exp_item_num\n response.res.result = True\n return response\n","sub_path":"app/game/action/node/hero.py","file_name":"hero.py","file_ext":"py","file_size_in_byte":8200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96357001","text":"import socket\nimport math\nfrom types import SimpleNamespace\n\nclass UR_programmer():\n\n def __init__(self, ip, simulate):\n #Socket 
til at sende kommandoer til robotten\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.settimeout(10)\n self.connected = False\n\n #Tegneparametre:\n self.tegnehojde = 0.162\n #Grænser for tegningen (x-min, y-min, x-max, y-max)\n #Robot 3: x [-0.525, -0.325] y [-0.542, -0.265]\n self.tegne_limits = [-0.525, -0.542, -0.325, -0.265]\n self.home_pos = b' movej(p[-0.48709836954289376, -0.11010631171999086, 0.24547688620045213, -2.218021967288288, -2.2221231843407026, -0.0015151572218644231])\\n'\n\n #Husk at kontrollere ip-adressen!\n if not simulate:\n self.connect(ip)\n else:\n self.s = SimpleNamespace()\n self.s.send = lambda a : print(a)\n\n def connect(self, ip):\n #TCP_IP ='10.130.58.11'\n #Husk at kontrollere ip-adressen!\n TCP_IP = ip\n TCP_PORT = 30002\n BUFFER_SIZE = 1024\n\n try:\n #print(\"Opening IP Address\" + TCP_IP)\n self.s.connect((TCP_IP, TCP_PORT))\n response = self.s.recv(BUFFER_SIZE)\n self.connected = True\n except socket.error:\n print(\"Socket error\")\n self.s.close()\n\n\n def move_home(self):\n #Prædefineret home-position:\n #(Når vi skal sende en streng til robotten,\n # skal den konverteres til et bytearray\n # derfor står der b foran strengen.)\n self.s.send(b'def myProg():\\n')\n self.s.send(self.home_pos)\n self.s.send(b'end\\n')\n\n\n\n def move_xyz(self, x, y, z):\n '''\n Denne funktion laver et UR-script program, og sender det til robotten.\n Programmet vil indeholde en enkelt movel-kommando, til punktet x,y,z,\n der er givet som argumenter til denne funktion.\n\n Bemærk: Der kontrolleres ikke grænser på hverken x, y eller z!\n (Denne funktion er ikke beregnet til tegning, men til transport)\n '''\n #Når vi skal sende en streng til robotten,\n # skal den konverteres til et bytearray\n # derfor står der b' foran strengen.\n self.s.send(b'def myProg():\\n')\n #Vi læser robottens aktuelle konfiguration,\n # for at genbruge rotationen.\n self.s.send(b' var_1=get_actual_tcp_pose()\\n')\n st = ' var_1[0] = {:.5f}\\n'.format(x)\n self.s.send(bytearray(st,'utf8'))\n st = ' var_1[1] = {:.5f}\\n'.format(y)\n self.s.send(bytearray(st,'utf8'))\n st = ' var_1[2] = {:.5f}\\n'.format(z)\n self.s.send(bytearray(st,'utf8'))\n self.s.send(b' movel(var_1)\\n')\n #self.s.send(bytearray(st,'utf8'))\n self.s.send(b'end\\n')\n\n\n def move_path(self, path):\n '''\n Denne funktion genererer et UR-script program og sender det til robotten.\n Programmet vil lave en movel()-kommando til hvert punkt i listen 'path'.\n Hvert punkt i listen skal være en liste eller en tuple med 2 elementer, (x,y)\n Til z-koordinaten bruges variablen self.tegnehojde\n\n (Hvis ikke (x,y) ligger indenfor grænserne i self.tegne_limits,\n vil punktet ikke blive sendt til robotten.)\n\n Robottens orientering i hvert punkt vil være uændret, så inden programmet sendes,\n skal robottens tool være orienteret med kuglepennen mod papiret.\n '''\n limit_error = False\n #Når vi skal sende en streng til robotten,\n # skal den konverteres til et bytearray\n # derfor står der b' foran strengen.\n self.s.send(b'def myProg():\\n')\n for p in path:\n if self.tegne_limits[0] <= p[0] <= self.tegne_limits[2] and self.tegne_limits[1] <= p[1] <= self.tegne_limits[3]:\n #Vi læser robottens aktuelle konfiguration,\n # for at genbruge rotationen.\n self.s.send(b' var_1=get_actual_tcp_pose()\\n')\n\n st = ' var_1[0] = {:.5f}\\n'.format(p[0])\n self.s.send(bytearray(st,'utf8'))\n\n st = ' var_1[1] = {:.5f}\\n'.format(p[1])\n self.s.send(bytearray(st,'utf8'))\n\n st = ' var_1[2] = 
{:.5f}\\n'.format(self.tegnehojde)\n self.s.send(bytearray(st,'utf8'))\n\n self.s.send(b' movel(var_1, r=0.003)\\n')\n else:\n limit_error = True\n #self.s.send(self.home_pos)\n self.s.send(b'end\\n')\n\n print('Program sendt til robot.')\n if limit_error:\n print('(Mindst et punkt blev udeladt, fordi det lå udenfor tegneområdet)')\n","sub_path":"robot/ur_programmer.py","file_name":"ur_programmer.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"99081480","text":"import os\nimport re\nimport shutil\nimport sys\n\nimport jinja2.environment, jinja2.loaders\n\nimport model\nimport parsing\n\n\n# Constants definitions\n\nERR_INCORRECT_NR_ARGS = str(\n \"Usage: generator.py \"\n)\n\nERR_INVALID_TYPE_STRING = str(\n \"The input parameter input_string should be of type str. Please change.\"\n)\n\nERR_INVALID_XML_BEGIN = str(\"The xml configuration file \\\"\")\nERR_INVALID_XML_MIDDLE = str(\n \"\\\" is not valid with respect to the xml schema file \\\"\"\n)\nERR_INVALID_XML_END = str(\"\\\".\")\n \nERR_INVALID_PATH_BEGIN = \"The provided path \\\"\"\nERR_INVALID_PATH_END = \"\\\" is invalid. Please change.\"\n \n# Auto-generated file warning message\nWRN_AUTO_GENERATED_FILE = [\n\"*\",\n\"* WARNING! AUTO-GENERATED FILE.\",\n\"*\",\n\"* PLEASE DO NOT UPDATE THIS FILE MANUALLY. \",\n\"* USE THE PYTHON GENERATOR SCRIPTS FOR ANY MODIFICATIONS.\",\n\"*\"\n]\n\n# The number of time points to be included in each unit test\nUNIT_TESTS_NR_OF_TIMEPOINTS = 12\n \nPROJECT_ROOT_FOLDER = \"/home/ovidiu/Repositories/git/multiscale/Multiscale\"\nMODEL_CHECKING_FOLDER = PROJECT_ROOT_FOLDER + str(\n \"/modules/verification/spatial-temporal\" \n) \nMODEL_CHECKING_INPUT_FOLDER = MODEL_CHECKING_FOLDER + str(\n \"/include/multiscale/verification/spatial-temporal\" \n)\nMODEL_CHECKING_SAMPLE_FOLDER = MODEL_CHECKING_FOLDER + str(\n \"/sample\"\n)\nMODEL_CHECKING_SRC_FOLDER = MODEL_CHECKING_FOLDER + str(\n \"/src\"\n)\nMODEL_CHECKING_TEST_FOLDER = MODEL_CHECKING_FOLDER + str(\n \"/test\"\n)\nTEMPLATES_FOLDER_PATH = PROJECT_ROOT_FOLDER + str(\n \"/script/verification/generator/templates\"\n)\n\nTAG_WRN_AUTO_GENERATED_FILE = \"auto_generated_warning\"\n\nTAG_UNIT_TESTS_NR_OF_TIMEPOINTS = \"nr_of_time_points\"\n\nTAG_SPATIAL_ENTITIES = \"spatial_entities\"\nTAG_SPATIAL_MEASURES = \"spatial_measures\"\nTAG_SPATIAL_ENTITY_NAME = \"spatial_entity_name\"\n\nTAG_SPATIAL_ENTITY_NAME_FIXED_WIDTH = \"#spatial_entity_fixed_width\"\nTAG_SPATIAL_MEASURE_NAME_FIXED_WIDTH = \"#spatial_measure_fixed_width\"\nTAG_SPATIAL_ENTITY_AND_MEASURE_NAME_FIXED_WIDTH = \"#spatial_entity_and_measure_fixed_width\"\n\nTAG_FILTER_FIRST_TO_UPPER = \"first_to_upper\"\n\nTAG_DENSITY_SPATIAL_MEASURE_FLAG = \"is_density_spatial_measure\"\nTAG_CENTROID_X_SPATIAL_MEASURE_FLAG = \"is_centroid_x_spatial_measure\"\nTAG_CENTROID_Y_SPATIAL_MEASURE_FLAG = \"is_centroid_y_spatial_measure\"\n\nLABEL_DENSITY_SPATIAL_MEASURE = \"density\"\nLABEL_CENTROID_X_SPATIAL_MEASURE = \"centroidX\"\nLABEL_CENTROID_Y_SPATIAL_MEASURE = \"centroidY\"\n\nFIXED_WIDTH_PADDING = 4\n\nEXT_HEADER_FILE = \".hpp\"\nEXT_SOURCE_FILE = \".cpp\"\n\n# List of (input_template_path, output_template_path, min_spatial_entity_width,\n# min_spatial_measure_width)\nTEMPLATE_PREPROCESSING_IO = [\n (\n TEMPLATES_FOLDER_PATH + str(\n \"/include/attribute/spatial_measure_type_hpp.tpl.in\"\n ), \n TEMPLATES_FOLDER_PATH + str(\n \"/include/attribute/spatial_measure_type_hpp.tpl\"\n ), \n 1, \n 29\n ),\n (\n 
TEMPLATES_FOLDER_PATH + str(\n \"/include/attribute/subset_specific_type_hpp.tpl.in\"\n ), \n TEMPLATES_FOLDER_PATH + str(\n \"/include/attribute/subset_specific_type_hpp.tpl\"\n ), \n 29, \n 1\n ),\n (\n TEMPLATES_FOLDER_PATH + str(\n \"/include/parsing/symbol_tables_auto_generated_hpp.tpl.in\"\n ), \n TEMPLATES_FOLDER_PATH + str(\n \"/include/parsing/symbol_tables_auto_generated_hpp.tpl\"\n ), \n 1, \n 1\n ),\n (\n TEMPLATES_FOLDER_PATH + str(\n \"/test/evaluation/time_points_spatial_entities_attributes_initializer_hpp.tpl.in\"\n ),\n TEMPLATES_FOLDER_PATH + str(\n \"/test/evaluation/time_points_spatial_entities_attributes_initializer_hpp.tpl\"\n ),\n 10,\n 20\n )\n]\n\n# Dictionary of (template_path, generated_source_file_path)\n#\n# If you want to generate a new source file then add a new entry to this\n# dictionary.\nSOURCE_FILE_GENERATING_IO = {\n \"config/verification/spatial-temporal/schema/mstml_l1v1_xsd.tpl\" : \n PROJECT_ROOT_FOLDER + str(\n \"/config/verification/spatial-temporal/schema/MSTML_L1V1.xsd\"\n )\n ,\n \"include/attribute/spatial_measure_type_hpp.tpl\" : \n MODEL_CHECKING_INPUT_FOLDER + str(\n \"/attribute/SpatialMeasureType.hpp\"\n )\n ,\n\n \"include/attribute/subset_specific_type_hpp.tpl\" : \n MODEL_CHECKING_INPUT_FOLDER + str(\n \"/attribute/SubsetSpecificType.hpp\"\n )\n ,\n \"include/parsing/symbol_tables_auto_generated_hpp.tpl\" : \n MODEL_CHECKING_INPUT_FOLDER + str(\n \"/parsing/SymbolTablesAutoGenerated.hpp\"\n )\n ,\n \"sample/parser_evaluation_sample_cpp.tpl\" : \n MODEL_CHECKING_SAMPLE_FOLDER + str(\n \"/ParserEvaluationSample.cpp\"\n )\n ,\n \"src/attribute/spatial_measure_attribute_auto_generated_cpp.tpl\" : \n MODEL_CHECKING_SRC_FOLDER + str(\n \"/attribute/SpatialMeasureAttributeAutoGenerated.cpp\"\n )\n ,\n \"src/data/spatial_temporal_data_reader_auto_generated_cpp.tpl\" : \n MODEL_CHECKING_SRC_FOLDER + str(\n \"/data/SpatialTemporalDataReaderAutoGenerated.cpp\"\n )\n ,\n \"src/data/spatial_temporal_data_writer_auto_generated_cpp.tpl\" :\n MODEL_CHECKING_SRC_FOLDER + str(\n \"/data/SpatialTemporalDataWriterAutoGenerated.cpp\"\n )\n ,\n \"src/attribute/subset_specific_attribute_auto_generated_cpp.tpl\" : \n MODEL_CHECKING_SRC_FOLDER + str(\n \"/attribute/SubsetSpecificAttributeAutoGenerated.cpp\"\n )\n ,\n \"test/checking/model_checker_test_hpp.tpl\" : \n MODEL_CHECKING_TEST_FOLDER + str(\n \"/checking/ModelCheckerTest.hpp\"\n )\n ,\n \"test/evaluation/complete_trace_test_hpp.tpl\" : \n MODEL_CHECKING_TEST_FOLDER + str(\n \"/evaluation/CompleteTraceTest.hpp\"\n )\n ,\n \"test/evaluation/empty_trace_test_hpp.tpl\" :\n MODEL_CHECKING_TEST_FOLDER + str(\n \"/evaluation/EmptyTraceTest.hpp\"\n )\n ,\n \"test/evaluation/non_empty_trace_evaluation_test_cpp.tpl\" :\n MODEL_CHECKING_TEST_FOLDER + str(\n \"/evaluation/NonEmptyTraceEvaluationTest.cpp\"\n )\n ,\n \"test/evaluation/numeric_state_variable_trace_test_hpp.tpl\" :\n MODEL_CHECKING_TEST_FOLDER + str(\n \"/evaluation/NumericStateVariableTraceTest.hpp\"\n )\n ,\n \"test/evaluation/spatial_entities_trace_test_hpp.tpl\" :\n MODEL_CHECKING_TEST_FOLDER + str(\n \"/evaluation/SpatialEntitiesTraceTest.hpp\"\n )\n ,\n \"test/evaluation/time_points_spatial_entities_attributes_initializer_hpp.tpl\" :\n MODEL_CHECKING_TEST_FOLDER + str(\n \"/evaluation/TimePointsSpatialEntitiesAttributesInitializer.hpp\"\n )\n ,\n \"test/parsing/parser_test_hpp.tpl\" :\n MODEL_CHECKING_TEST_FOLDER + str(\n \"/parsing/ParserTest.hpp\"\n )\n}\n\nDERIVED_SPATIAL_ENTITY_TEMPLATE_PATH = str(\n 
\"include/model/derived_spatial_entity_hpp.tpl\"\n)\nDERIVED_SPATIAL_ENTITY_OUTPUT_FOLDER = MODEL_CHECKING_INPUT_FOLDER + str(\n \"/model/\"\n)\n\n\n# Functions definitions\n\ndef validate_path(path):\n \"\"\" Check if the input path is valid \"\"\"\n if isinstance(path, str) and len(path) > 0:\n if os.path.exists(path):\n return True\n \n # Else raise invalid value error \n raise ValueError(ERR_INVALID_PATH_BEGIN + path + ERR_INVALID_PATH_END)\n\ndef generate_derived_spatial_entity_file(environment, template_path, \n output_folder, spatial_entity_name):\n \"\"\"Generate the derived spatial entity source file\n \n Keyword argument:\n environment -- the environment for loading templates\n template_path -- path to the template file\n output_folder -- path to the output folder\n spatial_entity_name -- the name of the spatial entity\n \"\"\"\n # Assert the validity of the input parameters\n assert environment is not None\n assert isinstance(template_path, str) and len(template_path) > 0\n assert isinstance(output_folder, str) and len(output_folder) > 0\n assert (isinstance(spatial_entity_name, str) and \n len(spatial_entity_name) > 0)\n \n # Load and get a reference to the template\n template = environment.get_template(template_path)\n \n # Initialize output_path\n output_path = output_folder + first_to_upper(spatial_entity_name) + str(\n EXT_HEADER_FILE\n )\n \n # Generate source file from template\n with open(output_path, \"w+\") as output_file:\n output_file.write(\n template.render({\n TAG_WRN_AUTO_GENERATED_FILE : WRN_AUTO_GENERATED_FILE,\n TAG_UNIT_TESTS_NR_OF_TIMEPOINTS : UNIT_TESTS_NR_OF_TIMEPOINTS,\n TAG_SPATIAL_ENTITY_NAME : spatial_entity_name\n })\n )\n\ndef generate_spatial_entities_and_measures_dependent_file(environment, \n template_path, \n output_path, \n spatial_entities, \n spatial_measures):\n \"\"\"Generate the source file dependent on the collection of spatial\n entities and spatial measures\n \n Keyword argument:\n environment -- the environment for loading templates\n template_path -- path to the template file\n output_path -- path to the output file\n spatial_entities -- the list of spatial entities\n spatial_measures -- the list of spatial measures\n \"\"\"\n # Assert the validity of the input parameters\n assert environment is not None\n assert isinstance(template_path, str) and len(template_path) > 0\n assert isinstance(output_path, str) and len(output_path) > 0\n assert isinstance(spatial_entities, list) and len(spatial_entities) > 0\n assert isinstance(spatial_measures, list) and len(spatial_measures) > 0\n \n # Load and get a reference to the template\n template = environment.get_template(template_path)\n \n # Generate source file from template\n with open(output_path, \"w+\") as output_file:\n output_file.write(\n template.render({\n TAG_WRN_AUTO_GENERATED_FILE : WRN_AUTO_GENERATED_FILE,\n TAG_UNIT_TESTS_NR_OF_TIMEPOINTS : UNIT_TESTS_NR_OF_TIMEPOINTS,\n TAG_SPATIAL_ENTITIES : spatial_entities, \n TAG_SPATIAL_MEASURES : spatial_measures\n })\n )\n\n\ndef first_to_upper(input_string):\n \"\"\"Return the input string with the first letter uppercase\n \n Keyword arguments:\n input_string -- the input string whose first letter will be uppercase\n \"\"\"\n if isinstance(input_string, str): \n if len(input_string) > 1:\n return (input_string[:1].capitalize()) + (input_string[1:])\n else:\n return input_string\n else:\n raise TypeError(ERR_INVALID_TYPE_STRING)\n\ndef preprocess_template_file(input_file_path, output_file_path, \n spatial_entities, spatial_measures,\n 
min_spatial_entity_width=1, \n min_spatial_measure_width=1):\n \"\"\" Preprocess the given input template and generate the output template\n \n Keyword arguments:\n input_file_path -- template input file path\n output_file_path -- template output file path\n spatial_entities -- list of spatial entities\n spatial_measures -- list of spatial measures\n \"\"\"\n # Assert the validity of the input\n assert isinstance(input_file_path, str) and len(input_file_path) > 0\n assert isinstance(output_file_path, str) and len(output_file_path) > 0\n assert isinstance(spatial_entities, list) and len(spatial_entities) > 0\n assert isinstance(spatial_measures, list) and len(spatial_measures) > 0\n \n # Compute the maximum width for spatial entities and spatial measures names\n spatial_entity_fixed_width = max(\n max([len(spatial_entity.name) \n for spatial_entity in spatial_entities]\n ), \n min_spatial_entity_width\n ) + FIXED_WIDTH_PADDING\n spatial_measure_fixed_width = max(\n max([len(spatial_measure.name) \n for spatial_measure in spatial_measures]\n ),\n min_spatial_measure_width\n ) + FIXED_WIDTH_PADDING\n spatial_entity_and_measure_fixed_width = (\n spatial_entity_fixed_width +\n spatial_measure_fixed_width\n )\n \n # Create the replace dictionary\n replace_dictionary = {\n TAG_SPATIAL_ENTITY_NAME_FIXED_WIDTH : \n str(spatial_entity_fixed_width)\n , \n TAG_SPATIAL_MEASURE_NAME_FIXED_WIDTH : \n str(spatial_measure_fixed_width)\n ,\n TAG_SPATIAL_ENTITY_AND_MEASURE_NAME_FIXED_WIDTH :\n str(spatial_entity_and_measure_fixed_width)\n }\n \n # Create the replacement regular expression\n regular_expression = re.compile(\"|\".join(replace_dictionary.keys()))\n \n # Create output file\n with open(output_file_path, \"w+\") as output_file:\n with open(input_file_path, \"r\") as input_file:\n for line in input_file:\n output_file.write(\n regular_expression.sub(\n lambda matched_token: \n replace_dictionary[matched_token.group(0)]\n , \n line\n )\n )\n \n\ndef generate_source_files(spatial_entities, spatial_measures):\n \"\"\"Generate the source files considering the given spatial entities \n and spatial measures\n \n Keyword arguments:\n spatial_entities -- the list of spatial entities\n spatial_measures -- the list of spatial measures\n \"\"\"\n # Assert that the spatial_entities and spatial_measures lists \n # have more than one element\n assert isinstance(spatial_entities, list) and len(spatial_entities) > 0\n assert isinstance(spatial_measures, list) and len(spatial_measures) > 0\n \n # Load the environment with all required templates\n environment = jinja2.environment.Environment(\n loader=jinja2.loaders.FileSystemLoader(\n TEMPLATES_FOLDER_PATH\n ),\n block_start_string=\"/*{%\",\n block_end_string=\"%}*/\",\n variable_start_string=\"/*{{\",\n variable_end_string=\"}}*/\",\n trim_blocks=True,\n lstrip_blocks=True\n )\n \n # Register the first_to_upper custom filter\n environment.filters[TAG_FILTER_FIRST_TO_UPPER] = first_to_upper\n \n # Preprocess all dependent template input files\n for (template_input_file, template_output_file, min_spatial_entity_width, \n min_spatial_measure_width) in TEMPLATE_PREPROCESSING_IO:\n preprocess_template_file(\n template_input_file, template_output_file, spatial_entities, \n spatial_measures, min_spatial_entity_width, \n min_spatial_measure_width\n )\n \n # Generate the spatial entities and measures dependent files\n for template_path, output_path in SOURCE_FILE_GENERATING_IO.iteritems():\n generate_spatial_entities_and_measures_dependent_file(\n environment, 
template_path, output_path, \n spatial_entities, spatial_measures\n )\n \n # Generate the derived spatial entity files\n for spatial_entity in spatial_entities:\n generate_derived_spatial_entity_file(\n environment, DERIVED_SPATIAL_ENTITY_TEMPLATE_PATH, \n DERIVED_SPATIAL_ENTITY_OUTPUT_FOLDER, spatial_entity.name\n )\n\ndef parse_configuration_file(xml_file_path, xml_schema_path):\n \"\"\"Parse the configuration file and return the list of spatial entities \n and spatial measures\n\n Keyword arguments:\n xml_file_path -- xml configuration file path\n xml_schema_path -- xml configuration schema file path \n \n Returns (parse_result, spatial_entities, spatial_measures) where:\n - parse_result: Result of parsing the xml file (True/False)\n - spatial_entities: The list of spatial entities recorded \n in the xml file\n - spatial_measures: The list of spatial measures recorded \n in the xml file\n \"\"\"\n # Assert that the paths are non-empty strings\n assert isinstance(xml_file_path, str) and len(xml_file_path) > 0\n assert isinstance(xml_schema_path, str) and len(xml_schema_path) > 0\n \n # Construct xml parser\n parser = parsing.parser.SpatialDescriptionParser(\n xml_file_path, \n xml_schema_path\n )\n \n # Parse the spatial description\n if parser.parse():\n return (True, parser.spatial_entities, parser.spatial_measures)\n else:\n return (False, [], [])\n\ndef parse_configuration_and_generate_files(xml_file_path, xml_schema_path):\n \"\"\"Parse the configuration file and generate all spatial description \n dependent source files\n \n Keyword arguments:\n xml_file_path -- xml configuration file path\n xml_schema_path -- xml configuration schema file path\n \"\"\"\n # Check if the paths are valid\n validate_path(xml_file_path)\n validate_path(xml_schema_path)\n \n # Parse configuration file\n (\n parse_result, \n spatial_entities, \n spatial_measures\n ) = parse_configuration_file(\n xml_file_path, \n xml_schema_path\n )\n \n # If successful then generate source files\n if parse_result:\n generate_source_files(spatial_entities, spatial_measures) \n else:\n raise RuntimeError(ERR_INVALID_XML_BEGIN + xml_file_path + \n ERR_INVALID_XML_MIDDLE + xml_schema_path + \n ERR_INVALID_XML_END)\n\n# Do the following whenever the script is executed\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(ERR_INCORRECT_NR_ARGS)\n else:\n # Inform the user that the generator script is executed\n print(\"%-------------------------------------------------------------\")\n print(\"% Generating source files using the generator.py script... \")\n print(\"%-------------------------------------------------------------\")\n\n parse_configuration_and_generate_files(sys.argv[1], sys.argv[2])\n \n","sub_path":"Multiscale/script/verification/generator/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":18493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"56471108","text":"from flask import url_for, current_app\nfrom flask_mail import Mail, Message\nfrom blueblog.extensions import mail\n\ndef send_mail(subject, to, html):\n\n message = Message(subject, recipients=[to], body=html)\n mail.send(message)\n\ndef send_new_commment_email(post):\n\n post_url = url_for('blog.show_post', post_id=post.id, _external=True) + '#comments'\n send_mail(subject='New comment', to=current_app.config['BLUEBLOG_EMAIL'],\n html='
<p>New Comment in post<i>%s</i>, click the link below to check:</p>'\n '<p><a href=\"%s\">%s</a></p>'\n '<p>Do not reply this email.</p>'\n % (post.title, post_url, post_url))\n\ndef send_new_reply_email(commment):\n\n post_url = url_for('blog.show_post', post_id=commment.post.id, _external=True) + '#comments'\n send_mail(subject='New reply', to=commment.email,\n html='

    New reply for the comment you left in post%s, click the link below to check:

    '\n '

    %s

    '\n '

    Do not reply this email.

    '\n % (commment.post.title, post_url, post_url))","sub_path":"blueblog/blueblog/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467254131","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 18 19:50:03 2019\n\n@author: POORVI\n\"\"\"\n\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport re\n\nmovie_url=[]\n\n\ndef imdb(name):\n movie=name\n # str(input('Movie Name: '))\n movie = movie.title()\n movie_search = '+'.join(movie.split())\n\n base_url = 'http://www.imdb.com/find?ref_=nv_sr_fn&q='\n url = base_url + movie_search + '&s=all'\n\n try:\n with urllib.request.urlopen(url) as r:\n html_d = r.read()\n soup = BeautifulSoup(html_d, 'html.parser')\n movies = soup.find_all('a', string=movie)\n \n movie_url=('https://www.imdb.com' + str(movies[0].get('href')))\n \n with urllib.request.urlopen(movie_url) as r:\n html_doc = r.read()\n soup1 = BeautifulSoup(html_doc, 'html.parser')\n movie_title = soup1.find('title').contents[0]\n rate = soup1.find('span', itemprop='ratingValue').contents[0]\n print(movie_title)\n \n \n return 'IMDB: ' + rate + '/10.0'\n \n except Exception as e:\n print(e)\n print(\"There is no movie like this. \",movie)\n \n return \"There is no movie like this. \"+movie\n \n\n\ndef rot_handle(movie): \n search_name = '%20'.join(movie.split())\n# print('hi')\n base_url=\"https://www.rottentomatoes.com/search/?search=\"\n url=base_url+search_name\n driver=webdriver.PhantomJS()\n driver.get(url)\n# print(url)\n \n title1= driver.find_element_by_id(\"movieSection\")\n movie = title1.find_element_by_class_name('unstyled')\n link=movie.get_attribute(\"href\")\n name=movie.text\n with urllib.request.urlopen(link) as r:\n html_d = r.read()\n soup = BeautifulSoup(html_d, 'html.parser')\n\n# print(type(movie_title))\n rate = soup.find('span', attrs={'class': 'mop-ratings-wrap__percentage'})\n rate=rate.get_text()\n # rating = \" \".join(re.split(\"\\s+\", rate, flags=re.UNICODE))\n rating=re.sub(r\"\\s+\",\" \",rate)\n\n rating=rating.split(\" \")\n \n driver.quit()\n print(name)\n print(\"Tomatometer:\",rating[1])\n return \"Tomatometer:\"+ rating[1]\n\n \ndef rotten_tomatoes(name):\n \n movie=name\n movie_search = '_'.join(movie.split()) # joined the movie name by splitting through spaces\n # search for how to do case insensitive search for movie names\n\n base_url = 'https://www.rottentomatoes.com/m/'\n url = base_url + movie_search # main url which will give movie info\n# print(url)\n try:\n with urllib.request.urlopen(url) as r:\n html_d = r.read()\n soup = BeautifulSoup(html_d, 'html.parser')\n\n movie_title = soup.find('title').contents[0]\n# print(type(movie_title))\n rate = soup.find('span', attrs={'class': 'mop-ratings-wrap__percentage'})\n rate=rate.get_text()\n # rating = \" \".join(re.split(\"\\s+\", rate, flags=re.UNICODE))\n rating=re.sub(r\"\\s+\",\" \",rate)\n\n rating=rating.split(\" \")\n print(movie_title)\n \n# rating=rating[3].split(\"/\")\n\n print(\"Tomatometer:\",rating[1])\n return \"Tomatometer: \"+ rating[1]\n \n except Exception as e:\n# print(e)\n if e.code==404:\n return rot_handle(movie)\n \n print(\"There is no movie like this\",movie)\n\n\ndef call(name):\n print(\"NAME \",name)\n if 'jhonny' in name or 'Rating' in name or 'rating' in name:\n return 'No'\n \n else:\n imdb_rating = imdb(name)\n rotten = rotten_tomatoes(name)\n \n \n return ''+str(imdb_rating)+\"\\n\"+ 
str(rotten)\n ","sub_path":"movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"515672866","text":"'''\nThe functions for intentRerouteLat\n\n'''\nimport numpy\nimport time\n\ndef _init_( self ):\n self.default = ''\n\ndef checkLog( main, nodeId ):\n try:\n logNames = main.ONOSbench.listLog( main.onosIp[ nodeId ] )\n assert logNames is not None\n if len( logNames ) >= 2:\n return 2\n return 1\n except AssertionError:\n main.log.error(\"There is no karaf log\")\n return -1\n\ndef bringBackTopology( main ):\n main.log.info( \"Bring back topology \" )\n main.CLIs[ 0 ].pushTestIntents(main.ingress, main.egress, main.batchSize,\n offset=1, options=\"-w\", timeout=main.timeout)\n main.CLIs[ 0 ].purgeWithdrawnIntents()\n main.CLIs[ 0 ].setCfg( \"org.onosproject.provider.nil.NullProviders\", \"deviceCount\", value=0)\n main.CLIs[ 0 ].setCfg( \"org.onosproject.provider.nil.NullProviders\", \"enabled\", value=\"false\")\n main.CLIs[ 0 ].setCfg( \"org.onosproject.provider.nil.NullProviders\", \"deviceCount\", value=main.deviceCount)\n main.CLIs[ 0 ].setCfg( \"org.onosproject.provider.nil.NullProviders\", \"enabled\", value=\"true\")\n main.CLIs[ 0 ].balanceMasters()\n time.sleep( main.setMasterSleep )\n if len( main.ONOSip ) > 1:\n main.CLIs[ 0 ].deviceRole(main.end1[ 'name' ], main.ONOSip[ 0 ])\n main.CLIs[ 0 ].deviceRole(main.end2[ 'name' ], main.ONOSip[ 0 ])\n time.sleep( main.setMasterSleep )\n\ndef getValues( main ):\n '''\n Calculated the wanted values for intentRerouteTest\n\n 1. Get the first \"last topology timestamp\" from karaf.log in different node\n 2. Get the first \"first intent installed timestamp\" from karaf log in different node\n 3. Get the last \"last intent installed timestamp\" from karaf log in different node\n\n Return:\n last_topology_to_first_installed: The time from the last topology to the first intent installed\n first_installed_to_last_installed: Time time from the first topology to the last intent installed\n totalTime: The time from the last topology to the last intent installed\n\n '''\n lastTopologyTimestamp = compareTimestamp( main, main.searchTerm[ \"TopologyTime\" ], \"creationTime=\", \",\", 'last',func='min' )\n firstIntentInstalledTimestamp = compareTimestamp( main, main.searchTerm[ \"InstallTime\" ], \"time = \", \" \", 'first',func='min' )\n lastIntentInstalledTimestamp = compareTimestamp( main, main.searchTerm[ \"InstallTime\" ], \"time = \", \" \", 'last',func='max' )\n\n if lastTopologyTimestamp == -1 or firstIntentInstalledTimestamp == -1 or lastIntentInstalledTimestamp == -1:\n main.log.warn( \"Can't get timestamp from karaf log! 
\" )\n bringBackTopology( main )\n return -1, -1, -1\n\n #calculate values\n lastTopologyToFirstInstalled = firstIntentInstalledTimestamp - lastTopologyTimestamp\n if lastTopologyToFirstInstalled < 0:\n main.record = main.record + 1\n\n firstInstalledToLastInstalled = lastIntentInstalledTimestamp - firstIntentInstalledTimestamp\n totalTime = lastIntentInstalledTimestamp - lastTopologyTimestamp\n\n if main.validRun >= main.warmUp and main.verify:\n main.log.info( \"Last topology time stamp: {0:f}\".format( lastTopologyTimestamp ))\n main.log.info( \"First installed time stamp: {0:f}\".format( firstIntentInstalledTimestamp ))\n main.log.info( \"Last installed time stamp: {0:f}\".format( lastIntentInstalledTimestamp ))\n main.log.info( \"Last topology to first installed latency:{0:f}\".format( lastTopologyToFirstInstalled ))\n main.log.info( \"First installed to last installed latency:{0:f}\".format( firstInstalledToLastInstalled ))\n main.log.info( \"Overall latency:{0:f}\".format( totalTime ))\n main.LatencyList.append( totalTime )\n main.LatencyListTopoToFirstInstalled.append( lastTopologyToFirstInstalled )\n main.LatencyListFirstInstalledToLastInstalled.append( firstInstalledToLastInstalled )\n return lastTopologyToFirstInstalled, firstInstalledToLastInstalled, totalTime\n\ndef compareTimestamp( main, compareTerm, splitTerm_before, splitTerm_after, mode, func='max' ):\n '''\n Compare all the timestamps of compareTerm from different node.\n\n func:\n max: Compare which one is the biggest and retun it\n min: Compare which one is the smallest and return it\n\n return:\n This function will return the biggest or smallest timestamps of the compareTerm.\n\n '''\n compareTermList = []\n for i in range( main.numCtrls ):\n timestamp = main.CLIs[ i ].getTimeStampFromLog( mode, compareTerm, splitTerm_before, splitTerm_after, startLine=main.totalLines[ i ], logNum=checkLog( main, i ) )\n compareTermList.append( timestamp )\n main.log.info(\"-----------------------------------------------\")\n for i in range( main.numCtrls ):\n main.log.info( \"ONOS Node {} {} {} time stamp: {}\".format((i+1), mode, compareTerm, compareTermList[ i ]))\n x = min( compareTermList )\n main.log.info(\"-----------------------------------------------\")\n if x == -1:\n main.log.warn( \"Can't compare timestamps\" )\n return -1\n else:\n if func == 'max':\n return max( compareTermList )\n if func == 'min':\n return min( compareTermList )\n","sub_path":"TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py","file_name":"intentRerouteLatFuncs.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"410465048","text":"'''\nStores board state\nDetermines valid moves\nLogs moves\n'''\n\nimport numpy as np\nfrom constants import *\n\n\n\n\nclass GameState:\n\tdef __init__(self):\n\t\tself.board = CHESSBOARD\n\t\t# self.board = ROOKSONLY\n\t\tself.move_functions = {\t'p': self.get_pawn_moves,\n\t\t\t\t\t\t\t\t'R': self.get_rook_moves,\n\t\t\t\t\t\t\t\t'N': self.get_knight_moves,\n\t\t\t\t\t\t\t\t'B': self.get_bishop_moves,\n\t\t\t\t\t\t\t\t'Q': self.get_queen_moves,\n\t\t\t\t\t\t\t\t'K': self.get_king_moves,\n\t\t\t\t\t\t\t\t}\n\n\t\tself.whitetomove = True\n\t\tself.movelog = []\n\n\t# Move a piece from start to end positions\n\t# Doesn't work for en passant, castling, and pawn promotion\n\tdef make_move(self, move):\n\t\t# Clears where move started\n\t\tself.board[move.start.row,move.start.col] = '--'\n\t\t# 
Overwrites where move ends\n\t\tself.board[move.end.row,move.end.col] = move.piece_moved\n\t\t# Add to move log\n\t\tself.movelog.append(move)\n\t\t# Change to opposite side's turn\n\t\tself.whitetomove = not(self.whitetomove)\n\n\t# Goes back one move\n\tdef undo_move(self):\n\t\tif len(self.movelog) != 0:\n\t\t\tlast_move = self.movelog.pop()\n\t\t\tself.board[last_move.end.row,last_move.end.col] = last_move.piece_captured\n\t\t\tself.board[last_move.start.row,last_move.start.col] = last_move.piece_moved\n\t\t\tself.whitetomove = not(self.whitetomove)\n\t\telse:\n\t\t\tprint('No more moves to undo')\n\n\t# All moves possible after considering check\n\tdef get_valid_moves(self):\n\t\tpass\n\n\t# All moves possible ignoring check\n\tdef get_possible_moves(self):\n\t\t#Start with blank move list\n\t\tmoves = []\n\t\t#For each square on the board\n\t\tfor row in range(len(self.board)):\n\t\t\tfor col in range(len(self.board[row])):\n\t\t\t\t# Get value of what's in that square\n\t\t\t\tsquare = self.board[row,col]\n\t\t\t\t# If it's blank, do nothing\n\t\t\t\tif square == '--':\n\t\t\t\t\tcontinue\n\t\t\t\t# If it has a piece on it\n\t\t\t\telse:\n\t\t\t\t\tpiece_colour, piece_type = square\n\t\t\t\t\t# If it's the current player's piece\n\t\t\t\t\tif (piece_colour == 'w' and self.whitetomove) or (piece_colour == 'b' and not(self.whitetomove)):\n\t\t\t\t\t\t# Get possible moves (automatically calls correct function)\n\t\t\t\t\t\tself.move_functions[piece_type](row, col, moves)\n\t\treturn moves\n\n\n\t#Calculates all possible moves for pawn at row, col\n\tdef get_pawn_moves(self, row, col, moves):\n\n\t\tif self.whitetomove:\n\t\t\t# If square in front is blank\n\t\t\tif self.board[row-1,col] == '--':\n\t\t\t\t# Add to possible moves\n\t\t\t\tmoves.append(Move((row, col), (row-1,col), self.board))\n\t\t\t\t# If square 2 in front of starting position is also blank\n\t\t\t\tif row == DIMENSION-2 and self.board[row-2,col] == '--':\n\t\t\t\t\t# Add to possible moves\n\t\t\t\t\tmoves.append(Move((row, col), (row-2,col), self.board))\n\n\t\t\t\t# print('{},{} - {}'.format(row, col, len(moves)))\n\n\t\t\t# Checking diagonals\n\t\t\tif col-1 >= 0:\n\t\t\t\tif self.board[row-1,col-1][0] == 'b':\n\t\t\t\t\tmoves.append(Move((row, col), (row-1,col-1), self.board))\n\t\t\tif col+1 < DIMENSION:\n\t\t\t\tif self.board[row-1,col+1][0] == 'b':\n\t\t\t\t\tmoves.append(Move((row, col), (row-1,col+1), self.board))\n\n\t\t\t# En Passant\n\t\t\t# if col-1 >= 0:\n\t\t\t# \tif row == 3 and board[row][col-1] == 'b':\n\n\t\t\t# if col+1 <= 7:\n\t\t\t# \tif row == 3 and board[row][col+1] == 'b':\n\n\t\t#If it's black's turn\n\t\telse:\n\t\t\t# If square in front is blank\n\t\t\tif self.board[row+1,col] == '--':\n\t\t\t\t# Add to possible moves\n\t\t\t\tmoves.append(Move((row, col), (row+1,col), self.board))\n\t\t\t\t# If square 2 in front of starting position is also blank\n\t\t\t\tif row == 1 and self.board[row+2,col] == '--':\n\t\t\t\t\t# Add to possible moves\n\t\t\t\t\tmoves.append(Move((row, col), (row+2,col), self.board))\n\n\t\t\t\t# print('{},{} - {}'.format(row, col, len(moves)))\n\n\t\t\t# Checking diagonals\n\t\t\tif col-1 >= 0:\n\t\t\t\tif self.board[row+1,col-1][0] == 'w':\n\t\t\t\t\tmoves.append(Move((row, col), (row+1,col-1), self.board))\n\t\t\tif col+1 < DIMENSION:\n\t\t\t\tif self.board[row+1,col+1][0] == 'w':\n\t\t\t\t\tmoves.append(Move((row, col), (row+1,col+1), self.board))\n\n\t\t\t# En Passant\n\t\t\t# if col-1 >= 0:\n\t\t\t# \tif row == 3 and board[row][col-1] == 'b':\n\n\t\t\t# if col+1 <= 
7:\n\t\t\t# \tif row == 3 and board[row][col+1] == 'b':\n\t\n\t# Calculates all moves possible in a set of directions\n\t# directions holds vectors (v)\n\t# max_step holds coefficient (m)\n\t# Calculates all possible m*v\n\tdef moves_in_direction(self, directions, row, col, moves, max_step=DIMENSION-1):\n\t\t# Determine enemy colour\n\t\tenemy_colour = 'b' if self.whitetomove else 'w'\n\t\t# For each direction\n\t\tfor dx, dy in directions:\n\t\t\t# For each possible step size\n\t\t\tfor i in range(1,max_step+1):\n\t\t\t\t# Set new x,y coords based off step size \n\t\t\t\tx = row + i*dx\n\t\t\t\ty = col + i*dy\n\n\t\t\t\t# If still on the board\n\t\t\t\tif 0<=x end\n\tdef get_chess_notation(self):\n\t\tstart_move = self.get_rank_and_file(self.start.row, self.start.col)\n\t\tend_move = self.get_rank_and_file(self.end.row, self.end.col)\n\t\treturn start_move + '->'+ end_move\n\n\t# Get position in format e.g. e4\n\tdef get_rank_and_file(self, r, c):\n\t\treturn self.cols2files[c] + self.rows2ranks[r]\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"148569893","text":"# coding: utf-8\n\n\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom zuora_client.models.common_response_type import CommonResponseType # noqa: F401,E501\nfrom zuora_client.models.job_result_subscriptions import JobResultSubscriptions # noqa: F401,E501\n\n\nclass JobResult(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'success': 'bool',\n 'account_number': 'str',\n 'credit_memo_numbers': 'list[str]',\n 'invoice_numbers': 'list[str]',\n 'order_number': 'str',\n 'paid_amount': 'str',\n 'payment_number': 'str',\n 'status': 'str',\n 'subscription_numbers': 'list[str]',\n 'subscriptions': 'list[JobResultSubscriptions]'\n }\n\n attribute_map = {\n 'success': 'success',\n 'account_number': 'accountNumber',\n 'credit_memo_numbers': 'creditMemoNumbers',\n 'invoice_numbers': 'invoiceNumbers',\n 'order_number': 'orderNumber',\n 'paid_amount': 'paidAmount',\n 'payment_number': 'paymentNumber',\n 'status': 'status',\n 'subscription_numbers': 'subscriptionNumbers',\n 'subscriptions': 'subscriptions'\n }\n\n def __init__(self, success=None, account_number=None, credit_memo_numbers=None, invoice_numbers=None, order_number=None, paid_amount=None, payment_number=None, status=None, subscription_numbers=None, subscriptions=None): # noqa: E501\n \"\"\"JobResult - a model defined in Swagger\"\"\" # noqa: E501\n\n self._success = None\n self._account_number = None\n self._credit_memo_numbers = None\n self._invoice_numbers = None\n self._order_number = None\n self._paid_amount = None\n self._payment_number = None\n self._status = None\n self._subscription_numbers = None\n self._subscriptions = None\n self.discriminator = None\n\n if success is not None:\n self.success = success\n if account_number is not None:\n self.account_number = account_number\n if credit_memo_numbers is not None:\n self.credit_memo_numbers = credit_memo_numbers\n if invoice_numbers is not None:\n self.invoice_numbers = invoice_numbers\n if order_number is not None:\n self.order_number = order_number\n 
if paid_amount is not None:\n self.paid_amount = paid_amount\n if payment_number is not None:\n self.payment_number = payment_number\n if status is not None:\n self.status = status\n if subscription_numbers is not None:\n self.subscription_numbers = subscription_numbers\n if subscriptions is not None:\n self.subscriptions = subscriptions\n\n @property\n def success(self):\n \"\"\"Gets the success of this JobResult. # noqa: E501\n\n Indicates whether the call succeeded. # noqa: E501\n\n :return: The success of this JobResult. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._success\n\n @success.setter\n def success(self, success):\n \"\"\"Sets the success of this JobResult.\n\n Indicates whether the call succeeded. # noqa: E501\n\n :param success: The success of this JobResult. # noqa: E501\n :type: bool\n \"\"\"\n\n self._success = success\n\n @property\n def account_number(self):\n \"\"\"Gets the account_number of this JobResult. # noqa: E501\n\n The account number for the order. # noqa: E501\n\n :return: The account_number of this JobResult. # noqa: E501\n :rtype: str\n \"\"\"\n return self._account_number\n\n @account_number.setter\n def account_number(self, account_number):\n \"\"\"Sets the account_number of this JobResult.\n\n The account number for the order. # noqa: E501\n\n :param account_number: The account_number of this JobResult. # noqa: E501\n :type: str\n \"\"\"\n\n self._account_number = account_number\n\n @property\n def credit_memo_numbers(self):\n \"\"\"Gets the credit_memo_numbers of this JobResult. # noqa: E501\n\n An array of the credit memo numbers generated in this order request. The credit memo is only available if you have the Avdanced AR Settlement feature enabled. # noqa: E501\n\n :return: The credit_memo_numbers of this JobResult. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._credit_memo_numbers\n\n @credit_memo_numbers.setter\n def credit_memo_numbers(self, credit_memo_numbers):\n \"\"\"Sets the credit_memo_numbers of this JobResult.\n\n An array of the credit memo numbers generated in this order request. The credit memo is only available if you have the Avdanced AR Settlement feature enabled. # noqa: E501\n\n :param credit_memo_numbers: The credit_memo_numbers of this JobResult. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._credit_memo_numbers = credit_memo_numbers\n\n @property\n def invoice_numbers(self):\n \"\"\"Gets the invoice_numbers of this JobResult. # noqa: E501\n\n An array of the invoice numbers generated in this order request. Normally it includes one invoice number only, but can include multiple items when a subscription was tagged as invoice separately. # noqa: E501\n\n :return: The invoice_numbers of this JobResult. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._invoice_numbers\n\n @invoice_numbers.setter\n def invoice_numbers(self, invoice_numbers):\n \"\"\"Sets the invoice_numbers of this JobResult.\n\n An array of the invoice numbers generated in this order request. Normally it includes one invoice number only, but can include multiple items when a subscription was tagged as invoice separately. # noqa: E501\n\n :param invoice_numbers: The invoice_numbers of this JobResult. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._invoice_numbers = invoice_numbers\n\n @property\n def order_number(self):\n \"\"\"Gets the order_number of this JobResult. # noqa: E501\n\n The order number of the order created. # noqa: E501\n\n :return: The order_number of this JobResult. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._order_number\n\n @order_number.setter\n def order_number(self, order_number):\n \"\"\"Sets the order_number of this JobResult.\n\n The order number of the order created. # noqa: E501\n\n :param order_number: The order_number of this JobResult. # noqa: E501\n :type: str\n \"\"\"\n\n self._order_number = order_number\n\n @property\n def paid_amount(self):\n \"\"\"Gets the paid_amount of this JobResult. # noqa: E501\n\n The total amount collected in this order request. # noqa: E501\n\n :return: The paid_amount of this JobResult. # noqa: E501\n :rtype: str\n \"\"\"\n return self._paid_amount\n\n @paid_amount.setter\n def paid_amount(self, paid_amount):\n \"\"\"Sets the paid_amount of this JobResult.\n\n The total amount collected in this order request. # noqa: E501\n\n :param paid_amount: The paid_amount of this JobResult. # noqa: E501\n :type: str\n \"\"\"\n\n self._paid_amount = paid_amount\n\n @property\n def payment_number(self):\n \"\"\"Gets the payment_number of this JobResult. # noqa: E501\n\n The payment number that collected in this order request. # noqa: E501\n\n :return: The payment_number of this JobResult. # noqa: E501\n :rtype: str\n \"\"\"\n return self._payment_number\n\n @payment_number.setter\n def payment_number(self, payment_number):\n \"\"\"Sets the payment_number of this JobResult.\n\n The payment number that collected in this order request. # noqa: E501\n\n :param payment_number: The payment_number of this JobResult. # noqa: E501\n :type: str\n \"\"\"\n\n self._payment_number = payment_number\n\n @property\n def status(self):\n \"\"\"Gets the status of this JobResult. # noqa: E501\n\n Status of the order. `Pending` is only applicable for an order that contains a `CreateSubscription` order action. # noqa: E501\n\n :return: The status of this JobResult. # noqa: E501\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this JobResult.\n\n Status of the order. `Pending` is only applicable for an order that contains a `CreateSubscription` order action. # noqa: E501\n\n :param status: The status of this JobResult. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Completed\", \"Pending\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status\n\n @property\n def subscription_numbers(self):\n \"\"\"Gets the subscription_numbers of this JobResult. # noqa: E501\n\n **Note:** This field is in Zuora REST API version control. Supported minor versions are 222.4 or earlier. To use this field in the method, you must set the `zuora-version` parameter to the minor version number in the request header. Container for the subscription numbers of the subscriptions in an order. # noqa: E501\n\n :return: The subscription_numbers of this JobResult. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._subscription_numbers\n\n @subscription_numbers.setter\n def subscription_numbers(self, subscription_numbers):\n \"\"\"Sets the subscription_numbers of this JobResult.\n\n **Note:** This field is in Zuora REST API version control. Supported minor versions are 222.4 or earlier. To use this field in the method, you must set the `zuora-version` parameter to the minor version number in the request header. Container for the subscription numbers of the subscriptions in an order. 
# noqa: E501\n\n :param subscription_numbers: The subscription_numbers of this JobResult. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._subscription_numbers = subscription_numbers\n\n @property\n def subscriptions(self):\n \"\"\"Gets the subscriptions of this JobResult. # noqa: E501\n\n **Note:** This field is in Zuora REST API version control. Supported minor versions are 223.0 or later. To use this field in the method, you must set the `zuora-version` parameter to the minor version number in the request header. Container for the subscription numbers and statuses in an order. # noqa: E501\n\n :return: The subscriptions of this JobResult. # noqa: E501\n :rtype: list[JobResultSubscriptions]\n \"\"\"\n return self._subscriptions\n\n @subscriptions.setter\n def subscriptions(self, subscriptions):\n \"\"\"Sets the subscriptions of this JobResult.\n\n **Note:** This field is in Zuora REST API version control. Supported minor versions are 223.0 or later. To use this field in the method, you must set the `zuora-version` parameter to the minor version number in the request header. Container for the subscription numbers and statuses in an order. # noqa: E501\n\n :param subscriptions: The subscriptions of this JobResult. # noqa: E501\n :type: list[JobResultSubscriptions]\n \"\"\"\n\n self._subscriptions = subscriptions\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(JobResult, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, JobResult):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"zuora_client/models/job_result.py","file_name":"job_result.py","file_ext":"py","file_size_in_byte":13280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"2883302","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport yaml\nimport asyncio\nimport netdev\n\nINVENTORY_FILE = 'netmap.yaml'\nPARAMS = {'username': 'admin', 'password': 'Juniper', 'device_type': 'juniper_junos'}\nCOMMAND = 'show configuration'\n\n\ndef get_devices_params(file_name=INVENTORY_FILE):\n with open(file_name) as f:\n inventory = yaml.load(f)\n devices_list = []\n fvrrs = []\n vpn_rrs = []\n for region in inventory['regions']:\n fvrrs.extend(inventory['regions'][region]['external_rrs'])\n for device in fvrrs:\n device.update(PARAMS)\n device.update({'host': device['mgmt_ip']})\n del(device['hostname'])\n del(device['model'])\n del(device['lo0_ip'])\n del(device['mgmt_ip'])\n del(device['server'])\n del(device['type'])\n devices_list.extend(fvrrs)\n for data_center in 
inventory['regions'][region]['data_centers']:\n for building in inventory['regions'][region]['data_centers'][data_center]['buildings']:\n for module in inventory['regions'][region]['data_centers'][data_center]['buildings'][building]['modules']:\n if 'vpn_rrs' in inventory['regions'][region]['data_centers'][data_center]['buildings'][building]['modules'][module].keys():\n vpn_rrs.extend(inventory['regions'][region]['data_centers'][data_center]['buildings'][building]['modules'][module]['vpn_rrs'])\n for device in vpn_rrs:\n device.update(PARAMS)\n device.update({'host': device['mgmt_ip']})\n del(device['hostname'])\n del(device['model'])\n del(device['lo0_ip'])\n del(device['mgmt_ip'])\n devices_list.extend(vpn_rrs)\n print(devices_list)\n return devices_list\n\n\nasync def collect_outputs(device_params, command):\n # hostname = device_params.pop('hostname')\n async with netdev.create(**device_params) as connection:\n command_result = await connection.send_command(command)\n return command_result\n\n\ndef main():\n inventory = get_devices_params(INVENTORY_FILE)\n loop = asyncio.get_event_loop()\n tasks = [\n loop.create_task(collect_outputs(device, COMMAND))\n for device in inventory\n ]\n loop.run_until_complete(asyncio.wait(tasks))\n for task in tasks:\n print(task.result())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"NPDESI/async_tests/get_rrs_config.py","file_name":"get_rrs_config.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346455785","text":"from __future__ import print_function\n\nimport sys\nimport os\n\nfrom metal.parser.sygus_parser import SyExp, parse_sexp\nfrom metal.parser.cfg import CFG\n\nfrom metal.common.constants import AND_TYPE, OR_TYPE, XOR_TYPE, NOT_TYPE, VAR_TYPE\n\ndef collect_types_in_preorder(sexp):\n typ = None\n if sexp.app == \"and\":\n typ = AND_TYPE\n elif sexp.app == \"or\":\n typ = OR_TYPE\n elif sexp.app == \"xor\":\n typ = XOR_TYPE\n elif sexp.app == \"not\":\n typ = NOT_TYPE\n else:\n typ = VAR_TYPE\n\n res = [ (sexp.app, typ) ]\n\n for x in sexp.args:\n res.extend( collect_types_in_preorder(x) )\n \n return res\n\n\ndef collect_vars(sexp):\n res = set()\n if sexp.app in [\"and\", \"or\", \"xor\", \"not\"]:\n for x in sexp.args:\n res = res | collect_vars(x)\n else:\n res.add(sexp.app)\n return res\n\nclass SygusInstance(object):\n def __init__(self, s):\n sexp_list = parse_sexp(s)\n self.spec = None\n self.grammar = None\n self.constraint = None\n for s in sexp_list:\n if s.get_app() == \"define-fun\":\n self.spec = s.get_args()[3]\n elif s.get_app() == \"synth-fun\":\n self.grammar = CFG(s.get_args()[3])\n elif s.get_app() == \"constraint\":\n self.constraint = s\n else:\n pass\n\n assert self.spec\n assert self.grammar\n # print(\"sygus instance is created\")\n\n def get_spec(self):\n return self.spec\n\n def get_grammar(self):\n return self.grammar\n \n\nclass SpecTree:\n def __init__(self, sygus_instance):\n \"\"\"\n this class casts the SyExp of a logic spec into list tokens(integers) in order to be fed to encoder\n\n sygus_instance:\n a SygusInstance object containing the logic spec and the grammar\n\n \"\"\"\n\n self.spec = sygus_instance.get_spec()\n self.grammar = sygus_instance.get_grammar()\n self.vars = list( collect_vars(self.spec) )\n\n self.node_seq = collect_types_in_preorder(self.spec) # e.g. 
[('and', AND_TYPE), ('LN29', VAR_TYPE)]\n self.node_type_seq = [t for (n, t) in self.node_seq]\n self.numOf_nodes = len(self.node_type_seq) # int\n self.nodename2ind = dict([(self.node_seq[i][0], i) for i in range(self.numOf_nodes)]) # e.g. {'and':0, 'LN29':1}\n\n # dump all tinytest cases\n self.all_tests = self.dump_all_tests()\n \n def dump_all_tests(self):\n i, n = 0, len(self.vars)\n m = 2 ** n\n Ts = []\n Fs = []\n while i < m:\n # 1. build env\n env = {}\n for k in range(n):\n env[ self.vars[k] ] = i & (1< 0\n # 2. evaluate current config\n res = self.spec.eval_py(env)\n if res:\n Ts.append( env )\n else:\n Fs.append( env )\n\n i += 1\n # print(\"Ts: \", len(Ts), \"Fs: \", len(Fs))\n return (Ts,Fs)\n \n\n\n\n\n\ndef is_tree_complete(ntset, spectree):\n \"\"\"\n helper function to tinytest if a SpecTree contains non-terminals\n\n ntset:\n set of names of non-terminals\n spectree:\n SpecTree obj\n\n return:\n False if contains non-terminal, otherwise True\n\n \"\"\"\n return (spectree.app not in ntset) and all([is_tree_complete(ntset, syexp) for syexp in spectree.args])\n","sub_path":"metal/common/spec_tree.py","file_name":"spec_tree.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"85186423","text":"\n\nfrom xai.brain.wordbase.nouns._dilemma import _DILEMMA\n\n#calss header\nclass _DILEMMAS(_DILEMMA, ):\n\tdef __init__(self,): \n\t\t_DILEMMA.__init__(self)\n\t\tself.name = \"DILEMMAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"dilemma\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_dilemmas.py","file_name":"_dilemmas.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121918740","text":"print('Enter state names with space between two states (first state entered to be considered the start state): -')\nstate_list = list(set(input().split()))\n\nprint('Any repeated state has been discarded.')\n\nprint('Select start state: -')\n\nwhile 1:\n\n start = input()\n\n if start not in state_list:\n print('No such state found. Try again.')\n continue\n\n break\n\nprint('Select final states: -')\n\nwhile 1:\n\n final = list(set(input().split()))\n\n try_flag = 0\n\n for i in final:\n\n if i not in state_list:\n print('A (few) state(s) was/were not identified. Try again')\n try_flag = 1\n break\n\n if try_flag == 1:\n continue\n\n break\n\nprint('Any state repeatedly marked final has been discarded.')\n\n\nprint('Enter alphabet set with space between two alphabets: -')\ntemp_aplha_list = list(set(input().split()))\nalpha_list = []\n\nrep_flag = 0\n\nfor i in temp_aplha_list:\n\n if i in state_list:\n rep_flag = 1\n continue\n\n alpha_list.append(i)\n\nif rep_flag == 1:\n print('There is/are alphabet(s) recognised by same string as that of some states and are hence discarded.')\n\ntrans = {}\n\nfor i in state_list:\n trans[i] = []\n\nfor i in state_list:\n\n print('\\n' + 'Defining TRANSITIONS for state \\'' + i + '\\'' + ' : -' + '\\n')\n\n for j in alpha_list:\n\n print('State transition(s) of \\'' + i + '\\'' + ' on input \\'' + j + '\\'' + \" (Type 'none' for no transition): -\")\n\n while 1:\n\n try_flag = 0\n trans_in = input()\n\n if trans_in == 'none':\n break\n\n to_states = list(set(trans_in.split()))\n\n for k in to_states:\n\n if k not in state_list:\n print('A (few) state(s) was/were not identified. 
Try again')\n try_flag = 1\n break\n\n if try_flag == 1:\n continue\n\n trans[i].append([j, to_states])\n\n break\n\nprint(trans)\n\ncreated_states = []\ncreated_states.append([start])\n\ndfa_trans = []\ndfa_states = []\ndfa_states.append([start])\n\nwhile len(created_states) != 0:\n\n dfa_states_temp = []\n\n for i in alpha_list:\n\n break_flag = 0\n\n dest_states = []\n\n for j in created_states[0]:\n\n for k in trans[j]:\n\n if k[0] == i:\n\n for l in k[1]:\n dest_states.append(l)\n\n dest_states = list(set(dest_states))\n count = len(dest_states)\n\n for j in dfa_states:\n\n cnt = 0\n\n for k in dest_states:\n\n if k in j:\n cnt += 1\n\n if cnt == count:\n dfa_trans.append([created_states[0], i, j])\n break_flag = 1\n break\n\n if break_flag == 1:\n continue\n\n dfa_trans.append([created_states[0], i, dest_states])\n dfa_states_temp.append(dest_states)\n created_states.append(dest_states)\n\n for i in range(len(dfa_states_temp)):\n\n temp = dfa_states_temp.pop(0)\n\n if temp not in dfa_states_temp:\n dfa_states.append(temp)\n\n created_states.pop(0)\n\nfor dfa_transitions in dfa_trans:\n print('State: ' + str(dfa_transitions[0]) + ' X ' + str(dfa_transitions[1]) + ' -> ' + str(dfa_transitions[2]))\n print()\n\n\n\n\n\n\n\n\n","sub_path":"nfa_to_dfa_conversion/algorithm2.py","file_name":"algorithm2.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114494473","text":"import copy\nimport lxml.cssselect\nimport lxml.etree\nimport lxml.html.clean\nimport itertools\n\nbad_tags = [\n 'head',\n 'header',\n 'footer',\n 'nav',\n #'form',\n 'svg',\n 'figure',\n 'aside',\n ]\nxpath_rm_nodes = '|'.join('(descendant-or-self::'+tag+')' for tag in bad_tags)\ncxpath_rm_nodes = lxml.etree.XPath(xpath_rm_nodes)\n\n# FIXME:\n# Should we really remove everything within forms?\n# Some websites wrap the entire page within a form.\n# file:///home/user/proj/metahtml/tests/.cache/https___education.ky.gov_443_districts_SHS_Pages_2019-Novel-Coronavirus.aspx992feb89/2020-07-17\n# file:///home/user/proj/metahtml/tests/.cache/https___elperuano.pe_noticia-corea-del-norte-modernizo-sus-misiles-79420.aspxc3178290/2020-05-30\n# file:///home/user/proj/metahtml/tests/.cache/https___www.passeportsante.net_fr_Maux_Problemes_Fiche.aspx_doc=maladie-charbon01924e11/2020-07-06\n\ncxpath_p = lxml.etree.XPath('descendant-or-self::p')\ncxpath_main = lxml.etree.XPath('descendant-or-self::main')\ncxpath_section = lxml.etree.XPath('descendant-or-self::section')\ncxpath_article = lxml.etree.XPath('descendant-or-self::article')\ncxpath_div = lxml.etree.XPath('descendant-or-self::div')\ncxpath_script_style_comments = lxml.etree.XPath('(descendant-or-self::script)|(descendant-or-self::style)|(descendant-or-self::comment())')\n\n# FIXME:\n# currently, this removes all
tags;\n# for most webpages, content is contained within a <p> tag within a <div> tag,\n# but if no <p> is present, then the content will be deleted\n# FIXME: uses <br> to separate paragraphs\n# some webpages don't store content within <p>
    tags;\n# file:///home/user/proj/metahtml/tests/.cache/https___abc13.com_politics_president-trump-meets-with-kim-jong-un-at-summit_5159138_1f916455/2020-06-23\n# file:///home/user/proj/metahtml/tests/.cache/https___abc7chicago.com_news_kim-jong-un-isnt-the-first-dictator-to-go-missing_346174_274f3fef/2020-06-23\n# file:///home/user/proj/metahtml/tests/.cache/https___abc7news.com_donald-trump-meeting-with-kim-jong-un-and-summit_5122815_fc051a09/2020-06-23\n# file:///home/user/proj/metahtml/tests/.cache/https___dzuturum.blogspot.com_2015_07_chinese-fan-has-plastic-surgery-to-look.html4295804b/2020-06-24\n# file:///home/user/proj/metahtml/tests/.cache/https___economictimes.indiatimes.com_magazines_panache_when-kim-jong-uns-influential-aunt-turned-to-dr-d-for-advice_articleshow_74168309.cms3aac81b8/2020-06-24\n# file:///home/user/proj/metahtml/tests/.cache/https___en.antaranews.com_news_140151_thermal-scanner-at-bali-airport-to-screen-wuhan-corona-viruse3d53729/2020-07-17\n# FIXME: contained within

    \n# file:///home/user/proj/metahtml/tests/.cache/https___www.spicee.com_fr_program_irak-les-mordeuses-de-daesh-1163ec2036e8/2020-07-05\n# file:///home/user/proj/metahtml/tests/.cache/https___angelfalques.blogspot.com_2020_03_kim-jong-un-responds-to-coronavirus.html1cb7abc9/2020-06-23\n# file:///home/user/proj/metahtml/tests/.cache/https___economictimes.indiatimes.com_news_international_world-news_kim-jong-un-here-are-some-interesting-facts-about-north-koreas-absolute-master_articleshow_64429914.cmsc1439ac9/2020-06-24\n# file:///home/user/proj/metahtml/tests/.cache/https___edition.cnn.com_2019_12_04_asia_north-korea-christmas-gift-kim-jong-un-intl-hnk_index.html19b38b76/2020-05-30\n#\n# FIXME: still broken\n# file:///home/user/proj/metahtml/tests/.cache/https___www.tishineh.com_touritem_1258_Sassani-fire-Temple-of-Natanzaf31b3d8/2020-07-01\n# file:///home/user/proj/metahtml/tests/.cache/https___www.tishineh.com_touritem_1258-134_%D8%A2%D8%AA%D8%B4%DA%A9%D8%AF%D9%87-%D8%B3%D8%A7%D8%B3%D8%A7%D9%86%DB%8C-%D9%86%D8%B7%D9%86%D8%B2e13d79d0/2020-07-01\n# file:///home/user/proj/metahtml/tests/.cache/https___www.timetoast.com_timelines_historique-bioterrorismea36cc622/2020-07-14\n# file:///home/user/proj/metahtml/tests/.cache/https___www.thereligionofpeace.com_pages_quran_violence.aspx82179f7a/2020-07-05\n# file:///home/user/proj/metahtml/tests/.cache/https___www.statista.com_statistics_294305_iran-unemployment-rate_07d66fdf/2020-07-03\n\ntags_section = ['address','article','aside','figcaption','figure','main','section']\ntags_inline = ['a','span']\ntags_list = ['ul', 'ol', 'li', 'dl', 'dt', 'dd']\ntags_block = ['p','blockquote']\ntags_header = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']\ntags_code = ['tt', 'pre', 'code']\ntags_table = ['table','tr','td','th','colgroup','col','tbody','thead','tfoot','caption','tbody']\narticle_cleaner = lxml.html.clean.Cleaner()\narticle_cleaner.javascript = True\narticle_cleaner.style = True\narticle_cleaner.remove_unknown_tags = False\narticle_cleaner.safe_attrs = ['href','rel', 'src']\narticle_cleaner.allow_tags = (\n tags_section + \n tags_inline +\n tags_block + \n tags_header + \n tags_code +\n tags_list +\n tags_table +\n ['br'] # FIXME: replace
<br> with <p>
    ?\n )\n\ndef simple_extractor(parser):\n doc = parser.doc\n url_parsed = parser.url_parsed\n #url_parsed = urlparse(url)\n confidence = 'lo'\n\n # delete nodes that never contain content\n for node in cxpath_script_style_comments(doc):\n remove_node_keep_tail(node)\n\n # the

    tag indicates the main body of html5 text;\n # if present, restrict our search just to this tag\n mains = list(cxpath_main(doc))\n if len(mains)==1:\n doc = mains[0]\n confidence = 'hi'\n\n '''\n FIXME:\n should we include this?\n # the
    tag indicates the section body of html5 text;\n # if present, restrict our search just to this tag\n sections = list(cxpath_section(doc))\n if len(sections)==1:\n doc = sections[0]\n confidence = 'hi'\n '''\n\n # the
    tag indicates that an article is present;\n # unlike the main tag, there is commonly more than 1
    tag per page;\n # these multiple
    tags occur on category pages that link to many articles,\n # and in \"related pages\" sections;\n # in both of these cases, only small snippets of the actual article are in the html;;\n # in the former case, we want to extract nothing;\n # in the latter case, we want to extract only the large main article;\n # we achieve both goals by using the article tag only if its length is greater than twice \n # the average of all other article tags\n # FIXME:\n # the twice heuristic hasn't been tested and other numbers might reduce false positives/negatives\n # FIXME:\n # these checks should be in the article_type.py file as well\n articles = list(cxpath_article(doc))\n if len(articles)==1:\n # some webpages use the
    tag only around related articles links,\n # and not around the main article;\n # counting for

    tags ensures we don't miss the main article on these pages\n article = articles[0]\n article_text = text_content(article)\n if len(cxpath_p(article))>1 or len(article_text)>1000:\n doc = articles[0]\n confidence = 'hi'\n elif len(articles)>1:\n articles_lens = []\n for article in articles:\n articles_lens.append(len(text_content(article)))\n largest = max(articles_lens)\n mean_minus_largest = (sum(articles_lens) - largest) / (len(articles_lens) - 1)\n if largest > mean_minus_largest*2:\n largest_index = articles_lens.index(largest)\n doc = articles[largest_index]\n confidence = 'hi'\n\n # remove nodes that are semantically unrelated to the article's content;\n # FIXME:\n # should we be storing the figure/aside tags separately for later processing?\n for node in list(cxpath_rm_nodes(doc)):\n node.getparent().remove(node)\n\n # change

    tags to

    tags if they don't contain block elements;\n # many webpages do not use

    tags at all and only use

    tags,\n # and this conversion lets us capture the text on these pages\n # FIXME:\n # should we only do this if there are no/few

    tags?\n for div in doc.iterdescendants('div'):\n #for div in list(cxpath_div(doc)):\n # compute has_block\n has_block = False\n for descendant in div.iterdescendants():\n if descendant.tag in tags_section + tags_list + tags_block + ['div']:\n next_br = descendant.getnext() is not None and descendant.getnext().tag == 'br'\n prev_br = descendant.getprevious() is not None and descendant.getprevious().tag == 'br'\n if not (next_br or prev_br):\n has_block = True\n\n # compute div_text\n div_text = ''\n div_text_threshold = 20\n if div.text is not None:\n div_text += div.text.strip()\n for child in div.getchildren():\n if child.tag not in ['div']+tags_block+tags_section+tags_list:\n div_text += ' ' + text_content(child)\n else:\n child_tail = ''\n if child.tail is not None:\n child_tail = child.tail.strip()\n div_text += child_tail\n if len(div_text)>div_text_threshold:\n break\n\n # process div \n if len(div_text)>div_text_threshold or not has_block:\n div.tag = 'p'\n # replace
    tags with a series of

    tags\n # \n #

    a
    bcd
    e
    \n #\n # becomes\n #\n #

    a

    \n #

    bcd

    \n #

    e

    \n children = list(div.getchildren())\n for child in children:\n div.remove(child)\n for child in children:\n if child.tag in ['br']: #,'div']+tags_block+tags_section+tags_list:\n new_div = lxml.etree.Element('p')\n new_div.set('created_from_div_br','true')\n new_div.text = child.tail\n div.addnext(new_div)\n div = new_div\n else:\n div.append(child)\n\n # clean the html to remove all unwanted tags\n # FIXME: breaks some webpages for unknown reason\n # file:///home/user/proj/metahtml/tests/.cache/https___www.public.fr_News_Lionel-Messi-Menace-par-Daesh-1444943dd2edccb/2020-07-05.diff.html\n # FIXME: table gets converted into

    \n # file:///home/user/proj/metahtml/tests/.cache/https___www.uppersia.com_Iran-hotels_natanz-hotels.htmle84d1498/2020-07-01.diff.html\n doc = article_cleaner.clean_html(doc)\n\n # recursively remove empty nodes\n def go(doc):\n children = list(doc.getchildren())\n for child in children:\n child = go(child)\n has_text = doc.text is not None and doc.text.strip() != ''\n if not has_text and len(doc)==0:\n remove_node_keep_tail(doc)\n #doc.getparent().remove(doc)\n return doc\n doc = go(doc)\n\n # lists that don't come after at least 2

    tags are probably header info\n # FIXME: removes too much\n # file:///home/user/proj/metahtml/tests/.cache/https___www.businessinsider.com_trump-deploys-hospital-ship-mercy-to-los-angeles-2020-344984c01/2020-06-16.diff.html\n # file:///home/user/proj/metahtml/tests/.cache/https___www.sunsigns.org_famousbirthdays_profile_ayatollah-ruhollah-khomeini_b65082cc/2020-07-03.diff.html\n allow_lists = False\n nodes_to_remove = []\n for node in doc.iter():\n if not allow_lists and node.tag in tags_list:\n nodes_to_remove.append(node)\n if node.tag=='h1':\n allow_lists = True\n if node.tag=='p': # and node.getprevious() is not None and node.getprevious().tag=='p':\n text = text_content(node)\n if len(text)<20 or '©' in text or '.' not in text:\n if not allow_lists:\n nodes_to_remove.append(node)\n else:\n if not any(block in [ node.tag for node in node.iterancestors() ] for block in tags_list):\n allow_lists = True\n for node in nodes_to_remove:\n if node.getparent() is not None:\n node.getparent().remove(node)\n \n # lists/headers at the end of the page without following

    tags are probably footer info\n # FIXME: removes too little\n # file:///home/user/proj/metahtml/tests/.cache/https___arabic.sputniknews.com_world_202004081045105540-%25D8%25A7%25D9%2584%25D8%25B5%25D9%258A%25D9%2586-%25D8%25AA%25D8%25B3%25D8%25AC%25D9%2584-62-%25D8%25A5%25D8%25B5%25D8%25A7%25D8%25A8%25D8%25A9-%25D8%25AC%25D8%25AF%25D9%258A%25D8%25AF%25D8%25A9-%25D8%25A8%25D9%2581%25D9%258A%255c44ca74/\n # file:///home/user/proj/metahtml/tests/.cache/http___21stcenturywire.com_2018_05_07_gareth-porter-did-john-bolton-leak-intelligence-to-sabotage-a-trump-kim-deal_02bc34fa/2020-06-02.diff.html\n # file:///home/user/proj/metahtml/tests/.cache/https___allthingsnuclear.org_dwright_north-koreas-latest-missile-test7b6fb667/2020-05-30.diff.html\n # file:///home/user/proj/metahtml/tests/.cache/https___www.saphirnews.com_Que-dit-la-presse-arabe-sur-Daesh_a20186.html3c616d4a/2020-07-14.metahtml.html\n # file:///home/user/proj/metahtml/tests/.cache/https___www.tomsguide.fr_impact-mobilisation-internationale-contre-le-cyberterrorisme_1694d90c/2020-07-05.diff.html\n # file:///home/user/proj/metahtml/tests/.cache/https___www.semana.com_deportes_articulo_en-grave-peligro-hermetismo-sobre-estado-de-salud-de-kim-jong-un-tras-cirugia_664851c8fcfae6/2020-05-31.metahtml.html\n # FIXME: removes too much\n # file:///home/user/proj/metahtml/tests/.cache/https___www.thefamouspeople.com_profiles_ayatollah-khomeini-12.phpb8cbce93/2020-07-07.diff.html\n # file:///home/user/proj/metahtml/tests/.cache/https___www.sunsigns.org_famousbirthdays_profile_ayatollah-ruhollah-khomeini_b65082cc/2020-07-03.diff.html\n # file:///home/user/proj/metahtml/tests/.cache/https___english.khabarhub.com_2019_11_20114_24c28975/\n # file:///home/user/proj/metahtml/tests/.cache/https___www.police-nationale.interieur.gouv.fr_Organisation_Entites-rattachees-directement-au-DGPN_UCLATd58cd76b/2020-07-14.diff.html\n # file:///home/user/proj/metahtml/tests/.cache/https___www.thefamouspeople.com_profiles_ayatollah-khomeini-12.phpb8cbce93/2020-07-07\n # (the html is broken, no tags) file:///home/user/proj/metahtml/tests/.cache/https___www.scientificamerican.com_article_how-does-chlorine-added-t_cdc79060/2020-06-12\n # file:///home/user/proj/metahtml/tests/.cache/https___www.semana.com_deportes_articulo_en-grave-peligro-hermetismo-sobre-estado-de-salud-de-kim-jong-un-tras-cirugia_664851c8fcfae6/2020-05-31.metahtml.html\n allow_lists = False\n nodes_to_remove = []\n for node in reversed(list(doc.iter())):\n if not allow_lists and node.tag in tags_list + tags_header:\n nodes_to_remove.append(node)\n if node.tag=='p':\n text = text_content(node)\n if len(text)<20 or '©' in text or '.' not in text:\n if not allow_lists:\n nodes_to_remove.append(node)\n else:\n if not any(block in [ node.tag for node in node.iterancestors() ] for block in tags_list):\n allow_lists = True\n for node in nodes_to_remove:\n if node.getparent() is not None:\n node.getparent().remove(node)\n\n # remove content not contained within a

    tag\n nodes_to_remove = []\n for node in doc.iter():\n if node.tag in tags_section + ['div']:\n node.text = None\n node.tail = None\n if node.tag in tags_inline:\n if not any(block in [ node.tag for node in itertools.chain(node.iterancestors(),node.iterdescendants()) ] for block in tags_block+tags_list):\n nodes_to_remove.append(node)\n\n if node.tail:\n if not any(block in [ node.tag for node in node.iterancestors() ] for block in tags_block+tags_list):\n node.tail = None\n # FIXME:\n # should we remove highlink density tags?\n #if node.tag=='p' and is_highlink_density(node):\n #nodes_to_remove.append(node)\n for node in nodes_to_remove:\n if node.getparent() is not None:\n node.getparent().remove(node)\n \n # convert the parsed lxml document back into html\n strip_excess_whitespace(doc)\n html = lxml.etree.tostring(doc,method='html',pretty_print=True).decode('utf-8')\n text = lxml_to_text(doc)\n\n return {\n 'value' : {\n 'text' : text,\n 'html' : html,\n },\n 'confidence' : confidence,\n 'pattern' : 'simple_extractor',\n }\n\n################################################################################\n\n# FIXME: remove all calls to this function?\ndef text_content(node):\n try:\n return node.text_content()\n except ValueError:\n return ''\n txts = [i for i in node.itertext()]\n return innerTrim(' '.join(txts).strip())\n\nimport re\nTABSSPACE = re.compile(r'[\\s\\t]+')\ndef innerTrim(value):\n if isinstance(value, str):\n # remove tab and white space\n value = re.sub(TABSSPACE, ' ', value)\n value = ''.join(value.splitlines())\n return value.strip()\n return ''\n\ndef remove_node_keep_tail(node):\n '''\n The simplest way to remove a node from the lxml parse tree is to directly remove it from the parent with the code:\n\n > node.getparent().remove(node)\n\n This can have unexpected behavior when tags have tail text,\n as the tail text will also be removed.\n This function removes the html tag without removing the tail text.\n\n For details on tail text, see: https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.Element.tail\n '''\n parent = node.getparent()\n if parent is not None:\n if node.tail:\n prev = node.getprevious()\n if prev is None:\n if not parent.text:\n parent.text = ''\n parent.text += ' ' + node.tail\n else:\n if not prev.tail:\n prev.tail = ''\n prev.tail += ' ' + node.tail\n node.clear()\n parent.remove(node)\n\n\n################################################################################\n\n\n_RE_WHITESPACE = re.compile(r'\\s+')\n_RE_SPACES = re.compile(r' +')\n_RE_NEWLINE = re.compile(r'\\s*\\n\\n\\s*')\n\ndef html_to_text(html):\n r'''\n This function converts a string containing an html document into a plaintext string.\n It's primary purpose is to serve as a test case for lxml_to_text.\n\n >>> html_to_text('test')\n 'test'\n\n >>> html_to_text('this is \\n a test')\n 'this is a test'\n\n >>> html_to_text(\"

<p>This is a paragraph with fancy text.</p><p>This is another paragraph.</p>
    \")\n 'This is a paragraph with fancy text.\\n\\nThis is another paragraph.'\n '''\n doc = lxml.html.fromstring(html)\n return lxml_to_text(doc)\n\n\ndef lxml_to_text(doc):\n '''\n This function converts an lxml parsed html document into a plain text string.\n A small amount of markdown is used for formatting headers/lists,\n but otherwise html tags are ignored.\n\n NOTE:\n This function doesn't support the full markdown syntax because\n this would introduce complications for articles that already use * and _ for other uses,\n and it would add clutter from links, images, and tables that might hurt downstream tasks.\n '''\n texts = []\n tags = ['p','h1','h2','h3','h4','h5','h6','li','table','br','ul','ol']\n\n def go(node):\n # FIXME:\n # instead of using the hX tag number,\n # we should use the nesting level of the h tags\n if node.tag[0]=='h':\n texts.append('#'*int(node.tag[1])+' ')\n if node.text:\n texts.append(_RE_WHITESPACE.sub(' ', node.text))\n for child in node.getchildren():\n # FIXME:\n # this code doesn't account for nested lists,\n # and it doesn't properly number lists\n if node.tag=='ul' and child.tag=='li':\n texts.append(' * ')\n if node.tag=='ol' and child.tag=='li':\n texts.append(' 1. ')\n go(child)\n if node.tail:\n texts.append(_RE_WHITESPACE.sub(' ', node.tail))\n if node.tag in tags:\n texts.append('\\n\\n')\n\n go(doc)\n\n text = ''.join(texts).strip()\n text = _RE_NEWLINE.sub('\\n\\n', text)\n text = _RE_SPACES.sub(' ', text)\n return text\n\n\ndef strip_excess_whitespace(doc):\n '''\n This function removes extra whitespace from an lxml document\n '''\n def go(node):\n if node.text:\n node.text = _RE_WHITESPACE.sub(' ', node.text)\n for child in node.getchildren():\n go(child)\n if node.tail:\n node.tail = _RE_WHITESPACE.sub(' ', node.tail)\n go(doc)\n","sub_path":"metahtml/property/content/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":20784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583376367","text":"\"\"\"\nThe group classes\n\n\"\"\"\n\nimport numpy as np\nfrom collections import defaultdict, OrderedDict\nfrom math import sqrt\n\nfrom . import nationals as nat\nfrom . 
import config as cfg\n\n# pylint: disable=E1101\n\n\n# ==============================================================================\n# Utility Functions\n# ==============================================================================\n\n\ndef test_student(student, filters):\n \"\"\"\n Checks whether a student should be included or not, based on the filters\n \"\"\"\n\n if filters is None:\n return True\n\n if not isinstance(filters, list):\n filters = [filters]\n\n return all([f(student) for f in filters])\n\n\ndef mean(items):\n \"\"\"\n Returns the average of a set of items, ignoring any None values\n \"\"\"\n\n values = [x for x in items if x is not None]\n if len(values) == 0:\n return None\n else:\n return np.mean(values)\n\n\n# ==============================================================================\n# Common Filters\n# ==============================================================================\n\n\ndef yeargroup(years):\n \"\"\"\n Returns a function to test whether a student is in a particular yeargroup.\n \"\"\"\n\n if not isinstance(years, list):\n years = [years]\n\n def in_year(student):\n \"\"\"Tests whether a student is in a yeargroup\"\"\"\n return student.pastoral.year in years\n\n return in_year\n\n\ndef pupilpremium(prem=True):\n \"\"\"Returns a function to test whether a student is pupil premium or not\"\"\"\n\n def is_pp(student):\n \"\"\" Tests whether a student is pupil premium or not\"\"\"\n return student.pastoral.pp == prem\n\n return is_pp\n\n\ndef specialed(sen=True):\n \"\"\"\n Returns a function to test whether a student has SEN or not. Assumes\n boolean if SEN is not S, A, P or N.\n \"\"\"\n\n def is_sen(student):\n \"\"\"Tests whether a student is in the specified SEN group\"\"\"\n\n if isinstance(sen, str):\n return student.pastoral.sen in sen\n elif sen:\n return student.pastoral.sen in 'SAP'\n else:\n return student.pastoral.sen == 'N'\n\n return is_sen\n\n\ndef studying(subject, basis, date=None):\n \"\"\"\n Returns a function to test whether a student is studying a subject.\n \"\"\"\n\n def is_studying(student):\n \"\"\"Tests whether the student is studying a particular subject\"\"\"\n if date is None:\n return subject in student.all_courses(basis)\n else:\n return (subject in student.all_courses(basis) and\n student.classname(subject, date) is not None)\n\n return is_studying\n\n\ndef hasupn(upns):\n \"\"\"\n Returns a function to test whether a student is in a list of upns.\n \"\"\"\n\n def has_upn(student):\n \"\"\"Tests whether the student has upn in a particular list.\"\"\"\n\n return student.upn in upns\n\n return has_upn\n\n\ndef exam_after(subject, date):\n \"\"\"\n Returns a function to test whether a student has sat an exam after a\n particular date.\n \"\"\"\n\n def sat_exam_after(student):\n \"\"\"Tests whether the student sat their exam after a particular date.\"\"\"\n\n return student.exam_after(subject, date)\n\n return sat_exam_after\n\n# ==============================================================================\n# Base Class\n# ==============================================================================\n\n\nclass Group:\n \"\"\"\n Base group class - subclasses should be used instead\n \"\"\"\n\n def __init__(self, name, short, students, filters):\n\n self.name = name\n self.shortname = short\n self.students = [students[s] for s in students if\n test_student(students[s], filters)]\n\n def __str__(self):\n \"\"\"\n Prints the group\n \"\"\"\n\n output = '='*30 + '\\n' + self.name + '\\n' + '='*30\n output += 
'\\n'.join(['{0}: {1}'.format(s.upn, s.name) for s\n in self.students])\n return output\n\n def __len__(self):\n \"\"\"Returns the size of the group - i.e. how many students it has.\"\"\"\n\n return len(self.students)\n\n def subgroup(self, name, short, filters):\n \"\"\"\n Returns a new group based upon the first with additional filters.\n \"\"\"\n\n students = OrderedDict({})\n for student in self.students:\n students[student.upn] = student\n return self.__class__(name, short, students, filters)\n\n def all_grades(self, level, basis, cumulative=False):\n \"\"\"\n Calculates the distribution of grades achieved in all subjects at a\n particular level.\n \"\"\"\n\n all_grades = defaultdict(int)\n for subject in cfg.SUBJECTS:\n if cfg.SUBJECTS[subject]['Level'] == level:\n grades = self.grade_distribution(subject, basis, cumulative)\n for grade in grades:\n all_grades[grade] += grades[grade]\n\n return all_grades\n\n def grade_distribution(self, subject, basis, cumulative=False):\n \"\"\"\n Calculates the distribution of grades achieved by students in a\n particular subject\n \"\"\"\n\n level = cfg.SUBJECTS[subject]['Level']\n grades = {g: 0 for g in cfg.list_grades(level)}\n for student in self.students:\n for grade in grades:\n if student.achieved_grade(subject, basis, grade,\n not cumulative):\n grades[grade] += 1\n\n return grades\n\n def subject_passrate(self, subject, basis, cumulative=False, full=False):\n \"\"\"\n Calculates the % of students achieving each grade (or better if\n cumulative). If full, calculates %'s based on the full cohort size.\n \"\"\"\n\n grades = self.grade_distribution(subject, basis, cumulative)\n if full:\n total = len(self.students)\n elif cumulative:\n total = max([grades[g] for g in grades])\n else:\n total = sum([grades[g] for g in grades])\n\n if total == 0:\n return {}\n else:\n return {g: grades[g]/total for g in grades}\n\n def all_passrate(self, level, basis, cumulative=False):\n \"\"\"\n Calculates the % passrates for all qualifications taken at a particular\n level.\n \"\"\"\n\n subjects = [s for s in cfg.SUBJECTS if\n cfg.SUBJECTS[s]['Level'] == level]\n all_grades = defaultdict(int)\n for subject in subjects:\n grades = self.grade_distribution(subject, basis, cumulative)\n for grade in grades:\n all_grades[grade] += grades[grade]\n\n if cumulative:\n total = max([all_grades[g] for g in all_grades])\n else:\n total = sum([all_grades[g] for g in all_grades])\n\n if total == 0:\n return {}\n else:\n return {g: all_grades[g]/total for g in all_grades}\n\n def average_points(self, subject, basis, lookup='Points'):\n \"\"\"\n Calculates the average points score achieved by students in a\n particular subject.\n \"\"\"\n\n return mean([s.points(subject, basis, lookup) for s in self.students])\n\n def achieved_grade(self, subject, basis, grade, exact=False, nums=False):\n \"\"\"\n Calculates the percentage of students who have achieved a particular\n grade or better in a particular subject.\n \"\"\"\n\n grades = [s.achieved_grade(subject, basis, grade, exact)\n for s in self.students]\n if nums:\n return sum([g for g in grades if g is not None])\n else:\n return mean(grades)\n\n def achieved_points(self, subject, basis, points, exact=False, nums=False):\n \"\"\"\n Calculates the percentage of students who achieved a particular grade\n of better in a particular subject.\n \"\"\"\n\n all_points = [s.points(subject, basis) for s in self.students if\n s.points(subject, basis) is not None]\n\n if len(all_points) == 0:\n return None\n\n if nums and exact:\n 
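# (Worked example of the subject_passrate arithmetic above, with\n# hypothetical cumulative counts: grades {'A': 5, 'B': 12, 'C': 20} and\n# cumulative=True use total = max(counts) = 20, giving passrates\n# {'A': 0.25, 'B': 0.60, 'C': 1.00}.)\n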
return all_points.count(points)\n elif nums:\n return len([p for p in all_points if p >= points])\n elif exact:\n return all_points.count(points) / len(all_points)\n else:\n return (len([p for p in all_points if p >= points]) /\n len(all_points))\n\n def ks2_aps(self):\n \"\"\"\n Calculates the average KS2 points score of the students.\n \"\"\"\n\n return mean([s.prior.ks2['APS'] for s in self.students])\n\n def ks2_band(self, band):\n \"\"\"\n Calculates the percentage of students in a particular KS2 band.\n \"\"\"\n\n return mean([1 if s.prior.ks2['Band'] == band else 0 for s\n in self.students])\n\n def cohort_size(self, subject, basis):\n \"\"\"\n Calculates the number of students who are studying a particular course\n \"\"\"\n\n return len([s for s in self.students if subject in\n s.all_courses(basis)])\n\n def group_size(self, filters=None, percentage=False):\n \"\"\"\n Calculates the number of students in the group, who meet the criteria\n given by the filters.\n \"\"\"\n\n number = sum([1 if test_student(s, filters) else 0 for s in\n self.students])\n\n if percentage:\n return number / len(self.students)\n else:\n return number\n\n def exam_accuracy(self, subject, basis, lookup='Points'):\n \"\"\"\n Calculates the error in the grades predicted.\n Returns % correct, average error and average absolute error\n \"\"\"\n\n error = []\n for student in self.students:\n exam = student.points(subject, 'x', lookup)\n predicted = student.points(subject, basis.replace('x', ''), lookup)\n try:\n error.append(exam - predicted)\n except TypeError:\n pass\n\n if len(error) == 0:\n return 0, 0, 0\n else:\n return (np.mean([1 if e == 0 else 0 for e in error]),\n np.mean(error), np.mean([abs(e) for e in error]))\n\n\n# =============================================================================\n# KS4 Group\n# =============================================================================\n\nclass KS4Group(Group):\n \"\"\"\n Provides analysis of the performance measure for a KS4 group\n \"\"\"\n\n def basics(self, basis):\n \"\"\"\n Calculates the percentage of students achieving the basics measure,\n given as a decimal.\n \"\"\"\n\n return mean([s.basics(basis) for s in self.students])\n\n def l2_threshold(self, basis, em=True, passes=5):\n \"\"\"\n Calculates the percentage of students achieving 5 A*-C grades,\n including maths and english, given as a decimal.\n \"\"\"\n\n if em:\n return mean([s.basics(basis) * (s.passes('L2', basis) >= passes)\n for s in self.students])\n else:\n return mean([(s.passes('L2', basis) >= passes)\n for s in self.students])\n\n def l2_threshold_expected(self):\n \"\"\"\n Calculates the expected percentage of students achieving 5 A*-C grades,\n including English and Maths, based on students prior attainment scores.\n \"\"\"\n\n priors = [s.prior.ks2['Av'].sublevel for s in self.students]\n return nat.gcse_5ac_expected(priors)\n\n def l1_threshold(self, basis, passes=5):\n \"\"\"\n Calculates the percentage of students achieving 5 A*-G grades,\n given as a decimal.\n \"\"\"\n\n return mean([s.passes('L1', basis) >= passes for s in self.students])\n\n def attainment_expected(self, subject, exact=False, nums=False):\n \"\"\"\n Calculates the number of students expected to achieved a particular\n grade, based on GCSE Subject TMs.\n \"\"\"\n\n prior_subj = cfg.SUBJECTS[subject]['KS2_Prior']\n priors = [s.prior.ks2[prior_subj].sublevel for s in self.students]\n return nat.gcse_expected_grades(subject, priors, exact, nums)\n\n def progress(self, subject, basis, num_levels, 
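# (Worked example for exam_accuracy above: hypothetical per-student errors\n# of exam minus prediction [0, 1, -1, 0] give 0.5 exactly right, a mean\n# error of 0.0 and a mean absolute error of 0.5.)\n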
full=False):\n \"\"\"\n Calculates the percentage of students achieving 'levels' levels of\n progress in a particular subject.\n \"\"\"\n\n return mean([s.made_progress(subject, basis, num_levels, full)\n for s in self.students])\n\n def progress_expected(self, subject, num_levels):\n \"\"\"\n Calculates the percentage of students expected to achieve num_levels\n of progress, based on their prior attainment.\n \"\"\"\n\n prior_subj = cfg.SUBJECTS[subject]['KS2_Prior']\n priors = [s.prior.ks2[prior_subj].sublevel for s in self.students]\n return nat.gcse_expected_progress(subject, priors, num_levels)\n\n def entered_ebacc(self, basis, num=False):\n \"\"\"\n Calculates the number/percentage of students entering the EBacc.\n \"\"\"\n\n entered = [s.entered_ebacc(basis) for s in self.students]\n if num:\n return sum(entered)\n else:\n return mean(entered)\n\n def ebacc(self, basis):\n \"\"\"\n Calculates the percentage of students passing the Ebacc\n \"\"\"\n\n return mean([s.ebacc(basis) for s in self.students])\n\n def ebacc_va(self, area, basis):\n \"\"\"\n Calculates the value added score and confidence interval for an\n EBacc subject area.\n \"\"\"\n\n score = mean([s.ebacc_va(area, basis) for s in self.students])\n deviation = nat.EBACC_SD[area]\n interval = 1.96 * deviation / sqrt(len(self.students))\n return score, interval\n\n def capped8(self, basis, gcse_only=False, em=True):\n \"\"\"\n Calculates the average capped 8 points score per student.\n \"\"\"\n\n return mean([s.capped8(basis, gcse_only, em) for s in self.students])\n\n def capped8_va(self, basis):\n \"\"\"\n Calculates the capped 8 VA, and confidence interval for the group.\n \"\"\"\n\n if len(self.students) == 0:\n return None, None\n\n score = mean([s.capped8_va(basis) for s in self.students])\n deviation = nat.EBACC_SD['Best8']\n interval = 1.96 * deviation / sqrt(len(self.students))\n return score, interval\n\n def subject_va(self, subject, basis):\n \"\"\"\n Calculates the average points score\n \"\"\"\n\n return mean([s.value_added(subject, basis) for s in self.students])\n\n def attainment8(self, basis, start=0, end=9):\n \"\"\"\n Calculates the average attainment 8 score.\n \"\"\"\n\n if start == 0 and end == 9:\n return mean([s.attainment8(basis)[0] for s in self.students])\n\n else:\n points = [[x[2] for x in s.attainment8(basis)[1][start:end+1]]\n for s in self.students]\n return mean([sum(x) for x in points])\n\n def attainment8_entries(self, basis, start=0, end=9):\n \"\"\"\n Calculates the average number of entries in the attainment 8 basket.\n Start and End figures allow selection of basket section:\n 0 - 1 : Maths\n 2 - 3 : English\n 4 - 6 : EBacc\n 7 - 9 : Others\n \"\"\"\n\n return mean([s.attainment8_entries(basis, start, end) for\n s in self.students])\n\n def attainment8_full_basket(self, basis, start=0, end=9):\n \"\"\"Calculates % of students with all slots filled.\"\"\"\n\n exp = end - start + 1\n return mean([1 if s.attainment8_entries(basis, start, end) == exp\n else 0 for s in self.students])\n\n def progress8(self, basis):\n \"\"\"Calculates the average progress 8 figure for a student.\"\"\"\n\n score = mean([s.progress8(basis) for s in self.students])\n conf_int = nat.PROGRESS8_SD * 1.96 / sqrt(len(self))\n return score, conf_int\n\n# =============================================================================\n# KS5 Group\n# =============================================================================\n\n\nclass KS5Group(Group):\n \"\"\"\n Provides analysis of the performance measure for a 
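# (Note on the confidence intervals reported by ebacc_va, capped8_va and\n# progress8 above: each is the normal-approximation 95% interval\n# 1.96 * sd / sqrt(n); with a hypothetical sd of 10 and a cohort of 100\n# students the half-width is 1.96 * 10 / 10 = 1.96 points.)\n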
KS5 group\n \"\"\"\n\n def num_students(self, area, basis):\n \"\"\"Calculates the number of students in a particular area.\"\"\"\n\n return sum([1 if s.all_entries(area, basis) > 0 else 0 for s\n in self.students])\n\n def fte_students(self, area, basis):\n \"\"\"\n Calculates the number of full time equivalent students in a\n particular area.\n \"\"\"\n\n return sum([s.full_time_equivalence(area, basis) for s in\n self.students])\n\n def points_per_student(self, area, basis):\n \"\"\"\n Calculates the average number of points per FTE student.\n \"\"\"\n\n points = sum([s.all_points(area, basis) for s in self.students])\n students = self.fte_students(area, basis)\n if students == 0:\n return None\n else:\n return points / students\n\n def points_per_entry(self, area, basis):\n \"\"\"\n Calculates the average number of points achieved per entry.\n \"\"\"\n\n points = sum([s.all_points(area, basis) for s in self.students])\n entries = sum([s.all_entries(area, basis) for s in self.students])\n try:\n return points / entries\n except ZeroDivisionError:\n return 0\n\n def average_grade(self, area, basis, level):\n \"\"\"\n Calculates the average grade achieved.\n \"\"\"\n\n average = self.points_per_entry(area, basis)\n lookup = {g: cfg.GRADES[level][g]['Points'] for g in cfg.GRADES[level]}\n\n closest = ''\n error = 1000\n for grade, points in lookup.items():\n if abs(average - points) < error:\n closest = grade\n error = abs(average - points)\n\n return closest\n\n def passes(self, area, basis, number):\n \"\"\"\n Calculates the percentage of students achieving 'number' passes\n \"\"\"\n\n passes = sum([1 if s.all_passes(area, basis) >= number else 0 for s\n in self.students])\n students = self.num_students(area, basis)\n\n return passes / students\n\n def alevel_entries(self, basis, number, facilitating=0):\n \"\"\"\n Calculates the percentage of alevel students with number of Alevel\n entries (excluding AS), which must include at least a certain number\n of facilitating subjects.\n \"\"\"\n\n entries = 0\n for student in self.students:\n facil = len(student.grade_summary('A2', basis, True))\n other = len(student.grade_summary('A2', basis, False))\n if facil >= facilitating and facil + other >= number:\n entries += 1\n\n return entries / self.num_students('alevel', basis)\n\n def achieved_aab(self, basis, facilitating=0):\n \"\"\"\n Calculates the percentage of alevel student who achieved AAB or better\n including a number of facilitating subjects.\n \"\"\"\n\n passed = sum([1 if s.achieved_aab(basis, facilitating) else 0 for s\n in self.students])\n return passed / self.num_students('alevel', basis)\n\n def subject_va(self, subject, basis, all_as=True, grade=True):\n \"\"\"\n Calculates the L3VA score for a subject\n\n Returns VA score, Standard Deviation and size of cohort\n\n Args:\n - all_AS (Bool) : If AS VA should be calculated using the code\n for All AS courses or just discontinued ones.\n Should be True for year 12, False for year 13.\n \"\"\"\n\n # pylint: disable=R0914\n cohort = [s for s in self.students\n if subject in s.all_courses(basis) and\n s.l3va(subject, basis, all_as) is not None]\n if len(cohort) == 0:\n return None, None, None\n\n value_added = np.mean([s.l3va(subject, basis, all_as) for s in cohort])\n prior = np.mean([s.prior.ks4['APS'] for s in cohort])\n coeffs = nat.l3va_coefficients(subject, all_as)\n if coeffs is None:\n return None, None, None\n\n covariance = np.array([[coeffs[5], coeffs[6], coeffs[8]],\n [coeffs[6], coeffs[7], coeffs[9]],\n [coeffs[8], 
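# (Sketch of the empirical-Bayes shrinkage assembled in subject_va, with\n# hypothetical numbers: if the residual variance coeffs[11] is 4.0 and the\n# prior-attainment variance tau_sq works out to 2.0, then phi = 2.0 and a\n# cohort of 18 students shrinks the raw VA score by 18 / (18 + 2) = 0.9\n# before it is reported.)\n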
coeffs[9], coeffs[10]]])\n prior_matrix = np.array([1, prior, prior**2])\n prior_transpose = np.array([[1], [prior], [prior**2]])\n tau_sq = np.dot(prior_matrix, np.dot(covariance, prior_transpose))[0]\n phi = coeffs[11] / tau_sq\n shrinkage = len(cohort) / (len(cohort) + phi)\n\n if grade and cfg.SUBJECTS[subject]['Level'] == 'AS':\n return (value_added * shrinkage / 15,\n sqrt(coeffs[11] / (len(cohort) + phi)) / 15, len(cohort))\n elif grade:\n return (value_added * shrinkage / 30,\n sqrt(coeffs[11] / (len(cohort) + phi)) / 30, len(cohort))\n else:\n return (value_added * shrinkage,\n sqrt(coeffs[11] / (len(cohort) + phi)), len(cohort))\n\n def level_va(self, level, basis, all_as=True, grade=True):\n \"\"\"\n Returns the average value added score for all subjects at a particular\n level, along with the standard deviation\n\n Args:\n basis (str): some combination of - 'a' (assessments)\n - 'e' (estimates)\n - 'x' (exam)\n level (str): AS, A2, BTEC3D etc.\n\n $ VA = \\\\frac{\\\\Sum{va_{subject}}}{\\\\Sum{n}} $\n $ \\\\sigma_{level} = \\\\frac{\\\\Sum{(\\\\sigma_{subject} \\\\times n)^2}}\n {(\\\\Sum{n})^2} $\n \"\"\"\n\n value_added, variance, cohort = 0, 0, 0\n subjects = [s for s in cfg.SUBJECTS if cfg.SUBJECTS[s]['Level'] ==\n level]\n\n for subj in subjects:\n va_score, st_dev, num_students = self.subject_va(subj, basis,\n all_as, False)\n if va_score is not None:\n value_added += va_score * num_students\n variance += (st_dev * num_students)**2\n cohort += num_students\n\n if cohort == 0:\n return None, None\n elif grade and level == \"AS\":\n n = 15*cohort\n elif grade:\n n = 30*cohort\n else:\n n = cohort\n\n return value_added / n, sqrt(variance) / n, cohort\n\n def area_va(self, area, basis, grade=True):\n \"\"\"\n Calculates the VA for all qualifications in a particular subject area.\n \"\"\"\n\n # pylint: disable=R0914\n if area == 'alevel':\n return self.level_va('A2', basis, False, True)\n elif area == 'aslevel':\n return self.level_va('AS', basis, True, True)\n\n value_added, variance, cohort = 0, 0, 0\n areas = {'academic': 'A', 'vocational': 'V'}\n subjects = [s for s in cfg.SUBJECTS if\n cfg.SUBJECTS[s]['KS5_Area'] == areas[area]]\n\n for subject in subjects:\n va_score, st_dev, num_students = self.subject_va(subject, basis,\n False, False)\n va_nat = nat.l3va_coefficients(subject, False)[15]\n\n if va_score is not None:\n value_added += (va_score - va_nat) * num_students\n variance += (st_dev * num_students)**2\n cohort += num_students * cfg.SUBJECTS[subject]['KS5_Weight']\n\n if cohort == 0:\n return None, None\n elif grade:\n return (value_added / (30 * cohort),\n sqrt(variance) / (30 * cohort),\n cohort)\n else:\n return value_added / cohort, sqrt(variance) / cohort, cohort\n","sub_path":"analysis/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":24241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450755858","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 24 10:40:53 2019\n\n@author: m7979\n\"\"\"\n#Defining dictionary and an integer for the rest of the code\nimport sys\nimport csv\ncontent =[]\ndct={}\ni=1\n\n#Reading in the data as a comma delimited text\nfile= csv.reader(open(sys.argv[1],'r'), delimiter=',')\nnext(file, None)\nfor line in file:\n content.append(line)\npharm = content\n\nmedical= [0]*len(pharm)\nfor i in range(len(pharm)):\n medical[i]=[0, '', '', 0]\n\n#Combining first name and last name to get full name\n\nfor i in range(len(pharm)):\n medical[i][3] = 
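# (A hedged, more idiomatic sketch of the same per-drug aggregation built\n# below, in one pass instead of parallel lists; 'rows' stands for the\n# parsed (id, full_name, drug_name, drug_cost) records and is hypothetical:\n#     from collections import defaultdict\n#     totals, prescribers = defaultdict(float), defaultdict(set)\n#     for _id, full_name, drug_name, drug_cost in rows:\n#         totals[drug_name] += drug_cost\n#         prescribers[drug_name].add(full_name)\n# the per-drug outputs are then totals[d] and len(prescribers[d]).)\n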
float(pharm[i][4])\n    medical[i][2] = pharm[i][3]\n    medical[i][1] = pharm[i][1] + ' ' +pharm[i][2]\n    \n\n#Making a list of the name of drugs by creating a dictionary and a for loop \nfor drug in medical:\n    dct.setdefault(drug[2],[]).append(drug)\nmed= list(dct.keys())\n\n#Defining the objects that we are using during the rest of the code, the for loop is to make sure the length is sufficient\nname=[1]*len(med)\ntotcost=[1]*len(med)\nindiv=[1]*len(med)\n\nfor i in range ((len(med))):\n    totcost[i]=0\n    name[i]=[]\n    indiv[i]=0\n    \n#Counting unique names of individuals that consumed each drug \nfor i in range (len(med)):\n    name=[0]*len(dct[med[i]])\n    for j in range (len(dct[med[i]])):\n        name[j]=(dct[med[i]])[j][1]\n    indiv[i]=len(name)-name.count([])\n#Calculating the total cost for each drug based on dictionary values \nfor i in range (len(med)):\n    for j in range (len(dct[med[i]])):\n        totcost[i]=totcost[i]+(dct[med[i]])[j][3]\n\n\n#Creating a dataset that includes all the info that I created\ndata=[0]*(len(med))\nfor i in range (len(med)):\n    data[i]= ['',0,0]\n#Filling out the data with the info that I extracted from the initial dataset\nfor i in range (len(med)):\n    data[i][0]= med[i]\n    data[i][1]= indiv[i]\n    data[i][2]='%g'% (totcost[i])\n    #Sorting data first based on the name and then based on the cost (to make sure cost comes first and then drugs are sorted by alphabet)    \ndata.sort(key=lambda x: x[1])\ndata.sort(key=lambda x: x[0], reverse=True)\n\n#Inserting column names\ndata.insert(0,['drug_name','num_prescriber','total_cost'])\n#IF YOU PRINT data, it is the output!\n#Creating output file in the directory\nwith open(sys.argv[2], 'w') as output:\n    csv.writer(output).writerows(data)\n\n#in case you want to get a text file as output please run the following code\n#with open(sys.argv[2], 'w') as output:\n   # for item in data:\n       # output.write(\"%s\\n\" % item)\n    \n    \n","sub_path":"src/pharmacy_counting.py","file_name":"pharmacy_counting.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"527565861","text":"from fastapi import FastAPI\nfrom typing import Optional\nimport uvicorn\nfrom db import engine,connection_db,conn\nfrom sqlalchemy import func, select\nimport requests\nimport json\n\napp = FastAPI()\n\nfrom models import *\n@app.get('/crear_registros')\ndef crear_registros():\n    url = 'http://api-faker/datos'\n    response = requests.get(url)\n    response_json = json.loads(response.text)\n    for i in response_json:\n        new_data = {\"first_name\":i[\"first_name\"],\"country\":i[\"country\"],\"day_of_month\":i[\"day_of_month\"],\"day_of_week\":i[\"day_of_week\"],\"word\":i[\"word\"]}\n        with engine.connect() as con:\n            con.execute(datos_falsos.insert().values(new_data))\n    return {'data':\"Registros creados\"}\n\n@app.get('/registros_falsos')\ndef registros_falsos():\n    with engine.connect() as con:\n        obtener_data = \"select * from datos_falsos\"\n        respuesta_data = con.execute(obtener_data)\n        lista = list()\n        for i in respuesta_data:\n            data = dict()\n            data[\"first_name\"] = i[1]\n            data[\"day_of_month\"] = i[2]\n            data[\"day_of_week\"] = i[3]\n            data[\"country\"] = i[4]\n            data[\"word\"] = i[5]\n            lista.append(data)\n        return {'data':lista}\n\n@app.get('/eliminar_registros')\ndef eliminar_registros():\n    with engine.connect() as con:\n        eliminar_data = \"delete from datos_falsos\"\n        try:\n            respuesta_data = con.execute(eliminar_data)\n        except:\n            return {\"respuesta\":\"Data no eliminada , validar \"}    \n    return 
{\"respuesta\":\"Data Eliminada\"}\n","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593647402","text":"# -*- encoding: utf-8 -*-\n# Copyright 2016 Vinzor Co.,Ltd.\n#\n# comment\n#\n# 2016/6/1 0001 Jay : Init\n\nimport json\n\nfrom flask import render_template, redirect, url_for, flash, \\\n request, jsonify, abort\n\nfrom . import terminal\nfrom .forms import PlaceForm, TerminalForm\nfrom .. import db\nfrom ..models import Place, Terminal, TerminalState, User, Parameter, Role\n\n\n@terminal.route('/places', methods=['GET', 'POST'])\ndef places():\n form = PlaceForm()\n if form.validate_on_submit():\n place = Place(name=form.name.data,\n address=form.address.data)\n db.session.add(place)\n db.session.commit()\n flash(\"添加课室成功\", category='info')\n return redirect(url_for('terminal.places'))\n places = Place.query.all()\n return render_template('terminal/places.html', places=places, form=form)\n\n\n@terminal.route('/places', methods=['DELETE'])\ndef delete_places():\n place_id_list = request.json\n result_json = {\n 'result': 'success'\n }\n for place_id in place_id_list:\n place = Place.query.get(place_id)\n if place is None:\n flash(\"课室不存在,删除失败\", category=\"error\")\n else:\n course_count = place.courses.count()\n if course_count > 0:\n flash(\"课室{0}正在被课程占用,删除失败\".format(place.name), category=\"error\")\n else:\n db.session.delete(place)\n flash(\"课室{0}删除成功\".format(place.name), category='info')\n db.session.commit()\n return jsonify(result_json)\n\n\n@terminal.route('/places/', methods=['PUT'])\ndef update_place(place_id):\n place = Place.query.get(place_id)\n if not place:\n abort(404)\n form = PlaceForm()\n result_json = {\n 'result': 'success'\n }\n if form.validate_on_submit():\n place.name = form.name.data\n place.address = form.address.data\n db.session.add(place)\n db.session.commit()\n flash(\"修改课室信息成功\", category='info')\n else:\n flash(\"修改失败,课室名称或课室地址不能为空\", category=\"error\")\n return jsonify(result_json)\n\n\n@terminal.route('/registration', methods=['GET', 'POST'])\ndef registration():\n form = TerminalForm()\n if form.validate_on_submit():\n place = Place.query.get(form.place_id.data)\n if Terminal.query.filter_by(mac_address=form.mac_address.data).first():\n flash(\"创建终端失败,MAC地址已存在\", category=\"error\")\n return redirect(url_for(\"terminal.registration\"))\n\n parameter = Parameter.query.\\\n filter(Parameter.name == 'terminal_register_mode').first()\n\n terminal_user_name = \"{0}_{1}\".format(place.name, form.seat_number.data)\n terminal_user_password = form.mac_address.data\n role = Role.query.filter(Role.name == 'Terminal').first()\n user = User()\n user.username = terminal_user_name\n user.fullname = \"{0}_{1}\".format('terminal', terminal_user_name)\n user.role = role\n user.password = terminal_user_password\n user.is_device = True\n user.confirmed = True if parameter.value == TerminalState.APPROVED else False\n db.session.add(user)\n db.session.flush()\n\n terminal = Terminal(mac_address=form.mac_address.data,\n seat_number=form.seat_number.data,\n description=form.description.data,\n user_id=user.id,\n place_id=place.id,\n state=parameter.value if parameter else TerminalState.WAITING)\n db.session.add(terminal)\n db.session.commit()\n flash('创建终端申请成功', category='info')\n return redirect(url_for(\"terminal.registration\"))\n for field, msg in form.errors.items():\n flash(\"{0}: {1}\".format(field, msg[0]), 
category=\"error\")\n terminal_list = {}\n terminal_list[\"approved\"] = Terminal.query.filter_by(state=TerminalState.APPROVED).all()\n terminal_list[\"rejected\"] = Terminal.query.filter_by(state=TerminalState.REJECTED).all()\n terminal_list[\"waiting\"] = Terminal.query.filter_by(state=TerminalState.WAITING).all()\n return render_template('terminal/registration.html',\n form=form, terminal_list=terminal_list)\n\n\n@terminal.route('/registration', methods=['PUT'])\ndef update_registration():\n # TODO: 更新已通过审核的终端信息\n abort(405)\n\n\n@terminal.route('/registration', methods=['DELETE'])\ndef delete_registration():\n terminal_id_list = request.json\n result_json = {\n 'result': 'success'\n }\n for terminal_id in terminal_id_list:\n terminal = Terminal.query.filter_by(id=terminal_id).first()\n user = terminal.user\n if user:\n db.session.delete(terminal)\n db.session.delete(user)\n db.session.commit()\n flash(\"删除终端成功\", category='info')\n return jsonify(result_json)\n\n\n@terminal.route('/registration/approval', methods=['PUT'])\ndef approve_registration():\n terminal_id_list = request.json\n result_json = {\n 'result': 'success'\n }\n for terminal_id in terminal_id_list:\n terminal = Terminal.query.get(terminal_id)\n if terminal is not None:\n terminal.state = TerminalState.APPROVED\n db.session.add(terminal)\n\n user = terminal.user\n user.confirmed = True\n db.session.add(user)\n db.session.commit()\n flash(\"已成功通过审批\", category='info')\n return jsonify(result_json)\n\n\n@terminal.route('/registration/rejection', methods=['PUT'])\ndef reject_registration():\n terminal_id_list = request.json\n result_json = {\n 'result': 'success'\n }\n for terminal_id in terminal_id_list:\n terminal = Terminal.query.get(terminal_id)\n if terminal is not None:\n terminal.state = TerminalState.REJECTED\n db.session.add(terminal)\n db.session.commit()\n flash(\"已成功拒绝审批\", category='info')\n return jsonify(result_json)\n","sub_path":"src/web/app/terminal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"287108510","text":"import os\n\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Location of folder which contains corpus of text to scan.\nTEXT_FILE_FOLDER = os.path.join(BASE_PATH, 'text_files')\n\n\nMARKOV_DATABASE_FILE_PATH = os.path.join(BASE_PATH, 'database', 'markov_db.sqlite3')\n\n# Markov Order\nMARKOV_ORDER = 2\n\nDB_TRANSACTION_INTERVAL = 25\n\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529771391","text":"# coding=utf-8\n\nimport pytest\n\nfrom motse.spidermws import dedup\nfrom motse.spidermws import *\nfrom motse.http import HttpRequest, HttpResponse\n\n\nclass MongoClientMock:\n def __init__(self, mongo_addr):\n self.mongo_addr = mongo_addr\n\n def __getitem__(self, name):\n return MongoDatabaseMock(self, name)\n\n\nclass MongoDatabaseMock:\n def __init__(self, client, name):\n self.client = client\n self.name = name\n\n def __getitem__(self, name):\n return MongoCollectionMock(self, name)\n\n\nclass MongoCollectionMock:\n def __init__(self, database, name):\n self.database = database\n self.name = name\n self.index = None\n self.data = set()\n\n def create_index(self, name):\n self.index = name\n\n def find_one(self, req):\n assert len(req) == 1\n key, value = None, None\n for i, j in req.items():\n key, 
value = i, j\n assert key == self.index\n if value in self.data:\n return value\n\n def insert_one(self, req):\n assert len(req) == 1\n key, value = None, None\n for i, j in req.items():\n key, value = i, j\n assert key == self.index\n self.data.add(value)\n\n\n@pytest.fixture(scope=\"function\")\ndef mongo_client_patch(request, monkeypatch):\n monkeypatch.setattr(dedup, \"MongoClient\", MongoClientMock)\n request.addfinalizer(lambda: monkeypatch.undo())\n\n\nclass TestMongoDedupMiddleware:\n task_id = \"0123456789abcdef\"\n dedup_mongo_addr = \"mongodb://root:123456@127.0.0.1:27017\"\n dedup_mongo_db = \"motse_dedup\"\n dedup_mongo_tbl = \"task_{0}\".format(task_id)\n req1 = HttpRequest(\"http://127.0.0.1\", \"GET\")\n req2 = HttpRequest(\"http://127.0.0.1\", \"POST\")\n req3 = HttpRequest(\"http://127.0.0.2\", \"GET\")\n resp4 = HttpResponse(\"http://127.0.0.1\", 200)\n req_list = [req1,\n HttpRequest(\"http://127.0.0.1\", \"GET\"),\n req2,\n req3,\n HttpRequest(\"http://127.0.0.1\", \"POST\"),\n resp4,\n HttpRequest(\"http://127.0.0.2\", \"GET\")]\n res_list = [req1, req2, req3, resp4]\n\n def test_handle_start_requests(self, mongo_client_patch):\n mw = MongoDedupMiddleware.from_config(dict(task_id=self.task_id, dedup_mongo_addr=self.dedup_mongo_addr))\n assert mw._dedup_tbl.name == self.dedup_mongo_tbl\n assert mw._dedup_tbl.database.name == self.dedup_mongo_db\n assert mw._dedup_tbl.database.client.mongo_addr == self.dedup_mongo_addr\n res = [i for i in mw.handle_start_requests(self.req_list)]\n assert res == self.res_list\n\n def test_handle_output(self, mongo_client_patch):\n mw = MongoDedupMiddleware.from_config(dict(task_id=self.task_id, dedup_mongo_addr=self.dedup_mongo_addr))\n assert mw._dedup_tbl.name == self.dedup_mongo_tbl\n assert mw._dedup_tbl.database.name == self.dedup_mongo_db\n assert mw._dedup_tbl.database.client.mongo_addr == self.dedup_mongo_addr\n res = [i for i in mw.handle_output(None, self.req_list)]\n assert res == self.res_list\n\n\nclass TestDepthMiddleware:\n max_depth = 1\n key = \"_current_depth\"\n\n def test_handle_output(self):\n class R:\n def __init__(self, depth=None):\n self.meta = {}\n if depth is not None:\n self.meta[\"_current_depth\"] = depth\n\n mw = DepthMiddleware.from_config(dict(max_depth=1))\n req = HttpRequest(\"http://127.0.0.1\", \"GET\")\n resp = HttpResponse(\"http://127.0.0.1\", 200)\n res = [i for i in mw.handle_output(R(), [req, resp])]\n assert res == [req, resp] and req.meta[self.key] == 1\n res = [i for i in mw.handle_output(R(0), [req, resp])]\n assert res == [req, resp] and req.meta[self.key] == 1\n res = [i for i in mw.handle_output(R(1), [req, resp])]\n assert res == [resp]\n","sub_path":"tests/test_spidermws.py","file_name":"test_spidermws.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426782765","text":"#wapp to read number from user and prefrom linear search\r\n\r\nimport array\r\nmarks=array.array('i',[])\r\nn=int(input(\"enter number of element \"))\r\nfor i in range(n):\r\n\tele=int(input(\"enter element\"))\r\n\tmarks.append(ele)\r\ncount=0\r\nk=int(input(\"enter the element to search \"))\r\nfor i in range(len(marks)):\r\n\tif marks[i]==k:\r\n\t\tprint(\"ele found at\",i+1)\r\n\t\tcount=count+1\r\n\r\nif count==0:\r\n\tprint(\" element not found\")","sub_path":"irshad 
dir/kamal/demo_python/python/L4/linearSearch.py","file_name":"linearSearch.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"86581756","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Mohamed NIANG\n\"\"\"\n\nimport numpy as np\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nfrom pycuda.compiler import SourceModule\n\nf = open(\"vec_sum.cu\", 'r')\ncuda_source = \"\".join(f.readlines())\nmod=SourceModule(cuda_source)\nvecteur_somme_kernel=mod.get_function(\"somme\")\n\nBLOCKDIM=512\n\nn=10**6\n\na=np.arange(0,1,1/n,dtype=np.float32)\nb=np.ones_like(a)\nc=np.empty_like(a)\n\ninput1_gpu=cuda.mem_alloc(a.nbytes)\ninput2_gpu=cuda.mem_alloc(a.nbytes)\noutput_gpu=cuda.mem_alloc(a.nbytes)\ncuda.memcpy_htod(input1_gpu,a)\ncuda.memcpy_htod(input2_gpu,b)\nvecteur_somme_kernel(np.int32(a.size), input1_gpu, input2_gpu, output_gpu ,block=(BLOCKDIM,1,1), grid=(a.size//BLOCKDIM+1,1,1))\ncuda.memcpy_dtoh(c,output_gpu)\n\n","sub_path":"Code/vec_sum.py","file_name":"vec_sum.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354440799","text":"import numpy as np\n\nTIME_RESOLUTION = 1e-7\nNUMBER_OF_TIME_BINS = 30\nNUMBER_OF_PADS = 144 * 12\nADC_BIT_RESOLUTION = 2**10\n\nINTERESTING_THRESHOLD = 300\nFITTING_BASELINE = 100\nDEFAULT_PLOT_BASELINE = 20\nDEFAULT_EVENT_SHAPE = np.asarray([12, 144, 11])\nDEFAULT_DETECTOR_DIMENSIONS = np.asarray([107.0, 107.0, 3.7])\nDEFAULT_SPACINGS = DEFAULT_DETECTOR_DIMENSIONS / (DEFAULT_EVENT_SHAPE - 1)\nDEFAULT_DATA_FOLDER = 'DATA/'\nDEFAULT_INTERESTING_DATA_FOLDER = \"DATA_INTERESTING/\"\nDATA_EXCLUDE_MASK = np.zeros((12, 144, 30), dtype=bool)\n# DATA_EXCLUDE_MASK[4:8,0:144,:] = True\nPRINT_EVNO_EVERY = 100\n\nCURRENT_FILE = '0417'\n","sub_path":"defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"274199151","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n\nimport setuptools\nfrom pathlib import Path\n\n\nrequirements = [\n r\n for r in Path(\"requirements.txt\").read_text().splitlines()\n if '@' not in r\n]\n\nextra_requirements = {\n \"av\": [\n r\n for r in Path(\"av_requirements.txt\").read_text().splitlines()\n if '@' not in r\n ]\n}\n\nwith open(\"README.md\", encoding=\"utf8\") as f:\n readme = f.read()\n\n\nsetuptools.setup(\n name=\"augly\",\n version=\"0.1.8\",\n description=\"A data augmentations library for audio, image, text, & video.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/AugLy\",\n author=\"Zoe Papakipos and Joanna Bitton\",\n author_email=\"zoep@fb.com\",\n packages=setuptools.find_packages(exclude=[\"augly.tests\"]),\n include_package_data=True,\n install_requires=requirements,\n extras_require=extra_requirements,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\"\n ],\n python_requires=\">=3.6\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"521491935","text":"import requests\n\nURL = \"https://channel.develop.redeam.io/v1/suppliers/\"\n\n# PARAMS = {\n# \"meta\":{\n# \"reqId\": \"5fd78809-4700-46d7-8386-3b8738117f4d\"\n# }\n# }\n\nAPI_KEY = \"chris-key\"\n\nheaders = {\"X-API-KEY\": \"chris-key\", \"X-API-SECRET\": \"chris-test\"}\n\nresults = []\n\nrequest_url = URL + \"5fd78809-4700-46d7-8386-3b8738117f4d\"\n\nr = requests.get(\n url=request_url,\n headers = headers\n )\n\ndata = r.json()\n\nlocation = data['country']\n\nprint(location)\n","sub_path":"redeam/tests/get_request.py","file_name":"get_request.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"544507090","text":"import pygame\nfrom .constants import RED, WHITE, GREY, GOLD, SQUARE_SIZE\n\nclass Piece:\n # Set drawing constants\n RADIUS = SQUARE_SIZE // 3\n BORDER = SQUARE_SIZE // 20\n\n # Initialise game piece characteristics \n def __init__(self, row, col, colour):\n self.row = row\n self.col = col\n self.colour = colour\n self.king = False\n self.x = 0\n self.y = 0\n self.direction = 0\n self.calc_pos()\n \n # Calculate the x,y position of the game piece and assign direction\n def calc_pos(self):\n self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2\n self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2\n if self.colour == WHITE:\n self.direction = 1\n else:\n self.direction = -1\n\n # Changes the piece to a king\n def make_king(self):\n self.king = True\n\n # Draws the piece on the board\n def draw(self, win):\n pygame.draw.circle(win, GREY, (self.x, self.y), self.RADIUS + self.BORDER)\n pygame.draw.circle(win, self.colour, (self.x, self.y), self.RADIUS)\n if self.king:\n pygame.draw.circle(win, GOLD, (self.x, self.y), self.RADIUS // 2)\n\n # Changes the row and col of the piece and recalculates x,y pos\n def move(self, row, col):\n self.row = row\n self.col = col\n self.calc_pos()\n\n def __repr__(self):\n return str(self.colour)","sub_path":"Gameplay Engine/Checkers/piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336599171","text":"import tensorflow as tf\nfrom tensorflow.keras.datasets 
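# (Note on the scaling used below: x / 255. * 2 - 1 maps raw pixel values\n# from [0, 255] onto [-1, 1], e.g. 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0.)\n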
import fashion_mnist\n\n# # rtx cudnn problem fix\n# from tensorflow.compat.v1 import ConfigProto\n# from tensorflow.compat.v1 import InteractiveSession\n# config = ConfigProto()\n# config.gpu_options.allow_growth = True\n# session = InteractiveSession(config=config)\n\nn_classes = 10  # Fashion-MNIST has 10 classes, so the head must emit 10 scores\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Conv2D(\n        32, (5, 5), activation=tf.nn.relu, input_shape=(28, 28, 1)),\n    tf.keras.layers.MaxPool2D((2, 2), (2, 2)),\n    tf.keras.layers.Conv2D(64, (3, 3), activation=tf.nn.relu),\n    tf.keras.layers.MaxPool2D((2, 2), (2, 2)),\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(1024, activation=tf.nn.relu),\n    tf.keras.layers.Dropout(0.5),\n    tf.keras.layers.Dense(n_classes)\n])\nprint(\"Flag0\")\nmodel.summary()\nprint(\"Flag1\")\n(train_x, train_y), (test_x, test_y) = fashion_mnist.load_data()\n# Scale input in [-1, 1] range\nprint(train_x[1])\ntrain_x = train_x / 255. * 2 - 1\ntest_x = test_x / 255. * 2 - 1\ntrain_x = tf.expand_dims(train_x, -1).numpy()\ntest_x = tf.expand_dims(test_x, -1).numpy()\nmodel.compile(\noptimizer=tf.keras.optimizers.Adam(1e-5),\n# the final Dense layer has no activation, so train on logits explicitly\nloss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\nmetrics=['accuracy'])\nprint(\"Flag2\")\nmodel.fit(train_x, train_y, epochs=10)\nprint(\"Flag3\")\nmodel.evaluate(test_x, test_y)\n
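# A hedged follow-up: with from_logits=True in compile() above, the model\n# emits raw logits, so apply softmax explicitly when probabilities are\n# needed; test_x is the scaled test set defined above.\nprobs = tf.nn.softmax(model.predict(test_x[:1]))\nprint(probs.numpy().round(3))  # ten class probabilities summing to ~1\n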
\"Q1_Imports\"\n\tfont = Font(size=13, bold = True)\n\tws1 = wb.create_sheet(title = name)\n\tws1['A1'] = \"Country\"\n\tws1['A2'] = pp[0]\n\tws1['A3'] = pp[1]\n\tws1['A4'] = pp[2]\n\tws1['A5'] = pp[3]\n\tws1['A6'] = pp[4]\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\tpp = topE.index.values\n\tws1.column_dimensions['A'].width = 15\n\tname = \"Q1_Exports\"\n\tfont = Font(size=13, bold = True)\n\tws1 = wb.create_sheet(title = name)\n\tws1['A1'] = \"Country\"\n\tws1['A2'] = pp[0]\n\tws1['A3'] = pp[1]\n\tws1['A4'] = pp[2]\n\tws1['A5'] = pp[3]\n\tws1['A6'] = pp[4]\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\n\tws1.column_dimensions['A'].width = 15\n\n\n\n\n# Answer to question 2\n# Top five import and export commodities\ndef q_2():\n\t# Load import and export files into pandas dataframes\n\txls_file = pd.ExcelFile('India_Imports_2011-12_And_2012-13.xls')\n\tdfi = xls_file.parse()\n\txls_file = pd.ExcelFile('India_Exports_2011-12_And_2012-13.xls')\n\tdfe = xls_file.parse()\n\n\t# apply groupby opertion on commodities, sort them and take top five (by head function)\n\tcoI = dfi.groupby('Commodity')['Value-INR-2011-12'].sum().sort_values(ascending=False).head()\n\tcoE = dfe.groupby('Commodity')['Value-INR-2011-12'].sum().sort_values(ascending=False).head()\n\n\t# print (coI)\n\t# print (coE)\n\n\t# write results to sheets in excel file\n\tpp = coI.index.values\n\tname = \"Q2_Imports\"\n\tfont = Font(size=13, bold = True)\n\tws1 = wb.create_sheet(title = name)\n\tws1['A1'] = \"Commodity\"\n\tws1['A2'] = pp[0]\n\tws1['A3'] = pp[1]\n\tws1['A4'] = pp[2]\n\tws1['A5'] = pp[3]\n\tws1['A6'] = pp[4]\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\tws1.column_dimensions['A'].width = 35\n\tpp = coE.index.values\n\tname = \"Q2_Exports\"\n\tfont = Font(size=13, bold = True)\n\tws1 = wb.create_sheet(title = name)\n\tws1['A1'] = \"Commodity\"\n\tws1['A2'] = pp[0]\n\tws1['A3'] = pp[1]\n\tws1['A4'] = pp[2]\n\tws1['A5'] = pp[3]\n\tws1['A6'] = pp[4]\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\tws1.column_dimensions['A'].width = 35\n\n\n\n\n# Answer to question 3\n# Total imports, total exports, export/import ratio, export-import (trade deficit) for each country\ndef q_3():\n\t# Load import and export files into pandas dataframes\n\txls_file = pd.ExcelFile('India_Imports_2011-12_And_2012-13.xls')\n\tdfi = xls_file.parse()\n\tp = string.ascii_uppercase\n\txls_file = pd.ExcelFile('India_Exports_2011-12_And_2012-13.xls')\n\tdfe = xls_file.parse()\n\n\n\t# apply groupby opertion on countries and aggregate on sum\n\ttopI = dfi.groupby('Country')['Value-INR-2011-12'].sum()\n\ttopE = dfe.groupby('Country')['Value-INR-2011-12'].sum()\n\n\t# concatenate import and export columns horizontally\n\tcat = pd.concat([topI, topE],axis=1)\n\tcat.columns = ['Total_Imports','Total_Exports']\n\t# cacculate Export/import ratio and trade deficit\n\tcat['Export/import ratio'] = cat['Total_Exports'] / cat['Total_Imports']\n\tcat['Export-import (trade deficit)'] = cat['Total_Exports'] - cat['Total_Imports']\n\t# print (cat)\n\t# print (merge)\n\t# result = cat.to_latex()\n\t# print (result)\n\t# f1 = open(\"table1.tex\",'w')\n\t# f1.write(result)\n\t# f1.close()\n\t# write results to sheets in excel file\n\tpp = cat.index.values\n\tn = cat.index.values.size\n\tp1 = cat['Total_Imports']\n\tp2 = cat['Total_Exports']\n\tp3 = cat['Export/import ratio']\n\tp4 = cat['Export-import (trade deficit)']\n\n\tws1 = wb.create_sheet(title = \"Q3\")\n\tws1['A1'] = \"Country\"\n\tws1['B1'] = \"Total_Imports\"\n\tws1['C1'] = 
\"Total_Exports\"\n\tws1['D1'] = \"Export/import ratio\"\n\tws1['E1'] = \"Export-import (trade deficit)\"\n\tfont = Font(size = 13, bold = True)\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\tfor k in range(n):\t\n\t\ti = 0\n\t\tws1[str(p[i] + str(k+2))] = pp[k]\n\t\ti = i + 1\n\t\tws1[str(p[i] + str(k+2))] = p1[k]\n\t\ti = i + 1\n\t\tws1[str(p[i] + str(k+2))] = p2[k]\n\t\ti = i + 1\n\t\tws1[str(p[i] + str(k+2))] = p3[k]\n\t\ti = i + 1\n\t\tws1[str(p[i] + str(k+2))] = p4[k]\n\t\ti = i + 1\n\n\tws1.column_dimensions['A'].width = 20\n\tws1.column_dimensions['B'].width = 25\n\tws1.column_dimensions['C'].width = 25\n\tws1.column_dimensions['D'].width = 25\n\tws1.column_dimensions['E'].width = 25\n\n\n\n\n# Answer to question 4\n# All countries to whom our export is more than Rs 10,000 Cr using query method \ndef q_4():\n\t# Load import and export files into pandas dataframes\n\txls_file = pd.ExcelFile('India_Imports_2011-12_And_2012-13.xls')\n\tdfi = xls_file.parse()\n\txls_file = pd.ExcelFile('India_Exports_2011-12_And_2012-13.xls')\n\tdfe = xls_file.parse()\n\tp = string.ascii_uppercase\n\n\t# Apply group by operation on Country and sum on aggregate\n\ttopI = dfi.groupby('Country')['Value-INR-2011-12'].sum()\n\ttopE = dfe.groupby('Country')['Value-INR-2011-12'].sum()\n\n\t# concatenate import and export columns horizontally\n\tmerge = pd.concat([topE, topI],axis=1)\n\tmerge.columns = ['exports', 'imports']\n\n\t# query opration to find countries whose export is more than 10,000 Cr\n\tmerge = merge.query('exports > 1e+11')\n\t# print (merge)\n\tresult = merge.to_latex()\n\tprint (result)\n\tf1 = open(\"table1.tex\",'w')\n\tf1.write(result)\n\tf1.close()\n\t# write results to sheets in excel file\n\tfont = Font(size = 13, bold = True)\n\tpp = merge.index.values\n\tn = merge.index.values.size\n\tws1 = wb.create_sheet(title = \"Q4\")\n\tws1['A1'] = \"Country\"\n\tfor k in range(n):\n\t\tws1[str(p[0] + str(k+2))] = pp[k]\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\tws1.column_dimensions['A'].width = 20\n\n\n\n# Answer to question 5\n# Renaming the columns of the answer in question 4 to: 'Country', 'Exports', 'Imports\ndef q_5():\n\t# Load import and export files into pandas dataframes\n\txls_file = pd.ExcelFile('India_Imports_2011-12_And_2012-13.xls')\n\tdfi = xls_file.parse()\n\tp = string.ascii_uppercase\n\txls_file = pd.ExcelFile('India_Exports_2011-12_And_2012-13.xls')\n\tdfe = xls_file.parse()\n\n\t# Apply group by operation on Country and sum on aggregate\n\ttopI = dfi.groupby('Country')['Value-INR-2011-12'].sum()\n\ttopE = dfe.groupby('Country')['Value-INR-2011-12'].sum()\n\n\t# concatenate import and export columns horizontally\n\tmerge = pd.concat([topE, topI],axis=1)\n\tmerge.columns = ['exports', 'imports']\n\n\t# query opration to find countries whose export is more than 10,000 Cr\n\tmerge = merge.query('exports > 1e+11')\n\n\t# create dataframe temp with index from 0 to n-1 and one column with values as n countries\n\ttemp = pd.DataFrame(merge.index.values, index = np.arange(merge.index.values.size))\n\n\t# Concatenate newly created dataframe temp with merge horizontally and set index 0 to n-1 for new concatenated dataframe\n\tmerge = merge.set_index(np.arange(merge.index.values.size))\n\tmerge = pd.concat([temp, merge],axis=1)\n\tmerge.columns = ['Country', 'Exports', 'Imports']\n\t# print (merge)\n\n\n\t# write results to sheets in excel file\n\tpp = merge['Country']\n\tn = merge.index.values.size\n\tp1 = merge['Exports']\n\tp2 = merge['Imports']\n\tws1= 
wb.create_sheet(title = \"Q5\")\n\tws1['A1'] = \"Country\"\n\tws1['B1'] = \"Import(INR)\"\n\tws1['C1'] = \"Export(INR)\"\n\tfont = Font(size = 13, bold = True)\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\tfor k in range(n):\n\t\tws1[str(p[0] + str(k+2))] = pp[k]\n\t\tws1[str(p[1] + str(k+2))] = p1[k]\n\t\tws1[str(p[2] + str(k+2))] = p2[k]\n\tws1.column_dimensions['A'].width = 20\n\tws1.column_dimensions['B'].width = 25\n\tws1.column_dimensions['C'].width = 25\n\n\n# Answer to question 6\n# Creating a new table with column headings \"Country\", \"Transaction\", \"Value\" from table in answer 5 using melt mothod.\ndef q_6():\n\t# Load import and export files into pandas dataframes\n\txls_file = pd.ExcelFile('India_Imports_2011-12_And_2012-13.xls')\n\tdfi = xls_file.parse()\n\txls_file = pd.ExcelFile('India_Exports_2011-12_And_2012-13.xls')\n\tdfe = xls_file.parse()\n\tp = string.ascii_uppercase\n\n\t# Apply group by operation on Country and sum on aggregate\n\ttopI = dfi.groupby('Country')['Value-INR-2011-12'].sum()\n\ttopE = dfe.groupby('Country')['Value-INR-2011-12'].sum()\n\n\t# concatenate import and export columns horizontally\n\tmerge = pd.concat([topE, topI],axis=1)\n\tmerge.columns = ['exports', 'imports']\n\n\t# query opration to find countries whose export is more than 10,000 Cr\n\tmerge = merge.query('exports > 1e+11')\n\n\t# create dataframe temp with index from 0 to n-1 and one column with values as n countries\n\ttemp = pd.DataFrame(merge.index.values, index = np.arange(merge.index.values.size))\n\n\t# Concatenate newly created dataframe temp with merge horizontally and set index 0 to n-1 for new concatenated dataframe\n\tmerge = merge.set_index(np.arange(merge.index.values.size))\n\tmerge = pd.concat([temp, merge],axis=1)\n\tmerge.columns = ['Country', 'Exports', 'Imports']\n\n\t# Use melt function to reform dataframe intp the form -- Country, Transaction, Value(INR)\n\tmelted = pd.melt(merge, id_vars = ['Country'], var_name = 'Transaction', value_name = 'Value').sort_values(['Value'],ascending = False).head(n=10)\n\t# print (melted)\n\n\tresult = melted.to_latex()\n\tprint (result)\n\tf1 = open(\"table2.tex\",'w')\n\tf1.write(result)\n\tf1.close()\n\t# write results to sheets in excel file\n\tp1 = melted['Country']\n\tp2 = melted['Transaction']\n\tp3 = melted['Value']\n\tindex = melted.index.values\n\tws1 = wb.create_sheet(title = \"Q6\")\n\tws1['A1'] = \"Country\"\n\tws1['B1'] = \"Transaction\"\n\tws1['C1'] = \"Value(INR)\"\n\tfont = Font(size = 13, bold = True)\n\tfor cell in ws1[\"1:1\"]:\n\t\tcell.font = font\n\tn = 10\n\tfor k in range(n):\n\t\tws1[str(p[0] + str(k+2))] = p1[index[k]]\n\t\tws1[str(p[1] + str(k+2))] = p2[index[k]]\n\t\tws1[str(p[2] + str(k+2))] = p3[index[k]]\n\tws1.column_dimensions['A'].width = 20\n\tws1.column_dimensions['B'].width = 10\n\tws1.column_dimensions['C'].width = 25\n\n\n\n# Answer to question 7\n# Commodities that we both export and import.\ndef q_7():\n\t# Load import and export files into pandas dataframes\n\txls_file = pd.ExcelFile('India_Imports_2011-12_And_2012-13.xls')\n\tdfi = xls_file.parse()\n\tp = string.ascii_uppercase\n\txls_file = pd.ExcelFile('India_Exports_2011-12_And_2012-13.xls')\n\tdfe = xls_file.parse()\n\n\n\t# Apply group by operation on Commodity, sum on aggregate and sort them as per Value(INR)\n\tco12I = dfi.groupby('Commodity')['Value-INR-2011-12'].sum().sort_values(ascending=False)\n\tco12E = dfe.groupby('Commodity')['Value-INR-2011-12'].sum().sort_values(ascending=False)\n\tco13I = 
dfi.groupby('Commodity')['Value-INR-2012-13'].sum().sort_values(ascending=False)\n\tco13E = dfe.groupby('Commodity')['Value-INR-2012-13'].sum().sort_values(ascending=False)\n\n\t# concatenate import and export columns horizontally for each year 2011-12 and 2012-13 and store them in merge12 and merge13 respectively\n\tmerge12 = pd.concat([co12E, co12I],axis=1)\n\tmerge12.columns = ['exports', 'imports']\n\ttemp = pd.DataFrame(merge12.index.values, index = np.arange(merge12.index.values.size))\n\tmerge12 = merge12.set_index(np.arange(merge12.index.values.size))\n\tmerge12 = pd.concat([temp, merge12],axis=1)\n\tmerge12.columns = ['Commodity', 'Exports', 'Imports']\n\n\tmerge13 = pd.concat([co13E, co13I],axis=1)\n\tmerge13.columns = ['exports', 'imports']\n\ttemp = pd.DataFrame(merge13.index.values, index = np.arange(merge13.index.values.size))\n\tmerge13 = merge13.set_index(np.arange(merge13.index.values.size))\n\tmerge13 = pd.concat([temp, merge13],axis=1)\n\tmerge13.columns = ['Commodity', 'Exports', 'Imports']\n\n\t# concatenate merge12 and merge13 vertically\n\tmerge = pd.concat([merge12, merge13],axis=0)\n\tmerge = merge.groupby('Commodity').sum()\n\tmerge = merge.query('Exports > 0')\n\tmerge = merge.query('Imports > 0')\n\tprint (merge)\n\n\tresult = merge.to_latex()\n\tprint (result)\n\tf1 = open(\"table3.tex\",'w')\n\tf1.write(result)\n\tf1.close()\n\t# write results to sheets in excel file\n\tp1 = merge.index.values\n\tn = merge.index.values.size\n\tws1 = wb.create_sheet(title = \"Q7\")\n\tws1['A1'] = \"Country\"\n\tfont = Font(size = 13, bold = True)\n\tws1['A1'].font = font\n\tfor k in range(n):\n\t\tws1[str(p[0] + str(k+2))] = p1[k]\n\tws1.column_dimensions['A'].width = 35\n\n\n\n\n\n\n\n\n# Executing all functions\nq_1()\nq_2()\nq_3()\nq_4()\nq_5()\nq_6()\nq_7()\n\ndata = np.random.randn(2000)\nplt.hist(data)\nplt.title(\"Gaussian Histogram\")\nplt.xlabel(\"Numbers\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\n\n\n# labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n# sizes = [15, 30, 45, 10]\n# explode = (0, 0.1, 0, 0)\n# plt.pie(sizes, explode=explode, labels=labels)\n# plt.show()\n\n\nt = arange(-1.0, 1.0, 0.01)\ns = sin(2.5 * pi * t)\nplot(t,s)\ntitle(\"sine wave\")\nxlabel(\"Time\")\nylabel(\"Amplitude\")\nshow()\n\ns = cos(2.5 * pi * t)\nplot(t,s)\ntitle(\"cos wave\")\nxlabel(\"Time\")\nylabel(\"Amplitude\")\nshow()\n# SToring all sheets with results of all questions in excel file 173050061_solution.xls\nsh = wb.get_sheet_by_name('Sheet')\nwb.remove_sheet(sh)\nwb.save(filename = dest_filename)\n\nprint (\"Success..... 
\" + \"\\n\" + \"check 173050061_solution.xls file.....\")","sub_path":"software_lab/173050061_lab9/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":14382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"458107301","text":"from os import environ\nfrom os.path import join, expanduser\nimport sqlalchemy\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.feature_selection import RFE\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\nimport statsmodels.api as sm\n\n#https://towardsdatascience.com/building-a-logistic-regression-in-python-step-by-step-becd4d56c9c8\n\nsecrets_filepath = join(expanduser('~'), 'guardian_secrets')\n\nwith open(secrets_filepath, 'r', encoding='utf-8-sig') as secrets_file:\n lines = secrets_file.readlines()\n secrets = dict(\n line.rstrip().split(\"=\", maxsplit=1)\n for line in lines if line.rstrip() != '')\n\nenviron[\"NLS_LANG\"] = \"AMERICAN_AMERICA.UTF8\"\n\negn = sqlalchemy.create_engine('oracle+cx_oracle://crm_user:{0}@bc15-aix01:1521/?service_name=crm'.format(secrets['crm_crm_user_pass']))\ncnn = egn.connect()\n\ndf = pd.read_sql('select * from crm_user.TMP_GAG_CS_NEW_VV_2', cnn)\ncol_t = pd.read_sql(\n 'select lower(column_name) column_name, column_status, column_type '\n 'from crm_user.lib_scor_column_types '\n 'where sysdate between df and dt', cnn)\n\ncat_columns = col_t[col_t['column_type'] == 'NOMINAL']['column_name']\nfor cat_column in cat_columns:\n dummies = pd.get_dummies(df[cat_column], prefix=cat_column)\n df = df.join(dummies)\n\ndf = df.drop(labels=cat_columns.tolist(), axis=1)\n\ny = ['event']\nX = col_t[~col_t['column_name'].isin(y+cat_columns.tolist())]['column_name'].tolist()\n\nlogreg = LogisticRegression()\nrfe = RFE(logreg, 15)\nrfe = rfe.fit(df[X], df['event'])\n\nXf = np.array(X)[rfe.support_].tolist()\n\nfor f in Xf:\n Xf1 = [f2 for f2 in Xf if f2 != f]\n logit_model = sm.Logit(df['event'], df[Xf1])\n try:\n result=logit_model.fit()\n print('Success without {0}'.format(f))\n except:\n print('Error without {0}'.format(f))\n\n\nXf1 = [f2 for f2 in Xf if f2 != 'mnth_from_first_restr']\nlogit_model = sm.Logit(df['event'], df[Xf1])\n\n\nX_train, X_test, y_train, y_test = train_test_split(df[Xf1], df['event'], test_size=0.3, random_state=0)\n\n#train\nlogreg = LogisticRegression()\nlogreg.fit(X_train, y_train)\n\n#predicting\ny_pred = logreg.predict(X_test)\n\n#score result on test set\nprint('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))\n\n\n\nkfold = KFold(n_splits=10, random_state=7)\nmodelCV = LogisticRegression()\nscoring = 'accuracy'\nresults = cross_val_score(modelCV, X_train, y_train, cv=kfold, scoring=scoring)\n\nprint(\"10-fold cross validation average accuracy: %.3f\" % (results.mean()))\n\nconfusion_matrix = confusion_matrix(y_test, y_pred)\nprint(confusion_matrix)\n\n","sub_path":"GAG_CS_NEW_VV_2.py","file_name":"GAG_CS_NEW_VV_2.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35868414","text":"\"\"\"Sample-level hard filtering based on Picard statistics\"\"\"\n\nimport os\nfrom collections import defaultdict\nfrom os.path import join\nimport logging\n\nimport pandas as pd\nimport hail as 
hl\n\nfrom cpg_qc.utils import gs_cache_file, file_exists\n\n\nlogger = logging.getLogger('cpg_qc_hard_filtering')\n\n\ndef compute_hard_filters(\n mt: hl.MatrixTable,\n metadata_ht: hl.MatrixTable,\n sex_ht: hl.Table,\n hail_sample_qc_ht: hl.Table,\n work_bucket: str,\n local_tmp_dir: str,\n cov_threshold: int,\n overwrite: bool = False,\n) -> hl.Table:\n \"\"\"\n Uses the sex imputation results, results of the sample_qc() run on\n bi-allelic variants, and Picard stats files specificed in `sample_df`,\n to apply filters to samples in `mt` and create a table with\n samples that fail at least one sampe.\n\n :param mt: input matrix table\n :param metadata_ht: metadata generated by combine_gvcfs. Expected fields:\n contamination, alignment_summary_metrics, duplicate_metrics,\n insert_size_metrics, wgs_metrics (any of those are optional).\n Values must point to corresponding Picard stats files (see\n `_parse_metrics` for details)\n :param sex_ht: required fields: \"sex_karyotype\", \"chr20_mean_dp\"\n :param hail_sample_qc_ht: required fields:\n \"bi_allelic_sample_qc { n_snp, n_singleton, r_het_hom_var }\"\n :param work_bucket: bucket to write checkpoints and intermediate files\n :param local_tmp_dir: local path to write temporary files\n :param cov_threshold: minimal chr20 coverage\n :param overwrite: overwrite checkpoints if they exist\n :return: table with samples failed the filters, and the following structure:\n 's': str\n 'hard_filters': set # a non-empty subset of { ambiguous_sex,\n sex_aneuploidy, low_coverage, bad_qc_metrics, contamination, chimera,\n coverage, insert_size }\n \"\"\"\n logger.info('Generating hard filters')\n out_ht_path = join(work_bucket, 'hard_filters.ht')\n if not overwrite and file_exists(out_ht_path):\n return hl.read_table(out_ht_path)\n\n metrics_ht = _parse_picard_metrics(metadata_ht, work_bucket, local_tmp_dir)\n metrics_ht.checkpoint(\n join(work_bucket, 'picard_metrics.ht'),\n overwrite=overwrite,\n _read_if_exists=not overwrite,\n )\n\n ht = mt.cols()\n ht = ht.annotate(hard_filters=hl.empty_set(hl.tstr))\n\n # Helper function to add filters into the `hard_filters` set\n def add_filter(ht, expr, name):\n return ht.annotate(\n hard_filters=hl.if_else(\n expr & hl.is_defined(expr), ht.hard_filters.add(name), ht.hard_filters\n )\n )\n\n # Remove samples with ambiguous sex assignments\n ht = add_filter(ht, sex_ht[ht.key].sex_karyotype == 'ambiguous', 'ambiguous_sex')\n ht = add_filter(\n ht,\n ~hl.set({'ambiguous', 'XX', 'XY'}).contains(sex_ht[ht.key].sex_karyotype),\n 'sex_aneuploidy',\n )\n\n # Remove low-coverage samples\n # chrom 20 coverage is computed to infer sex and used here\n ht = add_filter(ht, sex_ht[ht.key].chr20_mean_dp < cov_threshold, 'low_coverage')\n\n # Remove extreme raw bi-allelic sample QC outliers\n ht = add_filter(\n ht,\n (\n (hail_sample_qc_ht[ht.key].bi_allelic_sample_qc.n_snp > 3.75e6)\n | (hail_sample_qc_ht[ht.key].bi_allelic_sample_qc.n_snp < 2.4e6)\n | (hail_sample_qc_ht[ht.key].bi_allelic_sample_qc.n_singleton > 1e5)\n | (hail_sample_qc_ht[ht.key].bi_allelic_sample_qc.r_het_hom_var > 3.3)\n ),\n 'bad_qc_metrics',\n )\n\n # Remove samples that fail picard metric thresholds, percents are not divided\n # by 100, e.g. 
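The `add_filter` helper above accumulates named failure reasons into a per-sample set instead of dropping a sample at the first failed check, so downstream reports can show every reason a sample was excluded. A plain-Python sketch of the same idea, using the thresholds quoted from the code and invented sample metrics (no Hail required):

```python
# Thresholds copied from the hard-filter code above; percents are
# expressed as 5.00, not 0.05.
thresholds = {
    'contamination': lambda m: m['freemix'] > 5.00,
    'chimera':       lambda m: m['pct_chimeras'] > 5.00,
    'coverage':      lambda m: m['mean_coverage'] < 15,
    'insert_size':   lambda m: m['median_insert_size'] < 250,
}

# Invented example metrics for two samples.
samples = {
    'S1': {'freemix': 0.2, 'pct_chimeras': 1.0, 'mean_coverage': 32, 'median_insert_size': 400},
    'S2': {'freemix': 7.5, 'pct_chimeras': 0.5, 'mean_coverage': 12, 'median_insert_size': 390},
}

hard_filters = {
    s: {name for name, failed in thresholds.items() if failed(m)}
    for s, m in samples.items()
}
# Keep only samples that failed at least one filter, as the final table does.
failed = {s: f for s, f in hard_filters.items() if f}
print(failed)  # {'S2': {'contamination', 'coverage'}}
```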
5% == 5.00, 5% != 0.05\n ht = add_filter(ht, metrics_ht[ht.key].freemix > 5.00, 'contamination')\n ht = add_filter(ht, metrics_ht[ht.key].pct_chimeras > 5.00, 'chimera')\n ht = add_filter(ht, metrics_ht[ht.key].mean_coverage < 15, 'coverage')\n ht = add_filter(ht, metrics_ht[ht.key].median_insert_size < 250, 'insert_size')\n ht = ht.filter(hl.len(ht.hard_filters) > 0)\n ht.write(out_ht_path, overwrite=True)\n return ht\n\n\ndef _parse_picard_metrics(\n metadata_ht: hl.Table, work_bucket: str, local_tmp_dir: str\n) -> hl.Table:\n \"\"\"\n Reads Picard stats files from `metadata_ht`, and converts relevant\n stats into a Hail table.\n\n :param metadata_ht: metadata generated by combine_gvcfs. Expected fields:\n contamination, alignment_summary_metrics, duplicate_metrics,\n insert_size_metrics, wgs_metrics (any of those are optional).\n Values must point to corresponding Picard stats files:\n * `contamination` expected to point to a file like:\n `call-UnmappedBamToAlignedBam/UnmappedBamToAlignedBam/*/\n call-CheckContamination/*.selfSM`, and extract the metric `FREEMIX`\n * `alignment_summary_metrics` ->\n `call-AggregatedBamQC/AggregatedBamQC/*/call-CollectAggregationMetrics/\n *.alignment_summary_metrics`, extract `PCT_CHIMERAS`\n * `duplicate_metrics` ->\n `call-UnmappedBamToAlignedBam/UnmappedBamToAlignedBam/*/\n call-MarkDuplicates/*.duplicate_metrics`, extract `PERCENT_DUPLICATION`\n * `median_insert_size` ->\n `call-AggregatedBamQC/AggregatedBamQC/*/call-CollectAggregationMetrics/\n *.insert_size_metrics`, extact `MEDIAN_INSERT_SIZE`\n * `wgs_metrics` ->\n `call-CollectWgsMetrics/*.wgs_metrics`, extract `MEDIAN_COVERAGE`\n :param work_bucket: bucket to write intermediate files\n :param local_tmp_dir: local directory to write temporary files\n :return: a table with the folliwing structure:\n \"s\": hl.tstr,\n \"freemix\": hl.tfloat32,\n \"pct_chimeras\": hl.tfloat32,\n \"duplication\": hl.tfloat32,\n \"median_insert_size\": hl.tint32,\n \"mean_coverage\": hl.tint32\n \"\"\"\n data = defaultdict(list)\n for row in metadata_ht.collect():\n data['s'].append(row.sample)\n\n contam = row.get('contamination')\n data['freemix'].append(_parse_picard_metric(contam, 'FREEMIX', local_tmp_dir))\n\n aln_sum_metrics = row.get('alignment_summary_metrics')\n data['pct_chimeras'].append(\n _parse_picard_metric(aln_sum_metrics, 'PCT_CHIMERAS', local_tmp_dir)\n )\n\n dup_metrics = row.get('duplicate_metrics')\n data['duplication'].append(\n _parse_picard_metric(dup_metrics, 'PERCENT_DUPLICATION', local_tmp_dir)\n )\n\n is_metrics = row.get('insert_size_metrics')\n data['median_insert_size'].append(\n _parse_picard_metric(is_metrics, 'MEDIAN_INSERT_SIZE', local_tmp_dir)\n )\n\n wgs_metrics = row.get('wgs_metrics')\n data['mean_coverage'].append(\n _parse_picard_metric(wgs_metrics, 'MEDIAN_COVERAGE', local_tmp_dir)\n )\n\n csv_path = os.path.join(work_bucket, 'sample_qc_metrics.tsv')\n pd.DataFrame.from_dict(data).to_csv(csv_path, sep='\\t', index=False)\n ht = hl.import_table(\n csv_path,\n key='s',\n types={\n 's': hl.tstr,\n 'freemix': hl.tfloat32,\n 'pct_chimeras': hl.tfloat32,\n 'duplication': hl.tfloat32,\n 'median_insert_size': hl.tint32,\n 'mean_coverage': hl.tint32,\n },\n )\n return ht\n\n\ndef _parse_picard_metric(fpath, metric_name, local_tmp_dir):\n val = 'NA'\n if not fpath or pd.isnull(fpath):\n return val\n with open(gs_cache_file(fpath, local_tmp_dir)) as fh:\n idx = None\n for line in fh:\n if f'\\t{metric_name}\\t' in line:\n idx = line.split('\\t').index(metric_name)\n continue\n if idx 
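`_parse_picard_metric` (whose body continues just below) locates a metric's column by scanning for the header row that names it, then reads the value from the first following data row. A self-contained, simplified sketch of that header/column lookup against an in-memory Picard-style metrics block (the sample text is invented, and the header test here is a membership check rather than the original's tab-delimited substring match):

```python
import io

def parse_metric(fh, metric_name):
    """Return metric_name's value from a Picard-style tab-separated block."""
    idx = None
    for line in fh:
        cells = line.rstrip('\n').split('\t')
        if idx is None:
            if metric_name in cells:   # the header row names the columns
                idx = cells.index(metric_name)
            continue
        return cells[idx]              # first data row after the header
    return 'NA'

sample = "CATEGORY\tPCT_CHIMERAS\tTOTAL_READS\nPAIR\t0.0123\t1000000\n"
print(parse_metric(io.StringIO(sample), 'PCT_CHIMERAS'))  # 0.0123
```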
is not None:\n val = line.split('\\t')[idx]\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except ValueError:\n pass\n break\n return val\n","sub_path":"cpg_qc/hard_filtering.py","file_name":"hard_filtering.py","file_ext":"py","file_size_in_byte":8076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"470266817","text":"#-*-coding:utf-8-*-\n#通过request向服务器传递数据\n\nimport urllib2\nimport urllib\n\nurl='http://www.someserver.com/register.cgi'\n\nvalues={'name':'WHY',\n\t'location':'SDU',\n\t'language':'Python'}\n\n#编码\ndata=urllib.urlencode(values)\n#发送请求的同时传送data表单\t\nreq=urllib2.Request(url,data)\n#接受反馈的信息\nresponse=urllib2.urlopen(req)\n#读取反馈的内容\nthe_page=response.read()\n","sub_path":"urlib2_test03.py","file_name":"urlib2_test03.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"525074165","text":"import sys, argparse, collections\nimport numpy as np\nfrom pylab import *\n\ndef plot_samples(samples, width=500, msg=None):\n \"\"\"Produce a 2d plot of samples from a wave.\n\n Args:\n samples - list of samples to plot\n width - number of samples to use in plot (starting from samples[0])\n msg - string used in title of plot, to make successive plots easier to\n identify\n \"\"\"\n do_multi_plot = isinstance(samples[0], collections.Iterable)\n\n sample_times = np.linspace(0, width, width)\n if do_multi_plot:\n for wave in samples:\n plot(sample_times, wave[:width], linewidth=1.0)\n else:\n plot(sample_times, samples[:width], linewidth=1.0)\n\n xlabel('Time (samples)')\n ylabel('Amplitude')\n if msg is None:\n title('Plot of samples')\n else:\n title('Plot of samples - {}'.format(msg))\n ylim(-1.1, 1.1)\n show()\n\ndef parse_args():\n '''Parse command line arguments.\n\n Returns: list of arguments\n '''\n parser = argparse.ArgumentParser(description=('Test of the plot module.'))\n parser.add_argument('-p', '--plot', action='store_true', dest='do_plot',\n help='plot the test wave array')\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n sample_times = np.linspace(-np.pi, np.pi, 500)\n s = np.tile(np.sin(sample_times), 10)\n if args.do_plot:\n plot_samples(s, len(s), \"This is a test of plotting functionality.\")\n else:\n print(s)\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"pynthesizer/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338279156","text":"\"\"\"scrapli.driver.network.sync_driver\"\"\"\nfrom collections import defaultdict\nfrom io import BytesIO\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nfrom scrapli.driver.generic import GenericDriver\nfrom scrapli.driver.network.base_driver import BaseNetworkDriver, PrivilegeAction, PrivilegeLevel\nfrom scrapli.exceptions import ScrapliAuthenticationFailed, ScrapliPrivilegeError, ScrapliTimeout\nfrom scrapli.response import MultiResponse, Response\n\n\nclass NetworkDriver(GenericDriver, BaseNetworkDriver):\n def __init__(\n self,\n host: str,\n privilege_levels: Dict[str, PrivilegeLevel],\n default_desired_privilege_level: str,\n port: Optional[int] = None,\n auth_username: str = \"\",\n auth_password: str = \"\",\n auth_private_key: str = \"\",\n auth_private_key_passphrase: str = \"\",\n auth_strict_key: bool = True,\n auth_bypass: bool = False,\n timeout_socket: float = 15.0,\n 
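The `urlib2_test03.py` record above is Python 2; its Chinese comments translate roughly as "pass data to the server through a Request", "encode", "send the request while transmitting the form data", "receive the response", and "read the response content". Under Python 3 the same form POST looks roughly like this; the main difference is that the body must be bytes:

```python
from urllib.request import Request, urlopen
from urllib.parse import urlencode

url = 'http://www.someserver.com/register.cgi'
values = {'name': 'WHY', 'location': 'SDU', 'language': 'Python'}

data = urlencode(values).encode('ascii')  # POST bodies must be bytes in py3
req = Request(url, data)
with urlopen(req) as response:            # network call; needs a live server
    the_page = response.read()
```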
timeout_transport: float = 30.0,\n timeout_ops: float = 30.0,\n comms_return_char: str = \"\\n\",\n ssh_config_file: Union[str, bool] = False,\n ssh_known_hosts_file: Union[str, bool] = False,\n on_init: Optional[Callable[..., Any]] = None,\n on_open: Optional[Callable[..., Any]] = None,\n on_close: Optional[Callable[..., Any]] = None,\n transport: str = \"system\",\n transport_options: Optional[Dict[str, Any]] = None,\n channel_log: Union[str, bool, BytesIO] = False,\n channel_log_mode: str = \"write\",\n channel_lock: bool = False,\n logging_uid: str = \"\",\n auth_secondary: str = \"\",\n failed_when_contains: Optional[List[str]] = None,\n textfsm_platform: str = \"\",\n genie_platform: str = \"\",\n ):\n # ensure type for comms_prompt_pattern exists before setting it in the mixin\n self.comms_prompt_pattern: str\n\n super().__init__(\n host=host,\n port=port,\n auth_username=auth_username,\n auth_password=auth_password,\n auth_private_key=auth_private_key,\n auth_private_key_passphrase=auth_private_key_passphrase,\n auth_strict_key=auth_strict_key,\n auth_bypass=auth_bypass,\n timeout_socket=timeout_socket,\n timeout_transport=timeout_transport,\n timeout_ops=timeout_ops,\n comms_return_char=comms_return_char,\n ssh_config_file=ssh_config_file,\n ssh_known_hosts_file=ssh_known_hosts_file,\n on_init=on_init,\n on_open=on_open,\n on_close=on_close,\n transport=transport,\n transport_options=transport_options,\n channel_log=channel_log,\n channel_log_mode=channel_log_mode,\n channel_lock=channel_lock,\n logging_uid=logging_uid,\n )\n\n self.auth_secondary = auth_secondary\n self.failed_when_contains = failed_when_contains or []\n self.textfsm_platform = textfsm_platform\n self.genie_platform = genie_platform\n\n self.privilege_levels = privilege_levels\n self.default_desired_privilege_level = default_desired_privilege_level\n self._priv_graph = defaultdict(set)\n self.update_privilege_levels()\n\n def _escalate(self, escalate_priv: PrivilegeLevel) -> None:\n \"\"\"\n Escalate to the next privilege level up\n\n Args:\n escalate_priv: privilege level to escalate to\n\n Returns:\n None\n\n Raises:\n ScrapliAuthenticationFailed: if auth escalation timeout\n\n \"\"\"\n self._pre_escalate(escalate_priv=escalate_priv)\n\n if escalate_priv.escalate_auth is False:\n self.channel.send_input(channel_input=escalate_priv.escalate)\n else:\n try:\n super().send_interactive(\n interact_events=[\n (escalate_priv.escalate, escalate_priv.escalate_prompt, False),\n (self.auth_secondary, escalate_priv.pattern, True),\n ],\n interaction_complete_patterns=[\n self.privilege_levels[escalate_priv.previous_priv].pattern,\n escalate_priv.pattern,\n ],\n )\n except ScrapliTimeout as exc:\n raise ScrapliAuthenticationFailed(\n f\"failed escalating privilege from '{escalate_priv.previous_priv}' to \"\n f\"'{escalate_priv.name}'. 
do you need to set an 'auth_secondary' password?\"\n ) from exc\n\n def _deescalate(self, current_priv: PrivilegeLevel) -> None:\n \"\"\"\n Deescalate to the next privilege level down\n\n Args:\n current_priv: current privilege level\n\n Returns:\n None\n\n Raises:\n N/A\n\n \"\"\"\n self.channel.send_input(channel_input=current_priv.deescalate)\n\n def acquire_priv(self, desired_priv: str) -> None:\n \"\"\"\n Acquire desired priv level\n\n Args:\n desired_priv: string name of desired privilege level see\n `scrapli.driver..driver` for levels\n\n Returns:\n None\n\n Raises:\n ScrapliPrivilegeError: if desired_priv cannot be attained\n\n \"\"\"\n self._validate_privilege_level_name(privilege_level_name=desired_priv)\n\n privilege_change_count = 0\n\n while True:\n current_prompt = self.channel.get_prompt()\n privilege_action, target_priv = self._process_acquire_priv(\n destination_priv=desired_priv,\n current_prompt=current_prompt,\n )\n\n if privilege_action == PrivilegeAction.NO_ACTION:\n self._current_priv_level = target_priv\n return\n if privilege_action == PrivilegeAction.DEESCALATE:\n self._deescalate(current_priv=target_priv)\n if privilege_action == PrivilegeAction.ESCALATE:\n self._escalate(escalate_priv=target_priv)\n\n privilege_change_count += 1\n if privilege_change_count > len(self.privilege_levels) * 2:\n msg = f\"Failed to acquire requested privilege level {desired_priv}\"\n raise ScrapliPrivilegeError(msg)\n\n def _acquire_appropriate_privilege_level(self, privilege_level: str = \"\") -> None:\n \"\"\"\n Acquire the appropriate priv level\n\n Acquires the \"right\" priv level based on generic_driver_mode, provided privilege level,\n and default desired privilege level. If in \"generic_driver_mode\" and no priv level is\n provided, we simply return as we are already at the \"right\" priv level (since we don't care\n about priv levels in this mode). If we are in \"generic_driver_mode\" and we are provided a\n priv level (this is only applicable in `send_interactive`) we will try to acquire that\n provided priv level. 
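`acquire_priv` above is a small state machine: read the current prompt, classify the privilege level it implies, then escalate or deescalate one step at a time, bailing out after `2 × len(privilege_levels)` changes so a mis-detected prompt cannot loop forever. A hedged usage sketch with scrapli's IOSXE platform driver (host and credentials are placeholders, and the privilege-level names are the IOSXE defaults as I understand them):

```python
from scrapli.driver.core import IOSXEDriver

device = {
    "host": "192.0.2.1",            # placeholder address
    "auth_username": "admin",
    "auth_password": "password",
    "auth_secondary": "enable_pw",  # used when escalation prompts for a password
    "auth_strict_key": False,
}

with IOSXEDriver(**device) as conn:
    conn.acquire_priv("configuration")   # walks exec -> privilege_exec -> configuration
    print(conn.get_prompt())             # e.g. "hostname(config)#"
    conn.acquire_priv("privilege_exec")  # deescalates one level back down
```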
If a priv name is passed we try to resolve it and use that as the\n privilege level to acquire, otherwise if no priv leve is provided we will acquire the\n default_desired_privilege_level.\n\n Args:\n privilege_level: optional name of privilege level to acquire\n\n Returns:\n None\n\n Raises:\n N/A\n\n \"\"\"\n if not privilege_level and self._generic_driver_mode is True:\n return\n\n if privilege_level:\n self._validate_privilege_level_name(privilege_level_name=privilege_level)\n resolved_privilege_level = privilege_level\n else:\n resolved_privilege_level = self.default_desired_privilege_level\n\n if self._current_priv_level.name != resolved_privilege_level:\n self.acquire_priv(desired_priv=resolved_privilege_level)\n\n def send_command(\n self,\n command: str,\n *,\n strip_prompt: bool = True,\n failed_when_contains: Optional[Union[str, List[str]]] = None,\n timeout_ops: Optional[float] = None,\n ) -> Response:\n \"\"\"\n Send a command\n\n Super method will raise TypeError if anything but a string is passed here!\n\n Args:\n command: string to send to device in privilege exec mode\n strip_prompt: True/False strip prompt from returned output\n failed_when_contains: string or list of strings indicating failure if found in response\n timeout_ops: timeout ops value for this operation; only sets the timeout_ops value for\n the duration of the operation, value is reset to initial value after operation is\n completed\n\n Returns:\n Response: Scrapli Response object\n\n Raises:\n N/A\n\n \"\"\"\n self._acquire_appropriate_privilege_level()\n\n if failed_when_contains is None:\n failed_when_contains = self.failed_when_contains\n\n response: Response = super().send_command(\n command=command,\n strip_prompt=strip_prompt,\n failed_when_contains=failed_when_contains,\n timeout_ops=timeout_ops,\n )\n self._update_response(response)\n\n return response\n\n def send_commands(\n self,\n commands: List[str],\n *,\n strip_prompt: bool = True,\n failed_when_contains: Optional[Union[str, List[str]]] = None,\n stop_on_failed: bool = False,\n eager: bool = False,\n timeout_ops: Optional[float] = None,\n ) -> MultiResponse:\n \"\"\"\n Send multiple commands\n\n Super method will raise TypeError if anything but a list of strings is passed here!\n\n Args:\n commands: list of strings to send to device in privilege exec mode\n strip_prompt: True/False strip prompt from returned output\n failed_when_contains: string or list of strings indicating failure if found in response\n stop_on_failed: True/False stop executing commands if a command fails, returns results\n as of current execution\n eager: if eager is True we do not read until prompt is seen at each command sent to the\n channel. Do *not* use this unless you know what you are doing as it is possible that\n it can make scrapli less reliable!\n timeout_ops: timeout ops value for this operation; only sets the timeout_ops value for\n the duration of the operation, value is reset to initial value after operation is\n completed. 
Note that this is the timeout value PER COMMAND sent, not for the total\n of the commands being sent!\n\n Returns:\n MultiResponse: Scrapli MultiResponse object\n\n Raises:\n N/A\n\n \"\"\"\n self._acquire_appropriate_privilege_level()\n\n if failed_when_contains is None:\n failed_when_contains = self.failed_when_contains\n\n responses = super().send_commands(\n commands=commands,\n strip_prompt=strip_prompt,\n failed_when_contains=failed_when_contains,\n stop_on_failed=stop_on_failed,\n eager=eager,\n timeout_ops=timeout_ops,\n )\n\n for response in responses:\n self._update_response(response=response)\n\n return responses\n\n def send_commands_from_file(\n self,\n file: str,\n *,\n strip_prompt: bool = True,\n failed_when_contains: Optional[Union[str, List[str]]] = None,\n stop_on_failed: bool = False,\n eager: bool = False,\n timeout_ops: Optional[float] = None,\n ) -> MultiResponse:\n \"\"\"\n Send command(s) from file\n\n Args:\n file: string path to file\n strip_prompt: True/False strip prompt from returned output\n failed_when_contains: string or list of strings indicating failure if found in response\n stop_on_failed: True/False stop executing commands if a command fails, returns results\n as of current execution\n eager: if eager is True we do not read until prompt is seen at each command sent to the\n channel. Do *not* use this unless you know what you are doing as it is possible that\n it can make scrapli less reliable!\n timeout_ops: timeout ops value for this operation; only sets the timeout_ops value for\n the duration of the operation, value is reset to initial value after operation is\n completed. Note that this is the timeout value PER COMMAND sent, not for the total\n of the commands being sent!\n\n Returns:\n MultiResponse: Scrapli MultiResponse object\n\n Raises:\n N/A\n\n \"\"\"\n self._acquire_appropriate_privilege_level()\n\n if failed_when_contains is None:\n failed_when_contains = self.failed_when_contains\n\n return super().send_commands_from_file(\n file=file,\n strip_prompt=strip_prompt,\n failed_when_contains=failed_when_contains,\n stop_on_failed=stop_on_failed,\n eager=eager,\n timeout_ops=timeout_ops,\n )\n\n def send_interactive(\n self,\n interact_events: Union[List[Tuple[str, str]], List[Tuple[str, str, bool]]],\n *,\n failed_when_contains: Optional[Union[str, List[str]]] = None,\n privilege_level: str = \"\",\n timeout_ops: Optional[float] = None,\n interaction_complete_patterns: Optional[List[str]] = None,\n ) -> Response:\n \"\"\"\n Interact with a device with changing prompts per input.\n\n Used to interact with devices where prompts change per input, and where inputs may be hidden\n such as in the case of a password input. This can be used to respond to challenges from\n devices such as the confirmation for the command \"clear logging\" on IOSXE devices for\n example. You may have as many elements in the \"interact_events\" list as needed, and each\n element of that list should be a tuple of two or three elements. The first element is always\n the input to send as a string, the second should be the expected response as a string, and\n the optional third a bool for whether or not the input is \"hidden\" (i.e. password input)\n\n An example where we need this sort of capability:\n\n '''\n 3560CX#copy flash: scp:\n Source filename []? test1.txt\n Address or name of remote host []? 
172.31.254.100\n Destination username [carl]?\n Writing test1.txt\n Password:\n\n Password:\n Sink: C0644 639 test1.txt\n !\n 639 bytes copied in 12.066 secs (53 bytes/sec)\n 3560CX#\n '''\n\n To accomplish this we can use the following:\n\n '''\n interact = conn.channel.send_inputs_interact(\n [\n (\"copy flash: scp:\", \"Source filename []?\", False),\n (\"test1.txt\", \"Address or name of remote host []?\", False),\n (\"172.31.254.100\", \"Destination username [carl]?\", False),\n (\"carl\", \"Password:\", False),\n (\"super_secure_password\", prompt, True),\n ]\n )\n '''\n\n If we needed to deal with more prompts we could simply continue adding tuples to the list of\n interact \"events\".\n\n Args:\n interact_events: list of tuples containing the \"interactions\" with the device\n each list element must have an input and an expected response, and may have an\n optional bool for the third and final element -- the optional bool specifies if the\n input that is sent to the device is \"hidden\" (ex: password), if the hidden param is\n not provided it is assumed the input is \"normal\" (not hidden)\n failed_when_contains: list of strings that, if present in final output, represent a\n failed command/interaction\n privilege_level: name of the privilege level to operate in\n timeout_ops: timeout ops value for this operation; only sets the timeout_ops value for\n the duration of the operation, value is reset to initial value after operation is\n completed. Note that this is the timeout value PER COMMAND sent, not for the total\n of the commands being sent!\n interaction_complete_patterns: list of patterns, that if seen, indicate the interactive\n \"session\" has ended and we should exit the interactive session.\n\n Returns:\n Response: scrapli Response object\n\n Raises:\n N/A\n\n \"\"\"\n self._acquire_appropriate_privilege_level(privilege_level=privilege_level)\n\n if failed_when_contains is None:\n failed_when_contains = self.failed_when_contains\n\n # type hint is due to the timeout_modifier wrapper returning `Any` so that we dont anger the\n # asyncio parts (which will get an awaitable not a Response returned)\n response: Response = super().send_interactive(\n interact_events=interact_events,\n failed_when_contains=failed_when_contains,\n timeout_ops=timeout_ops,\n interaction_complete_patterns=interaction_complete_patterns,\n )\n self._update_response(response=response)\n\n return response\n\n def _abort_config(self) -> None:\n \"\"\"\n Abort a configuration operation/session if applicable (for config sessions like junos/iosxr)\n\n Args:\n N/A\n\n Returns:\n None\n\n Raises:\n N/A\n\n \"\"\"\n\n def send_configs(\n self,\n configs: List[str],\n *,\n strip_prompt: bool = True,\n failed_when_contains: Optional[Union[str, List[str]]] = None,\n stop_on_failed: bool = False,\n privilege_level: str = \"\",\n eager: bool = False,\n timeout_ops: Optional[float] = None,\n ) -> MultiResponse:\n \"\"\"\n Send configuration(s)\n\n Args:\n configs: list of strings to send to device in config mode\n strip_prompt: True/False strip prompt from returned output\n failed_when_contains: string or list of strings indicating failure if found in response\n stop_on_failed: True/False stop executing commands if a command fails, returns results\n as of current execution; aborts configuration session if applicable (iosxr/junos or\n eos/nxos if using a configuration session)\n privilege_level: name of configuration privilege level/type to acquire; this is platform\n dependent, so check the device driver for 
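The docstring's flash-copy walkthrough shows the lower-level `send_inputs_interact` channel call; through this driver the same exchange goes via `send_interactive`. A sketch of that call shape (it assumes `conn` is an already-open NetworkDriver subclass instance, and the prompts and filenames are illustrative, copied from the docstring example):

```python
events = [
    ("copy flash: scp:", "Source filename []?", False),
    ("test1.txt", "Address or name of remote host []?", False),
    ("172.31.254.100", "Destination username [carl]?", False),
    ("carl", "Password:", False),
    ("super_secure_password", "3560CX#", True),  # hidden input: not echoed to logs
]
response = conn.send_interactive(interact_events=events)
print(response.result)
```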
specifics. Examples of privilege_name\n would be \"configuration_exclusive\" for IOSXRDriver, or \"configuration_private\" for\n JunosDriver. You can also pass in a name of a configuration session such as\n \"my-config-session\" if you have registered a session using the\n \"register_config_session\" method of the EOSDriver or NXOSDriver.\n eager: if eager is True we do not read until prompt is seen at each command sent to the\n channel. Do *not* use this unless you know what you are doing as it is possible that\n it can make scrapli less reliable!\n timeout_ops: timeout ops value for this operation; only sets the timeout_ops value for\n the duration of the operation, value is reset to initial value after operation is\n completed. Note that this is the timeout value PER CONFIG sent, not for the total\n of the configs being sent!\n\n Returns:\n MultiResponse: Scrapli MultiResponse object\n\n Raises:\n N/A\n\n \"\"\"\n resolved_privilege_level, failed_when_contains = self._pre_send_configs(\n configs=configs,\n failed_when_contains=failed_when_contains,\n privilege_level=privilege_level,\n )\n\n if self._current_priv_level.name != resolved_privilege_level:\n self.acquire_priv(desired_priv=resolved_privilege_level)\n\n responses = super().send_commands(\n commands=configs,\n strip_prompt=strip_prompt,\n failed_when_contains=failed_when_contains,\n stop_on_failed=stop_on_failed,\n eager=eager,\n timeout_ops=timeout_ops,\n )\n\n if stop_on_failed and responses.failed:\n self._abort_config()\n\n return self._post_send_configs(responses=responses)\n\n def send_config(\n self,\n config: str,\n *,\n strip_prompt: bool = True,\n failed_when_contains: Optional[Union[str, List[str]]] = None,\n stop_on_failed: bool = False,\n privilege_level: str = \"\",\n eager: bool = False,\n timeout_ops: Optional[float] = None,\n ) -> Response:\n \"\"\"\n Send configuration string\n\n Args:\n config: string configuration to send to the device, supports sending multi-line strings\n strip_prompt: True/False strip prompt from returned output\n failed_when_contains: string or list of strings indicating failure if found in response\n stop_on_failed: True/False stop executing commands if a command fails, returns results\n as of current execution; aborts configuration session if applicable (iosxr/junos or\n eos/nxos if using a configuration session)\n privilege_level: name of configuration privilege level/type to acquire; this is platform\n dependent, so check the device driver for specifics. Examples of privilege_name\n would be \"configuration_exclusive\" for IOSXRDriver, or \"configuration_private\" for\n JunosDriver. You can also pass in a name of a configuration session such as\n \"my-config-session\" if you have registered a session using the\n \"register_config_session\" method of the EOSDriver or NXOSDriver.\n eager: if eager is True we do not read until prompt is seen at each command sent to the\n channel. Do *not* use this unless you know what you are doing as it is possible that\n it can make scrapli less reliable!\n timeout_ops: timeout ops value for this operation; only sets the timeout_ops value for\n the duration of the operation, value is reset to initial value after operation is\n completed. 
Note that this is the timeout value PER CONFIG sent, not for the total\n of the configs being sent!\n\n Returns:\n Response: Scrapli Response object\n\n Raises:\n N/A\n\n \"\"\"\n split_config = self._pre_send_config(config=config)\n\n # now that we have a list of configs, just use send_configs to actually execute them\n multi_response = self.send_configs(\n configs=split_config,\n strip_prompt=strip_prompt,\n failed_when_contains=failed_when_contains,\n stop_on_failed=stop_on_failed,\n privilege_level=privilege_level,\n eager=eager,\n timeout_ops=timeout_ops,\n )\n return self._post_send_config(config=config, multi_response=multi_response)\n\n def send_configs_from_file(\n self,\n file: str,\n *,\n strip_prompt: bool = True,\n failed_when_contains: Optional[Union[str, List[str]]] = None,\n stop_on_failed: bool = False,\n privilege_level: str = \"\",\n eager: bool = False,\n timeout_ops: Optional[float] = None,\n ) -> MultiResponse:\n \"\"\"\n Send configuration(s) from a file\n\n Args:\n file: string path to file\n strip_prompt: True/False strip prompt from returned output\n failed_when_contains: string or list of strings indicating failure if found in response\n stop_on_failed: True/False stop executing commands if a command fails, returns results\n as of current execution; aborts configuration session if applicable (iosxr/junos or\n eos/nxos if using a configuration session)\n privilege_level: name of configuration privilege level/type to acquire; this is platform\n dependent, so check the device driver for specifics. Examples of privilege_name\n would be \"exclusive\" for IOSXRDriver, \"private\" for JunosDriver. You can also pass\n in a name of a configuration session such as \"session_mysession\" if you have\n registered a session using the \"register_config_session\" method of the EOSDriver or\n NXOSDriver.\n eager: if eager is True we do not read until prompt is seen at each command sent to the\n channel. Do *not* use this unless you know what you are doing as it is possible that\n it can make scrapli less reliable!\n timeout_ops: timeout ops value for this operation; only sets the timeout_ops value for\n the duration of the operation, value is reset to initial value after operation is\n completed. 
Note that this is the timeout value PER CONFIG sent, not for the total\n of the configs being sent!\n\n Returns:\n MultiResponse: Scrapli MultiResponse object\n\n Raises:\n N/A\n\n \"\"\"\n configs = self._pre_send_from_file(file=file, caller=\"send_configs_from_file\")\n\n return self.send_configs(\n configs=configs,\n strip_prompt=strip_prompt,\n failed_when_contains=failed_when_contains,\n stop_on_failed=stop_on_failed,\n privilege_level=privilege_level,\n eager=eager,\n timeout_ops=timeout_ops,\n )\n","sub_path":"scrapli/driver/network/sync_driver.py","file_name":"sync_driver.py","file_ext":"py","file_size_in_byte":25989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527568542","text":"from devpi_common.types import cached_property\nfrom .config import hookimpl\nfrom .fileutil import dumps, loads\nfrom .log import threadlog, thread_push_log, thread_pop_log\nfrom .readonly import ReadonlyView\nfrom .readonly import ensure_deeply_readonly, get_mutable_deepcopy\nfrom repoze.lru import LRUCache\nimport contextlib\nimport os\nimport py\nimport sqlite3\nimport time\n\n\nclass BaseConnection:\n def __init__(self, sqlconn, basedir, storage):\n self._sqlconn = sqlconn\n self._basedir = basedir\n self.dirty_files = {}\n self.storage = storage\n self._changelog_cache = storage._changelog_cache\n\n def close(self):\n self._sqlconn.close()\n\n def commit(self):\n self._sqlconn.commit()\n\n def rollback(self):\n self._sqlconn.rollback()\n\n @cached_property\n def last_changelog_serial(self):\n return self.db_read_last_changelog_serial()\n\n def db_read_last_changelog_serial(self):\n q = 'SELECT MAX(_ROWID_) FROM \"changelog\" LIMIT 1'\n res = self._sqlconn.execute(q).fetchone()[0]\n return -1 if res is None else res\n\n def db_read_typedkey(self, relpath):\n q = \"SELECT keyname, serial FROM kv WHERE key = ?\"\n c = self._sqlconn.cursor()\n row = c.execute(q, (relpath,)).fetchone()\n if row is None:\n raise KeyError(relpath)\n return tuple(row[:2])\n\n def db_write_typedkey(self, relpath, name, next_serial):\n q = \"INSERT OR REPLACE INTO kv (key, keyname, serial) VALUES (?, ?, ?)\"\n self._sqlconn.execute(q, (relpath, name, next_serial))\n\n def write_changelog_entry(self, serial, entry):\n threadlog.debug(\"writing changelog for serial %s\", serial)\n data = dumps(entry)\n self._sqlconn.execute(\n \"INSERT INTO changelog (serial, data) VALUES (?, ?)\",\n (serial, sqlite3.Binary(data)))\n\n def get_raw_changelog_entry(self, serial):\n q = \"SELECT data FROM changelog WHERE serial = ?\"\n row = self._sqlconn.execute(q, (serial,)).fetchone()\n if row is not None:\n return bytes(row[0])\n return None\n\n def get_changes(self, serial):\n changes = self._changelog_cache.get(serial)\n if changes is None:\n data = self.get_raw_changelog_entry(serial)\n changes, rel_renames = loads(data)\n # make values in changes read only so no calling site accidentally\n # modifies data\n changes = ensure_deeply_readonly(changes)\n assert isinstance(changes, ReadonlyView)\n self._changelog_cache.put(serial, changes)\n return changes\n\n\nclass Connection(BaseConnection):\n def io_file_os_path(self, path):\n return None\n\n def io_file_exists(self, path):\n assert not os.path.isabs(path)\n c = self._sqlconn.cursor()\n q = \"SELECT path FROM files WHERE path = ?\"\n c.execute(q, (path,))\n result = c.fetchone()\n c.close()\n return result is not None\n\n def io_file_set(self, path, content):\n assert not os.path.isabs(path)\n assert not path.endswith(\"-tmp\")\n c 
= self._sqlconn.cursor()\n q = \"INSERT OR REPLACE INTO files (path, size, data) VALUES (?, ?, ?)\"\n c.execute(q, (path, len(content), sqlite3.Binary(content)))\n c.close()\n self.dirty_files[path] = True\n\n def io_file_open(self, path):\n return py.io.BytesIO(self.io_file_get(path))\n\n def io_file_get(self, path):\n assert not os.path.isabs(path)\n c = self._sqlconn.cursor()\n q = \"SELECT data FROM files WHERE path = ?\"\n c.execute(q, (path,))\n content = c.fetchone()\n c.close()\n if content is None:\n raise IOError()\n return bytes(content[0])\n\n def io_file_size(self, path):\n assert not os.path.isabs(path)\n c = self._sqlconn.cursor()\n q = \"SELECT size FROM files WHERE path = ?\"\n c.execute(q, (path,))\n result = c.fetchone()\n c.close()\n if result is not None:\n return result[0]\n\n def io_file_delete(self, path):\n assert not os.path.isabs(path)\n c = self._sqlconn.cursor()\n q = \"DELETE FROM files WHERE path = ?\"\n c.execute(q, (path,))\n c.close()\n self.dirty_files[path] = None\n\n def write_transaction(self):\n return Writer(self.storage, self)\n\n def commit_files_without_increasing_serial(self):\n self.commit()\n\n\nclass BaseStorage(object):\n def __init__(self, basedir, notify_on_commit, cache_size):\n self.basedir = basedir\n self.sqlpath = self.basedir.join(self.db_filename)\n self._notify_on_commit = notify_on_commit\n self._changelog_cache = LRUCache(cache_size) # is thread safe\n self.last_commit_timestamp = time.time()\n self.ensure_tables_exist()\n\n def _get_sqlconn_uri_kw(self, uri):\n return sqlite3.connect(\n uri, timeout=60, isolation_level=None, uri=True)\n\n def _get_sqlconn_uri(self, uri):\n return sqlite3.connect(\n uri, timeout=60, isolation_level=None)\n\n def _get_sqlconn_path(self, uri):\n return sqlite3.connect(\n self.sqlpath.strpath, timeout=60, isolation_level=None)\n\n def _get_sqlconn(self, uri):\n # we will try different connection methods and overwrite _get_sqlconn\n # with the first successful one\n try:\n # the uri keyword is only supported from Python 3.4 onwards and\n # possibly other Python implementations\n conn = self._get_sqlconn_uri_kw(uri)\n # remember for next time\n self._get_sqlconn = self._get_sqlconn_uri_kw\n return conn\n except TypeError as e:\n if e.args and 'uri' in e.args[0] and 'keyword argument' in e.args[0]:\n threadlog.warn(\n \"The uri keyword for 'sqlite3.connect' isn't supported by \"\n \"this Python version.\")\n else:\n raise\n except sqlite3.OperationalError as e:\n threadlog.warn(\"%s\" % e)\n threadlog.warn(\n \"The installed version of sqlite3 doesn't seem to support \"\n \"the uri keyword for 'sqlite3.connect'.\")\n except sqlite3.NotSupportedError:\n threadlog.warn(\n \"The installed version of sqlite3 doesn't support the uri \"\n \"keyword for 'sqlite3.connect'.\")\n try:\n # sqlite3 might be compiled with default URI support\n conn = self._get_sqlconn_uri(uri)\n # remember for next time\n self._get_sqlconn = self._get_sqlconn_uri\n return conn\n except sqlite3.OperationalError as e:\n # log the error and switch to using the path\n threadlog.warn(\"%s\" % e)\n threadlog.warn(\n \"Opening the sqlite3 db without options in URI. 
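The `io_file_*` methods above keep file contents as BLOBs in the same SQLite database as the key-value data, so file writes commit atomically with the metadata that references them. A minimal stdlib sketch of the same table shape and access pattern (the path and payload are stand-ins):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE files (path TEXT PRIMARY KEY, size INTEGER NOT NULL, data BLOB NOT NULL)"
)

def file_set(path, content: bytes):
    # INSERT OR REPLACE makes writes idempotent per path, as above.
    conn.execute(
        "INSERT OR REPLACE INTO files (path, size, data) VALUES (?, ?, ?)",
        (path, len(content), sqlite3.Binary(content)),
    )

def file_get(path) -> bytes:
    row = conn.execute("SELECT data FROM files WHERE path = ?", (path,)).fetchone()
    if row is None:
        raise IOError(path)
    return bytes(row[0])

file_set("+f/pkg/example-1.0.tar.gz", b"\x1f\x8b...")  # payload is a stand-in
print(len(file_get("+f/pkg/example-1.0.tar.gz")))
```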
There is a \"\n \"higher possibility of read/write conflicts between \"\n \"threads, causing slowdowns due to retries.\")\n conn = self._get_sqlconn_path(uri)\n # remember for next time\n self._get_sqlconn = self._get_sqlconn_path\n return conn\n\n def get_connection(self, closing=True, write=False):\n # we let the database serialize all writers at connection time\n # to play it very safe (we don't have massive amounts of writes).\n mode = \"ro\"\n if write:\n mode = \"rw\"\n if not self.sqlpath.exists():\n mode = \"rwc\"\n uri = \"file:%s?mode=%s\" % (self.sqlpath, mode)\n sqlconn = self._get_sqlconn(uri)\n if write:\n start_time = time.time()\n while 1:\n try:\n sqlconn.execute(\"begin immediate\")\n break\n except sqlite3.OperationalError:\n # another thread may be writing, give it a chance to finish\n time.sleep(0)\n if time.time() - start_time > 5:\n # if it takes this long, something is wrong\n raise\n conn = self.Connection(sqlconn, self.basedir, self)\n if closing:\n return contextlib.closing(conn)\n return conn\n\n\nclass Storage(BaseStorage):\n Connection = Connection\n db_filename = \".sqlite_db\"\n\n def perform_crash_recovery(self):\n pass\n\n def ensure_tables_exist(self):\n if self.sqlpath.exists():\n return\n with self.get_connection(write=True) as conn:\n threadlog.info(\"DB: Creating schema\")\n c = conn._sqlconn.cursor()\n c.execute(\"\"\"\n CREATE TABLE kv (\n key TEXT NOT NULL PRIMARY KEY,\n keyname TEXT,\n serial INTEGER\n )\n \"\"\")\n c.execute(\"\"\"\n CREATE TABLE changelog (\n serial INTEGER PRIMARY KEY,\n data BLOB NOT NULL\n )\n \"\"\")\n c.execute(\"\"\"\n CREATE TABLE files (\n path TEXT PRIMARY KEY,\n size INTEGER NOT NULL,\n data BLOB NOT NULL\n )\n \"\"\")\n conn.commit()\n\n\n@hookimpl\ndef devpiserver_storage_backend(settings):\n return dict(\n storage=Storage,\n name=\"sqlite_db_files\",\n description=\"SQLite backend with files in DB for testing only\")\n\n\n@hookimpl\ndef devpiserver_metrics(request):\n result = []\n xom = request.registry[\"xom\"]\n storage = xom.keyfs._storage\n if not isinstance(storage, BaseStorage):\n return result\n cache = getattr(storage, '_changelog_cache', None)\n if cache is None:\n return result\n result.extend([\n ('devpi_server_storage_cache_evictions', 'counter', cache.evictions),\n ('devpi_server_storage_cache_hits', 'counter', cache.hits),\n ('devpi_server_storage_cache_lookups', 'counter', cache.lookups),\n ('devpi_server_storage_cache_misses', 'counter', cache.misses),\n ('devpi_server_storage_cache_size', 'gauge', cache.size)])\n return result\n\n\nclass Writer:\n def __init__(self, storage, conn):\n self.conn = conn\n self.storage = storage\n self.changes = {}\n self.next_serial = conn.last_changelog_serial + 1\n\n def record_set(self, typedkey, value=None, back_serial=None):\n \"\"\" record setting typedkey to value (None means it's deleted) \"\"\"\n assert not isinstance(value, ReadonlyView), value\n if back_serial is None:\n try:\n _, back_serial = self.conn.db_read_typedkey(typedkey.relpath)\n except KeyError:\n back_serial = -1\n self.conn.db_write_typedkey(typedkey.relpath, typedkey.name, self.next_serial)\n # at __exit__ time we write out changes to the _changelog_cache\n # so we protect here against the caller modifying the value later\n value = get_mutable_deepcopy(value)\n self.changes[typedkey.relpath] = (typedkey.name, back_serial, value)\n\n def __enter__(self):\n self.log = thread_push_log(\"fswriter%s:\" % self.next_serial)\n return self\n\n def __exit__(self, cls, val, tb):\n commit_serial = 
self.next_serial\n thread_pop_log(\"fswriter%s:\" % commit_serial)\n if cls is None:\n entry = self.changes, []\n self.conn.write_changelog_entry(commit_serial, entry)\n self.conn.commit()\n message = \"committed: keys: %s\"\n args = [\",\".join(map(repr, list(self.changes)))]\n self.log.info(\"commited at %s\", commit_serial)\n self.log.debug(message, *args)\n\n self.storage._notify_on_commit(commit_serial)\n else:\n self.conn.rollback()\n self.log.info(\"roll back at %s\", commit_serial)\n","sub_path":"server/devpi_server/keyfs_sqlite.py","file_name":"keyfs_sqlite.py","file_ext":"py","file_size_in_byte":11939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"625619384","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 16 17:42:36 2020\r\n\r\n@author: Usuario\r\n\"\"\"\r\n\r\nimport tkinter\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter import filedialog\r\nimport sys\r\nimport os\r\nimport numpy as np, os, gdal, glob, math\r\nimport math\r\n\r\n\r\nroot=Tk()\r\nroot.geometry(\"650x680\")\r\nroot.iconbitmap(\"icono.ico\")\r\nroot.title(\"VIs Córdoba & Urbano\")\r\n\r\n\r\nmiFrame = Frame(root)\r\nmiFrame.grid(row=0, column=0, padx=15,pady=5)\r\n\r\nmiFramevi0 = Frame(root) #titulo indices\r\nmiFramevi0.grid(row=8, column=0, padx=15,pady=10) \r\n\r\nmiFramevi = Frame(root)#indices izquierda\r\nmiFramevi.grid(row=9, column=0)\r\n\r\nmiFramevisavi = Frame(root)#frame para savi\r\nmiFramevisavi.grid(row=10, column=0, padx=15,pady=20)\r\n\r\nmiFramevipvi = Frame(root)#frame para pvi\r\nmiFramevipvi.grid(row=16, column=0, padx=15,pady=20)\r\n\r\ndef infoAdicional ():\r\n messagebox.showinfo(\"VIs Córdoba & Urbano\", \"Procesador de imáganes Landsat 8 versión 2020\")\r\n \r\n \r\ndef avisoLicencia():\r\n messagebox.showwarning(\"Licencia\", \"Producto bajo licencia genérica\")\r\n \r\n \r\ndef guia():\r\n def abrir():\r\n messagebox.showinfo(\"Guía\", \"Desarrollado por:\\n\\n Valentina Córdoba Rojas\\n Cristian David Urbano Rojas\") \r\n global dos\r\n dos=Toplevel()\r\n bit=dos.iconbitmap(\"icono.ico\") \r\n dos.geometry(\"200x650\")\r\n dos.title(\"Guía de Usuario\")\r\n dos.wm_attributes(\"-alpha\", 0.9)\r\n dos.configure(bg=\"gray\")\r\n dos.minsize(500,300)\r\n dos.resizable(0,0)\r\n \r\n imagen=PhotoImage(file=\"icono.png\")\r\n botonimagen=Button(dos, image=imagen, command=abrir, bg=\"gray\")\r\n titulo=Label(dos, bg=\"gray22\", fg=\"white\", font=(\"Terminal bold\",8), \r\n text=\"VIs Córdoba & Urbano\")\r\n \r\n instrucciones=Label(dos, bg=\"gray22\", fg=\"white\", font=(\"Terminal bold\",8), anchor=S,\r\n text=\"Para pasar las imágenes de DN a valores de reflectanciasiga los siguientes pasos:\\n\\n\"\r\n \r\n \"\\n 1. Haga clic sobre el botón Abrir fichero. Posteriormente, elija el metadato de las imágenes.\"\r\n \"\\n\\n 2. Una vez obtenida la ruta del metadato haga clic sobre el botón Leer metadato.\"\r\n \"\\n\\n 3. Copie y pegue la ruta de la carpeta que contiene las imágenes: los separadores deben \\nser de tipo \\. Haga clic sobre Cargar ruta imágnes. \"\r\n \"\\n\\n 4. Copie y pegue el nombre genérico de las imágenes: Es decir antes de _B#.TIF. \\nHaga clic sobre Establecer destino\"\r\n \"\\n\\n 5. Copie y pegue la ruta de la carpeta que almacenará los resultados: los separadores deben \\nser de tipo \\. Haga clic sobre Establecer destino. \"\r\n \"\\n\\n 6. 
Finalmente, haga clic sobre iniciar proceso y espere a la finalización de la tarea:\\n se notificará mediante una alerta.\"\r\n \"\\n\\n\\n\\n Finalizado el proceso anterior, podrá efectuar el cálculo de los índices disponibles en la lista \\nhaciendo clic sobre el boton correspondiente. Para los VIs SAVI y PVI\\n es necesario introducir el valor de las constantes solicitadas.\")\r\n\r\n botonimagen.place(x=130,y=0)\r\n titulo.place(x=190,y=215)\r\n instrucciones.place(x=17, y=270)\r\n dos.transient(root)\r\n dos.grab_set()\r\n root.wait_window(dos)\r\n \r\n \r\n \r\ndef saliraplicacion():\r\n valor=messagebox.askquestion(\"Salir\",\"¿Desea salir de la aplicación?\")\r\n if valor ==\"yes\":\r\n root.destroy()\r\n\r\n\r\ndef cerrarDocumento():\r\n valor=messagebox.askretrycancel(\"Reitentar\",\"No es posible cerrar archivo bloqueado\")\r\n if valor ==True:\r\n root.destroy()\r\n \r\n \r\n \r\n \r\ndef abreFichero():\r\n messagebox.showinfo(\"Información\",\"Seleccione el archivo del metadato de las imágenes: ....._MTL.txt. Posteriormete haga clic en el botón Leer metadato. \")\r\n fichero=filedialog.askopenfilename(title=\"Abrir\",filetypes=((\"Ficheros de Python\",\"*.py\"),\r\n (\"Ficheros de texto\", \"*.txt\"),\r\n (\"Todos los archivos\",\"*.*\")))\r\n \r\n return var.set(fichero)\r\n\r\ndef carpetaimg ():\r\n global path_img\r\n path_img= str(entrada1.get()) #Cambiar ruta a la carpeta que contenga las imgs\r\n print(\"esta es la ruta de la carpeta \"+path_img)\r\n\r\ndef nombreimgs ():\r\n global imagen\r\n imagen= str(entrada2.get()) #Cambiar ruta a la carpeta que tenga el metadato\r\n print(\"esta es nomebre de las imgs \"+imagen)\r\n \r\n \r\ndef rutaresultados ():\r\n global resultados\r\n resultados= str(entrada3.get()) #Cambiar ruta a la carpeta que tenga el metadato\r\n print(\"esta es la ruta de los resultados \"+resultados)\r\n create_folder(resultados)\r\n\r\n\r\n\r\n##3.FUNCION PARA CREAR CARPETA SI NO EXISTE\r\ndef create_folder(path):\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n\r\n\r\n##CALCULO INDICADORES DE VEGETACION\r\n\r\n\r\n\r\n\r\ndef rvi(RED, NIR):\r\n rvi= RED/NIR\r\n return (rvi)\r\ndef disparadorRVI():\r\n calculo_rvi_l8=rvi(reflectancias[2],reflectancias[3])\r\n salida_rvi=(resultados+os.sep+'l8_rvi.TIF')\r\n guardar_tif(salida_rvi,calculo_rvi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Ratio Vegetation Index calculado\", title=\"RVI\")\r\n \r\n \r\n \r\n \r\ndef ndvi(RED,NIR):\r\n ndvi=(NIR-RED)/(NIR+RED)\r\n return(ndvi)\r\ndef disparadorNDVI():\r\n calculo_ndvi_l8=ndvi(reflectancias[2],reflectancias[3])\r\n salida_ndvi=(resultados+os.sep+'l8_ndvi.TIF')\r\n guardar_tif(salida_ndvi,calculo_ndvi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Normalized Difference Vegetation Index calculado\", title=\"NDVI\")\r\n \r\n \r\n \r\n \r\ndef pvi(RED,NIR,b,a):\r\n pvi=(NIR-RED-b)/(math.sqrt(a**2 +1))\r\n return(pvi)\r\ndef disparadorPVI():\r\n a= float(apvi.get())\r\n b= float(bpvi.get())\r\n calculo_pvi_l8=pvi(reflectancias[2],reflectancias[3],b,a)\r\n salida_pvi=(resultados+os.sep+'l8_pvi.TIF')\r\n guardar_tif(salida_pvi,calculo_pvi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Perpendicular Vegetation Index calculado\", title=\"PVI\")\r\n \r\n \r\n \r\ndef savi(RED,NIR,L):\r\n savi= (1 + L)*(NIR-RED)/(NIR+RED+ L)\r\n return(savi)\r\ndef disparadorSAVI():\r\n L = float(lsavi.get())\r\n calculo_savi_l8=savi(reflectancias[2], reflectancias[3],L)\r\n 
salida_savi=(resultados+os.sep+'l8_savi.TIF')\r\n guardar_tif(salida_savi,calculo_savi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Soil Adjusted Vegetation Índex calculado\", title=\"SAVI\") \r\n \r\n \r\n \r\ndef arvi(BLUE,RED,NIR):\r\n arvi2=(NIR-(RED-0.5*(RED-BLUE)))/(NIR+(RED-0.5*(RED-BLUE)))\r\n return(arvi2)\r\ndef disparadorARVI():\r\n calculo_arvi_l8=arvi(reflectancias[0],reflectancias[2],reflectancias[3])\r\n salida_arvi=(resultados+os.sep+'l8_arvi.TIF')\r\n guardar_tif(salida_arvi,calculo_arvi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Atmospherically Resistant Vegetation Index calculado\", title=\"ARVI\") \r\n \r\n \r\n \r\ndef gemi(RED,NIR):\r\n n=(2*(NIR**2 -RED**2)+1.5*NIR+0.5*RED)/(NIR+RED+0.5)\r\n gemi=(n*(1-0.25*n))-((RED-0.125))/(1-RED)\r\n return(gemi)\r\ndef disparadorGEMI():\r\n calculo_gemi_l8=gemi(reflectancias[2],reflectancias[3])\r\n salida_gemi=(resultados+os.sep+'l8_gemi.TIF')\r\n guardar_tif(salida_gemi,calculo_gemi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Global Environmental Monitoring Index calculado\", title=\"GEMI\") \r\n\r\n\r\n\r\n\r\ndef msavi(RED,NIR):\r\n msavi=(2*NIR+1-np.sqrt((2*NIR+1)**2 -8*(NIR-RED)))/(2)\r\n return(msavi)\r\ndef disparadorMSAVI():\r\n calculo_msavi_l8=msavi(reflectancias[2],reflectancias[3])\r\n salida_msavi=(resultados+os.sep+'l8_msavi.TIF')\r\n guardar_tif(salida_msavi,calculo_msavi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, A modified soil adjusted vegetation index calculado\", title=\"MSAVI\") \r\n \r\n \r\n \r\ndef gari(BLUE,GREEN,RED,NIR):\r\n gari2=(NIR-(GREEN-(BLUE-RED)))/(NIR+(GREEN-(BLUE-RED)))\r\n return(gari2)\r\ndef disparadorGARI():\r\n calculo_gari_l8=gari(reflectancias[0],reflectancias[1],reflectancias[2],reflectancias[3])\r\n salida_gari=(resultados+os.sep+'l8_gari.TIF')\r\n guardar_tif(salida_gari,calculo_gari_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, A Green Atmospherically Resistant Vegetation Index calculado\", title=\"GARI\") \r\n\r\n\r\n\r\ndef evi(BLUE,RED,NIR):\r\n evi= 2.5*(NIR-RED)/((NIR+ 6*RED- 7.5* BLUE)+ 1)\r\n return(evi)\r\ndef disparadorEVI():\r\n calculo_evi_l8=evi(reflectancias[0],reflectancias[2],reflectancias[3])\r\n salida_evi=(resultados+os.sep+'l8_evi.TIF')\r\n guardar_tif(salida_evi,calculo_evi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Enhanced Vegetation Index calculado\", title=\"EVI\") \r\n\r\n \r\n\r\ndef gndvi(GREEN,NIR):\r\n gndvi=(NIR-GREEN)/(NIR+GREEN)\r\n return(gndvi)\r\ndef disparadorGNDVI():\r\n calculo_gndvi_l8=gndvi(reflectancias[1],reflectancias[3])\r\n salida_gndvi=(resultados+os.sep+'l8_gndvi.TIF')\r\n guardar_tif(salida_gndvi,calculo_gndvi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Green Normalized Difference Vegetation Index calculado\", title=\"GNDVI\") \r\n \r\n \r\n\r\ndef dvi(RED,NIR):\r\n dvi=NIR-RED\r\n return(dvi)\r\ndef disparadorDVI():\r\n calculo_dvi_l8=dvi(reflectancias[2],reflectancias[3])\r\n salida_dvi=(resultados+os.sep+'l8_dvi.TIF')\r\n guardar_tif(salida_dvi,calculo_dvi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Difference Vegetation Index calculado\", title=\"DVI\") \r\n\r\n\r\ndef tvi(RED,NIR):\r\n tvi= np.sqrt(((NIR-RED)/(NIR+RED))+0.5)\r\n return(tvi)\r\ndef disparadorTVI():\r\n calculo_tvi_l8=tvi(reflectancias[2],reflectancias[3])\r\n salida_tvi=(resultados+os.sep+'l8_tvi.TIF')\r\n guardar_tif(salida_tvi,calculo_tvi_l8,img_banda)\r\n 
messagebox.showinfo(message=\"Proceso Terminado, Transformed Vegetation Index calculado\", title=\"TVI\") \r\n\r\n \r\n \r\ndef yvi(GREEN,RED,NIR,SWIR):\r\n yvi=((-0.899*GREEN)+(0.428*RED)+(0.076*NIR)-(0.041*NIR))\r\n return(yvi)\r\ndef disparadorYVI():\r\n calculo_yvi_l8=yvi(reflectancias[1],reflectancias[2],reflectancias[3])\r\n salida_yvi=(resultados+os.sep+'l8_yvi.TIF')\r\n guardar_tif(salida_yvi,calculo_yvi_l8,img_banda)\r\n messagebox.showinfo(message=\"Proceso Terminado, Yellow Vegetation Index calculado\", title=\"YVI\") \r\n \r\n \r\n \r\n \r\n \r\n\r\n#funciones de la automatización \r\n#------------------------------------------------------------------------------------------\r\n\r\n##1. FUNCION PARA LEER METADATO \r\n \r\nvar= StringVar()\r\n \r\n# def obtenerRutametadato():\r\n# ruta= str(var.get())\r\n# archivo_mtl= ruta #Cambiar ruta a la carpeta que tenga el metadato\r\n# print(archivo_mtl)\r\n# ruta_mtl=open(archivo_mtl,\"r\")\r\n# datos=metadato(ruta_mtl)\r\n\r\n\r\n\r\n\r\ndef guardar_tif(salida,matriz,im_entrada,x_in=0,y_in=0):\r\n #Define coordenadas iniciales\r\n geoTs=im_entrada.GetGeoTransform() #parametros\r\n driver=gdal.GetDriverByName(\"GTiff\")\r\n prj=im_entrada.GetProjection() #Proyeccion de la imagen de entrada \r\n cols=matriz.shape[1] #Filas \r\n filas=matriz.shape[0] #Columnas \r\n ulx=geoTs[0]+x_in*geoTs[1]\r\n uly=geoTs[3]+y_in*geoTs[5]\r\n geoTs=(ulx,geoTs[1],geoTs[2],uly,geoTs[4],geoTs[5])\r\n #Crear el archivo con los datos de entrada\r\n export=driver.Create(salida,cols,filas,1,gdal.GDT_Float32)\r\n banda=export.GetRasterBand(1)\r\n banda.WriteArray(matriz)\r\n export.SetGeoTransform(geoTs)\r\n export.SetProjection(prj)\r\n banda.FlushCache()\r\n export.FlushCache()\r\n \r\ndef reflectancia (M,ND,A,sun_elevation):\r\n reflec=(M*ND+A)/np.sin(sun_elevation)\r\n return(reflec)\r\n\r\n##CORRECCION ATMOSFERIA POR HISTOGRAMA\r\ndef refle_corregida(r):\r\n correccion= r-np.nanmin(r)\r\n return(correccion) \r\n \r\n##1. 
FUNCION PARA LEER METADATO\r\ndef obtenerRutametadato ():\r\n global archivo_mtl\r\n global ruta_mtl\r\n global datos \r\n archivo_mtl= str(var.get()) #Cambiar ruta a la carpeta que tenga el metadato\r\n print(archivo_mtl)\r\n ruta_mtl=open(archivo_mtl,\"r\")\r\n datos=metadato(ruta_mtl)\r\n \r\n \r\ndef metadato (archivo):\r\n metadatos={} #Diccionario vacío \r\n for i in archivo.readlines(): #iterar el archivo \r\n if \"=\" in i: #si la línea tiene un '='\r\n separador = i.split(\"=\") #cortamos la línea en el ' = '\r\n clave=separador[0].strip() #asignamos el primer elemento como clave\r\n valor=separador[1].strip() #asignamos el segundo elemento como valor\r\n metadatos[clave]=valor #llenamos el diccionario con clave y valor\r\n archivo.close()\r\n return metadatos\r\n\r\n\r\n\r\n\r\n##SCRIPT\r\n\r\n\r\ndef ndareflectancia():\r\n \r\n print(\"estos son los parametros\")\r\n print(\"*************************\")\r\n print(\"*************************\")\r\n print(path_img)\r\n print(imagen)\r\n print(resultados) \r\n \r\n global reflectancias\r\n reflectancias= []\r\n for banda in range (1,7):\r\n global img_banda\r\n global nd \r\n print (\"Inicia procesamiento de: \"+imagen+\"_B\"+str(banda)+\".TIF\")\r\n img_banda=gdal.Open(path_img+os.sep+imagen+\"_B\"+str(banda)+\".TIF\")\r\n nd=img_banda.ReadAsArray().astype('float')\r\n nd [[nd==0]] = np.nan\r\n if (banda==2 or banda==3 or banda==4 or banda==5 or banda==6):\r\n m=float(datos['REFLECTANCE_MULT_BAND_'+str(banda)])\r\n a=float(datos['REFLECTANCE_ADD_BAND_'+str(banda)])\r\n sun_el = float(datos['SUN_ELEVATION'])\r\n reflec=reflectancia(m,nd,a,sun_el)\r\n #Refelctancia corregida\r\n reflectancia_cor= refle_corregida(reflec)\r\n reflectancias.append(reflectancia_cor)\r\n out_file = resultados + os.sep+'reflectancia_cor_B'+str(banda)+'.tif'\r\n guardar_tif(out_file,reflectancia_cor,img_banda,x_in=0,y_in=0)\r\n print (\"HECHO\")\r\n messagebox.showinfo(message=\"Proceso Terminado\", title=\"DN To Reflectance\")\r\n\r\n\r\n \r\n\r\n##DEFINICION DE VARIABLES\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#BARRA DE MENÚ\r\n\r\nbarraMenu=Menu(root)\r\nroot.config(menu=barraMenu, width= 600, height=300)\r\n\r\narchivoMenu= Menu(barraMenu, tearoff=0)\r\n# archivoMenu.add_command(label=\"Nuevo\")\r\n# archivoMenu.add_command(label=\"Guardar\")\r\n# archivoMenu.add_command(label=\"Reiniciar\")\r\narchivoMenu.add_command(label=\"Cerrar\", command= cerrarDocumento)\r\narchivoMenu.add_command(label=\"Salir\", command= saliraplicacion)\r\n\r\narchivoEdicion= Menu(barraMenu)\r\narchivoEdicion.add_command(label=\"Copiar\")\r\narchivoEdicion.add_command(label=\"Cortar\")\r\narchivoEdicion.add_command(label=\"Pegar\")\r\n\r\n\r\n\r\narchivoHerramientas= Menu(barraMenu)\r\n\r\n\r\n\r\n\r\narchivoAyuda= Menu(barraMenu)\r\narchivoAyuda.add_command(label=\"Guía\", command= guia)\r\narchivoAyuda.add_command(label=\"Documentación\", command= avisoLicencia)\r\narchivoAyuda.add_command(label=\"About\", command= infoAdicional)\r\n\r\n\r\nbarraMenu.add_cascade(label=\"Archivo\", menu=archivoMenu)\r\n\r\n# barraMenu.add_cascade(label=\"Edición\", menu=archivoEdicion)\r\n\r\n# barraMenu.add_cascade(label=\"Herramientas\", menu=archivoHerramientas)\r\n\r\nbarraMenu.add_cascade(label=\"Ayudas\", menu=archivoAyuda)\r\n\r\n\r\n\r\n\r\n#---------------------------------------botones------------------------------------------------------------ \r\n\r\nbotonExaminar = Button(miFrame, width=17, text=\"Abrir fichero\", cursor=\"hand2\", command=abreFichero)\r\nbotonExaminar.grid(row=2, 
column=1,padx=3, pady=10)\r\n\r\n\r\nbotonLeerMetadato = Button(miFrame, width=17, text=\"Leer metadato\", command=obtenerRutametadato)\r\nbotonLeerMetadato.grid(row=2, column=2,padx=3, pady=10)\r\n\r\nentrada1= Entry(miFrame, width=59)\r\nentrada1.grid(row=3, column=0)\r\nbeRutaimg = Button(miFrame, width=17, text=\"Cargar ruta imágenes\", cursor=\"hand2\",command=carpetaimg)\r\nbeRutaimg.grid(row=3, column=1,padx=3, pady=10)\r\n\r\n#be boton ruta tal.....\r\n\r\nentrada2= Entry(miFrame, width=59)\r\nentrada2.grid(row=4, column=0)\r\nbeRutanombreimg = Button(miFrame, width=17, text=\"Cargar nombre imgs\", cursor=\"hand2\",command= nombreimgs)\r\nbeRutanombreimg.grid(row=4, column=1,padx=3, pady=10)\r\n\r\nentrada3= Entry(miFrame, width=59)\r\nentrada3.grid(row=5, column=0)\r\nbeRutasalida = Button(miFrame, width=17, text=\"Establecer destino\", cursor=\"hand2\", command=rutaresultados)\r\nbeRutasalida.grid(row=5, column=1,padx=3, pady=10)\r\n\r\n\r\niniciarproceso = Button(miFrame, width=17, text=\"Iniciar proceso\",font= \"Helvetica 10 bold\",cursor=\"hand2\", command=ndareflectancia)\r\niniciarproceso.grid(row=6, column=0)\r\n\r\n\r\nbotonrvi = Button(miFramevi, width=17, text=\"RVI\", cursor=\"hand2\", command=disparadorRVI)\r\nbotonrvi.grid(row=8, column=0,padx=5, pady=5)\r\n\r\nbotonNDVI= Button(miFramevi, width=17, text=\"NDVI\", cursor=\"hand2\", command=disparadorNDVI)\r\nbotonNDVI.grid(row=9, column=0,padx=5, pady=5)\r\n\r\n\r\nbotonARVI = Button(miFramevi, width=17, text=\"ARVI\", cursor=\"hand2\", command=disparadorARVI)\r\nbotonARVI.grid(row=11, column=0,padx=5, pady=5)\r\n\r\n\r\nbotonGEMI = Button(miFramevi, width=17, text=\"GEMI\", cursor=\"hand2\", command=disparadorGEMI)\r\nbotonGEMI.grid(row=13, column=0,padx=5, pady=5)\r\n\r\n\r\nbotonMSAVI = Button(miFramevi, width=17, text=\"MSAVI\", cursor=\"hand2\", command=disparadorMSAVI)\r\nbotonMSAVI.grid(row=10, column=0,padx=5, pady=5)\r\n\r\n\r\nbotonGARI = Button(miFramevi, width=17, text=\"GARI\", cursor=\"hand2\", command=disparadorGARI)\r\nbotonGARI.grid(row=8, column=1,padx=5, pady=5)\r\n\r\n\r\nbotonEVI = Button(miFramevi, width=17, text=\"EVI\", cursor=\"hand2\", command=disparadorEVI)\r\nbotonEVI.grid(row=9, column=1,padx=5, pady=5)\r\n\r\nbotonGNDVI = Button(miFramevi, width=17, text=\"GNDVI\", cursor=\"hand2\", command=disparadorGNDVI)\r\nbotonGNDVI.grid(row=10, column=1,padx=5, pady=5)\r\n\r\n\r\nbotonDVI = Button(miFramevi, width=17, text=\"DVI\", cursor=\"hand2\", command=disparadorDVI)\r\nbotonDVI.grid(row=11, column=1,padx=5, pady=5)\r\n\r\nbotonTVI = Button(miFramevi, width=17, text=\"TVI\", cursor=\"hand2\", command=disparadorTVI)\r\nbotonTVI.grid(row=12, column=0,padx=5, pady=5)\r\n\r\n\r\n\r\nbotonYVI = Button(miFramevi, width=17, text=\"YVI\", cursor=\"hand2\", command=disparadorYVI)\r\nbotonYVI.grid(row=12, column=1,padx=5, pady=5)\r\n\r\n\r\n#--------------------------------------------SAVI BOTON----------------------------------------------\r\n\r\nfactorL= Label(miFramevisavi,bg ='#DEDFDB' ,text=\"Ajuste del suelo L : \", width=15) #label L SAVI\r\nfactorL.grid(row=14,column=0,padx=5)\r\n\r\n\r\nlsavi= Entry(miFramevisavi, width=15)\r\nlsavi.grid(row=14, column=1,padx=5)\r\n\r\n\r\nbotonSAVI = Button(miFramevisavi, width=17, text=\"SAVI\", cursor=\"hand2\", command=disparadorSAVI)\r\nbotonSAVI.grid(row=14, column=2, padx=5)\r\n\r\n#-------------------------------------------- PVI BOTON----------------------------------------------\r\n\r\napvi_label= Label(miFramevipvi,bg ='#DEDFDB' ,text=\"Valor constante a : \", 
width=15) #label a\r\napvi_label.grid(row=16,column=0, padx=5)\r\n\r\napvi= Entry(miFramevipvi, width=15)\r\napvi.grid(row=16, column=1, padx=5)\r\n\r\n\r\nbpvi_label= Label(miFramevipvi,bg ='#DEDFDB' ,text=\"Valor constante b : \", width=15) #label b\r\nbpvi_label.grid(row=18,column=0, padx=5)\r\n\r\nbpvi= Entry(miFramevipvi, width=15)\r\nbpvi.grid(row=18, column=1, padx=5)\r\n\r\n\r\n\r\nbotonPVI = Button(miFramevipvi, width=17, text=\"PVI\", cursor=\"hand2\", command=disparadorPVI)\r\nbotonPVI.grid(row=17, column=2, padx=5)\r\n\r\n\r\n\r\n\r\n#------------------------------------Label----------------------------------------------------------\r\nexaminarMetadato= Label(miFrame, bg='#C9C5DC',textvariable=var, width=50) #caja para ruta del metadato\r\nexaminarMetadato.grid(row=2, column=0)\r\n\r\nlabelindices= Label(miFramevi0, bg='#29A436',text= \"ÍNDICES\",font= \"Helvetica 10 bold\", width=50) #titulo índices\r\nlabelindices.grid(row=7, column=0)\r\n\r\n\r\n\r\nroot.mainloop()","sub_path":"ventanaAvanzada.py","file_name":"ventanaAvanzada.py","file_ext":"py","file_size_in_byte":19898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"3453417","text":"#to choose who starts the game\nimport random\nimport copy\n\n# board is represented as 3*3 matrix\nempty_board = [[\" \",\" \",\" \"],[\" \",\" \",\" \"],[\" \",\" \",\" \"]]\nlinesep = \"-------------\\n\"\n\n# prints the current state of the board\ndef print_board(board):\n    board_str = linesep\n    for i in range(len(board)):\n        line = \"\"\n        for j in board[i]:\n            element = \"| \"+str(j)+\" \"\n            line = line+element\n        line = line+\"|\"\n        board_str = board_str+line+\"\\n\"+linesep\n    print(\"\\n\"+board_str)\n\n#puts a symbol in the coordinate specified as move\ndef play(symbol,board,move):\n    while True:\n        if (len(move) == 2 and move[0] in [\"1\",\"2\",\"3\"] and move[1] in\n            [\"1\",\"2\",\"3\"]):\n            if board[int(move[0])-1][int(move[1])-1]==\" \":\n                board[int(move[0])-1][int(move[1])-1] = symbol\n                return\n            else:\n                print(\"There is already a symbol there!\")\n                move=str(input(\"\\nGive me the coordinates of your move as\\\n                rowcolumn (es. 22)\\n\"))\n        else:\n            print(\"I did not understand what you mean\\n\")\n            move=str(input(\"\\nGive me the coordinates of your move as rowcolumn\\\n            (es. 
22)\\n\"))\n\n#returns the winning charachter, or draw\ndef eval_board(board):\n board_full = True\n for i in range(3):\n for j in range(3):\n if board[j][i]==\" \":\n board_full = False\n #checks for rows and columns\n for i in range(3):\n if (board[i][0]!=\" \" and board[i][0]==board[i][1] and\n board[i][0]==board[i][2]):\n return board[i][0]\n elif (board[0][i]!=\" \" and board[0][i]==board[1][i] and\n board[0][i]==board[2][i]):\n return board[0][i]\n if (board[0][0]!=\" \" and board[0][0]==board[1][1] and\n board[0][0]==board[2][2]): #checks diagonals\n return board[0][0]\n elif (board[0][2]!=\" \" and board[0][2]==board[1][1] and\n board[0][2]==board[2][0]):\n return board[0][2]\n elif board_full:\n return \"draw\"\n else:\n return \"incomplete\"\n\n# returns True if the game is finished, False if\ndef game_finished(board,virtual=False):\n global index\n if eval_board(board) == \"incomplete\": #not\n return False\n elif eval_board(board) == \"draw\":\n if not virtual:\n print(\"Game finished, it is a draw\\n\")\n print(\"Let's play again!\\n\")\n return True\n elif eval_board(board) == \"X\":\n if not virtual:\n print(\"Game finished, X is the winner\\n\")\n print(\"Let's play again!\\n\")\n return True\n elif eval_board(board) == \"O\":\n if not virtual:\n print(\"Game finished, O is the winner\\n\")\n print(\"Let's play again!\\n\")\n return True\n else: print(\"error game_finished\")\n\ndef eval_for_minmax(board):\n if eval_board(board) == \"draw\":\n return 0\n elif eval_board(board) == \"X\":\n if AI_symbol == \"X\":\n return 1\n elif AI_symbol == \"O\":\n return -1\n else: print(\"error2 eval for minamx\")\n elif eval_board(board) == \"O\":\n if AI_symbol == \"X\":\n return -1\n elif AI_symbol == \"O\":\n return 1\n else: print(\"error3 eval for minamx\")\n else: print(\"error eval for minmax\")\n\ndef virtual_play(symbol,board,move):\n virtual_board = copy.deepcopy(board)\n play(symbol,virtual_board,move)\n return virtual_board\n\n# returns a list with the possible moves for the player, given a board\ndef possible_moves(board):\n possible_moves = []\n for j in range(3):\n for i in range(3):\n if board[i][j]==\" \":\n possible_moves.append(str(i+1)+str(j+1))\n return possible_moves\n\n# not used, I choosed to use alphabeta\ndef minmax(board,AI_plays=False):\n if game_finished(board,True):\n return eval_for_minmax(board)\n elif not game_finished(board):\n if AI_plays:\n score = -2\n for move in possible_moves(board):\n score = max(score,minmax(virtual_play(AI_symbol,board,move)))\n return score\n elif not AI_plays:\n score = +2\n for move in possible_moves(board):\n score = min(score,minmax(virtual_play(player_symbol,board,move),\n True))\n return score\n else: print(\"error minmax\")\n else: print(\"error2 minmax\")\n\n# faster version of minmax\ndef alphabeta(board,alpha,beta,AI_plays=False):\n if game_finished(board,True):\n return eval_for_minmax(board)\n elif not game_finished(board):\n if AI_plays:\n score = -2\n for move in possible_moves(board):\n score = max(score,alphabeta(virtual_play(AI_symbol,board,move),\n alpha,beta))\n alpha = max(alpha,score)\n if alpha >= beta:\n break\n return score\n elif not AI_plays:\n score = +2\n for move in possible_moves(board):\n score = min(score,alphabeta(virtual_play(player_symbol,board,\n move),alpha,beta,True))\n beta = min(beta,score)\n if alpha >= beta:\n break\n return score\n else: print(\"error minmax\")\n else: print(\"error2 minmax\")\n\ndef computer_move(board):\n best_result = -2\n for move in possible_moves(board):\n 
board_after_move = virtual_play(AI_symbol,board,move)\n        current_result = alphabeta(board_after_move,-2,2)\n        if current_result>best_result:\n            best_move = move\n            best_result = current_result\n    return best_move\n\ndef main_routine():\n    global AI_symbol\n    global player_symbol\n    global empty_board\n    print(\"\\nThis is a Tic-Tac-Toe game! Let's play\")\n    while True:\n        board = copy.deepcopy(empty_board)\n        mode = input(\"How many players? (1/2)\\n\")\n        print_board(board)\n        if mode == \"2\":\n            # 2 players\n            turn_player_1 = True\n            while True:\n                if turn_player_1 and not game_finished(board):\n                    print(\"\\nIt is the turn of the player X\")\n                    my_move=str(input(\"\\nGive me the coordinates of your move\\\n                    as rowcolumn (es. 22)\\n\"))\n                    play(\"X\",board,my_move)\n                    print_board(board)\n                elif not turn_player_1 and not game_finished(board):\n                    print(\"\\nIt is the turn of the player O\")\n                    my_move=str(input(\"\\nGive me the coordinates of your move\\\n                    as rowcolumn (es. 22)\\n\"))\n                    play(\"O\",board,my_move)\n                    print_board(board)\n                else: break\n                turn_player_1 = not turn_player_1\n        elif mode == \"1\":\n            # against AI\n            while True:\n                player_starts = input(\"Do you want to start? (Y/N)\\n\").upper()\n                if player_starts == \"Y\":\n                    player_symbol = \"X\"\n                    AI_symbol = \"O\"\n                    turn_player = 1\n                    break\n                elif player_starts == \"N\":\n                    player_symbol = \"O\"\n                    AI_symbol = \"X\"\n                    turn_player = 0\n                    break\n                else: print(\"I did not understand your choice\")\n            while True:\n                if turn_player and not game_finished(board):\n                    print(\"It is your turn and you are\",player_symbol,\"\\n\")\n                    my_move=str(input(\"\\nGive me the coordinates of your move\\\n                    as rowcolumn (es. 22)\\n\"))\n                    play(player_symbol,board,my_move)\n                    print_board(board)\n                elif not turn_player and not game_finished(board):\n                    print(\"It is my turn!\\n\")\n                    play(AI_symbol,board,computer_move(board))\n                    print_board(board)\n                else: break\n                turn_player = not turn_player\n        else: print(\"I did not understand your choice.\\n\")\n\nmain_routine()\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":8320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"237331960","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom api.models import *\n\nfrom faker import Faker\n\nfake = Faker()\n\n\n@receiver(post_save, sender=Transaction)\ndef gel_fond(sender, instance, **kwargs):\n    \"\"\" Met à jour le solde des comptes lors d'un changement d'état d'une transaction \"\"\"\n\n    if instance.etat == Transaction.GELE:\n        compte = instance.compte\n        compte.solde = instance.solde_apres\n        compte.save()\n\n    if instance.etat == Transaction.REFUSE:\n        if hasattr(instance.compte, 'credit'):\n            compte = instance.compte\n            compte.solde = compte.solde + instance.montant\n            compte.save()\n        if hasattr(instance.compte, 'courant'):\n            compte = instance.compte\n            compte.solde = compte.solde - instance.montant\n            compte.save()\n\n\n@receiver(post_save, sender=Credit)\n@receiver(post_save, sender=Courant)\ndef update_num_compte(sender, instance, created, **kwargs):\n    \"\"\" Initialise le numéro de compte selon l'id lors de la création \"\"\"\n\n    if created:\n        prefix = 'NRB'\n        
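# format(instance.id, '05') zero-pads the id to five digits, e.g. id 42 -> 'NRB00042'\n        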
num = prefix + format(instance.id, '05')\n        Compte.objects.filter(id=instance.id).update(num_compte=num)\n\n\n@receiver(post_save, sender=Client)\ndef generate_compte(sender, instance, created, **kwargs):\n    \"\"\" Génère les comptes lors de la création d'un client \"\"\"\n    if created:\n        client = Client.objects.get(id=instance.id)\n        create_courant(client)\n        create_credit(client)\n\n\ndef create_courant(client):\n    \"\"\" Crée un compte Courant de base \"\"\"\n\n    Courant.objects.create(solde=0.00, client=client)\n\n\ndef create_credit(client):\n    \"\"\" Initialise une carte de crédit et crée le compte Crédit \"\"\"\n\n    card = create_credit_card(client)\n    Credit.objects.create(limite=1000.00, solde=0.00, carte_credit=card, client=client)\n\n\ndef create_credit_card(client):\n    \"\"\" Initialise les paramètres nécessaire pour avoir une carte de crédit (16 chiffres, VISA) \"\"\"\n\n    expire = fake.credit_card_expire(start=\"now\", end=\"+10y\", date_format=\"%m/%y\").split(\"/\")\n    expire_month = expire[0]\n    expire_year = expire[1]\n    security_code = fake.credit_card_security_code(card_type='visa16')\n    card_number = fake.credit_card_number(card_type='visa16')\n\n    card = CarteCredit.objects.create(nom_titulaire=client.full_name(), annee_expiration=expire_year,\n                                      mois_expiration=expire_month, cvv=security_code, num_carte=card_number)\n    return card\n","sub_path":"api/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"298736302","text":"# -*- coding: utf-8 -*-\nfrom modules.kodi_utils import sys, parse_qsl\n# from modules.kodi_utils import logger\n\ndef routing():\n\tparams = dict(parse_qsl(sys.argv[2][1:], keep_blank_values=True))\n\t_get = params.get\n\tmode = _get('mode', 'navigator.main')\n\tif 'navigator.' in mode:\n\t\tfrom indexers.navigator import Navigator\n\t\texec('Navigator(params).%s()' % mode.split('.')[1])\n\telif 'menu_editor' in mode:\n\t\tfrom modules.menu_editor import MenuEditor\n\t\texec('MenuEditor(params).%s()' % mode.split('.')[1])\n\telif 'discover.' in mode:\n\t\tfrom indexers.discover import Discover\n\t\texec('Discover(params).%s()' % mode.split('.')[1])\n\telif 'furk.' in mode:\n\t\tif mode == 'furk.browse_packs':\n\t\t\tfrom modules.sources import Sources\n\t\t\tSources().furkPacks(_get('file_name'), _get('file_id'))\n\t\telif mode == 'furk.add_to_files':\n\t\t\tfrom indexers.furk import add_to_files\n\t\t\tadd_to_files(_get('item_id'))\n\t\telif mode == 'furk.remove_from_files':\n\t\t\tfrom indexers.furk import remove_from_files\n\t\t\tremove_from_files(_get('item_id'))\n\t\telif mode == 'furk.remove_from_downloads':\n\t\t\tfrom indexers.furk import remove_from_downloads\n\t\t\tremove_from_downloads(_get('item_id'))\n\t\telif mode == 'furk.add_uncached_file':\n\t\t\tfrom indexers.furk import add_uncached_file\n\t\t\tadd_uncached_file(_get('id'))\n\t\telif mode == 'furk.myfiles_protect_unprotect':\n\t\t\tfrom indexers.furk import myfiles_protect_unprotect\n\t\t\tmyfiles_protect_unprotect(_get('action'), _get('name'), _get('item_id'))\n\t\telse:\n\t\t\tfrom indexers import furk\n\t\t\texec('furk.%s(params)' % mode.split('.')[1])\n\telif 'easynews.' 
in mode:\n\t\tfrom indexers import easynews\n\t\texec('easynews.%s(params)' % mode.split('.')[1])\n\telif '_play' in mode or 'play_' in mode:\n\t\tif mode == 'play_media':\n\t\t\tfrom modules.sources import Sources\n\t\t\tSources().playback_prep(params)\n\t\telif mode == 'media_play':\n\t\t\tfrom modules.player import FenPlayer\n\t\t\tFenPlayer().run(_get('url', None), _get('obj', None))\n\telif 'choice' in mode:\n\t\tfrom indexers import dialogs\n\t\tif mode == 'scraper_color_choice': dialogs.scraper_color_choice(_get('setting'))\n\t\telif mode == 'scraper_dialog_color_choice': dialogs.scraper_dialog_color_choice(_get('setting'))\n\t\telif mode == 'scraper_quality_color_choice': dialogs.scraper_quality_color_choice(_get('setting'))\n\t\telif mode == 'imdb_images_choice': dialogs.imdb_images_choice(_get('imdb_id'), _get('rootname'))\n\t\telif mode == 'set_quality_choice': dialogs.set_quality_choice(_get('quality_setting'))\n\t\telif mode == 'results_sorting_choice': dialogs.results_sorting_choice()\n\t\telif mode == 'results_layout_choice': dialogs.results_layout_choice()\n\t\telif mode == 'options_menu_choice': dialogs.options_menu(params)\n\t\telif mode == 'meta_language_choice': dialogs.meta_language_choice()\n\t\telif mode == 'extras_menu_choice': dialogs.extras_menu(params)\n\t\telif mode == 'enable_scrapers_choice': dialogs.enable_scrapers_choice()\n\t\telif mode == 'favorites_choice': dialogs.favorites_choice(params)\n\t\telif mode == 'trakt_manager_choice': dialogs.trakt_manager_choice(params)\n\t\telif mode == 'folder_scraper_manager_choice': dialogs.folder_scraper_manager_choice(params)\n\t\telif mode == 'set_language_filter_choice': dialogs.set_language_filter_choice(_get('filter_setting'))\n\t\telif mode == 'media_extra_info_choice': dialogs.media_extra_info(_get('media_type'), _get('meta'))\n\t\telif mode == 'extras_lists_choice': dialogs.extras_lists_choice()\n\t\telif mode == 'highlight_choice': dialogs.highlight_choice()\n\t\telif mode == 'easynews_use_custom_farm_choice': dialogs.easynews_use_custom_farm_choice()\n\t\telif mode == 'easynews_server_choice': dialogs.easynews_server_choice()\n\t\telif mode == 'navigate_to_page_choice': dialogs.navigate_to_page_choice(params)\n\t\telif mode == 'link_folders_choice': dialogs.link_folders_choice(params['service'], params['folder_id'], params['action'])\n\t\telif mode == 'movie_sets_to_collection_choice': dialogs.movie_sets_to_collection_choice(_get('collection_id'))\n\t\telif mode == 'clear_favourites_choice': dialogs.clear_favourites_choice()\n\telif 'trakt.' 
in mode:\n\t\tif '.list' in mode:\n\t\t\tfrom indexers import trakt_lists\n\t\t\texec('trakt_lists.%s(params)' % mode.split('.')[2])\n\t\telse:\n\t\t\tfrom apis import trakt_api\n\t\t\texec('trakt_api.%s(params)' % mode.split('.')[1])\n\telif 'build' in mode:\n\t\tif mode == 'build_movie_list':\n\t\t\tfrom indexers.movies import Movies\n\t\t\tMovies(params).fetch_list()\n\t\telif mode == 'build_tvshow_list':\n\t\t\tfrom indexers.tvshows import TVShows\n\t\t\tTVShows(params).fetch_list()\n\t\telif mode == 'build_season_list':\n\t\t\tfrom indexers.seasons import build_season_list\n\t\t\tbuild_season_list(params)\n\t\telif mode == 'build_episode_list':\n\t\t\tfrom indexers.episodes import build_episode_list\n\t\t\tbuild_episode_list(params)\n\t\telif mode == 'build_in_progress_episode':\n\t\t\tfrom indexers.episodes import build_single_episode\n\t\t\tbuild_single_episode('episode.progress')\n\t\telif mode == 'build_recently_watched_episode':\n\t\t\tfrom indexers.episodes import build_single_episode\n\t\t\tbuild_single_episode('episode.recently_watched')\n\t\telif mode == 'build_next_episode':\n\t\t\tfrom indexers.episodes import build_single_episode\n\t\t\tbuild_single_episode('episode.next')\n\t\telif mode == 'build_my_calendar':\n\t\t\tfrom indexers.episodes import build_single_episode\n\t\t\tbuild_single_episode('episode.trakt', params)\n\t\telif mode == 'build_next_episode_manager':\n\t\t\tfrom modules.episode_tools import build_next_episode_manager\n\t\t\tbuild_next_episode_manager()\n\t\telif mode == 'imdb_build_user_lists':\n\t\t\tfrom indexers.imdb import imdb_build_user_lists\n\t\t\timdb_build_user_lists(_get('media_type'))\n\t\telif mode == 'build_popular_people':\n\t\t\tfrom indexers.people import popular_people\n\t\t\tpopular_people()\n\t\telif mode == 'imdb_build_keyword_results':\n\t\t\tfrom indexers.imdb import imdb_build_keyword_results\n\t\t\timdb_build_keyword_results(_get('media_type'), _get('query'))\n\telif 'watched_unwatched' in mode:\n\t\tif mode == 'mark_as_watched_unwatched_episode':\n\t\t\tfrom modules.watched_status import mark_as_watched_unwatched_episode\n\t\t\tmark_as_watched_unwatched_episode(params)\n\t\telif mode == 'mark_as_watched_unwatched_season':\n\t\t\tfrom modules.watched_status import mark_as_watched_unwatched_season\n\t\t\tmark_as_watched_unwatched_season(params)\n\t\telif mode == 'mark_as_watched_unwatched_tvshow':\n\t\t\tfrom modules.watched_status import mark_as_watched_unwatched_tvshow\n\t\t\tmark_as_watched_unwatched_tvshow(params)\n\t\telif mode == 'mark_as_watched_unwatched_movie':\n\t\t\tfrom modules.watched_status import mark_as_watched_unwatched_movie\n\t\t\tmark_as_watched_unwatched_movie(params)\n\t\telif mode == 'watched_unwatched_erase_bookmark':\n\t\t\tfrom modules.watched_status import erase_bookmark\n\t\t\terase_bookmark(_get('media_type'), _get('tmdb_id'), _get('season', ''), _get('episode', ''), _get('refresh', 'false'))\n\telif 'history' in mode:\n\t\tif mode == 'search_history':\n\t\t\tfrom indexers.history import search_history\n\t\t\tsearch_history(params)\n\t\telif mode == 'clear_search_history':\n\t\t\tfrom modules.history import clear_search_history\n\t\t\tclear_search_history()\n\t\telif mode == 'remove_from_history':\n\t\t\tfrom modules.history import remove_from_search_history\n\t\t\tremove_from_search_history(params)\n\t\telif mode == 'clear_all_history':\n\t\t\tfrom modules.history import clear_all_history\n\t\t\tclear_all_history(_get('setting_id'), _get('refresh', 'false'))\n\telif 'real_debrid' in mode:\n\t\tif mode 
== 'real_debrid.rd_torrent_cloud':\n\t\t\tfrom indexers.real_debrid import rd_torrent_cloud\n\t\t\trd_torrent_cloud()\n\t\tif mode == 'real_debrid.rd_downloads':\n\t\t\tfrom indexers.real_debrid import rd_downloads\n\t\t\trd_downloads()\n\t\telif mode == 'real_debrid.browse_rd_cloud':\n\t\t\tfrom indexers.real_debrid import browse_rd_cloud\n\t\t\tbrowse_rd_cloud(_get('id'))\n\t\telif mode == 'real_debrid.resolve_rd':\n\t\t\tfrom indexers.real_debrid import resolve_rd\n\t\t\tresolve_rd(params)\n\t\telif mode == 'real_debrid.rd_account_info':\n\t\t\tfrom indexers.real_debrid import rd_account_info\n\t\t\trd_account_info()\n\t\telif mode == 'real_debrid.delete':\n\t\t\tfrom indexers.real_debrid import rd_delete\n\t\t\trd_delete(_get('id'), _get('cache_type'))\n\t\telif mode == 'real_debrid.authenticate':\n\t\t\tfrom apis.real_debrid_api import RealDebridAPI\n\t\t\tRealDebridAPI().auth()\n\t\telif mode == 'real_debrid.revoke_authentication':\n\t\t\tfrom apis.real_debrid_api import RealDebridAPI\n\t\t\tRealDebridAPI().revoke()\n\telif 'premiumize' in mode:\n\t\tif mode == 'premiumize.pm_torrent_cloud':\n\t\t\tfrom indexers.premiumize import pm_torrent_cloud\n\t\t\tpm_torrent_cloud(_get('id', None), _get('folder_name', None))\n\t\telif mode == 'premiumize.pm_transfers':\n\t\t\tfrom indexers.premiumize import pm_transfers\n\t\t\tpm_transfers()\n\t\telif mode == 'premiumize.pm_account_info':\n\t\t\tfrom indexers.premiumize import pm_account_info\n\t\t\tpm_account_info()\n\t\telif mode == 'premiumize.rename':\n\t\t\tfrom indexers.premiumize import pm_rename\n\t\t\tpm_rename(_get('file_type'), _get('id'), _get('name'))\n\t\telif mode == 'premiumize.delete':\n\t\t\tfrom indexers.premiumize import pm_delete\n\t\t\tpm_delete(_get('file_type'), _get('id'))\n\t\telif mode == 'premiumize.authenticate':\n\t\t\tfrom apis.premiumize_api import PremiumizeAPI\n\t\t\tPremiumizeAPI().auth()\n\t\telif mode == 'premiumize.revoke_authentication':\n\t\t\tfrom apis.premiumize_api import PremiumizeAPI\n\t\t\tPremiumizeAPI().revoke()\n\telif 'alldebrid' in mode:\n\t\tif mode == 'alldebrid.ad_torrent_cloud':\n\t\t\tfrom indexers.alldebrid import ad_torrent_cloud\n\t\t\tad_torrent_cloud(_get('id', None))\n\t\telif mode == 'alldebrid.browse_ad_cloud':\n\t\t\tfrom indexers.alldebrid import browse_ad_cloud\n\t\t\tbrowse_ad_cloud(_get('folder'))\n\t\telif mode == 'alldebrid.resolve_ad':\n\t\t\tfrom indexers.alldebrid import resolve_ad\n\t\t\tresolve_ad(params)\n\t\telif mode == 'alldebrid.ad_account_info':\n\t\t\tfrom indexers.alldebrid import ad_account_info\n\t\t\tad_account_info()\n\t\telif mode == 'alldebrid.authenticate':\n\t\t\tfrom apis.alldebrid_api import AllDebridAPI\n\t\t\tAllDebridAPI().auth()\n\t\telif mode == 'alldebrid.revoke_authentication':\n\t\t\tfrom apis.alldebrid_api import AllDebridAPI\n\t\t\tAllDebridAPI().revoke()\n\telif '_settings' in mode:\n\t\tif mode == 'open_settings':\n\t\t\tfrom modules.kodi_utils import open_settings\n\t\t\topen_settings(_get('query', '0.0'), _get('addon', 'plugin.video.fen'))\n\t\telif mode == 'clean_settings':\n\t\t\tfrom modules.kodi_utils import clean_settings\n\t\t\tclean_settings()\n\t\telif mode == 'clear_settings_window_properties':\n\t\t\tfrom modules.kodi_utils import clear_settings_window_properties\n\t\t\tclear_settings_window_properties()\n\telif '_cache' in mode:\n\t\timport caches\n\t\tif mode == 'clear_cache':\n\t\t\tcaches.clear_cache(_get('cache'))\n\t\telif mode == 'clear_all_cache':\n\t\t\tcaches.clear_all_cache()\n\t\telif mode == 
'clean_databases_cache':\n\t\t\tcaches.clean_databases()\n\t\telif mode == 'check_corrupt_databases_cache':\n\t\t\tcaches.check_corrupt_databases()\n\telif '_image' in mode:\n\t\tfrom indexers.images import Images\n\t\tImages().run(params)\n\telif '_text' in mode:\n\t\tif mode == 'show_text':\n\t\t\tfrom modules.kodi_utils import show_text\n\t\t\tshow_text(_get('heading'), _get('text', None), _get('file', None), _get('font_size', 'small'), _get('kodi_log', 'false') == 'true')\n\t\telif mode == 'show_text_media':\n\t\t\tfrom modules.kodi_utils import show_text_media\n\t\t\tshow_text_media(_get('heading'), _get('text', None), _get('file', None), _get('meta'), {})\n\telif '_view' in mode:\n\t\tfrom modules import kodi_utils\n\t\tif mode == 'choose_view':\n\t\t\tkodi_utils.choose_view(_get('view_type'), _get('content', ''))\n\t\telif mode == 'set_view':\n\t\t\tkodi_utils.set_view(_get('view_type'))\n\t##EXTRA modes##\n\telif mode == 'kodi_refresh':\n\t\tfrom modules.kodi_utils import kodi_refresh\n\t\tkodi_refresh()\n\telif mode == 'get_search_term':\n\t\tfrom modules.history import get_search_term\n\t\tget_search_term(params)\n\telif mode == 'person_data_dialog':\n\t\tfrom indexers.people import person_data_dialog\n\t\tperson_data_dialog(params)\n\telif mode == 'downloader':\n\t\tfrom modules.downloader import runner\n\t\trunner(params)\n\telif mode == 'download_manager':\n\t\tfrom modules.downloader import download_manager\n\t\tdownload_manager(params)\n\telif mode == 'manual_add_magnet_to_cloud':\n\t\tfrom modules.debrid import manual_add_magnet_to_cloud\n\t\tmanual_add_magnet_to_cloud(params)\n\telif mode == 'debrid.browse_packs':\n\t\tfrom modules.sources import Sources\n\t\tSources().debridPacks(_get('provider'), _get('name'), _get('magnet_url'), _get('info_hash'))\n\telif mode == 'upload_logfile':\n\t\tfrom modules.kodi_utils import upload_logfile\n\t\tupload_logfile()\n\telif mode == 'toggle_language_invoker':\n\t\tfrom modules.kodi_utils import toggle_language_invoker\n\t\ttoggle_language_invoker()\n","sub_path":"plugin.video.fen/resources/lib/modules/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":12443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"371055520","text":"import json\nimport os\nimport os.path\nfilename = 'Potenit_ALL.json'\ncat_id_path = 'catids.json'\nDB_ROOT = './datasets/New_Sejong_RCV_dataset/RGBTDv3'\nimage_set = 'test_all.txt'\n\nannopath_Sejong_New = os.path.join('%s','json','%s','RGB','%s.json')\ndata = dict(annotations=[], images=[], categories=[])\n### Load \nid_offset = 0\nimage_id_offset = 0\nids = list() \n\nfor line in open(os.path.join(DB_ROOT, 'ImageSet', image_set)):\n    ids.append((DB_ROOT, line.strip().split('/')))\n    \nfor ii, annotation_path in enumerate(ids):\n    \n    frame_id = ids[ii]\n    with open(annopath_Sejong_New %(DB_ROOT,frame_id[1][0],frame_id[1][1])) as j:\n        data_t = json.load(j)\n    for ann in data_t['annotation']:\n        ann['category_id'] = int(ann['category_id'])\n        ann['id'] += id_offset\n        ann['image_id'] = image_id_offset\n        
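# convert [x1, y1, x2, y2] corners to COCO-style [x, y, width, height]\n        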
ann['bbox'][2] = ann['bbox'][2]-ann['bbox'][0]\n        ann['bbox'][3] = ann['bbox'][3]-ann['bbox'][1]\n        if ann['occlusion'] == 2 or ann['bbox'][2] < 0 or ann['bbox'][3] < 0 :\n            ann['ignore'] = 1\n        \n    id_offset += len(data_t['annotation'])\n    data['annotations'].extend(data_t['annotation'])\n    data_t['image'] = data_t['image'][0]\n    data_t['image']['id'] = int(image_id_offset)\n    data['images'].append(data_t['image'])\n    \n    image_id_offset += 1\nwith open(os.path.join('./datasets/New_Sejong_RCV_dataset_jw',cat_id_path), 'r') as f:\n    data_t = json.load(f) \ndata['categories'].extend(data_t)\nprint('Write results in COCO format.')\nwith open(filename, 'wt') as f:\n    f.write( json.dumps(data, indent=4) )","sub_path":"5일차_Object_Detection/StaticFusion/make_annotation.py","file_name":"make_annotation.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"394134996","text":"from JOOMLADetector import JOOMLADetector\r\nfrom MyDB import MyDB\r\nfrom WP.wp_detector import WPDetector\r\n\r\n\r\nclass Model(object):\r\n    def __init__(self):\r\n        #self._platform = ['wordpress', 'joomla', 'drupal', 'squarespace']\r\n        self._platform = ['WordPress', 'Joomla']\r\n\r\n    def check_platform_helper(self, platform_name, domain):\r\n        \"\"\"\r\n        :param platform_name: string. the name of the platform I want to check.\r\n        :param domain: string. the domain to check against that platform.\r\n        :return: tuple. platform-name, true/false, version(if found)\r\n        \"\"\"\r\n        try:\r\n            if platform_name == 'WordPress':\r\n                return ('WordPress',) + WPDetector(domain).detect()\r\n            if platform_name == 'Joomla':\r\n                return ('Joomla',) + JOOMLADetector(domain).detect()\r\n            '''if platform_name == 'drupal':\r\n                return ('drupal',) + WPDetector(domain).detect()\r\n            if platform_name == 'squarespace':\r\n                return ('squarespace',) + JOOMLADetector(domain).detect()'''\r\n        except Exception as e:\r\n            print(e)\r\n\r\n    def check_platform(self, domain, platform=None):\r\n        \"\"\"\r\n        :param platform: string. the platform the user already knows the website runs on; only the version is then looked up.\r\n        :param domain: string. the domain whose platform should be detected.\r\n        :return: tuple. 
platform-name/false, version/'could not detect platform'.\r\n \"\"\"\r\n try:\r\n optional_results = []\r\n if platform is None:\r\n for platform in self._platform:\r\n optional_results.append(self.check_platform_helper(platform, domain))\r\n else:\r\n optional_results.append(self.check_platform_helper(platform, domain))\r\n tuple_result = []\r\n for result in optional_results:\r\n if result[1] == 'True':\r\n tuple_result.append(result)\r\n if len(tuple_result) > 1:\r\n platforms = \"it might be: \"\r\n for name in tuple_result:\r\n platforms += name[0] + \", \"\r\n return False, \"could not detect platform \" + platforms\r\n elif len(tuple_result) == 0:\r\n return False, \"could not detect platform \"\r\n else:\r\n return tuple_result[0][0], tuple_result[0][2]\r\n except Exception as e:\r\n print(e)\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128099082","text":"def isArm(x):\r\n temp=x\r\n ord=0\r\n while temp!=0:\r\n ord+=1\r\n temp//=10\r\n sum=0\r\n while x>0:\r\n r=x%10\r\n sum=sum+pow(r,ord)\r\n x//=10\r\n return sum\r\n\r\n\r\nprint(\"The following are Armstrong numbers in the interval 1-100:\")\r\nfor i in range(1,101):\r\n if isArm(i)==i:\r\n print(i,end=', ')","sub_path":"7_1.py","file_name":"7_1.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"158942654","text":"__author__ = 'Zhang Shaojun'\r\n\r\n# listening address and port\r\nCC_HOST = '0.0.0.0'\r\nCC_PORT = 9696\r\n\r\n# max message id\r\nMAX_XID = 0xffffffff\r\n\r\n# version of TransFormed Layered Controller\r\nTFLC_VERSION_1 = 1\r\n\r\n# packet_out timeout event\r\nPACKET_OUT_TIMEOUT = 65536\r\n\r\n# load_report interval\r\nLOAD_REPORT_INTERVAL = 10\r\n\r\n# switch_configuration_change interval\r\nSWITCH_CHANGE_INTERVAL = 10\r\n\r\n# intern domain graph information passing interval\r\nGRAPH_INFO_UPDATE_INTERVAL = 10\r\n\r\n# the window datapath and corresponding out port\r\n# DPID_2_IS_WIN = {1: True, 2: True}\r\n# DPID_2_OUT_PORT = {1: 3, 2: 1}\r\n\r\n","sub_path":"Controllers/cc_client/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384217377","text":"#! 
/usr/bin/env python\n\nimport pandas as pd\nimport numpy as np\nimport argparse\nfrom collections import defaultdict\nfrom progressbar import ProgressBar\nfrom multiprocessing import Pool\nimport time\nimport networkx as nx\nimport os\nimport sys\nimport scipy.stats as stats\nfrom statsmodels.stats.multitest import multipletests\n\n\nclass ComputeFisher():\n\n    \"\"\"Calculate phi coefficient and Fisher's exact test p-value\n    for all the weighted edges in a graph\"\"\"\n    \n    def __init__(self, G, tmp_dir, ncpus = 10):\n        \n        \"\"\"\n        Args:\n            G (nx.Graph): weighted graph\n        \"\"\"\n        \n        self.G = G\n        self.S = self.G.size(weight='weight')\n        pairs = list(self.G.edges())\n        self.chunks = np.array_split(pairs, 100)\n        self.tmp_dir = tmp_dir\n        self.ncpus = ncpus\n    \n    \n    def compute_fisher_directional(self, source, target):\n        \n        \"\"\"\n        Get phi coefficient and Fisher's Exact Test p-value\n        Args:\n            source (int): source node id\n            target (int): target node id\n        Returns:\n            fe (tuple): Fisher's p-value and odds ratio\n            phi_ij (float): phi coefficient\n        \"\"\"\n        \n        G = self.G\n        S = self.S\n        dij = G[source][target]['weight']\n        ## i>any other\n        di_ = (G.out_degree(source, weight='weight')) - dij \n        ## any other>j\n        d_j = (G.in_degree(target, weight='weight')) - dij \n        ## any other, any other\n        d_ = S - (G.out_degree(source, weight='weight')) - (G.in_degree(target, weight='weight')) + dij\n\n        cont_table = [[dij,di_],[d_j,d_]]\n\n        sj_in = G.in_degree(target, weight='weight')\n        si_out = G.out_degree(source, weight='weight')\n        phi_ij = ((dij * S) - (sj_in * si_out))/np.sqrt(si_out * sj_in * (S - si_out) * (S - sj_in))\n\n        fe = stats.fisher_exact(cont_table)\n        return(fe,phi_ij)\n    \n    def run_parallel(self, pair):\n        \"\"\"Run compute_fisher_directional in parallel\n        Args:\n            pair (list): source and target node ids\n        Returns:\n            d (tuple): source, target, odds ratio, p-value, phi coefficient\n        \"\"\"\n        \n        source, target = pair\n        (odds,pvalue),phi_ij = self.compute_fisher_directional(source,target)\n        d = (source,target,odds,pvalue,phi_ij)\n        return(d)\n\n    def prepare_table(self, res):\n        \"\"\"Organize results from run_parallel\n        Args:\n            res (list): tuples produced by run_parallel\n        Returns:\n            table (pd.DataFrame): formatted results\n        \"\"\"\n        table = defaultdict(dict)\n        for i, elem in enumerate(res):\n            table[i]['source'], table[i]['target'], table[i]['odds_ratio'], table[i]['pvalue'], table[i]['phi_ij'] = elem\n        table = pd.DataFrame.from_dict(table, orient='index')\n        return(table)\n\n    \n    def run(self):\n\n        \"\"\"Run Calculation\"\"\"\n        \n        pbar = ProgressBar()\n        c = 0\n        for chunk in pbar(self.chunks):\n            s = time.time()\n            p = Pool(self.ncpus)\n            res = p.map(self.run_parallel, chunk)\n            p.close()\n            dres = self.prepare_table(res)\n            dres.to_csv(self.tmp_dir + 'chunk_%d.csv'%c, index=None)\n            c = c + 1\n            e = time.time() - s\n\nif __name__ == '__main__':\n    \n    parser = argparse.ArgumentParser(description='Compute phi and Fisher')\n    parser.add_argument('-infile', dest = 'infile',action='store',\n                        help='infile', required=True)\n    parser.add_argument('-tmp_dir', dest='tmp_dir', action='store',\n                        help='tmp dir', required=True)\n    parser.add_argument('-outfile', dest='outfile', action='store',\n                        help='outfile', default='out.csv')\n    parser.add_argument('-ncpus', dest='ncpus', action='store', default=10, type=int,\n                        help='number of cpus')\n    \n    if len(sys.argv) <= 1:\n        print (parser.print_help())\n        sys.exit(1)\n    else:\n        args = parser.parse_args()\n\n    dt = pd.read_csv(args.infile, index_col = 0)\n    wedges = [(i,j,k) for i,j,k in zip(dt.source, dt.target, dt.counts)]\n    G = nx.DiGraph()\n    
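# build a directed, weighted graph from the (source, target, count) edge list\n    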
G.add_weighted_edges_from(wedges)\n    print ('## Load Network ##')\n    print ('## {0} nodes and {1} edges'.format(len(G.nodes()), len(G.edges())))\n\n    \n    compute = ComputeFisher(G, args.tmp_dir, ncpus=args.ncpus)\n    compute.run()\n    \n    \n    res = []\n    files = os.listdir(os.path.abspath(args.tmp_dir))\n    for fi in files:\n        if fi.startswith('chunk'):\n            dx = pd.read_csv(os.path.abspath(args.tmp_dir) + '/' + fi)\n            res.append(dx)\n    \n    res = pd.concat(res)\n    res['fdr_bh_pvalue_adj'] = multipletests(res['pvalue'],\n                                             alpha=0.05, \n                                             method='fdr_bh')[1]\n    res.to_csv(args.outfile, index=None)\n    \n    \n    ","sub_path":"python/compute_fisher_phi.py","file_name":"compute_fisher_phi.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"365642977","text":"class Kalkulator:\n    \"\"\"contoh kelas kalkulator sederhana. anggap kelas ini tidak boleh diubah!\"\"\"\n\n    def __init__(self, nilai=0):\n        self.nilai = nilai\n\n    def tambah_angka(self, angka1, angka2):\n        self.nilai = angka1 + angka2\n        if self.nilai > 9:  # kalkulator sederhana hanya memroses sampai 9\n            print('kalkulator sederhana melebihi batas angka: {}'.format(self.nilai))\n        return self.nilai\n\nclass KalkulatorKali(Kalkulator):\n\n    def kali_angka(self, angka1, angka2):\n        self.nilai = angka1 * angka2\n        return self.nilai\n\n\nkk = KalkulatorKali()\na = kk.kali_angka(2, 3)  # sesuai dengan definisi class memiliki fitur kali_angka\nprint(a)\n\nb = kk.tambah_angka(5, 6)  # memiliki fitur tambah_angka karena mewarisi dari Kalkulator\nprint(b)","sub_path":"Dicoding/Dasar/PBO/inheritance.py","file_name":"inheritance.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"26372180","text":"# A201 / Fall 2017\n# Programming Assignment 12\n# Name :\n# IU account :\n\n\ndef read_file(filename):\n    \"\"\"\n    Opens the file, returns the content of file as a str\n    :param filename: str, name of file\n    :return: str, content of file\n    \"\"\"\n    with open(filename, mode='r', encoding='utf-8') as file:\n        content = file.read()\n    return content\n\n# main program\ncontent = read_file('poem.txt')\nprint(content)\n","sub_path":"Extra/Python作业集/Homework 13/work/hw13-fileprinter.py","file_name":"hw13-fileprinter.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"496759775","text":"#from __future__ import division, print_function\nimport sqlite3\nimport numpy as np\nimport healpy as hp\nfrom astropy.io import fits\nimport multiprocessing\nfrom multiprocessing import Pool\nimport time\nimport h5py\nimport os\nimport glob\n\n# Other repo imports (RHT helper code)\nimport sys \nsys.path.insert(0, '../../RHT')\nimport RHT_tools\nsys.path.insert(0, '../../GalfaCuber/code')\nimport galfa_vel_helpers as gvh\n\nLOCAL = False\n\nclass SkyPixels():\n    \"\"\"\n    Base class for spatial dimensions.\n    This will define healpix pixels included in a chunk.\n    Can be as few as 1 pixel but will probably be as many as can be \n    processed at one time.\n    \"\"\"\n    \n    def __init__(self, nthetas=165, nvels=21):\n        # Set dimensions for (velocity, orientation)\n        self.nthetas = nthetas\n        self.nvels = nvels\n    \n    def add_pixels_by_array(self, pixel_array):\n        \"\"\"\n        Initialize a bunch of healpix indices from a list or array\n        \"\"\"\n        self.skypixels = pixel_array\n        self.npix = len(self.skypixels)\n        self.startpix = pixel_array[0]\n        self.stoppix = pixel_array[-1]\n    \n    
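# NOTE: pixel indices here are HEALPix indices; get_lonlat below treats them as RING-ordered unless nest=True is passed\n    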
# should add option to add by number and 'chunk index'\n    \n    def get_lonlat(self, skypixels, nside=1024, nest=False):\n        \"\"\"\n        Return latitude and longitude from pixel index\n        \"\"\"\n        lon, lat = hp.pixelfunc.pix2ang(nside, skypixels, lonlat=True, nest=nest)\n        \n        return lon, lat\n\n    \nclass TemperatureCloud(SkyPixels):\n    \"\"\"\n    Temperature structure in (pixel, velocity, theta) space\n    \"\"\"\n    def __init__(self, startpix=0, stoppix=12582912):\n        SkyPixels.__init__(self)\n        \n    def load_mean_T_data(self, startpix=0, stoppix=12582912):\n        \n        # mean dust temperature map saved as hdf5, RING ordering\n        if LOCAL:\n            t_map_root = '/Users/susanclark/Dropbox/DustModels/data/'\n        else:\n            t_map_root = '../data/'\n        t_map_fn = t_map_root + 'COM_CompMap_Dust-GNILC-Model-Temperature_1024_R2.00_fwhm180' + '.h5'\n        \n        with h5py.File(t_map_fn, 'r') as tmap_file:\n            self.mean_T_data = tmap_file['Tdust'][startpix:stoppix]\n        \n    def load_T_n_v_theta(self, delta_T_fn=None, startpix=0, stoppix=12582912, nvels=21):\n        \"\"\"\n        Load precomputed delta-T realizations and tile them into (pixel, velocity, theta).\n        \"\"\"\n        with h5py.File(delta_T_fn, 'r') as deltatmap_file:\n            self.delta_t_n_nmap= deltatmap_file['deltaT'][startpix:stoppix, :]\n            self.maxamp = deltatmap_file['deltaT'].attrs['maxamp']\n            self.alpha_TT = deltatmap_file['deltaT'].attrs['alpha_TT']\n            self.nrealizations = deltatmap_file['deltaT'].attrs['nrealizations']\n        \n        print(\"delta_t_n_nmap shape\", self.delta_t_n_nmap.shape)\n        # create v, theta structure from number provided\n        ntheta = 165\n        lenchunks = ntheta//self.nrealizations\n        allchunkstart = np.arange(0, ntheta, lenchunks)\n        \n        self.delta_t_n_v_theta = np.zeros((stoppix-startpix, nvels, ntheta), np.float_)\n        \n        for _i, chunkstart in enumerate(allchunkstart):\n            chunkstop = chunkstart + lenchunks\n            self.delta_t_n_v_theta[:, :, chunkstart:chunkstop] = self.delta_t_n_nmap[:, _i].reshape(stoppix-startpix, 1, 1)\n        \n        print(\"delta_t_n_v_theta shape\", self.delta_t_n_v_theta.shape, \"nonzero:\", len(np.nonzero(self.delta_t_n_v_theta)[0]))\n\n    
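# each of the nrealizations delta-T maps fills a contiguous block of ntheta//nrealizations theta bins\n    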
def get_T_n_v_theta(self, skypixels, nside=1024, v_index_list=[93, 95, 97, 99], includegamma=True):\n        \"\"\"\n        make (n, v, theta) array of temperature,\n        including mean temperature.\n        \"\"\"\n        \n        amp = 5. # Amplitude of temperature fluctuations in K\n        \n        lon, lat = self.get_lonlat(skypixels, nside=nside)\n        \n        deg_to_rad = np.pi/180.\n        phi_t = 4.*lon*deg_to_rad + 2.*3.*lat*deg_to_rad\n        phi_v = 3.*lon*deg_to_rad + 2.*5.*lat*deg_to_rad\n        \n        phi_t = phi_t.reshape(len(phi_t), 1, 1)\n        phi_v = phi_v.reshape(len(phi_v), 1, 1)\n        \n        if includegamma:\n            phi_g = 1.*lon*deg_to_rad + 1.*lat*deg_to_rad\n            phi_g = phi_g.reshape(len(phi_g), 1, 1)\n        \n        theta = RHT_tools.get_thets(75, save=False, verbose=False)\n        \n        if LOCAL:\n            HI4PI_root = '/Users/susanclark/Dropbox/HI4PI/'\n        else:\n            HI4PI_root = '/data/seclark/HI4PI/full_data/'\n        vgrid = np.loadtxt(HI4PI_root + 'velo_grid_120.txt')\n        v = vgrid[v_index_list]\n        print(\"computing for velocities: {}\".format(v))\n        \n        vmax = np.max(v)\n        tt, vv = np.meshgrid(2.0*theta, v)\n        \n        self.delta_t_n_v_theta = amp*np.sin((1.+vv/vmax)*np.pi + phi_v)*np.sin(9.*tt + phi_t)\n        print(\"delta_t_n_v_theta shape\", self.delta_t_n_v_theta.shape)\n        \n        print(\"delta t_v_theta created\")\n        \n        if includegamma:\n            self.gamma_n_v_theta = (tt + phi_g + (1.+vv/vmax)*np.pi) % (np.pi/2.)\n        \n    def B_nu (self, nu, T):\n        \"\"\"\n        Planck function\n        Inputs: nu : frequency (in GHz)\n                T : temperature (in K)\n        \"\"\"\n        # Physical constants\n        c = 2.99792458e10 # Speed of light, cm/s\n        h = 6.62606957e-27 # Planck constant, erg s\n        k = 1.3806488e-16 # Boltzmann constant, erg/K\n        \n        nu = nu*1.E9 # convert to Hz\n\n        return 2.0*h*((nu)**3)/(c*c*(np.expm1(h*nu/(k*T))))\n\n    \nclass RHTCloud(SkyPixels):\n    \"\"\"\n    RHT structure in (pixel, velocity, theta) space\n    \"\"\"\n    def __init__(self, startpix=0, stoppix=12582912, nvels=4, startstopvel=[83, 101]):\n\n        startvel = startstopvel[0]\n        stopvel = startstopvel[-1]\n        \n        rht_root = '../data/'\n        rht_fn = rht_root + 'Rtheta_{}_{}_1_hpix{}_{}.h5'.format(startvel, stopvel, startpix, stoppix)\n        \n        with h5py.File(rht_fn, 'r') as rht_file:\n            self.RHT_n_v_theta = rht_file['hthets_arr'][:] # needs colon indexer or will just be pointer #[startpix:stoppix]\n            self.v_index_list = rht_file['hthets_arr'].attrs['velocities']\n            print(\"velocities\", self.v_index_list)\n\n        # load HI intensity data as well\n        if LOCAL:\n            hi4pi_root = '/Users/Dropbox/HI4PI/'\n        else:\n            hi4pi_root = '/data/seclark/HI4PI/full_data/'\n        \n        \n        self.HI_n_v = np.zeros((stoppix-startpix, nvels), np.float_)\n        hi4pi_fn = hi4pi_root + 'HI4PI_120kms.h5'\n        with h5py.File(hi4pi_fn, 'r') as f:\n            for _i, _vel in enumerate(self.v_index_list):\n                self.HI_n_v[:, _i] = f['survey'][startpix:stoppix, _vel]\n        \n        print(\"HI_n_v created\")\n\n    def get_cos_sin_thet(self):\n        # cos(2theta) and sin(2theta) data\n        wlen = 75 # RHT window length\n        self.theta_array = RHT_tools.get_thets(wlen, save=False, verbose=False)\n        self.cos_two_theta = np.cos(2*self.theta_array)\n        self.sin_two_theta = np.sin(2*self.theta_array)\n    \nclass MagneticCloud():\n    \"\"\"\n    Creates \"magnetically coherent cloud\" structure in (pixel, velocity, theta)\n    space for as many healpix pixels as given.\n    \n    Combines R, I Beta_nu, cos(2theta) or sin(2theta), and gamma\n    \n    \"\"\"\n    def __init__(self, startpix=0, stoppix=12582912, v_index_list=[93, 95, 97, 99], includegamma=True, delta_T_fn=None):\n        \n        # start and stop pixels\n        allpix = np.arange(startpix, stoppix)\n        startvel = v_index_list[0]\n        stopvel = v_index_list[-1]\n        print(\"MagneticCloud for vel index {} to {}\".format(startvel, stopvel))\n        \n        # get temperature structure\n        temperature_cloud = TemperatureCloud(startpix=startpix, stoppix=stoppix)\n        temperature_cloud.load_mean_T_data(startpix=startpix, 
stoppix=stoppix)\n print(\"mean T data loaded\")\n \n if delta_T_fn != None:\n print(\"Loading T_n_v_theta from random sims\")\n temperature_cloud.load_T_n_v_theta(delta_T_fn = delta_T_fn, startpix=startpix, stoppix=stoppix, nvels=len(v_index_list))\n else:\n print(\"Generating T_n_v_theta using simple sinusoid prescription\")\n temperature_cloud.get_T_n_v_theta(allpix, v_index_list=v_index_list, includegamma=includegamma)\n \n if includegamma:\n self.gamma_n_v_theta = temperature_cloud.gamma_n_v_theta\n print(\"Created gamma_n_v_theta\")\n \n self.temperature_cloud = temperature_cloud\n \n # get RHT structure\n print(\"Loading RHT data\") \n rht_cloud = RHTCloud(startpix=startpix, stoppix=stoppix, nvels=len(v_index_list), startstopvel=[startvel, stopvel])\n self.RHT_n_v_theta = rht_cloud.RHT_n_v_theta\n \n # Intensity-weighted RHT data\n self.I_weighted_RHT = self.RHT_n_v_theta * rht_cloud.HI_n_v[:, :, np.newaxis]\n print(\"I_weighted_RHT created\")\n \n print(\"Getting sin cos values\") \n rht_cloud.get_cos_sin_thet()\n self.cos_two_theta = rht_cloud.cos_two_theta\n self.sin_two_theta = rht_cloud.sin_two_theta\n \n # Add mean temperature + perturbation\n print(\"Adding mean temp to perturbation\") \n self.total_T_n_v_theta = temperature_cloud.mean_T_data[:, np.newaxis, np.newaxis] + temperature_cloud.delta_t_n_v_theta\n \n \ndef make_IQU(nus=[353], nchunks=16, nside=1024, v_index_list=[93, 95, 97, 99], includegamma=True, deltaTGauss=True):\n \"\"\"\n Make I, Q, and U maps for specified frequencies.\n \"\"\"\n \n npix = 12*nside**2\n stepsize = np.int(npix/nchunks)\n all_startpix = np.arange(0, npix, stepsize, dtype=np.int_)\n all_stoppix = all_startpix + stepsize\n \n I_RHT = np.zeros(npix)\n I_n_nu = np.zeros((npix, len(nus)), np.float_)\n Q_n_nu = np.zeros((npix, len(nus)), np.float_)\n U_n_nu = np.zeros((npix, len(nus)), np.float_)\n \n # make and save delta_T fluctuations\n if deltaTGauss:\n delta_T_fn = make_delta_T_GaussRandom(nside=1024, maxamp=5, alpha_TT=-2, nrealizations=5)\n else:\n delta_T_fn = None\n \n for _i, (startpix, stoppix) in enumerate(zip(all_startpix, all_stoppix)):\n print(\"beginning chunk {}: from {} to {}\".format(_i, startpix, stoppix))\n \n mc = MagneticCloud(startpix=startpix, stoppix=stoppix, v_index_list=v_index_list, delta_T_fn=delta_T_fn, includegamma=includegamma)\n \n for nu_i, _nu in enumerate(nus):\n print(\"Computing IQU for nu = {}\".format(_nu))\n mbb = mc.temperature_cloud.B_nu(_nu, mc.total_T_n_v_theta)\n tot_arr = mbb * mc.I_weighted_RHT\n \n # Include gamma perturbations or not\n if includegamma:\n print(\"Including gamma perturbations\")\n Q_n_nu[startpix:stoppix, nu_i] = np.nansum(np.nansum(tot_arr * mc.cos_two_theta * np.cos(mc.gamma_n_v_theta)**2, axis=-1), axis=-1)\n U_n_nu[startpix:stoppix, nu_i] = np.nansum(np.nansum(tot_arr * mc.sin_two_theta * np.cos(mc.gamma_n_v_theta)**2, axis=-1), axis=-1)\n else:\n print(\"Not including gamma perturbations\")\n Q_n_nu[startpix:stoppix, nu_i] = np.nansum(np.nansum(tot_arr * mc.cos_two_theta, axis=-1), axis=-1)\n U_n_nu[startpix:stoppix, nu_i] = np.nansum(np.nansum(tot_arr * mc.sin_two_theta, axis=-1), axis=-1)\n \n I_n_nu[startpix:stoppix, nu_i] = np.nansum(np.nansum(tot_arr, axis=-1), axis=-1)\n \n # Place integrated HI intensity-weighted RHT output\n I_RHT[startpix:stoppix] = np.nansum(np.nansum(mc.I_weighted_RHT, axis=-1), axis=-1)\n print(\"Placed into I_RHT\")\n \n return I_n_nu, Q_n_nu, U_n_nu, I_RHT\n\ndef make_delta_T_GaussRandom(nside=1024, maxamp=5, alpha_TT=-2, nrealizations=5):\n 
\"\"\"\n Generate delta_T fluctuations as a power law in multipole, i.e.\n C_ell^TT ~ ell^alpha_TT\n \"\"\"\n npix = 12*nside**2\n allmaps = np.zeros((npix, nrealizations), np.float_)\n \n # define power spectrum\n ellmin = 0\n ellmax = 3000\n ells = np.arange(ellmin,ellmax+1)\n TTDust = ells**(alpha_TT*1.0) # can't have negative integer power\n \n # remove NaN in ell=0\n TTDust[0] = 0.0\n \n print(\"Making maps with alpha_TT = {} and maxamp = {}\".format(alpha_TT, maxamp))\n \n # Make nrealizations of this power spectrum\n for nmap in np.arange(nrealizations):\n simmap = hp.synfast(TTDust, nside=nside, lmax=int(ellmax), pol=False, pixwin=True, new=True)\n allmaps[:, nmap] = maxamp*simmap/np.max(simmap)\n \n # Need to write this out, at least temporarily. Also may want this data to check imprint of delta_T.\n compress = False\n outroot = \"../data/temp_deltaT/\"\n \n # check for previous writes with same format..\n outfn_base = \"deltaT_nside{}_maxamp{}_alphaTT{}_nmaps{}_lmin{}_lmax{}*\".format(nside, maxamp, alpha_TT, nrealizations, ellmin, ellmax)\n names = [os.path.basename(x) for x in glob.glob(outroot+outfn_base)]\n if len(names) > 0:\n print(names)\n allnums = np.zeros(len(names))\n for _i, name in enumerate(names):\n split1 = name.split(\"_\")\n lastnum = split1[-1].split(\".\")[0] # split off from file suffix\n allnums[_i] = np.int(lastnum)\n outnum = np.max(allnums) + 1\n outnumstr = \"{0:0=3d}\".format(np.int(outnum))\n else:\n outnumstr = \"{0:0=3d}\".format(0)\n \n outfn = outroot + \"deltaT_nside{}_maxamp{}_alphaTT{}_nmaps{}_lmin{}_lmax{}_{}.h5\".format(nside, maxamp, alpha_TT, nrealizations, ellmin, ellmax, outnumstr)\n with h5py.File(outfn, 'w') as f:\n if compress:\n dset = f.create_dataset(name='deltaT', data=allmaps, compression=\"gzip\")\n else:\n dset = f.create_dataset(name='deltaT', data=allmaps)\n dset.attrs['maxamp'] = maxamp\n dset.attrs['alpha_TT'] = alpha_TT\n dset.attrs['nrealizations'] = nrealizations\n \n return outfn\n\n \nif __name__ == '__main__':\n\n # Pick frequencies and velocities\n nus = [353, 217, 143]\n\n #v_index_list = np.arange(77, 109)\n v_index_list = np.arange(73, 112)\n startvel = v_index_list[0]\n stopvel = v_index_list[-1]\n \n # number of chunks that RHT data is stored in\n nchunks = 32\n \n includegamma = False\n deltaTGauss = True\n if deltaTGauss:\n deltaTstr = \"deltaTGauss\"\n \n # Test whether I_RHT needs to be made (does not change with frequency)\n I_RHT_fn = \"../data/IRHT_HI4PI_GNILC_{}_{}_1_{}.fits\".format(startvel, stopvel, deltaTstr)\n if not os.path.isfile(I_RHT_fn):\n print(\"Will create I_RHT file.\")\n \n time0=time.time()\n\n I_n_nu, Q_n_nu, U_n_nu, I_RHT = make_IQU(nchunks=nchunks, nus=nus, v_index_list=v_index_list, includegamma=includegamma, deltaTGauss=deltaTGauss)\n time1=time.time()\n print(\"took {} minutes\".format((time1-time0)/60.))\n \n # I_RHT file does not change with frequency\n if not os.path.isfile(I_RHT_fn):\n hp.fitsfunc.write_map(I_RHT_fn, I_RHT, nest=False, fits_IDL=False, coord='G')\n\n for i_nu, _nu in enumerate(nus):\n I_fn = \"../data/I_{}_HI4PI_GNILC_{}_{}_1_{}.fits\".format(_nu, startvel, stopvel, deltaTstr)\n Q_fn = \"../data/Q_{}_HI4PI_GNILC_{}_{}_1_gamma_{}_{}.fits\".format(_nu, startvel, stopvel, includegamma, deltaTstr)\n U_fn = \"../data/U_{}_HI4PI_GNILC_{}_{}_1_gamma_{}_{}.fits\".format(_nu, startvel, stopvel, includegamma, deltaTstr)\n hp.fitsfunc.write_map(I_fn, I_n_nu[:, i_nu], nest=False, fits_IDL=False, coord='G')\n hp.fitsfunc.write_map(Q_fn, Q_n_nu[:, i_nu], nest=False, 
fits_IDL=False, coord='G')\n        hp.fitsfunc.write_map(U_fn, U_n_nu[:, i_nu], nest=False, fits_IDL=False, coord='G')\n    \n\n\n\n","sub_path":"code/magnetically_coherent.py","file_name":"magnetically_coherent.py","file_ext":"py","file_size_in_byte":15710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"198894599","text":"from django.core.management.base import BaseCommand\nfrom reestr.models import AccreditedCenter, AccreditedCertificationPoint, SROMember\nfrom registry.models import RegistryRecordPersonal\nimport re\nimport time\n\n\nclass Command(BaseCommand):\n    def add_arguments(self, parser):\n        parser.add_argument('--start_from', default=False, type=int, help='set starting point')\n\n\n    def handle(self, *args, **options):\n        start_from = 0\n        if options['start_from']:\n            start_from = options['start_from']\n\n        records_count = RegistryRecordPersonal.objects.count()\n        # records_count = 2000\n        chunk_size = 1000\n        ap_template = r', \\d{1,2}АП'\n        regex = re.compile(ap_template)\n        inactive_sro_member = SROMember.objects.filter(status='na').first()\n        for i in range(start_from, records_count, chunk_size):\n            records = RegistryRecordPersonal.objects.all().order_by('pk')[i:i+chunk_size]\n            started = time.time()\n            for rec in records:\n                code = ''\n                try:\n                    if rec.data['place_of_att']:\n                        code = rec.data['place_of_att'].strip()\n                        if 'АП' in code:\n                            match = re.search(regex, code)\n                            start_end = match.span()\n                            point = code[start_end[0]:start_end[1]]\n                            center_code = code.replace(point, \"\")\n                            center, center_created = AccreditedCenter.objects.get_or_create(\n                                short_code=center_code,\n                                direction='personal')\n                            cert_point_code = point.replace(\", \", \"\")\n                            cert_point, cert_point_created = AccreditedCertificationPoint.objects.get_or_create(\n                                short_code=cert_point_code,\n                                parent=center\n                            )\n                            rec.data['center_pk'] = center.pk\n                            rec.data['cert_point_pk'] = cert_point.pk\n                        else:\n                            center, created = AccreditedCenter.objects.get_or_create(short_code=code)\n                            rec.data['center_pk'] = center.pk\n                    rec.save()\n                except Exception as e:\n                    print('FK ERROR: ', e, code)\n                    continue\n            print('---------> i:', i, '<----------- elapsed:', time.time() - started)\n\n\n\n\n\n\n\n","sub_path":"mainapp/management/commands/set_registry_fk.py","file_name":"set_registry_fk.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"200224848","text":"from django.conf import settings\nfrom django.contrib.sites.models import Site\n\n\nstr2bool = lambda s: s.lower() in ['true', 'yes', 't', '1']\n\n\ndef get_service_host(site=None):\n    current_site = site if site else Site.objects.get_current()\n    protocol = getattr(settings, \"DEFAULT_HTTP_PROTOCOL\", \"http\")\n    service_url = \"{0}://{1}\".format(\n        protocol,\n        current_site.domain\n    )\n    return service_url\n","sub_path":"mysite/account/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"261313764","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nThis is a script used to test the while loop.\n'''\n\n__author__ = 'leanna li'\n\n
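# sentinel-controlled loop: typing 'quit' exits; any other entry is parsed as an age and priced\n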
def query_price():\n    age = ''\n    while age != 'quit':\n        age = input('Please input age. Enter quit to exit: ')\n        if age == 'quit':\n            print('Exit query menu.')\n            break\n        age = int(age)\n        if age < 3:\n            print('Age under 3 is free.')\n        elif 3 <= age < 12:\n            print('Age between 3 and 12 is 10 dollars.')\n        elif age >=12:\n            print('Age 12 and over is 15 dollars.')\n\ndef main():\n    query_price()\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"7-5.py","file_name":"7-5.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"278238874","text":"\ndef main():\n\t# Try to open the input file\n\ttry:\n\t\tf = open('modules.txt','r')\n\t\tprocessFile(f)\n\t\tf.close()\n\texcept (OSError, IOError) as e:\n\t\tprint('File not found!')\n\ndef processFile(f):\n\n\tdb = {}\n\tfor line in f:\n\t\tparts = line.split(':')\n\t\tdb.setdefault(parts[0],list()).append( parts[1] )\n\n\tfor k in db:\n\t\tprint(k, ' ', db[k])\n# Run!\nmain()","sub_path":"Task 3.1/Task 3.1.py","file_name":"Task 3.1.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"338193700","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Fernandroid\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 04 12:08:27 2015\r\n\r\n@author: u304479\r\n\"\"\"\r\n\r\ndef NNSoftMax(nn_params,input_layer_size,hidden_layer_size,num_labels,X, label,landa): \r\n    \"\"\"\r\n    NNSoftMax implements the neural network cost function for a two-layer\r\n    neural network which performs classification.\r\n    [J, grad] = NNSoftMax(nn_params, input_layer_size, hidden_layer_size, num_labels,\r\n    X, label, landa) computes the cost and gradient of the neural network. The\r\n    parameters for the neural network are \"unrolled\" into the vector\r\n    nn_params and need to be converted back into the weight matrices. 
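\r\n    J is the softmax cross-entropy cost plus an L2 regularization penalty on the non-bias weights.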
\r\n    The returned parameter grad should be a \"unrolled\" vector of the\r\n    partial derivatives of the neural network.\r\n    Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\r\n    for our 3 layer neural network\r\n    P: is a row vector (array(1,2)) for Bernoulli variable with P(1) equals p for the input layer \r\n    and P(2) equals p for the hidden layer\r\n    X: data array(n samples,features)\r\n    num_labels: number of classes\r\n    label: sample classes array(size(m))\r\n    \"\"\"\r\n    import numpy as np\r\n    from scipy import sparse \r\n    \r\n    \r\n    Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size+1 )], (hidden_layer_size, input_layer_size + 1))\r\n    Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],(num_labels, hidden_layer_size + 1))\r\n    \"\"\"input_layer_size=300\r\n    hidden_layer_size=500\r\n    num_labels=10\r\n    X=random.rand(1000,300)\r\n    Theta1=random.rand(hidden_layer_size,input_layer_size+1)*2*0.01-0.01\r\n    Theta2=random.rand(num_labels,hidden_layer_size+1)*2*0.01-0.01 \r\n    label=random.random_integers(num_labels,size=(m))-1\r\n    \"\"\"\r\n    \r\n    \r\n    def sigmoid(z):\r\n        s=1/(1+np.exp(-z))\r\n        return s\r\n    \r\n    def sigmoidGradient(z):\r\n        s=sigmoid(z)*(1-sigmoid(z))\r\n        return s\r\n    \r\n    def SoftMax(z):\r\n        # z carries classes along axis 0 and samples along axis 1, so both the\r\n        # numerical-stability shift and the normalisation are taken per column\r\n        maxes = np.amax(z, axis=0)\r\n        e = np.exp(z - maxes)\r\n        P = e / np.sum(e, axis=0)\r\n        return P\r\n    \r\n    \"Setup some useful variables\"\r\n\r\n    m = X.shape[0]\r\n    J = 0.\r\n    Theta1_grad = np.zeros(np.shape(Theta1))\r\n    Theta2_grad = np.zeros(np.shape(Theta2))\r\n    Y=sparse.csc_matrix((np.ones((m)),(np.arange(m),label)),shape=(m,num_labels)).toarray()\r\n\r\n    \" Part 1: forward propagation:\"\r\n\r\n    X = np.hstack([np.ones((m, 1)), X])\r\n    z2=np.dot(X,Theta1.T)\r\n    a2=sigmoid(z2)\r\n    a2 = np.hstack([np.ones((m, 1)), a2])\r\n    z3=np.dot(a2,Theta2.T)\r\n    h=SoftMax(z3.T)\r\n\r\n    \" Cost function\"\r\n    J=-(1./m)*np.sum(Y*np.log(h.T)) + (landa/(2.*m))*(np.sum(Theta1[:,1:]**2)+np.sum(Theta2[:,1:]**2))\r\n    \r\n    \"Part 2: back propagation to compute gradient\"\r\n    \r\n    delta3=(h.T-Y)\r\n    delta2=np.dot(delta3,Theta2[:,1:])*sigmoidGradient(z2)\r\n\r\n    Theta2_grad=np.dot(delta3.T,a2)/m\r\n    Theta1_grad=np.dot(delta2.T,X)/m\r\n    b2=Theta2_grad[:,0]\r\n    b1=Theta1_grad[:,0]\r\n    Theta2_grad=np.hstack([b2.reshape(b2.shape[0],1), Theta2_grad[:,1:] + (landa/m)*Theta2[:,1:]])\r\n    Theta1_grad=np.hstack([b1.reshape(b1.shape[0],1), Theta1_grad[:,1:] + (landa/m)*Theta1[:,1:]])\r\n\r\n    \" Unroll gradients\"\r\n    grad=np.concatenate((Theta1_grad.flatten(),Theta2_grad.flatten()))\r\n\r\n\r\n    return (J, grad)\r\n\r\n","sub_path":"NNSoftMax.py","file_name":"NNSoftMax.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"312034172","text":"import numpy as np\nfrom utils.parse_utils import BIWIParser, DpParser, create_dataset\n\n# eth data.\n# annot_file = 'data/ewap_dataset/seq_eth/obsmat.txt'\n# npz_out_file = 'data/ewap_dataset/seq_eth/data.npz'\n# parser = BIWIParser()\n\n# DP data.\nannot_file = 'data/dp_vehicle/train/*0.txt'\nnpz_out_file = 'data/dp_vehicle/train/data_4s_with_offset.npz'\nparser = DpParser()\n\nparser.load(annot_file)\n\nobsvs, preds, times, batches = create_dataset(parser, n_past=5, n_next=40)\n\nnp.savez(npz_out_file, obsvs=obsvs, preds=preds, times=times, batches=batches)\nprint('dataset was created successfully and stored in:', 
npz_out_file)\n","sub_path":"create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"107339903","text":"# -*- coding: utf-8 -*-\n\nEMBEDDING_DIM = 100\n\n## test-output file\nOUTPUT_FILE = 'ner_test_output.txt'\n\n## sample_test\nTEST_FILE = 'ner_test_input.txt'\n\n## conll 2003\nTRAINING_FILE = \"DATA/train.txt\" \nVALIDATION_FILE = \"DATA/valid.txt\"\nMODEL_FILE = \"model_weights.hdf5\"\n#TEST_FILE = \"ner_test_input.txt\"\n\n## chemdner\n#TRAINING_FILE = \"data/che_train.txt\" \n#VALIDATION_FILE = \"data/che_valid.txt\"\n#MODEL_FILE = \"model_weights_che.hdf5\"\n#TEST_FILE = \"data/che_test.txt\"\n\n## ontonotes\n#TRAINING_FILE = \"data/data.modified_train_ontonotes.txt\"\n#VALIDATION_FILE = \"data/ontonotes_valid.txt\"\n#MODEL_FILE = \"model_weights_che.hdf5\"\n#TEST_FILE = \"data/data.modified_test_ontonotes.txt\"\n\nGLOVE_EMBEDDINGS = \"DATA/glove.6B.100d.txt\"\nBATCH_SIZE = 10\nEPOCHS = 1\nMAX_CHARS = 40\nCHAR_EMBDS_DIM = 30\nPOOL_SIZE = 40\nFILTER_SIZE = 3\nNO_OF_FILTERS = 30\nDICT_FILE = \"DATA/dicts.txt\"\nMAX_SEQ_LEN = 150\nEMBEDDINGS_FILE = \"DATA/embds.npy\"\nDROPOUT = 0.5","sub_path":"extraction/named_entity/End_to_end/tests/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"403571607","text":"from bottle import Bottle, request, hook, route, response, run\nimport pandas as pd\nfrom dataUtils import top_n_crops_produced_at_point, top_n_production_points_for_crop\n\ndf = None\n\napp = Bottle()\n\n# Load the pandas table\ndef _initialize():\n    global df\n    df = pd.read_csv('./production_clim_merged_georasters.csv')\n    return\n\n@app.hook('after_request')\ndef enable_cors():\n    '''Add headers to enable CORS'''\n    response.headers['Access-Control-Allow-Origin'] = '*'\n    response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n    response.headers['Access-Control-Allow-Headers'] = 'Authorization, Origin, Accept, Content-Type, X-Requested-With'\n\n@app.route('/top_points', method=['GET'])\ndef get_top_points():\n    crop = request.query['crop']\n    n = int(request.query['n'])\n\n    top_points = top_n_production_points_for_crop(crop, n, df)\n    return {'data': top_points}\n\n@app.route('/top_crops', method=['GET'])\ndef get_top_crops():\n    x = float(request.query['x'])\n    y = float(request.query['y'])\n    n = int(request.query['n'])\n\n    top_crops = top_n_crops_produced_at_point(x, y, n, df)\n    return {'data': top_crops}\n\nif __name__ == \"__main__\":\n    _initialize()\n    app.run(host='localhost', port=8080)\n","sub_path":"fakeServer/fakeApi.py","file_name":"fakeApi.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"439517062","text":"from datetime import datetime\n\n\nfuncWrap = []\nfuncClock = []\n\n\ndef wrap(func):\n    print(\"Wrap\", func)\n    funcWrap.append(func)\n    def action(*args, **kwargs):\n        result = func(*args, **kwargs)\n        print(\"Decorator wrapper\")\n        return result\n    return action\n\n\ndef clock(func):\n    print(\"Clock\", func)\n    funcClock.append(func)\n    def action(*args, **kwargs):\n        print(\"Clock\", func)\n        start = datetime.now()\n        result = func(*args, **kwargs)\n        end = datetime.now()\n        message = \"Start: {0}, End: {1}\"\n        print(message.format(start, end))\n        return result\n    return action\n\n\n@wrap\n@clock\ndef func():\n    
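# The stacked decorators apply bottom-up, i.e. func = wrap(clock(func)), so a call to\n    # func() enters wrap's wrapper first, which in turn invokes clock's timing wrapper.\n    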
for i in range(0, 10):\n        print(i)\n    return \"End\"\n\nif __name__ == '__main__':\n    print(func())\n","sub_path":"decor.py","file_name":"decor.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"616080372","text":"#!/usr/bin/env python3\r\n# coding:utf-8\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport math\r\nimport threading\r\nimport time\r\nimport datetime\r\n\r\nimport CMDcontrol\r\n\r\nchest_r_width = 480\r\nchest_r_height = 640\r\nhead_r_width = 640\r\nhead_r_height = 480\r\n\r\nobs_rec = False\r\nbaf_rec = False\r\nhole_rec = False\r\nbridge_rec = False\r\ndoor_rec = False\r\nkick_ball_rec = False\r\nfloor_rec = False\r\n\r\nimg_debug = 1\r\naction_DEBUG = False\r\nbox_debug = False\r\nstream_pic = True\r\nrobot_IP = \"192.168.43.201\"\r\nsingle_debug = 0\r\n\r\nchest_ret = True  # 读取图像标志位\r\nret = False  # 读取图像标志位\r\nChestOrg_img = None  # 原始图像更新\r\n\r\nHeadOrg_img = None  # 原始图像更新\r\nChestOrg_copy = None\r\nHeadOrg_copy = None\r\n\r\nsleep_time_s = 0.01\r\nsleep_time_l = 0.05\r\nreal_test = 1  # yw:这个量为1表示是实际赛道情况,机器人会执行相应的动作,否则就只打印出现在想做什么但是并不会实际做出来。\r\nreset = 0\r\n\r\nif stream_pic:\r\n    stream_head = \"http://\" + robot_IP + \":8082/?action=stream?dummy=param.mjpg\"\r\n    cap_head = cv2.VideoCapture(stream_head)\r\n    stream_chest = \"http://\" + robot_IP + \":8080/?action=stream?dummy=param.mjpg\"\r\n    cap_chest = cv2.VideoCapture(stream_chest)\r\nelse:\r\n    cap_chest = cv2.VideoCapture(0)\r\n    cap_head = cv2.VideoCapture(2)\r\ncolor_range = {\r\n    'yellow_door': [(26 , 166 , 118), (32 , 241 , 246)], # yw:起点和终点门上的黄色*******10.16 11\r\n    'black_door':[(0 , 0 , 13), (165 , 204 , 42)], # yw:起点和终点门上的黑色**************\r\n    'blue_baf': [(100 , 116 , 63), (110 , 255 , 198)], # yw:挡板的蓝色******************\r\n    'black_dir': [(0 , 0 , 23), (176 , 182 , 101)], # yw:地雷的黑色*******************\r\n    'gray_dir': [(76 , 41 , 112), (82 , 65 , 177)], # yw:地雷关卡地板的灰色************\r\n    'green_hole_chest': [(71 , 116 , 83), (79 , 255 , 231)], # yw:过坑的绿色(胸部检测)*********11\r\n    'green_hole_head': [(73 , 157 , 123), (80 , 255 , 170)], # yw:过坑的绿色(头部检测)***********11\r\n    'blue_floor': [(99 , 152 , 179), (106 , 227 , 255)], # yw:蓝色台阶************************\r\n    'green_floor': [(71 , 126 , 122), (78 , 217 , 199)], # yw:绿色台阶*********************\r\n    'red_floor1': [(0, 138 , 137), (4, 231 , 255)] , # yw:红色台阶 我们取红色台阶需要有两个值************\r\n    'red_floor2': [(176, 138 , 137), (179, 231 , 255)],#****************************\r\n    'red_XP1': [(0, 138 , 137), (4, 231 , 255)] , # yw:红色下坡 他这里取了两个掩模做了或运算 不过这两个掩模的值怎么来的我不清楚。#****************************\r\n    'red_XP2': [(176, 138 , 137), (179, 231 , 255)],#****************************\r\n    'white_ball_head': [(0 , 0 , 155), (176 , 68 , 255)], # yw:踢的白球*******************88\r\n    'white_ball_chest': [(0 , 0 , 155), (176 , 68 , 255)],# yw:踢的白球*******************88\r\n    'd_red_ball_floor1': [(177, 92 , 100), (179, 206 , 197)], # yw:这个和下面这个应该是砖[(0 , 92 , 100), (179 , 206 , 197)]\r\n    'd_red_ball_floor2': [(75, 92 , 100), (90, 206 , 197)],#****************************\r\n    'blue_hole_chest': [(103 , 117 , 98), (115 , 247 , 234)], # yw:*********************蓝色坑\r\n    'blue_hole_head': [(103 , 117 , 98), (115 , 247 , 234)],#****************************\r\n    'blue_hole': [(112 , 110 , 102), (147 , 183 , 190)],#踢球洞的蓝色圈\r\n    'head_blue_door': [(55 , 62 , 47), (120 , 255 , 186)], # wc:蓝色门************************\r\n    'kick_ball_rec': [(0 , 0 , 155), (176 , 68 , 255)],#**********************\r\n    # 补充\r\n    'green_bridge': [(70 , 
134 , 124), (78 , 229 , 254)],#***************\r\n 'blue_bridge': [(100 , 152 , 182), (105 , 209 , 255)],#*********************\r\n 'green_bridge_rec': [(69, 116, 115), (79, 209, 176)],\r\n 'blue_bridge_rec': [(102, 123, 132), (110, 213, 235)],\r\n}\r\n\r\n\r\n#################################################################识别\r\n# 台阶识别\r\ndef floor_detect(frame, color): # 该函数输入值为图片和期待检测的颜色\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n if color == 'red_floor':\r\n Imask1 = cv2.inRange(hsv, color_range['red_floor1'][0], color_range['red_floor1'][1])\r\n Imask2 = cv2.inRange(hsv, color_range['red_floor2'][0], color_range['red_floor2'][1])\r\n mask = cv2.bitwise_or(Imask1, Imask2)\r\n else:\r\n mask = cv2.inRange(hsv, color_range[color][0],\r\n color_range[color][1]) # 图像,lower,upper。在lower和upper之间的像素变为255,否则变为0\r\n # cv2.imshow(\"mask\",mask)\r\n # cv2.waitKey(0)\r\n _, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_NONE) # 找出轮廓 https://blog.csdn.net/hjxu2016/article/details/77833336/\r\n areaMaxContour, area_max = getAreaMaxContour1(contours) # 找出最大轮廓\r\n percent = round(100 * area_max / (chest_r_width * chest_r_height), 2) # 最大轮廓的百分比\r\n if areaMaxContour is not None:\r\n # print(percent)\r\n if percent > 1:\r\n return 1\r\n else:\r\n return 0\r\n else:\r\n return 0\r\n\r\n\r\ndef floor_judge(frame):\r\n color = 'red_floor'\r\n if floor_detect(frame, color) == 1:\r\n color = 'blue_floor'\r\n if floor_detect(frame, color) == 1:\r\n color = 'green_floor'\r\n if floor_detect(frame, color) == 1:\r\n return 1\r\n else:\r\n return 0\r\n else:\r\n return 0\r\n else:\r\n return 0\r\n\r\n\r\n# 挡板识别\r\ndef baffle_recognize():\r\n global org_chest_image\r\n org_chest_image = ChestOrg_img.copy()\r\n color = 'blue_baf'\r\n src = org_chest_image.copy()\r\n src = src[int(100):int(500), int(50):int(500)]\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1])\r\n mask = cv2.dilate(mask, None, iterations=8)\r\n # mask = cv2.erode(mask,None,iterations=10)\r\n _, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n if len(contours) > 0:\r\n max_area_contour, contour_max_area = getAreaMaxContour1(contours)\r\n Area = cv2.contourArea(max_area_contour)\r\n rect = cv2.minAreaRect(max_area_contour) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n edge1 = math.sqrt(math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2))\r\n edge2 = math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2))\r\n ratio = edge1 / edge2 # 长与宽的比值大于3认为是条线\r\n\r\n # print(contour_max_area)\r\n # print(box)\r\n # print(len(contours))\r\n # cv2.drawContours(src,[max_area_contour],0,(0,0,255),2)\r\n # cv2.imshow(\"src\",src)\r\n # cv2.imshow(\"mask\",mask)\r\n # cv2.waitKey()\r\n # print(Area,ratio)\r\n\r\n if Area >= 5000 and ratio > 3:\r\n # print(\"正式进入挡板阶段\")\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# 过坑识别\r\ndef hole_recognize(): # yw:hole_recognize和hole_recognize_2的区别在于颜色不一样,前者是绿色,后者是蓝色。\r\n global org_chest_img\r\n org_chest_img = ChestOrg_img.copy()\r\n Area = 0\r\n color = 'green_hole_chest'\r\n src = org_chest_img.copy()\r\n src = src[int(100):int(400), int(50):int(500)] # yw:这里我记得是Y,X,切片顺序与常识不一样\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n #如果有红色或蓝色则判断不是坑\r\n Imask1_red = cv2.inRange(hsv_img.copy(), 
color_range['red_floor1'][0], color_range['red_floor1'][1])\r\n Imask2_red = cv2.inRange(hsv_img.copy(), color_range['red_floor2'][0], color_range['red_floor2'][1])\r\n Imask_red = cv2.bitwise_or(Imask1_red, Imask2_red)\r\n area_red=area_bits(Imask_red)\r\n if area_red>2000:#如果红色面积大于2000则判断这里不是坑,大概率是台阶。\r\n return False\r\n mask_blue=cv2.inRange(hsv_img.copy(),color_range['blue_floor'][0],color_range['blue_floor'][1])\r\n area_blue=area_bits(mask_blue)\r\n if area_blue>2000:\r\n return False\r\n ###############################\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1]) # yw:用HSV空间分割颜色有更好的效果\r\n closed = cv2.dilate(mask, None, iterations=5) # yw:膨胀5次,腐蚀8次。但为什么核为NONE?\r\n closed = cv2.erode(closed, None, iterations=8)\r\n\r\n # cv2.imshow(\"closed\",closed)\r\n # cv2.waitKey()\r\n\r\n _, contours, hierarchy = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n if len(contours) > 0:\r\n max_area = max(contours, key=cv2.contourArea)\r\n Area = cv2.contourArea(max_area)\r\n rect = cv2.minAreaRect(max_area)\r\n # print(rect[0])\r\n # # print(Area)\r\n _, contours2, hierarchy2 = cv2.findContours(closed, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\r\n\r\n # cv2.drawContours(src,contours2, 0, (0, 0, 255), 2)\r\n # print(len(contours2))\r\n # print(Area)\r\n if Area > 18000:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef hole_recognize_2():\r\n global org_chest_img\r\n org_chest_img = ChestOrg_img.copy()\r\n Area = 0\r\n color = 'blue_hole_chest'\r\n src = org_chest_img.copy()\r\n src = src[int(100):int(400), int(50):int(500)]\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n # 如果有红色则判断不是坑\r\n Imask1_red = cv2.inRange(hsv_img.copy(), color_range['red_floor1'][0], color_range['red_floor1'][1])\r\n Imask2_red = cv2.inRange(hsv_img.copy(), color_range['red_floor2'][0], color_range['red_floor2'][1])\r\n Imask_red = cv2.bitwise_or(Imask1_red, Imask2_red)\r\n area_red = area_bits(Imask_red)\r\n if area_red > 2000: # 如果红色面积大于2000则判断这里不是坑,大概率是台阶。\r\n return False\r\n mask_green = cv2.inRange(hsv_img.copy(), color_range['green_floor'][0], color_range['green_floor'][1])\r\n area_green = area_bits(mask_green)\r\n if area_green > 2000:\r\n return False\r\n ###########################\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1])\r\n closed = cv2.dilate(mask, None, iterations=5)\r\n closed = cv2.erode(closed, None, iterations=8)\r\n\r\n # cv2.imshow(\"closed\",closed)\r\n # cv2.waitKey()\r\n\r\n _, contours, hierarchy = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n if len(contours) > 0:\r\n max_area = max(contours, key=cv2.contourArea)\r\n Area = cv2.contourArea(max_area)\r\n rect = cv2.minAreaRect(max_area)\r\n # print(rect[0])\r\n # # print(Area)\r\n _, contours2, hierarchy2 = cv2.findContours(closed, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\r\n\r\n # cv2.drawContours(src,contours2, 0, (0, 0, 255), 2)\r\n # print(len(contours2))\r\n # print(Area)\r\n if Area > 18000 :\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# 地雷识别\r\ndef tacle_recognize():\r\n color = 'black_dir'\r\n src = ChestOrg_img.copy()\r\n src = src[int(180):int(400), int(100):int(400)]\r\n src2 = HeadOrg_img.copy()\r\n src2 = src2[int(160):int(400), int(20):int(480)]\r\n\r\n # cv2.imshow(\"src0\",src2)\r\n\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1])\r\n 
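# Dilate-then-erode here is an approximate morphological closing: it fuses speckled\r\n    # black pixels into solid mine blobs before the contour count below; because the\r\n    # dilation count (6) exceeds the erosion count (4), the blobs also grow slightly.\r\n    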
closed = cv2.dilate(mask, None, iterations=6)\r\n mask = cv2.erode(closed, None, iterations=4)\r\n _, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n\r\n color2 = 'gray_dir'\r\n Area = 0\r\n src2 = cv2.GaussianBlur(src2, (5, 5), 0)\r\n hsv_img2 = cv2.cvtColor(src2, cv2.COLOR_BGR2HSV)\r\n mask1 = cv2.inRange(hsv_img2, color_range[color2][0], color_range[color2][1])\r\n mask2 = cv2.erode(mask1, None, iterations=10)\r\n mask3 = cv2.dilate(mask2, None, iterations=10)\r\n _, contours2, hierarchy2 = cv2.findContours(mask3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n\r\n # print(len(contours))\r\n if len(contours2) > 0:\r\n max_area_contour, contour_max_area = getAreaMaxContour1(contours2)\r\n Area = contour_max_area\r\n # print(Area)\r\n\r\n # cv2.imshow(\"mask\",mask3)\r\n # cv2.waitKey()\r\n # print(len(contours))\r\n if Area > 15000:\r\n if len(contours) >= 3:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# 楼梯识别\r\ndef floor_recognize():\r\n src = ChestOrg_img.copy()\r\n # src = src[int(100):int(400),int(50):int(500)]\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n judge = floor_judge(src)\r\n if judge == 1:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# 过桥识别\r\ndef bridge_recognize():\r\n color = 'green_bridge_rec' # 颜色变量设置为桥面所用\r\n contour_max_area = 0 # 初始化\r\n src = HeadOrg_img.copy() # 获取头部图像的拷贝\r\n src = src[int(200):int(500), int(50):int(500)] # 截取该图像的一部分内容进行处理提取轮廓\r\n src = cv2.GaussianBlur(src, (5, 5), 0) # 用5*5的卷积核进行高斯模糊降噪\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV) # 将图像从rgb空间转换到hsv空间\r\n # 如果有红色或蓝色则判断不是坑\r\n chest_img = ChestOrg_img.copy()\r\n hsv_chest = cv2.cvtColor(chest_img, cv2.COLOR_BGR2HSV)\r\n Imask1_red = cv2.inRange(hsv_chest.copy(), color_range['red_floor1'][0], color_range['red_floor1'][1])\r\n Imask2_red = cv2.inRange(hsv_chest.copy(), color_range['red_floor2'][0], color_range['red_floor2'][1])\r\n Imask_red = cv2.bitwise_or(Imask1_red, Imask2_red)\r\n area_red = area_bits(Imask_red)\r\n if area_red > 1000: # 如果红色面积大于1000则判断这里不是坑,大概率是台阶。\r\n return False\r\n mask_blue = cv2.inRange(hsv_img.copy(), color_range['blue_floor'][0], color_range['blue_floor'][1])\r\n area_blue = area_bits(mask_blue)\r\n if area_blue > 2000:\r\n return False\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1]) # 将符合绿色桥颜色范围的图像部分用白色显示,图像其余部分变为黑色\r\n mask1 = cv2.dilate(mask, None, iterations=10) # 先进行10次膨胀\r\n # mask1 = cv2.erode(mask, None, iterations=10)\r\n mask2 = cv2.erode(mask1, None, iterations=8) # 再进行8次腐蚀\r\n _, contours, hierarchy = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # 用膨胀后图像去提取所有外轮廓\r\n # 如果存在外轮廓:\r\n if len(contours) > 0:\r\n max_area_contour, contour_max_area = getAreaMaxContour1(contours) # 找到面积最大的轮廓以及最大轮廓的面积\r\n rect = cv2.minAreaRect(max_area_contour) # 最大轮廓的最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 找到上述最小外接矩形的四个顶点\r\n # 分别求出最小外接矩形的长和宽\r\n edge1 = math.sqrt(math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2))\r\n edge2 = math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2))\r\n # 求出外接矩形长与宽之比\r\n ratio = edge1 / edge2\r\n # 找到经过腐蚀后图像的轮廓。建立两个等级的轮廓,上面的一层为外边界,里面的一层为内孔的边界信息。如果内孔内还有一 个连通物体,这个物体的边界也在顶层。\r\n _, contours2, hierarchy2 = cv2.findContours(mask2, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\r\n\r\n # cv2.imshow(\"mask1\",mask1)\r\n # cv2.imshow(\"mask2\",mask2)\r\n # cv2.waitKey()\r\n # print(Area,ratio,len(contours2))\r\n\r\n # if contour_max_area >= 4000 and ratio < 1.4:\r\n # 
如果膨胀后图像的最大轮廓面积超过4000同时该最大轮廓最小外接矩形的长宽比小于1.6(意味着长宽相近或是横宽竖长,\r\n # 这样提取的轮廓比较接近在头部视角获得的桥的形状),并且再腐蚀后的图像轮廓也仅剩下一个,则意味着识别到了桥\r\n if contour_max_area >= 4000 and contour_max_area <= 15000:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# 步骤与1完全一致,只是有桥颜色上的改变\r\ndef bridge_recognize_2():\r\n color = 'blue_bridge_rec'\r\n contour_max_area = 0\r\n src = HeadOrg_img.copy()\r\n src = src[int(200):int(500), int(50):int(500)]\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n # 如果有红色则判断不是坑\r\n Imask1_red = cv2.inRange(hsv_img.copy(), color_range['red_floor1'][0], color_range['red_floor1'][1])\r\n Imask2_red = cv2.inRange(hsv_img.copy(), color_range['red_floor2'][0], color_range['red_floor2'][1])\r\n Imask_red = cv2.bitwise_or(Imask1_red, Imask2_red)\r\n area_red = area_bits(Imask_red)\r\n if area_red > 2000: # 如果红色面积大于2000则判断这里不是坑,大概率是台阶。\r\n return False\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1])\r\n mask1 = cv2.dilate(mask, None, iterations=10)\r\n # mask1 = cv2.erode(mask, None, iterations=10)\r\n mask2 = cv2.erode(mask1, None, iterations=8)\r\n _, contours, hierarchy = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n if len(contours) > 0:\r\n max_area_contour, contour_max_area = getAreaMaxContour1(contours)\r\n rect = cv2.minAreaRect(max_area_contour) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n edge1 = math.sqrt(math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2))\r\n edge2 = math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2))\r\n ratio = edge1 / edge2 # 长与宽的比值大于3认为是条线\r\n\r\n _, contours2, hierarchy2 = cv2.findContours(mask2, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\r\n\r\n # cv2.imshow(\"mask1\",mask1)\r\n # cv2.imshow(\"mask2\",mask2)\r\n # cv2.waitKey()\r\n # print(Area,ratio,len(contours2))\r\n\r\n if contour_max_area >= 4000 and ratio < 1.6 and len(contours2) == 1:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# 踢球识别\r\ndef kick_ball_recognize():\r\n color = 'kick_ball_rec'\r\n Area = 0\r\n src = HeadOrg_img.copy()\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1])\r\n # mask1 = cv2.dilate(mask, None, iterations=10)\r\n mask1 = cv2.erode(mask, None, iterations=10)\r\n mask2 = cv2.dilate(mask1, None, iterations=10)\r\n _, contours, hierarchy = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n # cv2.imshow(\"mask\",mask2)\r\n # cv2.waitKey()\r\n # print(len(contours))\r\n if len(contours) > 0:\r\n max_area_contour, contour_max_area = getAreaMaxContour1(contours)\r\n Area = contour_max_area\r\n # print(Area)\r\n if Area >= 30000:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef area_calculate(color):\r\n contour_max_area = 0\r\n src = ChestOrg_img.copy()\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1])\r\n mask1 = cv2.erode(mask, None, iterations=4)\r\n mask2 = cv2.dilate(mask1, None, iterations=4)\r\n _, contours, hierarchy = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n if len(contours) > 0:\r\n max_area_contour, contour_max_area = getAreaMaxContour1(contours)\r\n return contour_max_area\r\n else:\r\n return 0\r\n\r\n\r\ndef recognize(): # yw:这个函数应该是用来识别关卡的.该队伍本来准备通过这个函数识别所有关卡,但可能经过实操不大行,所以只用它识别洞和桥。\r\n global 
obs_rec\r\n    global baf_rec\r\n    global hole_rec\r\n    global bridge_rec\r\n    global door_rec\r\n    global kick_ball_rec\r\n    global floor_rec\r\n    if floor_rec == False and floor_recognize():\r\n        floor_rec = True\r\n        return 7\r\n    elif hole_rec == False and hole_recognize(): # yw:绿洞\r\n        hole_rec = True\r\n        return 1\r\n    elif hole_rec == False and hole_recognize_2(): # yw:蓝洞\r\n        hole_rec = True\r\n        return 10\r\n    elif bridge_rec == False and bridge_recognize(): # yw:绿桥\r\n        bridge_rec = True\r\n        return 5\r\n    elif bridge_rec == False and bridge_recognize_2(): # yw:蓝桥 不过我不明白为什么要分成一个绿的一个蓝的,是为了防止光线原因识别错误颜色吗?\r\n        bridge_rec = True\r\n        return 9\r\n    # if bridge_rec == False and bridge_recognize():\r\n    #     bridge_rec = True\r\n    #     return 5\r\n    # elif baf_rec == False and baffle_recognize():\r\n    #     baf_rec = True\r\n    #     return 3\r\n    # elif kick_ball_rec == False and kick_ball_recognize():\r\n    #     kick_ball_rec = True\r\n    #     return 6\r\n    # elif obs_rec == False and obstacle_recognize():\r\n    #     obs_rec = True\r\n    #     return 2\r\n\r\n    else:\r\n        return 0\r\n\r\n\r\nacted_name = \"\"\r\n\r\n\r\ndef action_append(act_name):\r\n    global acted_name\r\n\r\n    # print(\"please enter to continue...\")\r\n    # cv2.waitKey(0)\r\n\r\n    if action_DEBUG == False:\r\n        if act_name == \"forwardSlow0403\" and (acted_name == \"Forwalk02RL\" or acted_name == \"Forwalk02L\"):\r\n            acted_name = \"Forwalk02LR\"\r\n        elif act_name == \"forwardSlow0403\" and (acted_name == \"Forwalk02LR\" or acted_name == \"Forwalk02R\"):\r\n            acted_name = \"Forwalk02RL\"\r\n        elif act_name != \"forwardSlow0403\" and (acted_name == \"Forwalk02LR\" or acted_name == \"Forwalk02R\"):\r\n            # CMDcontrol.action_list.append(\"Forwalk02RS\")\r\n            # acted_name = act_name\r\n            print(act_name, \"动作未执行 执行 Stand\")\r\n            acted_name = \"Forwalk02RS\"\r\n        elif act_name != \"forwardSlow0403\" and (acted_name == \"Forwalk02RL\" or acted_name == \"Forwalk02L\"):\r\n            # CMDcontrol.action_list.append(\"Forwalk02LS\")\r\n            # acted_name = act_name\r\n            print(act_name, \"动作未执行 执行 Stand\")\r\n            acted_name = \"Forwalk02LS\"\r\n        elif act_name == \"forwardSlow0403\":\r\n            acted_name = \"Forwalk02R\"\r\n        else:\r\n            acted_name = act_name\r\n\r\n        CMDcontrol.actionComplete = False\r\n        if len(CMDcontrol.action_list) > 0:\r\n            print(\"队列超过一个动作\")\r\n            CMDcontrol.action_list.append(acted_name)\r\n        else:\r\n            if single_debug:\r\n                cv2.waitKey(0)\r\n            CMDcontrol.action_list.append(acted_name)\r\n            CMDcontrol.action_wait()\r\n\r\n    else:\r\n        print(\"-----------------------执行动作名:\", act_name)\r\n        time.sleep(2)\r\n\r\n\r\ndef getAreaMaxContour1(contours):\r\n    contour_area_temp = 0\r\n    contour_area_max = 0\r\n    area_max_contour = None\r\n    for c in contours:\r\n        contour_area_temp = math.fabs(cv2.contourArea(c)) # 计算轮廓面积\r\n        if contour_area_temp > contour_area_max:\r\n            contour_area_max = contour_area_temp\r\n            if contour_area_temp > 25:\r\n                area_max_contour = c\r\n    return area_max_contour, contour_area_max\r\n\r\n\r\ndef getAreaMaxContour2(contours, area=1):\r\n    contour_area_max = 0\r\n    area_max_contour = None\r\n    for c in contours:\r\n        contour_area_temp = math.fabs(cv2.contourArea(c))\r\n        if contour_area_temp > contour_area_max:\r\n            contour_area_max = contour_area_temp\r\n            if contour_area_temp > area:\r\n                area_max_contour = c\r\n    return area_max_contour\r\n\r\n\r\ndef getLine_SumContour(contours, area=1):\r\n    global handling\r\n    contours_sum = None\r\n    for c in contours:\r\n        area_temp = math.fabs(cv2.contourArea(c))\r\n        rect = cv2.minAreaRect(c) # 最小外接矩形\r\n        box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n        edge1 = 
math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2))\r\n edge2 = math.sqrt(math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2))\r\n ratio = edge1 / edge2 # 长与宽的比值\r\n center_y = (box[0, 1] + box[1, 1] + box[2, 1] + box[3, 1]) / 4\r\n if (area_temp > area) and (ratio > 3 or ratio < 0.33) and center_y > 240:\r\n contours_sum = c\r\n break\r\n for c in contours:\r\n area_temp = math.fabs(cv2.contourArea(c))\r\n rect = cv2.minAreaRect(c) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n edge1 = math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2))\r\n edge2 = math.sqrt(math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2))\r\n ratio = edge1 / edge2\r\n # print(\"ratio:\",ratio,\"area_temp:\",area_temp)\r\n\r\n if (area_temp > area) and (ratio > 3 or ratio < 0.33): # 满足面积条件 长宽比条件\r\n\r\n rect = cv2.minAreaRect(c) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n center_x = (box[0, 0] + box[1, 0] + box[2, 0] + box[3, 0]) / 4\r\n center_y = (box[0, 1] + box[1, 1] + box[2, 1] + box[3, 1]) / 4\r\n\r\n if center_y > 240: # 满足中心点坐标条件\r\n contours_sum = np.concatenate((contours_sum, c), axis=0) # 将所有轮廓点拼接到一起\r\n if box_debug:\r\n cv2.drawContours(handling, [box], -1, (0, 255, 0), 5)\r\n if img_debug:\r\n cv2.imshow('handling', handling)\r\n cv2.waitKey(10)\r\n else:\r\n if box_debug:\r\n cv2.drawContours(handling, [box], -1, (0, 0, 255), 5)\r\n if img_debug:\r\n cv2.imshow('handling', handling)\r\n cv2.waitKey(10)\r\n else:\r\n rect = cv2.minAreaRect(c) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n if box_debug:\r\n cv2.drawContours(handling, [box], -1, (0, 0, 255), 5)\r\n cv2.imshow('handling', handling)\r\n cv2.waitKey(10)\r\n\r\n return contours_sum\r\n\r\n\r\n# 根据颜色边缘调整角度与位置(头部)\r\ndef edge_angle(color):\r\n global HeadOrg_img, chest_copy, reset, skip, handling\r\n global handling\r\n angle_ok_flag = False\r\n angle = 90\r\n dis = 0\r\n bottom_centreX = 0\r\n bottom_centreY = 0\r\n see = False\r\n dis_ok_count = 0\r\n headTURN = 0\r\n hole_flag = 0\r\n\r\n step = 1\r\n while True:\r\n OrgFrame = HeadOrg_img.copy()\r\n x_start = 260\r\n blobs = OrgFrame[int(0):int(480), int(x_start):int(380)] # 只对中间部分识别处理Y , X\r\n handling = blobs.copy()\r\n frame_mask = blobs.copy()\r\n\r\n # 获取图像中心点坐标x, y\r\n center = []\r\n # 开始处理图像\r\n hsv = cv2.cvtColor(frame_mask, cv2.COLOR_BGR2HSV)\r\n hsv = cv2.GaussianBlur(hsv, (3, 3), 0)\r\n Imask = cv2.inRange(hsv, color_range[color][0], color_range[color][1])\r\n Imask = cv2.erode(Imask, None, iterations=1)\r\n Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))\r\n Imask = cv2.morphologyEx(Imask, cv2.MORPH_OPEN, kernel)\r\n _, contours, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓\r\n # cv2.imshow(\"opened\",Imask)\r\n # print(\"len:\",len(cnts))\r\n\r\n if len(contours) > 0:\r\n max_area = max(contours, key=cv2.contourArea)\r\n epsilon = 0.05 * cv2.arcLength(max_area, True)\r\n approx = cv2.approxPolyDP(max_area, epsilon, True)\r\n approx_list = list(approx)\r\n approx_after = []\r\n for i in range(len(approx_list)):\r\n approx_after.append(approx_list[i][0])\r\n approx_sort = sorted(approx_after, key=lambda x: x[1], reverse=True)\r\n # if approx_sort[0][0] > approx_sort[1][0]:\r\n # approx_sort[0], approx_sort[1] = approx_sort[1], approx_sort[0]\r\n if len(approx_sort) == 4:\r\n bottom_line = 
(approx_sort[3], approx_sort[2])\r\n center_x = (bottom_line[1][0] + bottom_line[0][0]) / 2\r\n center_y = (bottom_line[1][1] + bottom_line[0][1]) / 2\r\n else:\r\n bottom_line = None\r\n\r\n else:\r\n bottom_line = None\r\n\r\n # 初始化\r\n L_R_angle = 0\r\n blackLine_L = [0, 0]\r\n blackLine_R = [0, 0]\r\n\r\n if bottom_line is not None:\r\n see = True\r\n if bottom_line[0][1] - bottom_line[1][1] == 0:\r\n angle = 90\r\n else:\r\n angle = - math.atan(\r\n (bottom_line[1][1] - bottom_line[0][1]) / (bottom_line[1][0] - bottom_line[0][0])) * 180.0 / math.pi\r\n Ycenter = int((bottom_line[1][1] + bottom_line[0][1]) / 2)\r\n Xcenter = int((bottom_line[1][0] + bottom_line[0][0]) / 2)\r\n if bottom_line[1][1] > bottom_line[0][1]:\r\n blackLine_L = [bottom_line[1][0], bottom_line[1][1]]\r\n blackLine_R = [bottom_line[0][0], bottom_line[0][1]]\r\n else:\r\n blackLine_L = [bottom_line[0][0], bottom_line[0][1]]\r\n blackLine_R = [bottom_line[1][0], bottom_line[1][1]]\r\n cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1) # 画出中心点\r\n\r\n if blackLine_L[0] == blackLine_R[0]:\r\n L_R_angle = 0\r\n else:\r\n L_R_angle = (-math.atan(\r\n (blackLine_L[1] - blackLine_R[1]) / (blackLine_L[0] - blackLine_R[0])) * 180.0 / math.pi) - 4\r\n\r\n if img_debug:\r\n cv2.circle(OrgFrame, (blackLine_L[0] + x_start, blackLine_L[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(OrgFrame, (blackLine_R[0] + x_start, blackLine_R[1]), 5, [255, 0, 255], 2)\r\n cv2.line(OrgFrame, (blackLine_R[0] + x_start, blackLine_R[1]),\r\n (blackLine_L[0] + x_start, blackLine_L[1]), (0, 255, 255), thickness=2)\r\n cv2.putText(OrgFrame, \"L_R_angle:\" + str(L_R_angle), (10, OrgFrame.shape[0] - 30),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(OrgFrame, \"Xcenter:\" + str(Xcenter + x_start), (10, OrgFrame.shape[0] - 50),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(OrgFrame, \"Ycenter:\" + str(Ycenter), (200, OrgFrame.shape[0] - 50),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n\r\n # cv2.drawContours(frame_mask, cnt_sum, -1, (255, 0, 255), 3)\r\n # cv2.imshow('frame_mask', frame_mask)\r\n cv2.imshow('black', Imask)\r\n cv2.imshow('OrgFrame', OrgFrame)\r\n cv2.waitKey(10)\r\n else:\r\n see = False\r\n\r\n # print(Ycenter)\r\n\r\n # 决策执行动作\r\n if step == 1:\r\n print(\"653L 向右看 HeadTurn015\")\r\n action_append(\"HeadTurn015\")\r\n action_append(\"Stand\")\r\n time.sleep(1) # timefftest\r\n step = 2\r\n\r\n elif step == 2:\r\n if not see: # not see the edge\r\n # cv2.destroyAllWindows()\r\n print(\"662L 右侧看不到边缘 左侧移 Left3move\")\r\n action_append(\"Left3move\")\r\n else: # 0\r\n if L_R_angle > 1.5:\r\n if L_R_angle > 7:\r\n headTURN += 1\r\n print(\"668L 左大旋转 turn001L \", L_R_angle)\r\n action_append(\"turn001L\")\r\n\r\n else:\r\n print(\"672L 左旋转 turn000L \", L_R_angle)\r\n headTURN += 1\r\n action_append(\"turn000L\")\r\n\r\n elif L_R_angle < -1.5:\r\n if L_R_angle < -7:\r\n headTURN += 1\r\n print(\"679L 右大旋转 turn001R \", L_R_angle)\r\n action_append(\"turn001R\")\r\n\r\n else:\r\n print(\"683L 右旋转 turn000R \", L_R_angle)\r\n action_append(\"turn000R\")\r\n\r\n elif Ycenter >= 405:\r\n print(\"687L 左侧移 Left02move > 365 \", Ycenter)\r\n action_append(\"Left02move\")\r\n\r\n elif Ycenter < 380:\r\n print(\"691L 右侧移 Right02move <400 \", Ycenter)\r\n action_append(\"Right02move\")\r\n\r\n else:\r\n print(\"695L 角度与位置合适 Stand\")\r\n action_append(\"Stand\")\r\n step = 3\r\n\r\n\r\n elif step == 3:\r\n return 1\r\n break\r\n\r\n\r\n# 根据颜色边缘调整角度与位置(胸部)\r\ndef 
edge_angle_chest(color):\r\n global org_img, state, state_sel, step, reset, skip, debug\r\n r_w = chest_r_width\r\n r_h = chest_r_height\r\n top_angle = 0\r\n T_B_angle = 0\r\n topcenter_x = 0.5 * r_w\r\n topcenter_y = 0\r\n bottomcenter_x = 0.5 * r_w\r\n bottomcenter_y = 0\r\n step = 0\r\n while (True):\r\n Corg_img = ChestOrg_img.copy()\r\n Corg_img = np.rot90(Corg_img)\r\n OrgFrame = Corg_img.copy()\r\n\r\n # 初始化 bottom_right bottom_left\r\n bottom_right = (480, 0)\r\n bottom_left = (0, 0)\r\n top_right = (480, 0) # 右上角点坐标\r\n top_left = (0, 0) # 左上角点坐标\r\n\r\n frame = cv2.resize(OrgFrame, (chest_r_width, chest_r_height), interpolation=cv2.INTER_LINEAR)\r\n frame_copy = frame.copy()\r\n # 获取图像中心点坐标x, y\r\n center = []\r\n # 开始处理图像\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n hsv = cv2.GaussianBlur(hsv, (3, 3), 0)\r\n Imask = cv2.inRange(hsv, color_range[color][0], color_range[color][1])\r\n Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)\r\n\r\n _, cnts, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓\r\n\r\n cnt_sum, area_max = getAreaMaxContour1(cnts) # 找出最大轮廓\r\n C_percent = round(area_max * 100 / (r_w * r_h), 2) # 最大轮廓百分比\r\n cv2.drawContours(frame, cnt_sum, -1, (255, 0, 255), 3)\r\n\r\n if cnt_sum is not None:\r\n see = True\r\n rect = cv2.minAreaRect(cnt_sum) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n\r\n bottom_right = cnt_sum[0][0] # 右下角点坐标\r\n bottom_left = cnt_sum[0][0] # 左下角点坐标\r\n top_right = cnt_sum[0][0] # 右上角点坐标\r\n top_left = cnt_sum[0][0] # 左上角点坐标\r\n for c in cnt_sum:\r\n\r\n if c[0][0] + 1 * (r_h - c[0][1]) < bottom_left[0] + 1 * (r_h - bottom_left[1]):\r\n bottom_left = c[0]\r\n if c[0][0] + 1 * c[0][1] > bottom_right[0] + 1 * bottom_right[1]:\r\n bottom_right = c[0]\r\n\r\n if c[0][0] + 3 * c[0][1] < top_left[0] + 3 * top_left[1]:\r\n top_left = c[0]\r\n if (r_w - c[0][0]) + 3 * c[0][1] < (r_w - top_right[0]) + 3 * top_right[1]:\r\n top_right = c[0]\r\n\r\n # if debug:\r\n # handling = ChestOrg_img.copy()\r\n # cv2.circle(handling, (c[0][0], c[0][1]), 5, [0, 255, 0], 2)\r\n # cv2.circle(handling, (bottom_left[0], bottom_left[1]), 5, [255, 255, 0], 2)\r\n # cv2.circle(handling, (bottom_right[0], bottom_right[1]), 5, [255, 0, 255], 2)\r\n # cv2.imshow('handling', handling) # 显示图像\r\n # cv2.waitKey(2)\r\n\r\n bottomcenter_x = (bottom_left[0] + bottom_right[0]) / 2 # 得到bottom中心坐标\r\n bottomcenter_y = (bottom_left[1] + bottom_right[1]) / 2\r\n\r\n topcenter_x = (top_right[0] + top_left[0]) / 2 # 得到top中心坐标\r\n topcenter_y = (top_left[1] + top_right[1]) / 2\r\n\r\n bottom_angle = -math.atan(\r\n (bottom_right[1] - bottom_left[1]) / (bottom_right[0] - bottom_left[0])) * 180.0 / math.pi\r\n top_angle = -math.atan((top_right[1] - top_left[1]) / (top_right[0] - top_left[0])) * 180.0 / math.pi\r\n if math.fabs(topcenter_x - bottomcenter_x) <= 1: # 得到连线的角度\r\n T_B_angle = 90\r\n else:\r\n T_B_angle = - math.atan(\r\n (topcenter_y - bottomcenter_y) / (topcenter_x - bottomcenter_x)) * 180.0 / math.pi\r\n\r\n if img_debug:\r\n cv2.drawContours(frame_copy, [box], 0, (0, 255, 0), 2) # 将大矩形画在图上\r\n cv2.line(frame_copy, (bottom_left[0], bottom_left[1]), (bottom_right[0], bottom_right[1]),\r\n (255, 255, 0), thickness=2)\r\n cv2.line(frame_copy, (top_left[0], top_left[1]), (top_right[0], top_right[1]), (255, 255, 0),\r\n thickness=2)\r\n cv2.line(frame_copy, (int(bottomcenter_x), int(bottomcenter_y)), (int(topcenter_x), int(topcenter_y)),\r\n (255, 255, 255), thickness=2) # T_B_line\r\n\r\n 
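# T_B_angle is the inclination of the line just drawn between the bottom and top\r\n                # midpoints; it reads 90 when the tracked colour region runs straight up the frame.\r\n                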
cv2.putText(frame_copy, \"bottom_angle:\" + str(bottom_angle), (30, 450), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(frame_copy, \"top_angle:\" + str(top_angle), (30, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2)\r\n cv2.putText(frame_copy, \"T_B_angle:\" + str(T_B_angle), (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 255), 2)\r\n\r\n cv2.putText(frame_copy, \"bottomcenter_x:\" + str(bottomcenter_x), (30, 480), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(frame_copy, \"y:\" + str(int(bottomcenter_y)), (300, 480), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2) # (0, 0, 255)BGR\r\n\r\n cv2.putText(frame_copy, \"topcenter_x:\" + str(topcenter_x), (30, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(frame_copy, \"topcenter_y:\" + str(int(topcenter_y)), (230, 180), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n\r\n cv2.putText(frame_copy, 'C_percent:' + str(C_percent) + '%', (30, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2)\r\n cv2.putText(frame_copy, \"step:\" + str(step), (30, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),\r\n 2) # (0, 0, 255)BGR\r\n\r\n cv2.circle(frame_copy, (int(topcenter_x), int(topcenter_y)), 5, [255, 0, 255], 2)\r\n cv2.circle(frame_copy, (int(bottomcenter_x), int(bottomcenter_y)), 5, [255, 0, 255], 2)\r\n cv2.circle(frame_copy, (top_right[0], top_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(frame_copy, (top_left[0], top_left[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(frame_copy, (bottom_right[0], bottom_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(frame_copy, (bottom_left[0], bottom_left[1]), 5, [0, 255, 255], 2)\r\n cv2.imshow('Chest_Camera', frame_copy) # 显示图像\r\n # cv2.imshow('chest_red_mask', Imask)\r\n cv2.waitKey(100)\r\n\r\n else:\r\n print(\"815L chest NONE\")\r\n\r\n # 决策执行动作\r\n angle_ok_flag = False\r\n\r\n if step == 0: # 前进依据chest 调整大致位置,方向 看底边线调整角度\r\n\r\n if top_angle > 2: # 需要左转\r\n if top_angle > 6:\r\n print(\"826L 大左转一下 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"829L bottom_angle > 3 需要小左转 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n elif top_angle < -2: # 需要右转\r\n if top_angle < -6:\r\n print(\"833L 右大旋转 turn001R < -6 \")\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"836L bottom_angle < -3 需要小右转 turn001R \", bottom_angle)\r\n action_append(\"turn001R\")\r\n elif -2 <= top_angle <= 2: # 角度正确\r\n print(\"839L 角度合适\")\r\n step = 1\r\n elif step == 1:\r\n\r\n if topcenter_x > 250 or topcenter_x < 230:\r\n if topcenter_x > 250:\r\n print(\"843L 微微右移,\", topcenter_x)\r\n action_append(\"Right3move\")\r\n elif topcenter_x < 230:\r\n print(\"846L 微微左移,\", topcenter_x)\r\n action_append(\"Left3move\")\r\n\r\n else:\r\n print(\"850L 位置合适\")\r\n break\r\n\r\n\r\n# 找到两个门的轮廓\r\ndef find_two(list):\r\n List_new = []\r\n a, b = (list[0][0], list[1][0]) if list[0][0] > list[1][0] else (list[1][0], list[0][0])\r\n for i in range(2, len(list)):\r\n if list[i][0] > list[0][0]:\r\n b = a\r\n a = list[i]\r\n elif list[i][0] > list[1][0]:\r\n b = list[i]\r\n List_new.append(a)\r\n List_new.append(b)\r\n\r\n return List_new\r\n\r\n\r\n# ###################### 过 独 木 桥-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ndef Greenbridge(colorMask):\r\n global state_sel, org_img, step, reset, skip, debug, chest_ret\r\n\r\n r_w = chest_r_width\r\n r_h = chest_r_height\r\n\r\n step = 0\r\n state = 6\r\n\r\n 
print(\"/-/-/-/-/-/-/-/-/-进入Greenbridge\")\r\n\r\n while (state == 6): # 初始化\r\n\r\n # 开始处理图像\r\n chest_copy = np.rot90(ChestOrg_img)\r\n\r\n chest_copy = chest_copy.copy()\r\n # chest\r\n cv2.rectangle(chest_copy, (0, 0), (480, 150), (255, 255, 255), -1)\r\n border = cv2.copyMakeBorder(chest_copy, 12, 12, 16, 16, borderType=cv2.BORDER_CONSTANT,\r\n value=(255, 255, 255)) # 扩展白边,防止边界无法识别\r\n Chest_img_copy = cv2.resize(border, (r_w, r_h), interpolation=cv2.INTER_CUBIC) # 将图片缩放\r\n\r\n Chest_frame_gauss = cv2.GaussianBlur(Chest_img_copy, (3, 3), 0) # 高斯模糊\r\n Chest_frame_hsv = cv2.cvtColor(Chest_frame_gauss, cv2.COLOR_BGR2HSV) # 将图片转换到HSV空间\r\n Chest_frame_green = cv2.inRange(Chest_frame_hsv, color_range[colorMask][0],\r\n color_range[colorMask][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n if img_debug:\r\n cv2.imshow(\"mask\", Chest_frame_green)\r\n Chest_opened = cv2.morphologyEx(Chest_frame_green, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8)) # 开运算 去噪点\r\n Chest_closed = cv2.morphologyEx(Chest_opened, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)) # 闭运算 封闭连接\r\n\r\n _, Chest_contours, hierarchy = cv2.findContours(Chest_closed, cv2.RETR_LIST,\r\n cv2.CHAIN_APPROX_NONE) # 找出轮廓cv2.CHAIN_APPROX_NONE\r\n # print(\"Chest_contours len:\",len(Chest_contours))\r\n Chest_areaMaxContour, Chest_area_max = getAreaMaxContour1(Chest_contours) # 找出最大轮廓\r\n Chest_percent = round(Chest_area_max * 100 / (r_w * r_h), 2)\r\n\r\n if Chest_areaMaxContour is not None:\r\n found = 1\r\n Chest_rect = cv2.minAreaRect(Chest_areaMaxContour)\r\n # center, w_h, Head_angle = rect # 中心点 宽高 旋转角度\r\n Chest_box = np.int0(cv2.boxPoints(Chest_rect)) # 点的坐标\r\n\r\n # 初始化四个顶点坐标\r\n Chest_top_left = Chest_areaMaxContour[0][0]\r\n Chest_top_right = Chest_areaMaxContour[0][0]\r\n Chest_bottom_left = Chest_areaMaxContour[0][0]\r\n Chest_bottom_right = Chest_areaMaxContour[0][0]\r\n for c in Chest_areaMaxContour: # 遍历找到四个顶点\r\n if c[0][0] + 1.5 * c[0][1] < Chest_top_left[0] + 1.5 * Chest_top_left[1]:\r\n Chest_top_left = c[0]\r\n if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right[0]) + 1.5 * Chest_top_right[1]:\r\n Chest_top_right = c[0]\r\n if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left[0] + 1.5 * (r_h - Chest_bottom_left[1]):\r\n Chest_bottom_left = c[0]\r\n if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right[0] + 1.5 * Chest_bottom_right[1]:\r\n Chest_bottom_right = c[0]\r\n angle_top = math.atan(\r\n (Chest_top_right[1] - Chest_top_left[1]) / (Chest_top_right[0] - Chest_top_left[0])) * 180.0 / math.pi\r\n angle_bottom = math.atan((Chest_bottom_right[1] - Chest_bottom_left[1]) / (\r\n Chest_bottom_right[0] - Chest_bottom_left[0])) * 180.0 / math.pi\r\n Chest_top_center_x = int((Chest_top_right[0] + Chest_top_left[0]) / 2)\r\n Chest_top_center_y = int((Chest_top_right[1] + Chest_top_left[1]) / 2)\r\n Chest_bottom_center_x = int((Chest_bottom_right[0] + Chest_bottom_left[0]) / 2)\r\n Chest_bottom_center_y = int((Chest_bottom_right[1] + Chest_bottom_left[1]) / 2)\r\n Chest_center_x = int((Chest_top_center_x + Chest_bottom_center_x) / 2)\r\n Chest_center_y = int((Chest_top_center_y + Chest_bottom_center_y) / 2)\r\n if img_debug:\r\n cv2.drawContours(Chest_img_copy, [Chest_box], 0, (0, 0, 255), 2) # 将大矩形画在图上\r\n cv2.circle(Chest_img_copy, (Chest_top_right[0], Chest_top_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Chest_img_copy, (Chest_top_left[0], Chest_top_left[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Chest_img_copy, (Chest_bottom_right[0], Chest_bottom_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Chest_img_copy, (Chest_bottom_left[0], 
Chest_bottom_left[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y), 5, [0, 255, 255], 2)\r\n cv2.circle(Chest_img_copy, (Chest_bottom_center_x, Chest_bottom_center_y), 5, [0, 255, 255], 2)\r\n cv2.circle(Chest_img_copy, (Chest_center_x, Chest_center_y), 7, [255, 255, 255], 2)\r\n cv2.line(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y),\r\n (Chest_bottom_center_x, Chest_bottom_center_y), [0, 255, 255], 2) # 画出上下中点连线\r\n if math.fabs(Chest_top_center_x - Chest_bottom_center_x) <= 1: # 得到连线的角度\r\n Chest_angle = 90\r\n else:\r\n Chest_angle = - math.atan((Chest_top_center_y - Chest_bottom_center_y) / (\r\n Chest_top_center_x - Chest_bottom_center_x)) * 180.0 / math.pi\r\n else:\r\n Chest_angle = 90\r\n # center_x = 0.5*r_w\r\n Chest_center_x = -1\r\n Chest_bottom_center_x = -1\r\n Chest_bottom_center_y = -1\r\n Chest_top_center_x = -1\r\n Chest_top_center_y = -1\r\n\r\n angle_top = 90\r\n angle_bottom = 90\r\n found = 0\r\n\r\n # if step==0:\r\n # head_angle_dis()\r\n\r\n if img_debug:\r\n cv2.drawContours(Chest_img_copy, Chest_contours, -1, (255, 0, 255), 1)\r\n cv2.putText(Chest_img_copy, 'Chest_percent:' + str(Chest_percent) + '%', (30, 25),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Chest_img_copy, \"Chest_angle:\" + str(int(Chest_angle)), (30, 55), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Chest_img_copy, \"Chest_bottom_center(x,y): \" + str(int(Chest_bottom_center_x)) + \" , \" + str(\r\n int(Chest_bottom_center_y)), (30, 125), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Chest_img_copy,\r\n \"Chest_top_center(x,y): \" + str(int(Chest_top_center_x)) + \" , \" + str(int(Chest_top_center_y)),\r\n (30, 105), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Chest_img_copy, \"angle_top:\" + str(int(angle_top)), (30, 145), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Chest_img_copy, \"angle_bottom:\" + str(int(angle_bottom)), (30, 165), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Chest_img_copy, \"step :\" + str(int(step)), (30, 185), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),\r\n 2) # (0, 0, 255)BGR\r\n cv2.imshow('Chest_Camera', Chest_img_copy) # 显示图像\r\n # cv2.imshow('chest_green_mask', Chest_closed) # 显示图像\r\n cv2.waitKey(100)\r\n\r\n # 决策执行动作\r\n\r\n # step=0:\r\n # 前后:Chest_bottom_center_y\r\n # 转向:angle_bottom\r\n # 左右:Chest_bottom_center_x\r\n if step == 0: # 接近 看下边沿 角度 Chest_percent > 5\r\n if found == 0 or Chest_percent < 0.1:\r\n print(\"1000L step=0 什么也没有看到,向左转90° turn005L\")\r\n if real_test:\r\n action_append(\"turn001L\")\r\n time.sleep(sleep_time_s)\r\n\r\n elif Chest_percent > 16 and Chest_bottom_center_y > 460:\r\n print(\"1006L step=0, 上桥了\")\r\n step = 1\r\n\r\n elif angle_bottom > 5:\r\n if angle_bottom > 8:\r\n print(\"1011L step=0 大左转一下 > 8 turn001L angle_bottom={}\".format(angle_bottom))\r\n print(\r\n f\"right({Chest_bottom_right[0]},{Chest_bottom_right[1]}) left({Chest_bottom_left[0]},{Chest_bottom_left[1]})\")\r\n if real_test:\r\n action_append(\"turn001L\")\r\n time.sleep(sleep_time_s)\r\n # if Chest_bottom_center_x > 260 and Chest_bottom_center_y < 400:\r\n # print(\"1016L 再向右移一些 Right3move angle_bottom={}\".format(angle_bottom))\r\n # action_append(\"Right3move\")\r\n else:\r\n print(\"1019L step=0 小左转 turn000L angle_bottom={}\".format(angle_bottom))\r\n if real_test:\r\n 
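# turn000L is the fine left correction for small errors; the turn001L branch\r\n                    # above handles offsets larger than 8 degrees.\r\n                    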
action_append(\"turn000L\")\r\n # time.sleep(1)\r\n elif angle_bottom < -5:\r\n if angle_bottom < -8:\r\n print(\"1666L step=0 大右转一下 < -8 turn001R angle_bottom={}\".format(angle_bottom))\r\n print(\r\n f\"right({Chest_bottom_right[0]},{Chest_bottom_right[1]}) left({Chest_bottom_left[0]},{Chest_bottom_left[1]})\")\r\n if real_test:\r\n action_append(\"turn001R\")\r\n time.sleep(sleep_time_s)\r\n # if Chest_bottom_center_x < 200 and Chest_bottom_center_y < 400:\r\n # print(\"1030L 再向左移一些 Right3move angle_bottom={}\".format(angle_bottom))\r\n # action_append(\"Left3move\")\r\n else:\r\n print(\"1033L step=0 小右转 turn000R angle_bottom={}\".format(angle_bottom))\r\n if real_test:\r\n action_append(\"turn000R\")\r\n # time.sleep(1)\r\n\r\n elif Chest_bottom_center_x > 260: # 右移 center_x\r\n print(\"1039L 向右移 Right3move x>260 Chest_bottom_center_x={}\".format(Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"Right3move\")\r\n elif Chest_bottom_center_x < 200: # 左移 center_x\r\n print(\"1043L 向左移 Left3move x<200 Chest_bottom_center_x={}\".format(Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"Left3move\")\r\n\r\n elif Chest_bottom_center_y < 460:\r\n if Chest_bottom_center_y < 330 and 200 <= Chest_bottom_center_x <= 260:\r\n print(\r\n \"1049L y<350 step=0 快速前进 fastForward03 Chest_bottom_center_y={}\".format(Chest_bottom_center_y))\r\n if real_test:\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n action_append(\"fastForward03\")\r\n else:\r\n print(\"1053L y<460 step=0 大步前进 两步 Forwalk01 Chest_bottom_center_y={}\".format(Chest_bottom_center_y))\r\n if real_test:\r\n action_append(\"Forwalk01\")\r\n time.sleep(sleep_time_s)\r\n\r\n if angle_bottom >= 3:\r\n action_append(\"turn001L\")\r\n elif angle_bottom <= -3:\r\n action_append(\"turn001R\")\r\n\r\n action_append(\"Forwalk01\")\r\n time.sleep(sleep_time_l)\r\n if angle_bottom >= 3:\r\n action_append(\"turn001L\")\r\n elif angle_bottom <= -3:\r\n action_append(\"turn001R\")\r\n\r\n elif 220 <= Chest_bottom_center_x <= 240: # Chest_bottom_center_y < 450\r\n # print(\"1071L 前进两步 forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n print(\"1073L 快走333 fastForward03\")\r\n if real_test:\r\n time.sleep(sleep_time_s)\r\n action_append(\"Forwalk01\")\r\n action_append(\"turn001R\")\r\n if angle_bottom > 3:\r\n action_append(\"turn001L\")\r\n action_append(\"Left1move\")\r\n elif angle_bottom < -3:\r\n action_append(\"turn001R\")\r\n action_append(\"Right1move\")\r\n\r\n else:\r\n print(\"1086L step = 0 已经到达绿桥边缘,需要进入下一步对准绿桥\")\r\n step = 1\r\n # 260< Chest_bottom_center_y <460\r\n\r\n\r\n elif step == 1: # 到绿桥边沿,对准绿桥阶段\r\n if Chest_bottom_center_y > 565:\r\n print(\"1096L step = 1, 已经冲到第二阶段了\")\r\n step = 2\r\n elif angle_bottom > 2:\r\n if angle_bottom > 6:\r\n print(\"1100L 大左转一下 > 6 turn001L \", angle_bottom)\r\n if real_test:\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"1104L 小左转 turn000L \", angle_bottom)\r\n if real_test:\r\n action_append(\"turn000L\")\r\n # time.sleep(1)\r\n elif angle_bottom < -2:\r\n if angle_bottom < -6:\r\n print(\"1110L 大右转一下 < -6 turn001R \", angle_bottom)\r\n if real_test:\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"1114L 小右转 turn001R \", angle_bottom)\r\n if real_test:\r\n action_append(\"turn001R\")\r\n # time.sleep(1)\r\n elif Chest_bottom_center_x > 260: # 右移 center_x\r\n print(\"1119L 向右移 Right02move x>250\")\r\n if real_test:\r\n 
action_append(\"Right02move\")\r\n elif Chest_bottom_center_x < 220: # 左移 center_x\r\n print(\"1123L 向左移 Left02move x<220\")\r\n if real_test:\r\n action_append(\"Left02move\")\r\n else:\r\n print(\"1127L 对准 快走 Forwalk01\")\r\n if real_test:\r\n action_append(\"Forwalk01\")\r\n # action_append(\"turn001R\")\r\n\r\n\r\n\r\n elif step == 2: # 已经在独木桥阶段 行走独木桥 调整角度 位置 看中线 角度\r\n if Chest_percent > 2 and Chest_top_center_y > 360:\r\n print(\"1136L step = 2, 接近独木桥中点啦,进入第三阶段\")\r\n step = 3\r\n elif Chest_percent < 2:\r\n print(\"1139L step = 2, 接近独木桥终点啦,进入第四阶段\")\r\n action_append(\"fastForward03\")\r\n # action_append(\"fastForward03\")\r\n step = 4\r\n\r\n elif Chest_bottom_center_x >= 247: # 右移 center_x\r\n if Chest_bottom_center_x >= 280:\r\n print(\r\n \"1146L step =2 接近左边缘, 先往右大移 Right3move Chest_bottom_center_x={}\".format(Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"Right3move\")\r\n\r\n if Chest_bottom_center_x >= 300:\r\n print(\"1151L step = 2 可能是方向偏左了, 再往右转 turn001R Chest_bottom_center_x={}\".format(\r\n Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n elif Chest_bottom_center_x >= 260:\r\n print(\r\n \"1155L step =2 接近左边缘, 先往右移 Right02move Chest_bottom_center_x={}\".format(Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"Right02move\")\r\n else:\r\n print(\"1159L step =2 再向右小移 Right1move Chest_bottom_center_x={}\".format(Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"Right1move\")\r\n\r\n elif Chest_bottom_center_x <= 213: # 左移 center_x\r\n # print(\"1164L 向左移 Left02move <230 ,\", Chest_bottom_center_x)\r\n if Chest_bottom_center_x <= 180:\r\n print(\r\n \"1166L step =2 接近右边缘, 先往左大移 Left3move Chest_bottom_center_x={}\".format(Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"Left3move\")\r\n\r\n elif Chest_bottom_center_x <= 200:\r\n print(\r\n \"1171L step =2 接近右边缘, 先往左移 Left02move Chest_bottom_center_x={}\".format(Chest_bottom_center_x))\r\n if real_test:\r\n action_append(\"Left02move\")\r\n else:\r\n print(\"1175L step =2 再向左小移 Left1move Chest_bottom_center_x={} \".format(Chest_bottom_center_x))\r\n if real_test:\r\n # action_append(\"Left02move\")\r\n action_append(\"Left1move\")\r\n\r\n elif Chest_percent > 2 and Chest_top_center_y > 100:\r\n # 调整角度位置\r\n if 0 < Chest_angle < 88: # 右转\r\n if Chest_angle < 87:\r\n print(\"1184L step =2 向右转 turn001R Chest_angle:\", Chest_angle)\r\n if real_test:\r\n action_append(\"turn000R\")\r\n else:\r\n print(\"1188L step =2 向右小转 turn000R Chest_angle:\", Chest_angle)\r\n if real_test:\r\n action_append(\"turn000R\")\r\n # time.sleep(1) # timefftest\r\n elif -88 < Chest_angle < 0: # 左转\r\n if Chest_angle > -87:\r\n print(\"1194L step =2 向左转 turn001L Chest_angle:\", Chest_angle)\r\n if real_test:\r\n action_append(\"turn000L\")\r\n else:\r\n print(\"1198L step =2 向左小转 turn000L Chest_angle:\", Chest_angle)\r\n if real_test:\r\n action_append(\"turn000L\")\r\n # time.sleep(1) # timefftest\r\n\r\n\r\n else: # 走三步\r\n # print(\"337L 前进一步 forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n print(\"1207L step =2 上桥后,快走 fastForward03 Ccenter_y:\", Chest_center_x)\r\n if real_test:\r\n # action_append(\"fastForward03\")\r\n action_append(\"forwardSlow0403\")\r\n action_append(\"forwardSlow0403\")\r\n action_append(\"Stand\")\r\n action_append(\"forwardSlow0403\")\r\n action_append(\"Stand\")\r\n action_append(\"forwardSlow0403\")\r\n action_append(\"Stand\")\r\n # time.sleep(sleep_time_l)\r\n # action_append(\"turn001R\")\r\n\r\n # if abs(Chest_angle - 90) 
< 2:\r\n # print(\"暴走\")\r\n # action_append(\"fastForward03\")\r\n if 0 < Chest_angle < 87:\r\n print(\"1217L 歪了,右转, turn001R Chest_angle={}\".format(Chest_angle))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n elif -87 < Chest_angle < 0:\r\n print(\"1221L 歪了,左转, turn001L Chest_angle={}\".format(Chest_angle))\r\n if real_test:\r\n action_append(\"turn001L\")\r\n\r\n\r\n else:\r\n # print(\"341L 没有看到绿桥向前直行 forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n print(\"1229L 已经下桥\")\r\n step = 4\r\n\r\n\r\n elif step == 3: # 接近 看上边沿 调整角度 Chest_percent > 5\r\n if Chest_percent < 1 or Chest_top_center_y > 500:\r\n # print(\"1235L 接近桥终点 直行两步离开桥 forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"Stand\")\r\n\r\n print(\"1241L 接近桥终点 快走离开桥 fastForward03 * 2\")\r\n if real_test:\r\n # action_append(\"fastForward03\")\r\n action_append(\"forwardSlow0403\")\r\n # action_append(\"Stand\")\r\n action_append(\"forwardSlow0403\")\r\n # action_append(\"Stand\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"Stand\")\r\n\r\n step = 4\r\n\r\n elif Chest_top_center_x > 270: # 右移 center_x\r\n if Chest_top_center_x > 280:\r\n print(\"1255L step = 3 向右移一大步 Right02move\")\r\n if real_test:\r\n action_append(\"Right02move\")\r\n else:\r\n print(\"1259L 向右移 >270\")\r\n if real_test:\r\n action_append(\"Right1move\")\r\n elif Chest_top_center_x < 200: # 左移 center_x\r\n if Chest_top_center_x < 190:\r\n print(\"1264L step = 3 向左移一大步 Left02move\")\r\n if real_test:\r\n action_append(\"Left02move\")\r\n else:\r\n print(\"1268L 向左移 <210\")\r\n if real_test:\r\n action_append(\"Left1move\")\r\n\r\n elif angle_top > 5:\r\n if angle_top > 9:\r\n print(\"1274L 大左转一下 turn001L angle_top={}\".format(angle_top))\r\n if real_test:\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"1278L 左转 turn000L angle_top\")\r\n if real_test:\r\n action_append(\"turn000L\")\r\n elif angle_top < -5:\r\n if angle_top < -9:\r\n print(\"1283L 大右转一下 turn001R angle_top={}\".format(angle_top))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"1287L 右转 turn000R\")\r\n if real_test:\r\n action_append(\"turn000R\")\r\n elif 220 <= Chest_top_center_x <= 250:\r\n # print(\"1802L 前进一步 forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n print(\"1293L 快走 fastforwardstep\")\r\n if real_test:\r\n action_append(\"fastForward03\")\r\n\r\n\r\n elif step == 4: # 离开独木桥阶段 chest 出现bridge 依据chest调整角度位置\r\n print(\"1299L 离开桥\")\r\n\r\n print(\"1301L 过桥结束,step = -1 下一关 踢球\")\r\n step = 100\r\n\r\n print(\"--continue---\")\r\n break\r\n\r\n\r\n# ###################### 过 门-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ndoor_flag = True\r\nAngle = 0\r\nangle_top = 0\r\nBottom_center_y = 0\r\nBottom_center_x = 0\r\nTop_center_x = 0\r\nTop_center_y = 0\r\nTop_lenth = 0\r\ncamera_choice = \"Head\"\r\n\r\n\r\ndef door_act_move():\r\n global step, state, reset, skip\r\n global door_flag\r\n global real_test\r\n global camera_choice\r\n global Angle, angle_top, Bottom_center_y, Bottom_center_x, Top_center_y, Top_center_x, Top_lenth\r\n\r\n step0_far = 130\r\n step0_close = 24\r\n step0_angle_top_R = -8\r\n step0_angle_top_L = 8\r\n step0_top_center_x_L = 365\r\n step0_top_center_x_R = 315\r\n step0_delta = 30\r\n step0_turn_times = 3\r\n\r\n step1_angle_top_L = 3\r\n step1_angle_top_R = -3\r\n step1_head_bottom_x_F = 265\r\n step1_head_bottom_x_B = 305\r\n step1_delta = 30\r\n 
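# step1_close: Bottom_center_y threshold that decides the robot is flush with the\r\n    # door; past it, step 2 side-steps through without further visual feedback.\r\n    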
step1_close = 375\r\n\r\n step2_get_close = 5\r\n\r\n if step == 0: # 接近 看下边沿 角度 Chest_percent > 5\r\n if door_flag == False:\r\n print(\"1346L step=0 什么也没有看到,向左转45° turn005L\")\r\n if real_test:\r\n action_append(\"turn005L\")\r\n time.sleep(sleep_time_s)\r\n\r\n elif Top_center_y > 160:\r\n print(\"1352L step = 0 距离门很远, 快走靠近 fastForward03 Top_center_y={} > 150\".format(Top_center_y))\r\n if real_test:\r\n action_append(\"fast_forward_step\")\r\n action_append(\"turn001R\")\r\n action_append(\"fast_forward_step\")\r\n time.sleep(sleep_time_l)\r\n\r\n elif Top_center_y > step0_far:\r\n print(\"1360L step = 0 再往前一些,慢走 fast_forward_step Top_center_y={} > {}\".format(Top_center_y, step0_far))\r\n if real_test:\r\n action_append(\"fast_forward_step\")\r\n time.sleep(sleep_time_l)\r\n\r\n elif Top_center_y < step0_close:\r\n print(\"1366L step = 0 距离门很近了, 后退一点 Back3Run Top_center_y={} < {}\".format(Top_center_y, step0_close))\r\n if real_test:\r\n action_append(\"Back3Run\")\r\n time.sleep(sleep_time_l)\r\n\r\n elif angle_top < step0_angle_top_R:\r\n print(\"1372L step = 0 方向偏了, 向左转 turn001L angel_top = {} < {}\".format(angle_top, step0_angle_top_R))\r\n if real_test:\r\n action_append(\"turn001L\")\r\n\r\n elif angle_top > step0_angle_top_L:\r\n print(\"1377L step = 0 方向偏了, 向右转 turn001R angel_top = {} > {}\".format(angle_top, step0_angle_top_L))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n\r\n elif Top_center_x > step0_top_center_x_L:\r\n if Top_center_x > step0_top_center_x_L + step0_delta:\r\n print(\"1383L step = 0 站位很偏了, 向右移, Right3move Top_center_x = {} > {}\".format(Top_center_x,\r\n step0_top_center_x_L + step0_delta))\r\n if real_test:\r\n action_append(\"Right3move\")\r\n time.sleep(sleep_time_s)\r\n else:\r\n print(\"1388L step = 0 站位偏了, 向右移, Right2move Top_center_x = {} > {}\".format(Top_center_x,\r\n step0_top_center_x_L))\r\n if real_test:\r\n action_append(\"Right02move\")\r\n time.sleep(sleep_time_s)\r\n elif Top_center_x < step0_top_center_x_R:\r\n if Top_center_x < step0_top_center_x_R - step0_delta:\r\n print(\"1394L step = 0 站位很偏了, 向左移, Left3move Top_center_x = {} < {}\".format(Top_center_x,\r\n step0_top_center_x_R - step0_delta))\r\n if real_test:\r\n action_append(\"Left3move\")\r\n time.sleep(sleep_time_s)\r\n else:\r\n print(\"1399L step = 0 站位偏了, 向左移, Left02move Top_center_x = {} < {}\".format(Top_center_x,\r\n step0_top_center_x_R))\r\n if real_test:\r\n action_append(\"Left02move\")\r\n time.sleep(sleep_time_s)\r\n\r\n else:\r\n print(\"1405L 进入下一阶段, 调整侧身 turn005R x {} HeadTurn185\".format(step0_turn_times))\r\n # cv2.waitKey(0)\r\n if real_test:\r\n for i in range(0, step0_turn_times):\r\n action_append(\"turn005R\")\r\n time.sleep(sleep_time_l)\r\n\r\n # action_append(\"turn004R\")\r\n # action_append(\"turn001R\")\r\n # action_append(\"turn001R\")\r\n action_append(\"HeadTurn185\")\r\n time.sleep(sleep_time_l)\r\n step = 1\r\n\r\n elif step == 1:\r\n if Top_lenth < 100:\r\n print(\"1421L 歪了! 
左转, 再向右移\")\r\n if real_test:\r\n action_append(\"Back3Run\")\r\n action_append(\"Right02move\")\r\n\r\n elif angle_top > step1_angle_top_L or 0 < Angle < 85:\r\n print(\"1427L step = 1, 方向偏了, 向右转 turn000R angle_top={} > {}\".format(angle_top, step1_angle_top_L))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n time.sleep(sleep_time_l)\r\n elif angle_top < step1_angle_top_R or -85 < Angle < 0:\r\n print(\"1432L step = 1 方向偏了, 向左转 turn000L angle_top={} < {}\".format(angle_top, step1_angle_top_R))\r\n if real_test:\r\n action_append(\"turn001L\")\r\n time.sleep(sleep_time_l)\r\n\r\n elif Bottom_center_x < step1_head_bottom_x_F:\r\n if Bottom_center_x < step1_head_bottom_x_F - step1_delta:\r\n print(\"1439L step = 1 站位很靠前了,向后移 Back3Run Bottom_center_x={} < {}\".format(Bottom_center_x,\r\n step1_head_bottom_x_F - step1_delta))\r\n if real_test:\r\n action_append(\"Back3Run\")\r\n time.sleep(sleep_time_s)\r\n else:\r\n print(\"1444L step = 1 站位靠前了,向后移 Back3Run Bottom_center_x={} < {}\".format(Bottom_center_x,\r\n step1_head_bottom_x_F))\r\n if real_test:\r\n action_append(\"Back3Run\")\r\n time.sleep(sleep_time_s)\r\n\r\n elif Bottom_center_x > step1_head_bottom_x_B:\r\n if Bottom_center_x > step1_head_bottom_x_B + step1_delta:\r\n print(\"1451L step = 1 站位很靠后了,向前移 Forwalk01 Bottom_center_x={} > {}\".format(Bottom_center_x,\r\n step1_head_bottom_x_B + step1_delta))\r\n if real_test:\r\n action_append(\"Forwalk01\")\r\n time.sleep(sleep_time_s)\r\n else:\r\n print(\"1456L step = 1 站位靠后了,向前移 Forwalk01 Bottom_center_x={} > {}\".format(Bottom_center_x,\r\n step1_head_bottom_x_B))\r\n if real_test:\r\n action_append(\"Forwalk01\")\r\n time.sleep(sleep_time_s)\r\n\r\n elif Bottom_center_y < step1_close:\r\n print(\"1462L step = 1, 靠近门, Left3move Bottom_center_y={} < {}\".format(Bottom_center_y, step1_close))\r\n if real_test:\r\n action_append(\"Left3move\")\r\n time.sleep(sleep_time_l)\r\n\r\n elif Bottom_center_y > step1_close:\r\n print(\"1468L 已经接近门了,进入下一阶段,摸黑过门, Bottom_center_y = {} > {}\".format(Bottom_center_y, step1_close))\r\n step = 2\r\n\r\n elif step == 2:\r\n print(\"-------/////////////////过门 Left3move x 4\")\r\n # action_append(\"Back3Run\")\r\n for i in range(0, step2_get_close):\r\n if real_test:\r\n action_append(\"Left3move\")\r\n time.sleep(sleep_time_l)\r\n # print(\"向后退一点! Back3Run\")\r\n # if real_test:\r\n # action_append(\"Back3Run\")\r\n\r\n # cv2.waitKey(0)\r\n\r\n for i in range(0, 7):\r\n if real_test:\r\n action_append(\"Left3move\")\r\n if i == 3:\r\n action_append(\"turn001R\")\r\n action_append(\"turn001R\")\r\n time.sleep(sleep_time_l)\r\n\r\n # cv2.waitKey(0)\r\n\r\n print(\"完成! 
\")\r\n\r\n if real_test:\r\n for i in range(0, step0_turn_times):\r\n action_append(\"turn005L\")\r\n time.sleep(sleep_time_l)\r\n action_append(\"HeadTurnMM\")\r\n action_append(\"fast_forward_step\")\r\n\r\n state = -1\r\n\r\n\r\ndef into_the_door():\r\n global state_sel, org_img, step, reset, skip, debug, chest_ret, HeadOrg_img, state\r\n global door_flag\r\n global camera_choice\r\n global Angle, angle_top, Bottom_center_y, Bottom_center_x, Top_center_x, Top_center_y, Top_lenth\r\n step = 0\r\n state = 5\r\n\r\n r_w = chest_r_width\r\n r_h = chest_r_height\r\n\r\n print(\"/-/-/-/-/-/-/-/-/-开始过门\")\r\n\r\n while (state == 5):\r\n Area = []\r\n if camera_choice == \"Chest\":\r\n # print(\"胸部相机\")\r\n chest_OrgFrame = np.rot90(ChestOrg_img)\r\n Img_copy = chest_OrgFrame.copy()\r\n\r\n elif camera_choice == \"Head\":\r\n # print(\"头部相机\")\r\n Img_copy = HeadOrg_img.copy()\r\n # Img_copy = cv2.resize(border, (r_w, r_h), interpolation=cv2.INTER_CUBIC)\r\n # Img_copy = Head_OrgFrame\r\n\r\n Frame_gauss = cv2.GaussianBlur(Img_copy, (3, 3), 0) # 高斯模糊\r\n Frame_hsv = cv2.cvtColor(Frame_gauss, cv2.COLOR_BGR2HSV) # 将图片转换到HSV空间\r\n if camera_choice == \"Chest\":\r\n Frame_blue = cv2.inRange(Frame_hsv, color_range['chest_blue_door'][0],\r\n color_range['chest_blue_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n elif camera_choice == \"Head\":\r\n Frame_blue = cv2.inRange(Frame_hsv, color_range['head_blue_door'][0],\r\n color_range['head_blue_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n Opened = cv2.morphologyEx(Frame_blue, cv2.MORPH_OPEN, np.ones((1, 1), np.uint8)) # 开运算 去噪点\r\n Closed = cv2.morphologyEx(Opened, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8)) # 闭运算 封闭连接\r\n Closed = cv2.dilate(Closed, np.ones((5, 5), np.uint8), iterations=3)\r\n if img_debug:\r\n cv2.imshow(\"Imask\", Closed)\r\n\r\n _, contours, hierarchy = cv2.findContours(Closed, cv2.RETR_LIST,\r\n cv2.CHAIN_APPROX_NONE) # 找出轮廓cv2.CHAIN_APPROX_NONE\r\n\r\n if len(contours) == 0:\r\n print(\"没有找到门!\")\r\n door_flag = False\r\n\r\n else:\r\n door_flag = True\r\n for i in range(0, len(contours)):\r\n # print(\"len[Chest_contours]={}——i:{}\".format(len(Chest_contours), i))\r\n area = cv2.contourArea(contours[i])\r\n if 2000 < area < 640 * 480 * 0.45:\r\n Area.append((area, i))\r\n\r\n # print(\"area{} = {}\".format(i, area))\r\n # cv2.imshow(\"Processed\", Img_copy)\r\n # cv2.waitKey(0)\r\n # cv2.drawContours(Img_copy, contours, -1, (0, 0, 255), 1)\r\n\r\n AreaMaxContour, Area_max = getAreaMaxContour1(contours)\r\n\r\n if step != 2 and camera_choice == \"Head\":\r\n Rect = cv2.minAreaRect(AreaMaxContour)\r\n Box = np.int0(cv2.boxPoints(Rect))\r\n\r\n cv2.drawContours(Img_copy, [Box], -1, (255, 200, 100), 2)\r\n\r\n Top_left = AreaMaxContour[0][0]\r\n Top_right = AreaMaxContour[0][0]\r\n Bottom_left = AreaMaxContour[0][0]\r\n Bottom_right = AreaMaxContour[0][0]\r\n for c in AreaMaxContour: # 遍历找到四个顶点\r\n if c[0][0] + 1.5 * c[0][1] < Top_left[0] + 1.5 * Top_left[1]:\r\n Top_left = c[0]\r\n if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Top_right[0]) + 1.5 * Top_right[1]:\r\n Top_right = c[0]\r\n if c[0][0] + 1.5 * (r_h - c[0][1]) < Bottom_left[0] + 1.5 * (r_h - Bottom_left[1]):\r\n Bottom_left = c[0]\r\n if c[0][0] + 1.5 * c[0][1] > Bottom_right[0] + 1.5 * Bottom_right[1]:\r\n Bottom_right = c[0]\r\n\r\n angle_top = - math.atan(\r\n (Top_right[1] - Top_left[1]) / (Top_right[0] - Top_left[0])) * 180.0 / math.pi\r\n\r\n Top_lenth = abs(Top_right[0] - Top_left[0])\r\n Top_center_x = int((Top_right[0] + Top_left[0]) / 2)\r\n Top_center_y = int((Top_right[1] + 
Top_left[1]) / 2)\r\n Bottom_center_x = int((Bottom_right[0] + Bottom_left[0]) / 2)\r\n Bottom_center_y = int((Bottom_right[1] + Bottom_left[1]) / 2)\r\n\r\n cv2.circle(Img_copy, (Top_right[0], Top_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Img_copy, (Top_left[0], Top_left[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Img_copy, (Bottom_right[0], Bottom_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Img_copy, (Bottom_left[0], Bottom_left[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(Img_copy, (Top_center_x, Top_center_y), 5, [0, 255, 255], 2)\r\n cv2.circle(Img_copy, (Bottom_center_x, Bottom_center_y), 5, [0, 255, 255], 2)\r\n cv2.line(Img_copy, (Top_center_x, Top_center_y),\r\n (Bottom_center_x, Bottom_center_y), [0, 255, 255], 2) # 画出上下中点连线\r\n\r\n if math.fabs(Top_center_x - Bottom_center_x) <= 1: # 得到连线的角度\r\n Angle = 90\r\n else:\r\n Angle = - math.atan((Top_center_y - Bottom_center_y) / (\r\n Top_center_x - Bottom_center_x)) * 180.0 / math.pi\r\n\r\n if img_debug:\r\n cv2.putText(Img_copy, \"angle_top:\" + str(int(angle_top)), (30, 425), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 255), 2)\r\n cv2.putText(Img_copy, \"Head_bottom_center(x,y): \" + str(int(Bottom_center_x)) + \" , \" + str(\r\n int(Bottom_center_y)), (30, 450), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255),\r\n 2) # (0, 0, 255)BGR\r\n cv2.putText(Img_copy,\r\n \"Head_top_center(x,y): \" + str(int(Top_center_x)) + \" , \" + str(int(Top_center_y)),\r\n (30, 470), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2) # (0, 0, 255)BGR\r\n cv2.putText(Img_copy, \"Angle:\" + str(int(Angle)), (30, 20), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 255), 2) # (0, 0, 255)BGR\r\n cv2.putText(Img_copy, \"Top_lenth:\" + str(int(Top_lenth)), (400, 20), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 255), 2) # (0, 0, 255)BGR\r\n\r\n if img_debug:\r\n cv2.imshow(\"Processed\", Img_copy)\r\n cv2.waitKey(10)\r\n\r\n door_act_move()\r\n print(\"state={}\".format(state))\r\n\r\n # if len(Area) > 2:\r\n # Area = find_two(Area)\r\n\r\n # elif len(Area) < 2:\r\n # door_found = False\r\n # print(\"没有发现门框,调用头部相机\")\r\n # camera_choice = \"Head\"\r\n # # cv2.drawContours(Img_copy, Chest_contours[Area[0][1]], -1, (0, 0, 255), 1)\r\n\r\n # if len(Area) == 2:\r\n # door_found = True\r\n # Chest_rect1 = cv2.minAreaRect(Chest_contours[Area[0][1]])\r\n # Chest_box1 = np.int0(cv2.boxPoints(Chest_rect1))\r\n # Chest_rect2 = cv2.minAreaRect(Chest_contours[Area[1][1]])\r\n # Chest_box2 = np.int0(cv2.boxPoints(Chest_rect2))\r\n\r\n # cv2.drawContours(Img_copy, [Chest_box1], -1, (255, 200, 100), 2)\r\n # cv2.drawContours(Img_copy, [Chest_box2], -1, (255, 200, 100), 2)\r\n\r\n # Chest_top_left1 = Chest_contours[Area[0][1]][0][0]\r\n # Chest_top_right1 = Chest_contours[Area[0][1]][0][0]\r\n # Chest_bottom_left1 = Chest_contours[Area[0][1]][0][0]\r\n # Chest_bottom_right1 = Chest_contours[Area[0][1]][0][0]\r\n # for c in Chest_contours[Area[0][1]]: # 遍历找到四个顶点\r\n # if c[0][0] + 1.5 * c[0][1] < Chest_top_left1[0] + 1.5 * Chest_top_left1[1]:\r\n # Chest_top_left1 = c[0]\r\n # if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right1[0]) + 1.5 * Chest_top_right1[1]:\r\n # Chest_top_right1 = c[0]\r\n # if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left1[0] + 1.5 * (r_h - Chest_bottom_left1[1]):\r\n # Chest_bottom_left1 = c[0]\r\n # if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right1[0] + 1.5 * Chest_bottom_right1[1]:\r\n # Chest_bottom_right1 = c[0]\r\n # cv2.circle(Img_copy, (Chest_top_right1[0], Chest_top_right1[1]), 5, [0, 255, 255], 2)\r\n # cv2.circle(Img_copy, 
(Chest_top_left1[0], Chest_top_left1[1]), 5, [0, 255, 255], 2)\r\n # cv2.circle(Img_copy, (Chest_bottom_right1[0], Chest_bottom_right1[1]), 5, [0, 255, 255], 2)\r\n # cv2.circle(Img_copy, (Chest_bottom_left1[0], Chest_bottom_left1[1]), 5, [0, 255, 255], 2)\r\n # angle_Right = - math.atan(\r\n # (Chest_top_left1[1] - Chest_bottom_left1[1]) / (Chest_top_left1[0] - Chest_bottom_left1[0])) * 180.0 / math.pi\r\n\r\n # Chest_top_left2 = Chest_contours[Area[1][1]][0][0]\r\n # Chest_top_right2 = Chest_contours[Area[1][1]][0][0]\r\n # Chest_bottom_left2 = Chest_contours[Area[1][1]][0][0]\r\n # Chest_bottom_right2 = Chest_contours[Area[1][1]][0][0]\r\n # for c in Chest_contours[Area[1][1]]: # 遍历找到四个顶点\r\n # if c[0][0] + 1.5 * c[0][1] < Chest_top_left2[0] + 1.5 * Chest_top_left2[1]:\r\n # Chest_top_left2 = c[0]\r\n # if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right2[0]) + 1.5 * Chest_top_right2[1]:\r\n # Chest_top_right2 = c[0]\r\n # if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left2[0] + 1.5 * (r_h - Chest_bottom_left2[1]):\r\n # Chest_bottom_left2 = c[0]\r\n # if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right2[0] + 1.5 * Chest_bottom_right2[1]:\r\n # Chest_bottom_right2 = c[0]\r\n # cv2.circle(Img_copy, (Chest_top_right2[0], Chest_top_right2[1]), 5, [0, 255, 255], 2)\r\n # cv2.circle(Img_copy, (Chest_top_left2[0], Chest_top_left2[1]), 5, [0, 255, 255], 2)\r\n # cv2.circle(Img_copy, (Chest_bottom_right2[0], Chest_bottom_right2[1]), 5, [0, 255, 255], 2)\r\n # cv2.circle(Img_copy, (Chest_bottom_left2[0], Chest_bottom_left2[1]), 5, [0, 255, 255], 2)\r\n # angle_Left = - math.atan(\r\n # (Chest_top_right2[1] - Chest_bottom_right2[1]) / (Chest_top_right2[0] - Chest_bottom_right2[0])) * 180.0 / math.pi\r\n\r\n # Chest_top_center_x = int((Chest_top_right2[0] + Chest_top_left1[0]) / 2)\r\n # Chest_top_center_y = int((Chest_top_right2[1] + Chest_top_left1[1]) / 2)\r\n # cv2.circle(Img_copy, (Chest_top_center_x, Chest_top_center_y), 5, [0, 255, 255], 2)\r\n\r\n # cv2.putText(Img_copy, \"Chest_top_center(x,y): \" + str(int(Chest_top_center_x)) + \" , \" + str(\r\n # int(Chest_top_center_y)), (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n # cv2.putText(Img_copy, \"angle_left:\" + str(int(angle_Left)), (30, 425), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n # (0, 0, 255), 2) # (0, 0, 255)BGR\r\n # cv2.putText(Img_copy, \"angle_right:\" + str(int(angle_Right)), (30, 460), cv2.FONT_HERSHEY_SIMPLEX,\r\n # 0.65, (0, 0, 255), 2)\r\n\r\n\r\n# ###################### 踢 球-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ngolf_angle_ball = 90\r\nChest_ball_angle = 90\r\nhole_Angle = 45\r\ngolf_angle = 0\r\nball_x = 0\r\nball_y = 0\r\ngolf_angle_flag = False\r\ngolf_dis_start = True\r\ngolf_angle_start = False\r\ngolf_ok = False\r\nhole_flag = False\r\nChest_ball_flag = False\r\nChest_golf_angle = 0\r\n\r\nball_dis_start = True\r\nhole_angle_start = False\r\n\r\nhead_state = 0 # 90 ~ -90 左+90 右-90\r\n\r\nhole_x = 0\r\nhole_y = 0\r\njump_count = 0\r\ncount = 0\r\nangle_dis_count = 0\r\npicnum = 0\r\nfast_run = True\r\n\r\n\r\n###################################################踢球决策\r\ndef kick_act_move():\r\n global step, state, reset, skip\r\n global hole_Angle, ball_hole\r\n global golf_angle_ball, golf_angle, Chest_ball_angle, Chest_golf_angle\r\n global ball_x, ball_y, Chest_ball_x, Chest_ball_y\r\n global golf_angle_flag, golf_dis_flag # golf_dis_flag未使用\r\n global golf_angle_start\r\n global golf_ok\r\n global hole_flag, Chest_ball_flag\r\n global ball_dis_start, hole_angle_start\r\n 
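 # (editorial summary of the decision steps below)\r\n #  step -1 : walk clear of the previous section and square up\r\n #  step 0  : find the red ball and close in (fast_run first, then fine adjustment)\r\n #  step 1  : fine-tune the distance to the ball and look for the hole\r\n #  step 4/5: side-step and turn until ball and hole are roughly, then finely, in line\r\n #  step 6/7: final check of Chest_ball_angle against hole_Angle, last small shuffles\r\n #  step 8  : kick (LfootShot); step 9: turn away and leave the area\r\n #  if the ball stays lost (jump_count > 20), most states bail out to step 9\r\n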
global head_state, angle_dis_count, fast_run\r\n global count\r\n global jump_count\r\n ball_hole_angle_ok = False\r\n\r\n # 由脚底到红球延伸出一条射线,依据球洞与该射线的关系,调整机器人位置\r\n # ball_hole_local()\r\n # jump_out=0\r\n if True:\r\n if step == -1:\r\n # for i in range(0, 2):\r\n # if edge_angle('red_floor') == 1:\r\n # action_append(\"Forwalk01\")\r\n\r\n # action_append(\"HeadTurnMM\")\r\n # action_append(\"Right3move\")\r\n # action_append(\"Right3move\")\r\n action_append(\"Stand\")\r\n action_append(\"Forwalk01\")\r\n\r\n action_append(\"Stand\")\r\n action_append(\"turn001L\")\r\n # action_append(\"Forwalk01\")\r\n # action_append(\"turn001L\")\r\n action_append(\"Forwalk01\")\r\n action_append(\"Forwalk01\")\r\n action_append(\"turn001R\")\r\n step = 0\r\n # step = 0 # 单步调试某一步骤用\r\n elif step == 0: # 发现球,发现球洞,记录球与球洞的相对位置\r\n # print(\"看黑线调整居中\")\r\n if Chest_ball_flag is True: # 前进到球跟前\r\n if fast_run:\r\n if Chest_ball_y <= 320: # 340\r\n print(\"2002L step = 0 看到了球,距离很远, 快走前进 fastForward04 Chest_ball_y={} < 320\".format(Chest_ball_y))\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n if real_test:\r\n action_append(\"Forwalk01\")\r\n action_append(\"turn001R\")\r\n time.sleep(sleep_time_l)\r\n # head_angle_dis() # headfftest\r\n elif Chest_ball_y <= 400: # 340\r\n print(\"2012L 看到了球,距离远, 快走前进 Forwalk01 Chest_ball_y={} < 290\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Forwalk01\")\r\n time.sleep(sleep_time_l)\r\n # head_angle_dis() # headfftest\r\n\r\n if hole_flag:\r\n if 45 < hole_Angle < 70:\r\n print(\r\n \"2020L step = 0,看到球门了,向球靠近的过程中,也要对准球门,以免看不到球门了 Hole_angle={}, 因此需要向右转 turn001R\".format(\r\n hole_Angle))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n elif -70 < hole_Angle < -45:\r\n print(\r\n \"2024L step = 0,看到球门了,向球靠近的过程中,也要对准球门,以免看不到球门了 Hole_angle={}, 因此需要向左转 turn001L\".format(\r\n hole_Angle))\r\n if real_test:\r\n action_append(\"turn001L\")\r\n\r\n if Chest_ball_y > 370:\r\n print(\"2029L 已接近球了,不能再跑这么快了,进入细调模式(ball_y > 270) Chest_ball_y={}\".format(Chest_ball_y))\r\n fast_run = False\r\n\r\n else:\r\n if Chest_ball_y < 370: # 390 400改成了390 zzx 10.14\r\n # X\r\n if Chest_ball_x < 140*(4/3): # 240 - 100\r\n print(\"2036L step = 0 Chest_ball_x < 180 左侧移 Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Left3move\")\r\n elif Chest_ball_x > 340*(4/3): # 240 + 100\r\n print(\"2040L step = 0 Chest_ball_x > 300 右侧移 Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Right3move\")\r\n else:\r\n if Chest_ball_y < 370:\r\n print(\"2045L step = 0 再靠球近些(ball_y < 370)前挪一点 forwalkVeryslow Chest_ball_y={}\".format(\r\n Chest_ball_y))\r\n if real_test:\r\n # action_append(\"Forwalk01\") #zzx 10.14\r\n action_append(\"forwalkVeryslow\")\r\n action_append(\"turn001R\")\r\n # action_append(\"Forwalk02\")\r\n else:\r\n print(\"2052L step = 0 再靠球近些(ball_y < 360)前挪一点点 Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n action_append(\"Stand\")\r\n\r\n elif Chest_ball_y > 430: # 470改成了430 zzx 10.14\r\n print(\"2058L step = 0 隔球太近(ball_y < 360)后退一点点 Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n\r\n elif Chest_ball_y >= 400 and Chest_ball_y <= 450: # Chest_ball_y>360\r\n print(\"2063L goto step1 Chest_ball_y={}\".format(Chest_ball_y))\r\n step = 1\r\n else:\r\n print(\"未发现球 寻找球\")\r\n count += 1\r\n if real_test:\r\n if count > 5:\r\n action_append(\"turn001R\") # 
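 # fftest\r\n # (editorial note) ball-search fallback: while the ball is not detected the robot\r\n # alternates small right turns and very slow steps; kick_ball() bumps jump_count on\r\n # every frame without a ball, and once jump_count > 20 the routine gives up and\r\n # jumps to step 9 (leave the area).\r\n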
action_append(\"Stand\")\r\n # elif count < 1:\r\n # action_append(\"Forwalk01\")\r\n else:\r\n # action_append(\"Forwalk01\") zzx 10.13\r\n action_append(\"forwalkVeryslow\")\r\n action_append(\"turn001R\")\r\n if jump_count > 20:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n step = 9\r\n\r\n elif step == 1: # watch the ball, adjust position, and step forward until the hole is visible\r\n if jump_count <= 20:\r\n if Chest_ball_flag is False:\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n\r\n elif Chest_ball_y <= 400:\r\n print(\"2094L step = 1 inch forward forwalkVeryslow <= 400 Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n elif Chest_ball_y > 450:\r\n print(\"2098L step = 1 one step back Back3Run > 450 Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n elif 400 < Chest_ball_y <= 450:\r\n if hole_flag == True:\r\n if head_state == -60:\r\n print(\"head looking right, hole seen\")\r\n step = 0\r\n elif head_state == 60:\r\n print(\"head looking left, hole seen\")\r\n step = 0\r\n elif head_state == 0: # head looking forward, hole seen\r\n print(\"2115L step4\")\r\n step = 4\r\n else:\r\n print(\"2118L error: sweep the head left/right to look for the hole\")\r\n if real_test:\r\n action_append(\"turn001R\")\r\n else:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n step = 9\r\n\r\n elif step == 4: # coarse alignment: get the ball and the hole roughly on one line\r\n if jump_count <= 20:\r\n if ball_dis_start:\r\n # NB: in the two branches below the inner threshold equals the outer one, so the\r\n # small-step path only fires on exact equality (left) or never (right).\r\n if Chest_ball_x <= int(190 * (4 / 3)):\r\n if Chest_ball_x < int(190 * (4 / 3)):\r\n print(\"2199L4 step = 4 need to move left Left3move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Left3move\")\r\n action_append(\"Stand\")\r\n else:\r\n print(\"2203L4 step = 4 need to move left Left02move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Left02move\")\r\n action_append(\"Stand\")\r\n angle_dis_count = 0\r\n elif Chest_ball_x > 290 * (4 / 3):\r\n if Chest_ball_x > 290 * (4 / 3):\r\n print(\"2209L4 step = 4 need to move right Right3move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Right3move\")\r\n action_append(\"Stand\")\r\n else:\r\n print(\"2213L4 step = 4 need to move right Right02move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Right02move\")\r\n action_append(\"Stand\")\r\n angle_dis_count = 0\r\n else:\r\n print(\"2218L4 Chest_ball_x---position ok\")\r\n ball_dis_start = False\r\n hole_angle_start = True\r\n if hole_angle_start:\r\n if hole_Angle <= 0:\r\n # angle\r\n if hole_Angle > -67:\r\n if hole_Angle >= -65:\r\n if Chest_ball_y > 470:\r\n print(\"2227L4 need to shuffle back a little Back3Run Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n angle_dis_count = 0\r\n elif Chest_ball_y < 410:\r\n print(\"2232L4 need to shuffle forward a little forwalkVeryslow Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n angle_dis_count = 0\r\n\r\n print(\"2237L4 big left turn turn003L 
hole_Angle={}\".format(hole_Angle))\r\n if real_test:\r\n action_append(\"turn003L\")\r\n else:\r\n if Chest_ball_y > 470:\r\n print(\"2242L4 需要后挪一点 Back3Run Chest_ball_y\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n angle_dis_count = 0\r\n elif Chest_ball_y < 410:\r\n print(\"2247L4 需要前挪一点 forwalkVeryslow Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n angle_dis_count = 0\r\n\r\n print(\"2252L4 左转一下 turn001L hole_Angle={}\".format(hole_Angle))\r\n if real_test:\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"2256L4 hole_Angle---角度ok\")\r\n angle_dis_count = angle_dis_count + 1\r\n ball_dis_start = True\r\n hole_angle_start = False\r\n\r\n # ball_dis_start = True\r\n # hole_angle_start = False\r\n if hole_Angle > 0:\r\n # angle\r\n if hole_Angle < 67:\r\n if hole_Angle <= 65:\r\n if Chest_ball_y > 470:\r\n print(\"2268L4 需要后挪一点 Back3Run Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n angle_dis_count = 0\r\n elif Chest_ball_y < 410:\r\n print(\"2273L4 需要前挪一点 forwalkVeryslow Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n angle_dis_count = 0\r\n\r\n print(\"2278L4 大右转一下 turn001R hole_Angle={}\".format(hole_Angle))\r\n action_append(\"turn001R\") # turn003R 改成了 turn001R zzx 10.14\r\n else:\r\n if Chest_ball_y > 470:\r\n print(\"2282L4 需要后挪一点 Back3Run Chest_ball_y={}\".format(Chest_ball_y))\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n angle_dis_count = 0\r\n elif Chest_ball_y < 410:\r\n print(\"2286L4 需要前挪一点 forwalkVeryslow Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n angle_dis_count = 0\r\n\r\n print(\"2291L4 右转一下 turn001R \", hole_Angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"2294L4 hole_Angle---角度OK\")\r\n angle_dis_count = angle_dis_count + 1\r\n ball_dis_start = True\r\n hole_angle_start = False\r\n\r\n # ball_dis_start = True\r\n # hole_angle_start = False\r\n\r\n if angle_dis_count > 0:\r\n angle_dis_count = 0\r\n print(\"2304L step step 5555\")\r\n step = 5\r\n else:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n step = 9\r\n\r\n\r\n elif step == 5: # 调整 球与球洞在一条直线 球范围 230 260 * (4 / 3):\r\n # if Chest_ball_x - 240 > 40:\r\n # print(\"2328L 需要右侧移 Right02move\")\r\n # action_append(\"Right02move\")\r\n # else:\r\n if Chest_ball_x > 270 * (4 / 3):\r\n print(\"2332L 需要右侧移 Right02move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Right02move\")\r\n else:\r\n print(\"2336L 需要右侧移一点 Right1move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Right1move\")\r\n angle_dis_count = 0\r\n\r\n else:\r\n print(\"2341L Chest_ball_y---位置ok\")\r\n ball_dis_start = False\r\n hole_angle_start = True\r\n\r\n if hole_angle_start:\r\n if hole_Angle < 0:\r\n # angle\r\n if hole_Angle > -70:\r\n # y\r\n if Chest_ball_y > 515:\r\n print(\"2350L 需要后挪一点 Back3Run Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n angle_dis_count = 0\r\n elif Chest_ball_y < 460:\r\n print(\"2355L 需要前挪一点 forwalkVeryslow Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n angle_dis_count = 0\r\n\r\n if 
hole_Angle >= -65:\r\n print(\"2361L 大左转一下 turn001L hole_Angle={}\".format(hole_Angle))\r\n if real_test:\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"2365L 左转一下 turn001L \", hole_Angle)\r\n if real_test:\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"2369L hole_Angle---角度ok\")\r\n angle_dis_count = angle_dis_count + 1\r\n\r\n ball_dis_start = True\r\n hole_angle_start = False\r\n if hole_Angle > 0:\r\n # angle\r\n if hole_Angle < 70:\r\n # y\r\n if Chest_ball_y > 500:\r\n print(\"2379L 需要后挪一点 Back3Run Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n angle_dis_count = 0\r\n elif Chest_ball_y < 460:\r\n print(\"2384L 需要前挪一点 forwalkVeryslow Chest_ball_y={}\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n angle_dis_count = 0\r\n\r\n if hole_Angle <= 65:\r\n print(\"2390L 大右转一下 turn001R hole_Angle={}\".format(hole_Angle))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"2394L 右转一下 turn001R hole_Angle={}\".format(hole_Angle))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"2398L hole_Angle---角度OK\")\r\n angle_dis_count = angle_dis_count + 1\r\n\r\n ball_dis_start = True\r\n hole_angle_start = False\r\n\r\n if angle_dis_count > 0:\r\n angle_dis_count = 0\r\n step = 6\r\n else:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n step = 9\r\n\r\n\r\n elif step == 6:\r\n if jump_count<=20:\r\n # print(\"666\")\r\n if Chest_ball_angle > 75 and hole_Angle > 75:\r\n ball_hole_angle_ok = True\r\n if Chest_ball_angle < -75 and hole_Angle > 75:\r\n ball_hole_angle_ok = True\r\n if Chest_ball_angle < -75 and hole_Angle < -75:\r\n ball_hole_angle_ok = True\r\n if Chest_ball_angle > 75 and hole_Angle < -75:\r\n ball_hole_angle_ok = True\r\n\r\n if Chest_ball_angle > 73 and hole_Angle > 73 and ball_hole_angle_ok == False:\r\n print(\"2421L 右转一点点 turn001R\")\r\n if real_test:\r\n action_append(\"turn001R\")\r\n elif Chest_ball_angle < -73 and hole_Angle < -73 and ball_hole_angle_ok == False:\r\n print(\"2425L 左转一点点 turn001L\")\r\n if real_test:\r\n action_append(\"turn001L\")\r\n elif Chest_ball_y <= 450:\r\n print(\"2429L 向前挪动一点点 forwalkVeryslow\")\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n # action_append(\"turn001R\")\r\n\r\n elif hole_x > 250 * (4 / 3):\r\n print(\"step = 6 方向偏左了, 往右转 turn001R hole_x={}\".format(hole_x))\r\n if real_test:\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"/////////////////////////////next step 进入最后对准阶段 step=7\")\r\n step = 7\r\n else:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n step = 9\r\n\r\n\r\n elif step == 7:\r\n if jump_count<=20:\r\n if Chest_ball_y > 500:\r\n print(\"2444L 靠太近了,向后挪动一点点 Back0Run Chest_ball_y={} > 500\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n\r\n # elif 80 < Chest_ball_angle < 85:\r\n # print(\"2449L 右转一点点 turn000R\")\r\n # if real_test:\r\n # action_append(\"turn000R\")\r\n\r\n elif Chest_ball_x > 203 * (4 / 3): # 210\r\n if Chest_ball_x > 220 * (4 / 3):\r\n print(\"2455L step = 7 向右移动 Right02move Chest_ball_x={} > 200\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Right02move\")\r\n time.sleep(sleep_time_s)\r\n else:\r\n print(\"2460L step = 7 向右移动一点点 
Right1move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Right1move\")\r\n time.sleep(sleep_time_s)\r\n elif Chest_ball_x < 180 * (4 / 3):\r\n if Chest_ball_x < 175 * (4 / 3):\r\n print(\"2466L step = 7 move left Left02move Chest_ball_x={}\".format(Chest_ball_x))\r\n if real_test:\r\n action_append(\"Left02move\")\r\n time.sleep(sleep_time_s)\r\n else:\r\n print(\"2471L move left Left1move\")\r\n if real_test:\r\n action_append(\"Left1move\")\r\n time.sleep(sleep_time_s)\r\n elif Chest_ball_y < 470:\r\n print(\"2476L step = 7 inch forward forwalkVeryslow Chest_ball_y={} < 470\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n time.sleep(sleep_time_l)\r\n action_append(\"turn000R\")\r\n\r\n else:\r\n print(\"2490L ready for the kick stage LfootShot\")\r\n step = 8\r\n else:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n step = 9\r\n\r\n\r\n elif step == 8:\r\n if jump_count > 20:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n step = 9\r\n else:\r\n if real_test:\r\n if Chest_ball_y > 500:\r\n print(\"step back\")\r\n action_append(\"Stand\")\r\n action_append(\"Back3Run\")\r\n\r\n elif Chest_ball_angle > 80 or Chest_ball_angle <= 0:\r\n if Chest_ball_angle <= 0 or Chest_ball_angle > 82:\r\n print(\"big step right\")\r\n action_append(\"Right02move\")\r\n else:\r\n print(\"small step right\")\r\n action_append(\"Right1move\")\r\n\r\n elif Chest_ball_angle < 75:\r\n if Chest_ball_angle < 73:\r\n print(\"big step left\")\r\n action_append(\"Left02move\")\r\n else:\r\n print(\"small step left\")\r\n action_append(\"Left1move\")\r\n\r\n elif hole_Angle < -82 or hole_Angle > 0:\r\n print(\"turn right, move left\")\r\n action_append(\"turn001R\")\r\n action_append(\"Stand\")\r\n action_append(\"Left1move\")\r\n\r\n elif hole_Angle > -73:\r\n print(\"turn left, move right\")\r\n action_append(\"turn001L\")\r\n action_append(\"Stand\")\r\n action_append(\"Right1move\")\r\n\r\n elif Chest_ball_y < 470:\r\n print(\"2527L step = 8 inch forward forwalkVeryslow Chest_ball_y={} < 470\".format(Chest_ball_y))\r\n if real_test:\r\n action_append(\"forwalkVeryslow\")\r\n time.sleep(sleep_time_l)\r\n # action_append(\"turn000R\")\r\n\r\n else:\r\n print(\"ready to kick\")\r\n action_append(\"Stand\")\r\n time.sleep(0.5)\r\n action_append(\"LfootShot\")\r\n step = 9\r\n if real_test:\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n action_append(\"turn005L\")\r\n # action_append(\"Forwalk01\")\r\n\r\n\r\n elif step == 9:\r\n for i in range(0, 3):\r\n if edge_angle('red_floor') == 1:\r\n action_append(\"Forwalk01\")\r\n if i == 1:\r\n action_append(\"Right3move\")\r\n\r\n action_append(\"HeadTurnMM\")\r\n action_append(\"Right3move\")\r\n action_append(\"Right3move\")\r\n action_append(\"Right3move\")\r\n action_append(\"Forwalk01\")\r\n action_append(\"fast_forward_step\")\r\n print(\"Done! 77777\")\r\n state = -1\r\n step = 10\r\n
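# Editorial sketch (not part of the original source): the routines above and below keep\r\n# recomputing the orientation of the line through two pixel points as -atan(dy/dx)*180/pi,\r\n# with 90 as the special case for a vertical line. A tiny helper would remove that\r\n# repetition; \"point_angle_deg\" is a hypothetical name, and it relies on the script's\r\n# existing math import.\r\ndef point_angle_deg(p0, p1):\r\n    # Angle in degrees of the line p0 -> p1 in image coordinates (90 = vertical).\r\n    if p1[0] - p0[0] == 0:\r\n        return 90\r\n    return - math.atan((p1[1] - p0[1]) / (p1[0] - p0[0])) * 180.0 / math.pi\r\n# e.g. hole_Angle below could then be computed as point_angle_deg(bottom_center, hole_center).\r\n\r\n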
77777\")\r\n state = -1\r\n step = 10\r\n\r\n\r\ndef kick_ball():\r\n global state, state_sel, step, reset, skip\r\n global hole_Angle\r\n global golf_angle_ball, golf_angle, Chest_ball_angle, Chest_golf_angle\r\n global ball_x, ball_y, Chest_ball_x, Chest_ball_y\r\n global hole_flag, Chest_ball_flag\r\n global ChestOrg_img\r\n global picnum, img_debug\r\n global jump_count\r\n\r\n # 初始化\r\n sum_contours = np.array([[[0, 0]], [[0, 1]], [[1, 1]], [[1, 0]]])\r\n step = -1\r\n state = 7\r\n\r\n while state == 7:\r\n if -1 <= step < 9: # 踢球的七步\r\n ChestOrg = ChestOrg_img.copy()\r\n ChestOrg = np.rot90(ChestOrg)\r\n HeadOrg = HeadOrg_img.copy()\r\n Hole_OrgFrame = HeadOrg.copy()\r\n Hole_OrgFrame = cv2.resize(Hole_OrgFrame, (int(640), int(640)))\r\n Ball_OrgFrame = ChestOrg.copy()\r\n Ball_OrgFrame = cv2.resize(Ball_OrgFrame, (int(640), int(640)))\r\n\r\n img_h, img_w = Hole_OrgFrame.shape[:2]\r\n\r\n # 把上中心点和下中心点200改为640/2 fftest\r\n bottom_center = (int(320), int(img_h)) # 图像底中点\r\n top_center = (int(320), int(0)) # 图像顶中点\r\n # bottom_center = (int(640/2), int(img_h)) #图像底中点\r\n # top_center = (int(640/2), int(0)) #图像顶中点\r\n\r\n # 开始处理图像\r\n Hole_hsv = cv2.cvtColor(Hole_OrgFrame, cv2.COLOR_BGR2HSV)\r\n\r\n Hole_Imask = cv2.inRange(Hole_hsv, color_range['blue_hole'][0], color_range['blue_hole'][1]) # 识别到洞\r\n Hole_Imask = cv2.dilate(Hole_Imask, np.ones((5, 5), np.uint8), iterations=3)\r\n Hole_Imask = cv2.erode(Hole_Imask, np.ones((3, 3), np.uint8), iterations=3)\r\n\r\n # 初始化\r\n hole_center = [0, 0]\r\n Chest_ball_center = [0, 0]\r\n\r\n temp = 100\r\n temp_e = None\r\n temp_i = -1\r\n temp_area = 0\r\n\r\n temp_b = 100\r\n temp_b_e = None\r\n temp_b_i = -1\r\n temp_b_area = 0\r\n\r\n # chest 球洞处理\r\n hole_x = 0\r\n hole_y = 0\r\n\r\n _, cnts, hierachy = cv2.findContours(Hole_Imask, cv2.RETR_CCOMP,\r\n cv2.CHAIN_APPROX_NONE) # **获得图片轮廓值 #遍历图像层级关系\r\n # *取得一个球洞的轮廓*\r\n for i in range(0, len(cnts)): # 初始化sum_contours,使其等于其中一个c,便于之后拼接的格式统一\r\n # cv2.drawContours(Hole_OrgFrame, cnts[i], -1, (0, 0, 255), 1)\r\n # cv2.imshow(\"Contours\", Hole_OrgFrame)\r\n # cv2.waitKey(0)\r\n area = cv2.contourArea(cnts[i]) # 计算轮廓面积\r\n # print(len(cnts))\r\n # print(\"area={}\".format(area))\r\n # if img_debug and area > 100:\r\n # cv2.putText(Hole_OrgFrame, \"area:\" + str(area), (10, Hole_OrgFrame.shape[0] - 55),\r\n # cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 1)\r\n if 640 * 480 * 0.0033 < area < 640 * 480 * 0.45: # 去掉很小的干扰轮廓以及最大的图像边界\r\n e = cv2.fitEllipse(cnts[i]) # 拟合椭圆,获得ellipse = [ (x, y) , (a, b), angle ]。(x, y)代表椭圆中心点的位置;\r\n # (a, b)代表长短轴长度,应注意a、b为长短轴的直径,而非半径;angle 代表了中心旋转的角度\r\n area2 = np.pi * e[1][0] * e[1][1]\r\n # print(\"ratio:{}\".format(area/area2))\r\n if area / area2 > 0.05 and np.abs(90 - e[2]) < 90: # 不太懂这个判断条件的意义\r\n if temp < e[0][1]:\r\n temp = e[0][1]\r\n temp_e = e\r\n temp_i = i\r\n temp_area = area\r\n else:\r\n continue\r\n # break\r\n else:\r\n continue\r\n\r\n if temp_i == -1:\r\n print(\"没有找到洞\")\r\n hole_flag = False\r\n else:\r\n cnt_large = cnts[temp_i]\r\n cv2.ellipse(Hole_OrgFrame, temp_e, (255, 255, 255), 1)\r\n hole_flag = True\r\n (hole_x, hole_y), radius = cv2.minEnclosingCircle(cnt_large) # 最小内接圆形\r\n hole_center = (int(hole_x), int(hole_y))\r\n radius = int(radius)\r\n cv2.circle(Hole_OrgFrame, hole_center, radius, (100, 200, 30), 2)\r\n # ellipse = cv2.fitEllipse(cnt_large)\r\n # cv2.ellipse(OrgFrame,ellipse,(255,255,0),2)\r\n cv2.line(Hole_OrgFrame, hole_center, bottom_center, (0, 0, 100), 2)\r\n if (hole_center[0] - bottom_center[0]) == 0:\r\n hole_Angle = 
90\r\n else:\r\n # hole_Angle (y1-y0)/(x1-x0)\r\n hole_Angle = - math.atan(\r\n (hole_center[1] - bottom_center[1]) / (hole_center[0] - bottom_center[0])) * 180.0 / math.pi\r\n\r\n if img_debug:\r\n cv2.putText(Hole_OrgFrame, \"step:\" + str(step),\r\n (10, Hole_OrgFrame.shape[0] - 35), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Hole_OrgFrame, \"hole_angle:\" + str(hole_Angle),\r\n (10, Hole_OrgFrame.shape[0] - 115), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Hole_OrgFrame, \"hole_x:\" + str(hole_x),\r\n (10, Hole_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Hole_OrgFrame, \"hole_y:\" + str(hole_y),\r\n (220, Hole_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Hole_OrgFrame, \"hole_flag:\" + str(hole_flag),\r\n (10, Hole_OrgFrame.shape[0] - 95), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n\r\n # chest 红球处理\r\n Chest_ball_x = 0\r\n Chest_ball_y = 0\r\n # 模板匹配,远距离靠近\r\n if step == -2: # 该部分没有办法进入\r\n template = cv2.imread('//home//pi//RunningRobot_test//template.jpg')\r\n w = template.shape[0]\r\n h = template.shape[1]\r\n\r\n meth = 'cv2.TM_SQDIFF_NORMED'\r\n method = eval(meth)\r\n res = cv2.matchTemplate(Ball_OrgFrame, template, method)\r\n min_val, max_val, top_left, max_loc = cv2.minMaxLoc(res)\r\n bottom_right = (top_left[0] + w, top_left[1] + h)\r\n cv2.rectangle(Ball_OrgFrame, top_left, bottom_right, 255, 2)\r\n Chest_ball_x = int(top_left[0] + w / 2)\r\n Chest_ball_y = int(top_left[1] + h / 2)\r\n Chest_ball_flag = True\r\n\r\n else:\r\n if step < 4:\r\n e_kernelSize = 3\r\n else:\r\n e_kernelSize = 5\r\n\r\n\r\n Chest_Ball_hsv = cv2.cvtColor(Ball_OrgFrame, cv2.COLOR_BGR2HSV)\r\n # Chest_Ball_hsv = cv2.GaussianBlur(Chest_Ball_hsv, (3, 3), 0)\r\n\r\n # Chest_Ball_Imask_1 = cv2.inRange(Chest_Ball_hsv, color_range['d_red_ball_floor1'][0],\r\n # color_range['d_red_ball_floor1'][1])\r\n # Chest_Ball_Imask_2 = cv2.inRange(Chest_Ball_hsv, color_range['d_red_ball_floor2'][0],\r\n # color_range['d_red_ball_floor2'][1])\r\n # Chest_Ball_Imask = cv2.bitwise_or(Chest_Ball_Imask_1, Chest_Ball_Imask_2)\r\n Chest_Ball_Imask = cv2.inRange(Chest_Ball_hsv,color_range['kick_ball_rec'][0],color_range['kick_ball_rec'][1])\r\n Chest_Ball_Imask = cv2.erode(Chest_Ball_Imask, np.ones((e_kernelSize, e_kernelSize), np.uint8), iterations=2)\r\n Chest_Ball_Imask = cv2.morphologyEx(Chest_Ball_Imask, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8),iterations=1)\r\n\r\n # cv2.imshow(\"red_floor_INV\", Chest_Ball_Imask)\r\n # cv2.waitKey(0)\r\n # Chest_Ball_Imask = cv2.inRange(Chest_Ball_hsv, color_range['ball_red'][0], color_range['ball_red'][1])\r\n\r\n # Chest_Ball_Imask = cv2.erode(Chest_Ball_Imask, None, iterations=5)\r\n # Chest_Ball_Imask = cv2.dilate(Chest_Ball_Imask, np.ones((7, 7), np.uint8), iterations=2)\r\n # cv2.imshow(\"red_ball\", Chest_Ball_Imask)\r\n # cv2.waitKey(0)\r\n\r\n _, cnts2, hierachy2 = cv2.findContours(Chest_Ball_Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n if cnts2 is not None:\r\n for i in range(0, len(cnts2)):\r\n area = cv2.contourArea(cnts2[i]) # 计算轮廓面积\r\n if img_debug:\r\n # print(len(cnts2))\r\n # print(\"area={}\".format(area))\r\n if area > 100:\r\n cv2.putText(Ball_OrgFrame, \"area:\" + str(area), (10, Ball_OrgFrame.shape[0] - 55),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 1)\r\n if 300 < area < 640 * 480 * 0.025: # 去掉很小的干扰轮廓以及最大的图像边界\r\n if cnts2[i].size > 10:\r\n e = cv2.fitEllipse(cnts2[i])\r\n area2 = np.pi * e[1][0] * 
e[1][1]\r\n # print(\"e={} 等待\".format(e))\r\n # print(\"ratio:{}\".format(area/area2))\r\n bias = abs(1 - e[1][0] / e[1][1])\r\n # cv2.waitKey(0)\r\n if ((step < 4 and area / area2 > 0.05 and 290 < e[0][1] < 550) or (step >= 4 and 400 < e[0][1] < 550)) and \\\r\n (e[1][1]/e[1][0] < 2.3):\r\n if temp_b > bias:\r\n temp_b = bias\r\n temp_b_e = e\r\n temp_b_i = i\r\n temp_b_area = area\r\n\r\n else:\r\n continue\r\n else:\r\n continue\r\n # cv2.waitKey(0)\r\n\r\n # break\r\n else:\r\n # cv2.drawContours(Hole_OrgFrame, cnts, -1, (0, 0, 255), 3)\r\n continue\r\n else:\r\n print(\"2887L cnt_large is None\")\r\n continue\r\n\r\n # 圆球轮廓 计算角度 Chest_ball_angle\r\n if temp_b_i == -1:\r\n print(\"没有找到球\")\r\n jump_count+=1\r\n Chest_ball_flag = False\r\n Chest_ball_y = 0\r\n Chest_ball_x = 0\r\n else:\r\n print(\"球位置:{} 面积:{}\".format(temp_b_e, temp_b_area))\r\n cnt_large3 = cnts2[temp_b_i]\r\n if img_debug:\r\n cv2.ellipse(Ball_OrgFrame, temp_b_e, (255, 255, 255), 1)\r\n Chest_ball_flag = True\r\n (Chest_circle_x, Chest_circle_y), Chest_radius = cv2.minEnclosingCircle(cnt_large3)\r\n Chest_ball_center = (int(Chest_circle_x), int(Chest_circle_y))\r\n Chest_radius = int(Chest_radius)\r\n if img_debug:\r\n cv2.circle(Ball_OrgFrame, Chest_ball_center, Chest_radius, (100, 200, 20), 2)\r\n cv2.line(Ball_OrgFrame, Chest_ball_center, top_center, (0, 100, 0), 2)\r\n # ellipse = cv2.fitEllipse(cnt_large)\r\n # cv2.ellipse(OrgFrame,ellipse,(255,255,0),2)\r\n if (Chest_ball_center[0] - top_center[0]) == 0:\r\n Chest_ball_angle = 90\r\n else:\r\n # *Chest_ball_angle* (y1-y0)/(x1-x0)\r\n Chest_ball_angle = - math.atan((Chest_ball_center[1] - top_center[1]) / (\r\n Chest_ball_center[0] - top_center[0])) * 180.0 / math.pi\r\n\r\n Chest_ball_x = int(Chest_circle_x) # *ball_x*\r\n Chest_ball_y = int(Chest_circle_y) # *ball_y*\r\n\r\n if img_debug:\r\n cv2.putText(Ball_OrgFrame, \"step:\" + str(step),\r\n (10, Ball_OrgFrame.shape[0] - 35), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Ball_OrgFrame, \"Chest_ball_x:\" + str(Chest_ball_x),\r\n (10, Ball_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Ball_OrgFrame, \"Chest_ball_y:\" + str(Chest_ball_y),\r\n (220, Ball_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Ball_OrgFrame, \"Chest_ball_flag:\" + str(Chest_ball_flag),\r\n (10, Hole_OrgFrame.shape[0] - 95), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(Ball_OrgFrame, \"ball_angle:\" + str(Chest_ball_angle),\r\n (10, Ball_OrgFrame.shape[0] - 115), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n\r\n else:\r\n break\r\n\r\n if img_debug:\r\n cv2.imshow(\"Ball_OrgFrame\", Ball_OrgFrame)\r\n cv2.imshow(\"Hole_OrgFrame\", Hole_OrgFrame)\r\n cv2.waitKey(10)\r\n kick_act_move()\r\n\r\n\r\n###################### 档 板-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ndef baffle():\r\n global org_img, step, reset, skip\r\n global handling\r\n print(\"/-/-/-/-/-/-/-/-/-进入baffle\")\r\n step = 0\r\n baffle_dis_Y_flag = False\r\n baffle_angle = 0\r\n notok = True\r\n see = False\r\n finish = False\r\n angle = 45\r\n dis = 0\r\n dis_flag = False\r\n angle_flag = False\r\n center_x = 0\r\n while (1):\r\n if True:\r\n Corg_img = ChestOrg_img.copy()\r\n Corg_img = np.rot90(Corg_img)\r\n # Corg_img = Corg_img[int(300):int(400),int(100):int(500)]\r\n OrgFrame = Corg_img.copy()\r\n handling = Corg_img.copy()\r\n frame = Corg_img.copy()\r\n center = []\r\n\r\n # 开始处理图像\r\n hsv = cv2.cvtColor(frame, 
cv2.COLOR_BGR2HSV)\r\n hsv = cv2.GaussianBlur(hsv, (3, 3), 0)\r\n Imask = cv2.inRange(hsv, color_range['blue_baf'][0], color_range['blue_baf'][1])\r\n Imask = cv2.erode(Imask, None, iterations=2)\r\n Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)\r\n # cv2.imshow('BLcolor', Imask)\r\n _, cnts, hieracy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓\r\n # print(\"cnts len:\",len(cnts))\r\n if cnts is not None:\r\n cnt_large, cnt_area = getAreaMaxContour1(cnts)\r\n\r\n # print(cnt_area)\r\n\r\n else:\r\n print(\"2984L cnt_large is None\")\r\n continue\r\n\r\n blue_bottom_Y = 0\r\n if cnt_large is not None:\r\n rect = cv2.minAreaRect(cnt_large) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n\r\n Ax = box[0, 0]\r\n Ay = box[0, 1]\r\n Bx = box[1, 0]\r\n By = box[1, 1]\r\n Cx = box[2, 0]\r\n Cy = box[2, 1]\r\n Dx = box[3, 0]\r\n Dy = box[3, 1]\r\n pt1_x, pt1_y = box[0, 0], box[0, 1]\r\n pt3_x, pt3_y = box[2, 0], box[2, 1]\r\n center_x = int((pt1_x + pt3_x) / 2)\r\n center_y = int((pt1_y + pt3_y) / 2)\r\n center.append([center_x, center_y])\r\n cv2.drawContours(OrgFrame, [box], -1, [0, 0, 255, 255], 3)\r\n cv2.circle(OrgFrame, (center_x, center_y), 10, (0, 0, 255), -1) # 画出中心点\r\n # 求得大矩形的旋转角度,if条件是为了判断长的一条边的旋转角度,因为box存储的点的顺序不确定\\\r\n if math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2)) > math.sqrt(\r\n math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2)):\r\n baffle_angle = - math.atan((box[3, 1] - box[0, 1]) / (box[3, 0] - box[0, 0])) * 180.0 / math.pi\r\n else:\r\n baffle_angle = - math.atan(\r\n (box[3, 1] - box[2, 1]) / (box[3, 0] - box[2, 0])) * 180.0 / math.pi # 负号是因为坐标原点的问题\r\n if center_y > blue_bottom_Y:\r\n blue_bottom_Y = center_y\r\n baffle_dis_Y = blue_bottom_Y\r\n baffle_dis_X = center_x\r\n if baffle_dis_Y > 240:\r\n baffle_dis_Y_flag = True\r\n\r\n if img_debug:\r\n cv2.putText(OrgFrame, \"baffle_dis_Y:\" + str(baffle_dis_Y),\r\n (10, OrgFrame.shape[0] - 35), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n\r\n cv2.putText(OrgFrame, \"baffle_dis_Y_flag:\" + str(baffle_dis_Y_flag),\r\n (10, OrgFrame.shape[0] - 55), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n\r\n cv2.putText(OrgFrame, \"baffle_angle:\" + str(baffle_angle),\r\n (10, OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n cv2.putText(OrgFrame, \"step:\" + str(step), (30, OrgFrame.shape[0] - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2) # (0, 0, 255)BGR\r\n\r\n cv2.imshow('OrgFrame', OrgFrame)\r\n k = cv2.waitKey(10)\r\n if k == 27:\r\n cv2.destroyWindow('closed_pic')\r\n cv2.destroyWindow('org_img_copy')\r\n break\r\n elif k == ord('s'):\r\n print(\"save picture123\")\r\n cv2.imwrite(\"picture123.jpg\", org_img) # 保存图片\r\n\r\n # 决策执行动作\r\n if step == 0:\r\n if baffle_dis_Y <= 250:\r\n print(\"3045L 大步前进 Forwalk02\")\r\n action_append(\"Forwalk02\")\r\n elif baffle_dis_Y > 250:\r\n step = 1\r\n\r\n\r\n elif step == 1: # 调整角度 -5 ~ 5\r\n if baffle_angle > 5:\r\n if baffle_angle > 8:\r\n print(\"3054L 大左转一下 turn001L baffle_angle:\", baffle_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"3057L 左转 turn000L baffle_angle:\", baffle_angle)\r\n action_append(\"turn000L\")\r\n elif baffle_angle < -5:\r\n if baffle_angle < -8:\r\n print(\"3061L 大右转一下 turn001R baffle_angle:\", baffle_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"3064L 右转 turn000R baffle_angle:\", baffle_angle)\r\n action_append(\"turn000R\")\r\n else:\r\n step = 2\r\n\r\n 
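 # (editorial note) steps 2-4 below: close the remaining distance to the rail in two\r\n # stages, re-square the body against it (|baffle_angle| <= 2), then side-step right\r\n # and use RollRail to roll over the hurdle before turning back on course.\r\n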
elif step == 2: # 调整前进位置 调整左右位置\r\n if baffle_dis_Y < 390:\r\n print(\"3071L 大一步前进 forwardSlow0403\")\r\n action_append(\"forwardSlow0403\")\r\n elif 390 < baffle_dis_Y < 460:\r\n print(\"3074L 向前挪动 Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n elif 460 < baffle_dis_Y:\r\n step = 3\r\n elif step == 3: # 调整角度\r\n if baffle_angle > 2:\r\n if baffle_angle > 5:\r\n print(\"3081L 大左转一下 turn001L \", baffle_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"3084L 左转 turn001L\")\r\n action_append(\"turn001L\")\r\n elif baffle_angle < -2:\r\n if baffle_angle < -5:\r\n print(\"3088L 大右转一下 turn001R \", baffle_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"3091L 右转 turn001R \", baffle_angle)\r\n action_append(\"turn001R\")\r\n elif baffle_dis_Y_flag:\r\n step = 4\r\n elif step == 4: # 跨栏后调整方向\r\n\r\n print(\"3097L 前挪一点点\")\r\n print(\"3098L 翻栏杆 翻栏杆 RollRail\")\r\n action_append(\"Right3move\")\r\n # action_append(\"Right3move\")\r\n # action_append(\"Right3move\")\r\n action_append(\"Stand\")\r\n action_append(\"RollRail\")\r\n action_append(\"Stand\")\r\n # print(\"step step step 444 \")\r\n\r\n action_append(\"turn004L\")\r\n action_append(\"turn004L\")\r\n action_append(\"turn004L\")\r\n action_append(\"turn001L\")\r\n action_append(\"Back3Run\")\r\n # action_append(\"turn004L\")\r\n # action_append(\"turn004L\")\r\n\r\n break\r\n\r\n\r\n###################### 过 坑-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ndef hole_edge(color):\r\n edge_angle_chest(color) # 调整好角度与距离\r\n step = 1\r\n while (1):\r\n Area = 0\r\n src = ChestOrg_img.copy()\r\n src = np.rot90(src)\r\n src = src.copy()\r\n # cv2.imshow(\"src1\",src)\r\n src = src[int(40):int(400), int(50):int(500)]\r\n src_copy = src\r\n # cv2.imshow(\"src2\",src)\r\n src = cv2.GaussianBlur(src, (5, 5), 0)\r\n hsv_img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\r\n # cv2.imshow(\"hsv\",hsv_img)\r\n mask = cv2.inRange(hsv_img, color_range[color][0], color_range[color][1])\r\n # cv2.imshow(\"mask\",mask)\r\n\r\n mask2 = cv2.erode(mask, None, iterations=5)\r\n mask1 = cv2.dilate(mask2, None, iterations=8)\r\n\r\n # cv2.imshow(\"mask1\",mask1)\r\n # # cv2.imshow(\"mask2\",mask2)\r\n # # # cv2.imshow(\"mask\",mask)\r\n # cv2.waitKey()\r\n\r\n _, contours2, hierarchy2 = cv2.findContours(mask1, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\r\n if step == 1:\r\n if len(contours2) >= 2:\r\n print(\"3146L 仍然看得到内轮廓,向前走 forwardSlow0403\")\r\n action_append(\"forwardSlow0403\")\r\n step = 1\r\n\r\n else:\r\n print(\"已近迈进,正式进入过坑阶段\")\r\n action_append(\"Stand\")\r\n step = 2\r\n elif step == 2:\r\n if color == 'blue_hole_chest':\r\n hole_edge_main('blue_hole_head')\r\n break\r\n elif color == 'green_hole_chest':\r\n hole_edge_main('green_hole_head')\r\n break\r\n\r\n\r\ndef hole_edge_main(color):\r\n global HeadOrg_img, chest_copy, reset, skip, handling\r\n global handling\r\n angle_ok_flag = False\r\n angle = 90\r\n dis = 0\r\n bottom_centreX = 0\r\n bottom_centreY = 0\r\n see = False\r\n dis_ok_count = 0\r\n headTURN = 0\r\n hole_flag = 0\r\n\r\n step = 1\r\n print(\"/-/-/-/-/-/-/-/-/-hole edge\")\r\n while True:\r\n OrgFrame = HeadOrg_img.copy()\r\n x_start = 180\r\n blobs = OrgFrame[int(0):int(480), int(x_start):int(380)] # 只对中间部分识别处理 Y , X\r\n r_h = 480\r\n r_w = 200\r\n handling = blobs.copy()\r\n frame_mask = blobs.copy()\r\n\r\n # 获取图像中心点坐标x, y\r\n center = []\r\n # 开始处理图像\r\n\r\n hsv = cv2.cvtColor(frame_mask, cv2.COLOR_BGR2HSV)\r\n hsv = cv2.GaussianBlur(hsv, (3, 3), 0)\r\n Imask = cv2.inRange(hsv, color_range[color][0], 
color_range[color][1])\r\n # Imask = cv2.erode(Imask, np.ones((3, 3), np.uint8), iterations=1)\r\n Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=3)\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))\r\n Imask = cv2.morphologyEx(Imask, cv2.MORPH_OPEN, kernel)\r\n _, cnts, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓\r\n\r\n cnt_sum, area_max = getAreaMaxContour1(cnts) # 找出最大轮廓\r\n C_percent = round(area_max * 100 / (r_w * r_h), 2) # 最大轮廓百分比\r\n # cv2.drawContours(frame, cnt_sum, -1, (255, 0, 255), 3)\r\n\r\n if cnt_sum is not None:\r\n see = True\r\n rect = cv2.minAreaRect(cnt_sum) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n\r\n bottom_right = cnt_sum[0][0] # 右下角点坐标\r\n bottom_left = cnt_sum[0][0] # 左下角点坐标\r\n top_right = cnt_sum[0][0] # 右上角点坐标\r\n top_left = cnt_sum[0][0] # 左上角点坐标\r\n for c in cnt_sum:\r\n\r\n if c[0][0] + 1 * (r_h - c[0][1]) < bottom_left[0] + 1 * (r_h - bottom_left[1]):\r\n bottom_left = c[0]\r\n if c[0][0] + 1 * c[0][1] > bottom_right[0] + 1 * bottom_right[1]:\r\n bottom_right = c[0]\r\n\r\n if c[0][0] + 3 * c[0][1] < top_left[0] + 3 * top_left[1]:\r\n top_left = c[0]\r\n if (r_w - c[0][0]) + 3 * c[0][1] < (r_w - top_right[0]) + 3 * top_right[1]:\r\n top_right = c[0]\r\n\r\n # if debug:\r\n # handling = ChestOrg_img.copy()\r\n # cv2.circle(handling, (c[0][0], c[0][1]), 5, [0, 255, 0], 2)\r\n # cv2.circle(handling, (bottom_left[0], bottom_left[1]), 5, [255, 255, 0], 2)\r\n # cv2.circle(handling, (bottom_right[0], bottom_right[1]), 5, [255, 0, 255], 2)\r\n # cv2.imshow('handling', handling) # 显示图像\r\n # cv2.waitKey(2)\r\n\r\n bottomcenter_x = (bottom_left[0] + bottom_right[0]) / 2 # 得到bottom中心坐标\r\n bottomcenter_y = (bottom_left[1] + bottom_right[1]) / 2\r\n\r\n topcenter_x = (top_right[0] + top_left[0]) / 2 # 得到top中心坐标\r\n topcenter_y = (top_left[1] + top_right[1]) / 2\r\n bottom_angle = -math.atan(\r\n (bottom_right[1] - bottom_left[1]) / (bottom_right[0] - bottom_left[0])) * 180.0 / math.pi\r\n top_angle = -math.atan((top_right[1] - top_left[1]) / (top_right[0] - top_left[0])) * 180.0 / math.pi + 3\r\n\r\n # if img_debug:\r\n\r\n # cv2.circle(OrgFrame, (blackLine_L[0] + x_start, blackLine_L[1]), 5, [0, 255, 255], 2)\r\n # cv2.circle(OrgFrame, (blackLine_R[0] + x_start, blackLine_R[1]), 5, [255, 0, 255], 2)\r\n # cv2.line(OrgFrame, (blackLine_R[0] + x_start,blackLine_R[1]), (blackLine_L[0] + x_start,blackLine_L[1]), (0, 255, 255), thickness=2)\r\n # cv2.putText(OrgFrame, \"L_R_angle:\" + str(L_R_angle),(10, OrgFrame.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n # cv2.putText(OrgFrame, \"Xcenter:\" + str(Xcenter + x_start),(10, OrgFrame.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n # cv2.putText(OrgFrame, \"Ycenter:\" + str(Ycenter),(200, OrgFrame.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)\r\n\r\n # # cv2.drawContours(frame_mask, cnt_sum, -1, (255, 0, 255), 3)\r\n # cv2.imshow('frame_mask', frame_mask)\r\n # # cv2.imshow('black', Imask)\r\n # cv2.imshow('OrgFrame', OrgFrame)\r\n # cv2.waitKey(10)\r\n # else:\r\n # see = False\r\n\r\n # print(Ycenter)\r\n\r\n # 决策执行动作\r\n if step == 1:\r\n print(\"3266L 向右看 HeadTurn015\")\r\n action_append(\"HeadTurn015\")\r\n time.sleep(1) # timefftest\r\n step = 2\r\n elif step == 2:\r\n\r\n if top_angle > 1.5:\r\n if top_angle > 7:\r\n headTURN += 1\r\n print(\"3279L 左da旋转 turn001L \", top_angle)\r\n action_append(\"turn001L\")\r\n\r\n else:\r\n print(\"3283L 左旋转 turn000L 
\", top_angle)\r\n headTURN += 1\r\n action_append(\"turn000L\")\r\n\r\n # time.sleep(1) # timefftest\r\n elif top_angle < -1.5:\r\n if top_angle < -7:\r\n headTURN += 1\r\n print(\"3292L 右da旋转 turn001R \", top_angle)\r\n action_append(\"turn001R\")\r\n\r\n else:\r\n print(\"3296L 右旋转 turn000R \", top_angle)\r\n action_append(\"turn000R\")\r\n else:\r\n step = 3\r\n\r\n # time.sleep(1) # timefftest\r\n elif step == 3:\r\n\r\n if topcenter_y >= 369:\r\n if topcenter_y > 399:\r\n print(\"3303L 左da侧移 Left1move >440 \", topcenter_y)\r\n action_append(\"Left3move\")\r\n else:\r\n print(\"3306L 左侧移 Left02move > 365 \", topcenter_y)\r\n action_append(\"Left02move\")\r\n elif topcenter_y < 360:\r\n print(\"3309L 右侧移 Right02move <400 \", topcenter_y)\r\n action_append(\"Right02move\")\r\n else:\r\n print(\"3312L 右看 X位置ok\")\r\n action_append(\"fastForward03\")\r\n # action_append(\"Left02move\")\r\n # print(\"向前一步\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n action_append(\"Stand\")\r\n step = 4\r\n # cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n elif step == 4:\r\n print(\"3352L 右侧看到绿色边缘 右侧移 Right3move\")\r\n action_append(\"Right3move\")\r\n action_append(\"Right3move\")\r\n action_append(\"Right3move\")\r\n # action_append(\"Right3move\")\r\n action_append(\"HeadTurnMM\")\r\n step = 5\r\n\r\n elif step == 5:\r\n print(\"过坑阶段结束\")\r\n action_append(\"Stand\")\r\n break\r\n\r\n\r\n###################### 过 地 雷 区-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ndef angle_adjust(): # 调整角度,确保始终朝前\r\n global baffle_angle, Bbox_centerY\r\n if Bbox_centerY > 250:\r\n if baffle_angle > 2:\r\n if baffle_angle > 5:\r\n print(\"朝前 大左转一下 turn001L \", baffle_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"朝前 左转 turn001L\")\r\n action_append(\"turn001L\")\r\n elif baffle_angle < -2:\r\n if baffle_angle < -5:\r\n print(\"朝前 大右转一下 turn001R \", baffle_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"朝前 右转 turn001R \", baffle_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n pass\r\n\r\n\r\nBbox_centerY = 0\r\n\r\n\r\ndef area_bits(Imask):\r\n area = 0\r\n for i in Imask:\r\n for j in i:\r\n if j != 0:\r\n area = area + 1\r\n else:\r\n continue\r\n return area\r\n\r\n\r\ndef obstacle():\r\n global HeadOrg_img, step\r\n global Head_L_R_angle, Bbox_centerY, blue_rail\r\n global baffle_angle\r\n print(\"/-/-/-/-/-/-/-/-/-进入obstacle\")\r\n action_append(\"Stand\")\r\n step = 1\r\n k = 1\r\n blue_rail = False\r\n\r\n while (1):\r\n if True:\r\n if ChestOrg_img is None:\r\n continue\r\n Corg_img = ChestOrg_img.copy()\r\n Corg_img = np.rot90(Corg_img)\r\n # Corg_img = Corg_img[int(200):int(400),int(100):int(500)]\r\n Corg_img = Corg_img.copy()\r\n hsv = cv2.cvtColor(Corg_img, cv2.COLOR_BGR2HSV)\r\n hsv = cv2.GaussianBlur(hsv, (3, 3), 0)\r\n\r\n # blue 分析图像 决策执行\r\n Bumask = cv2.inRange(hsv, color_range['blue_baf'][0], color_range['blue_baf'][1])\r\n Bumask = cv2.erode(Bumask, None, iterations=2)\r\n Bumask = cv2.dilate(Bumask, np.ones((3, 3), np.uint8), iterations=2)\r\n # cv2.imshow('Bluemask', Bumask)\r\n _, cntsblue, hierarchy = cv2.findContours(Bumask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) # 找出轮廓\r\n\r\n if cntsblue is not None:\r\n cnt_large = getAreaMaxContour2(cntsblue) # 取最大轮廓\r\n else:\r\n print(\"1135L cnt_large is None\")\r\n continue\r\n\r\n if cnt_large is not None:\r\n rect_blue = cv2.minAreaRect(cnt_large)\r\n box_blue = np.int0(cv2.boxPoints(rect_blue)) # 点的坐标\r\n Bbox_centerX = 
int((box_blue[3, 0] + box_blue[2, 0] + box_blue[1, 0] + box_blue[0, 0]) / 4)\r\n Bbox_centerY = int((box_blue[3, 1] + box_blue[2, 1] + box_blue[1, 1] + box_blue[0, 1]) / 4)\r\n Bbox_center = [Bbox_centerX, Bbox_centerY]\r\n cv2.circle(Corg_img, (Bbox_center[0], Bbox_center[1]), 7, (0, 0, 255), -1) # 圆点标记\r\n\r\n cv2.drawContours(Corg_img, [box_blue], -1, (255, 0, 0), 3)\r\n if math.sqrt(math.pow(box_blue[3, 1] - box_blue[0, 1], 2) + math.pow(box_blue[3, 0] - box_blue[0, 0],\r\n 2)) > math.sqrt(\r\n math.pow(box_blue[3, 1] - box_blue[2, 1], 2) + math.pow(box_blue[3, 0] - box_blue[2, 0], 2)):\r\n baffle_angle = - math.atan(\r\n (box_blue[3, 1] - box_blue[0, 1]) / (box_blue[3, 0] - box_blue[0, 0])) * 180.0 / math.pi\r\n else:\r\n baffle_angle = - math.atan((box_blue[3, 1] - box_blue[2, 1]) / (\r\n box_blue[3, 0] - box_blue[2, 0])) * 180.0 / math.pi # 负号是因为坐标原点的问题\r\n obscle_area_blue = 0\r\n # 当遇到蓝色门槛时停止\r\n for c in cntsblue:\r\n obscle_area_blue += math.fabs(cv2.contourArea(c))\r\n if Bbox_centerY >= 280 and obscle_area_blue > 0.05 * 640 * 480: # and go_up: # 320 obscle_area_blue > 0.05 * 640 * 480 and\r\n\r\n if img_debug:\r\n cv2.imshow('Corg_img', Corg_img)\r\n cv2.waitKey(10)\r\n print(\"遇到蓝色门槛-----*-----*-----*-----* Bbox_center Y:\", Bbox_centerY)\r\n action_append(\"Stand\")\r\n blue_rail = True\r\n\r\n cv2.destroyAllWindows()\r\n break\r\n\r\n # black 分析图像 决策执行\r\n Imask = cv2.inRange(hsv, color_range['black_dir'][0], color_range['black_dir'][1]) # 黑色地雷\r\n Imask = cv2.erode(Imask, None, iterations=3)\r\n Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)\r\n # cv2.imshow('black', Imask)\r\n _, contours, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓\r\n cv2.drawContours(Corg_img, contours, -1, (255, 0, 255), 2)\r\n\r\n left_point = [640, 0]\r\n right_point = [0, 0]\r\n\r\n if len(contours) != 0:\r\n\r\n Big_battle = [0, 0]\r\n\r\n for c in contours:\r\n rect = cv2.minAreaRect(c) # 最小外接矩形\r\n box = cv2.boxPoints(rect) # 我们需要矩形的4个顶点坐标box, 通过函数 cv2.cv.BoxPoints() 获得\r\n box = np.intp(box) # 最小外接矩形的四个顶点\r\n box_Ax, box_Ay = box[0, 0], box[0, 1]\r\n box_Bx, box_By = box[1, 0], box[1, 1]\r\n box_Cx, box_Cy = box[2, 0], box[2, 1]\r\n box_Dx, box_Dy = box[3, 0], box[3, 1]\r\n box_centerX = int((box_Ax + box_Bx + box_Cx + box_Dx) / 4)\r\n box_centerY = int((box_Ay + box_By + box_Cy + box_Dy) / 4)\r\n box_center = [box_centerX, box_centerY]\r\n\r\n # 剔除图像上部分点 和底部点\r\n if box_centerY < 300 or box_centerY > 550:\r\n continue\r\n\r\n # 遍历点 画圈\r\n if box_debug:\r\n cv2.circle(Corg_img, (box_centerX, box_centerY), 8, (0, 0, 255), 2) # 圆点标记识别黑点\r\n cv2.imshow('Corg_img', Corg_img)\r\n cv2.waitKey(1)\r\n\r\n # 找出最左点与最右点\r\n if box_centerX < left_point[0]:\r\n left_point = box_center\r\n if box_centerX > right_point[0]:\r\n right_point = box_center\r\n\r\n if box_centerX <= 80 or box_centerX >= 400: # 排除左右边沿点 box_centerXbox_centerX 240\r\n continue\r\n if math.pow(box_centerX - 240, 2) + math.pow(box_centerY - 640, 2) < math.pow(Big_battle[0] - 240,\r\n 2) + math.pow(\r\n Big_battle[1] - 640, 2):\r\n Big_battle = box_center # 这个是要规避的黑点\r\n # print(\"1272L go_up False \",Big_battle[0],Big_battle[1])\r\n\r\n # 显示图\r\n if img_debug:\r\n cv2.circle(Corg_img, (left_point[0], left_point[1]), 7, (0, 255, 0), -1) # 圆点标记\r\n cv2.circle(Corg_img, (right_point[0], right_point[1]), 7, (0, 255, 255), -1) # 圆点标记\r\n cv2.circle(Corg_img, (Big_battle[0], Big_battle[1]), 7, (255, 255, 0), -1) # 圆点标记\r\n cv2.putText(Corg_img, \"Bbox_centerY:\" + 
str(int(Bbox_centerY)), (230, 460),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Corg_img, \"Big_battle x,y:\" + str(int(Big_battle[0])) + ', ' + str(int(Big_battle[1])),\r\n (230, 480), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(Corg_img, \"baffle_angle:\" + str(int(baffle_angle)), (230, 440),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.line(Corg_img, (Big_battle[0], Big_battle[1]), (240, 640), (0, 255, 255), thickness=2)\r\n cv2.line(Corg_img, (0, 500), (480, 500), (255, 255, 255), thickness=2)\r\n cv2.rectangle(Corg_img, (50, 350), (430, 550), (0, 0, 255), thickness=2)\r\n\r\n # cv2.imshow('handling', handling)\r\n cv2.imshow('Corg_img', Corg_img)\r\n k = cv2.waitKey(100)\r\n if k == 27:\r\n cv2.destroyWindow('closed_pic')\r\n cv2.destroyWindow('org_img_copy')\r\n break\r\n elif k == ord('s'):\r\n print(\"save picture123\")\r\n cv2.imwrite(\"picture123.jpg\", HeadOrg_img) # 保存图片\r\n\r\n # 370修改为360\r\n if Big_battle[1] < 350:\r\n print(\"3564L 前进靠近一步 forwardSlow0403 \", Big_battle[1])\r\n action_append(\"Stand\")\r\n action_append(\"forwardSlow0403\")\r\n action_append(\"Stand\")\r\n angle_adjust()\r\n\r\n\r\n # 410\r\n elif Big_battle[1] < 400:\r\n print(\"3575L 慢慢前进靠近 Forwalk01\", Big_battle[1])\r\n action_append(\"Stand\")\r\n action_append(\"Forwalk01\")\r\n action_append(\"Stand\")\r\n angle_adjust()\r\n\r\n\r\n\r\n\r\n elif (50 <= Big_battle[0] and Big_battle[0] < 140):\r\n print(\"3580L 右平移一步 Right02move\", Big_battle[0])\r\n action_append(\"Stand\")\r\n action_append(\"Right02move\")\r\n\r\n # 240修改为265\r\n elif (140 <= Big_battle[0] and Big_battle[0] < 240):\r\n print(\"3586L 右平移三步 Right3move\", Big_battle[0])\r\n action_append(\"Stand\")\r\n action_append(\"Right3move\")\r\n action_append(\"Stand\")\r\n action_append(\"Right02move\")\r\n action_append(\"Stand\")\r\n action_append(\"Right02move\")\r\n\r\n\r\n\r\n elif (240 <= Big_battle[0] and Big_battle[0] < 360):\r\n print(\"3592L 向左平移三步 Left3move\", Big_battle[0])\r\n action_append(\"Stand\")\r\n action_append(\"Left3move\")\r\n action_append(\"Stand\")\r\n action_append(\"Left3move\")\r\n action_append(\"Stand\")\r\n action_append(\"Left3move\")\r\n\r\n\r\n\r\n elif (360 <= Big_battle[0] < 430):\r\n print(\"3598L 向左平移一步 Left02move\", Big_battle[0])\r\n action_append(\"Stand\")\r\n action_append(\"Left02move\")\r\n action_append(\"Stand\")\r\n\r\n\r\n\r\n else:\r\n print(\"3604L error 不在范围 继续向前走\")\r\n action_append(\"Stand\")\r\n action_append(\"forwardSlow0403\")\r\n # Big_battle = [0,0]\r\n else:\r\n print(\"3607L 继续向前\")\r\n action_append(\"Stand\")\r\n action_append(\"forwardSlow0403\")\r\n Big_battle = [0, 0]\r\n\r\n if img_debug:\r\n cv2.circle(Corg_img, (left_point[0], left_point[1]), 7, (0, 255, 0), -1) # 圆点标记\r\n cv2.circle(Corg_img, (right_point[0], right_point[1]), 7, (0, 255, 255), -1) # 圆点标记\r\n cv2.circle(Corg_img, (Big_battle[0], Big_battle[1]), 7, (255, 255, 0), -1) # 圆点标记\r\n cv2.line(Corg_img, (Big_battle[0], Big_battle[1]), (240, 640), (0, 255, 255), thickness=2)\r\n # 500线\r\n cv2.line(Corg_img, (0, 500), (480, 500), (255, 255, 255), thickness=2)\r\n cv2.imshow('Corg_img', Corg_img)\r\n k = cv2.waitKey(100)\r\n if k == 27:\r\n cv2.destroyWindow('closed_pic')\r\n cv2.destroyWindow('org_img_copy')\r\n break\r\n elif k == ord('s'):\r\n print(\"save picture123\")\r\n cv2.imwrite(\"picture123.jpg\", HeadOrg_img) # 保存图片\r\n\r\n\r\n###################### 终 
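# A note on the mine-selection rule in obstacle() above: the dot to avoid
# (Big_battle) is the candidate box centre with the smallest squared distance
# to (240, 640), roughly the robot's feet at the bottom centre of the rotated
# 480x640 chest frame; squared distances are compared so no sqrt is needed.
# A minimal standalone sketch of the same rule (the helper name is
# illustrative, not from this file):
def nearest_to_feet(centers, foot=(240, 640)):
    """Return the (x, y) centre closest to the foot point."""
    return min(centers, key=lambda c: (c[0] - foot[0]) ** 2 + (c[1] - foot[1]) ** 2)
# e.g. nearest_to_feet([(100, 400), (250, 600)]) returns (250, 600)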
点-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ndef end_door():\r\n global ChestOrg_img, state, state_sel, step, reset, skip, img_debug, end_door_flag\r\n end_door_flag = 0\r\n state_sel = 'door'\r\n state = 1\r\n if state == 1: # 初始化\r\n print(\"/-/-/-/-/-/-/-/-/-进入end_door\")\r\n step = 0\r\n else:\r\n return\r\n\r\n while state == 1:\r\n\r\n if step == 0: # 判断门是否抬起\r\n if ChestOrg_img is None:\r\n continue\r\n\r\n org_img_copy = ChestOrg_img.copy()\r\n org_img_copy = np.rot90(org_img_copy)\r\n handling = org_img_copy.copy()\r\n\r\n border = cv2.copyMakeBorder(handling, 12, 12, 16, 16, borderType=cv2.BORDER_CONSTANT,\r\n value=(255, 255, 255)) # 扩展白边,防止边界无法识别\r\n handling = cv2.resize(border, (chest_r_width, chest_r_height), interpolation=cv2.INTER_CUBIC) # 将图片缩放\r\n frame_gauss = cv2.GaussianBlur(handling, (21, 21), 0) # 高斯模糊\r\n frame_hsv = cv2.cvtColor(frame_gauss, cv2.COLOR_BGR2HSV) # 将图片转换到HSV空间\r\n\r\n frame_door_yellow = cv2.inRange(frame_hsv, color_range['yellow_door'][0],\r\n color_range['yellow_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n frame_door_black = cv2.inRange(frame_hsv, color_range['black_door'][0],\r\n color_range['black_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n\r\n # frame_door = cv2.add(frame_door_yellow, frame_door_black)\r\n open_pic = cv2.morphologyEx(frame_door_yellow, cv2.MORPH_OPEN, np.ones((13, 13), np.uint8)) # 开运算 去噪点\r\n closed_pic = cv2.morphologyEx(open_pic, cv2.MORPH_CLOSE, np.ones((50, 50), np.uint8)) # 闭运算 封闭连接\r\n\r\n (image, contours, hierarchy) = cv2.findContours(closed_pic, cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_NONE) # 找出轮廓\r\n areaMaxContour, area_max = getAreaMaxContour1(contours) # 找出最大轮廓\r\n percent = round(100 * area_max / (chest_r_width * chest_r_height), 2) # 最大轮廓的百分比\r\n if areaMaxContour is not None:\r\n rect = cv2.minAreaRect(areaMaxContour) # 矩形框选\r\n box = np.int0(cv2.boxPoints(rect)) # 点的坐标\r\n if img_debug:\r\n cv2.drawContours(handling, [box], 0, (153, 200, 0), 2) # 将最小外接矩形画在图上\r\n\r\n if img_debug:\r\n cv2.putText(handling, 'area: ' + str(percent) + '%', (30, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.imshow('handling', handling) # 显示图像\r\n\r\n # cv2.imshow('frame_door_yellow', frame_door_yellow) # 显示图像\r\n # cv2.imshow('frame_door_black', frame_door_black) # 显示图像\r\n\r\n k = cv2.waitKey(10)\r\n if k == 27:\r\n cv2.destroyWindow('open_after_closed')\r\n cv2.destroyWindow('handling')\r\n break\r\n elif k == ord('s'):\r\n print(\"save picture123\")\r\n cv2.imwrite(\"picture123.jpg\", org_img_copy) # 保存图片\r\n\r\n # 根据比例得到是否前进的信息\r\n if percent > 5: # 检测到横杆\r\n print(percent, \"%\")\r\n print(\"有障碍 等待 contours len:\", len(contours))\r\n action_append(\"Stand\")\r\n end_door_flag = 1\r\n time.sleep(3)\r\n else:\r\n if end_door_flag == 0:\r\n print(percent)\r\n print(\"暂未发现横杆 等待检测\")\r\n action_append(\"Stand\")\r\n\r\n elif end_door_flag == 1 and percent < 1:\r\n print(percent)\r\n # print(\"3894L 执行3步\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n\r\n print(\"3899L 执行快走555\")\r\n action_append(\"fastForward05\")\r\n action_append(\"Stand\")\r\n step = 1\r\n\r\n else:\r\n print(percent, \"%\")\r\n print(\"有障碍 等待 contours len:\", len(contours))\r\n action_append(\"Stand\")\r\n time.sleep(3)\r\n\r\n elif step == 1:\r\n break\r\n\r\n\r\n#################################################台阶##########################################\r\ndef two_color_analyze(frame,point1,point2,color1,color2):\r\n 
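# two_color_analyze() defined here counts mask pixels inside each detection
# box with area_bits(), which walks the image with a double Python loop.
# Assuming the masks are the usual single-channel uint8 arrays produced by
# cv2.inRange, NumPy can produce the same count natively; a drop-in sketch
# (the name is illustrative, not part of this file):
import numpy as np

def area_bits_fast(Imask):
    # Count the non-zero mask pixels without looping in Python.
    return int(np.count_nonzero(Imask))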
\"\"\"point是一个元组,其内有3个量,量1是中心点坐标,量2是长宽,量3是角度\r\n 如point1=((60,550),(40,60),0),则是取了(60,550)为中心点,长40,高60,角度为0的一个矩形框。\r\n 该方法能返回point代表的矩形框中两种color所覆盖的面积\r\n 该函数可以有效用于不同颜色之间的边界问题判断\r\n 返回值percent 分别为1_1,1_2,2_1,2_2。#####1_表示point1,_2表示color2.\r\n \"\"\"\r\n global img_debug,color_range\r\n img=frame\r\n hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n frame_copy=img.copy()\r\n\r\n detect_rec1 = np.int0(cv2.boxPoints(point1))\r\n detect_rec2 = np.int0(cv2.boxPoints(point2))\r\n print(\"detect_rec1 is\",detect_rec1)\r\n print(\"detect_rec2 is\",detect_rec2)\r\n # point1 and color1\r\n if color1==\"red_floor\":\r\n Imask1__1 = cv2.inRange(hsv, color_range['red_floor1'][0], color_range['red_floor1'][1])\r\n Imask1__2 = cv2.inRange(hsv, color_range['red_floor2'][0], color_range['red_floor2'][1])\r\n imask1 = cv2.bitwise_or(Imask1__1, Imask1__2)\r\n else:\r\n imask1=cv2.inRange(hsv,color_range[color1][0],color_range[color1][1])\r\n imask1_1=imask1[detect_rec1[2][1]:detect_rec1[0][1],detect_rec1[0][0]:detect_rec1[2][0]]\r\n area1_1=area_bits(imask1_1)\r\n percent1_1=area1_1/((detect_rec1[2][1]-detect_rec1[0][1])*(detect_rec1[0][0]-detect_rec1[2][0]))\r\n print(\"area1_1 is\",area1_1)\r\n print(\"percent1_1 is\",percent1_1)\r\n # cv2.imshow('1_1',imask1_1)\r\n #point1 and color2\r\n imask2 = cv2.inRange(hsv, color_range[color2][0], color_range[color2][1])\r\n imask1_2 = imask2[detect_rec1[2][1]:detect_rec1[0][1], detect_rec1[0][0]:detect_rec1[2][0]]\r\n area1_2 = area_bits(imask1_2)\r\n percent1_2 = area1_2 / ((detect_rec1[2][1] - detect_rec1[0][1]) * (detect_rec1[0][0] - detect_rec1[2][0]))\r\n print(\"area1_2 is\", area1_2)\r\n print(\"percent1_2 is\", percent1_2)\r\n # cv2.imshow('1_2', imask1_2)\r\n #point2 and color1\r\n imask2_1 = imask1[detect_rec2[2][1]:detect_rec2[0][1], detect_rec2[0][0]:detect_rec2[2][0]]\r\n area2_1 = area_bits(imask2_1)\r\n percent2_1 = area2_1 / ((detect_rec2[2][1] - detect_rec2[0][1]) * (detect_rec2[0][0] - detect_rec2[2][0]))\r\n print(\"area2_1 is\", area2_1)\r\n print(\"percent2_1 is\", percent2_1)\r\n # cv2.imshow('2_1', imask2_1)\r\n #point2 and color2\r\n imask2_2 = imask2[detect_rec2[2][1]:detect_rec2[0][1], detect_rec2[0][0]:detect_rec2[2][0]]\r\n area2_2 = area_bits(imask2_2)\r\n percent2_2 = area2_2 / ((detect_rec2[2][1] - detect_rec2[0][1]) * (detect_rec2[0][0] - detect_rec2[2][0]))\r\n print(\"area2_2 is\", area2_2)\r\n print(\"percent2_2 is\", percent2_2)\r\n # cv2.imshow('2_2', imask2_2)\r\n if img_debug:\r\n cv2.drawContours(frame_copy, [detect_rec1], 0, (0, 255, 255), 2) # 检测框1\r\n cv2.drawContours(frame_copy, [detect_rec2], 0, (0, 255, 255), 2) # 检测框2\r\n cv2.imshow(\"检测框\",frame_copy)\r\n percent=[percent1_1,percent1_2,percent2_1,percent2_2]\r\n print(percent)\r\n return percent\r\ndef floor():\r\n global org_img, state, state_sel, step, reset, skip, debug\r\n global camera_out\r\n state_sel = 'floor'\r\n\r\n if state_sel == 'floor': # 初始化\r\n print(\"/-/-/-/-/-/-/-/-/-进入floor\")\r\n step = 0\r\n # 检测框的坐标\r\n dec_point1=((60,550),(40,60),0)\r\n dec_point2=((420,550),(40,60),0)\r\n\r\n r_w = chest_r_width\r\n r_h = chest_r_height\r\n\r\n top_angle = 0\r\n T_B_angle = 0\r\n topcenter_x = 0.5 * r_w\r\n topcenter_y = 0\r\n bottomcenter_x = 0.5 * r_w\r\n bottomcenter_y = 0\r\n\r\n topcenter_x_setl = 230\r\n topcenter_x_setr = 250\r\n # topcenter_y_setu=280\r\n # bottomcenter_x_setl=0\r\n # bottomcenter_x_setr=0\r\n while state_sel == 'floor':\r\n # chest\r\n if True: # 上下边沿\r\n t1 = cv2.getTickCount()\r\n Corg_img = ChestOrg_img.copy()\r\n Corg_img = 
np.rot90(Corg_img)\r\n OrgFrame = Corg_img.copy()\r\n\r\n # 初始化 bottom_right bottom_left\r\n bottom_right = (480, 0)\r\n bottom_left = (0, 0)\r\n top_right = (480, 0) # 右上角点坐标\r\n top_left = (0, 0) # 左上角点坐标\r\n\r\n frame = cv2.resize(OrgFrame, (chest_r_width, chest_r_height), interpolation=cv2.INTER_LINEAR)\r\n frame_copy = frame.copy()\r\n # 获取图像中心点坐标x, y\r\n center = []\r\n # 开始处理图像\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n hsv = cv2.GaussianBlur(hsv, (3, 3), 0)\r\n if step == 0:\r\n Imask = cv2.inRange(hsv, color_range['blue_floor'][0],\r\n color_range['blue_floor'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n elif step == 1:\r\n Imask = cv2.inRange(hsv, color_range['blue_floor'][0], color_range['blue_floor'][1])\r\n elif step == 2:\r\n Imask = cv2.inRange(hsv, color_range['green_floor'][0], color_range['green_floor'][1])\r\n elif step == 3:\r\n Imask1 = cv2.inRange(hsv, color_range['red_floor1'][0], color_range['red_floor1'][1])\r\n Imask2 = cv2.inRange(hsv, color_range['red_floor2'][0], color_range['red_floor2'][1])\r\n Imask = cv2.bitwise_or(Imask1, Imask2)\r\n elif step == 4:\r\n Imask = cv2.inRange(hsv, color_range['green_floor'][0], color_range['green_floor'][1])\r\n elif step == 5:\r\n Imask = cv2.inRange(hsv, color_range['blue_floor'][0], color_range['blue_floor'][1])\r\n elif step == 6 or step == 6.1 or step == 7:\r\n frame_1 = cv2.inRange(hsv, color_range['red_XP1'][0], color_range['red_XP1'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n frame_2 = cv2.inRange(hsv, color_range['red_XP2'][0], color_range['red_XP2'][1])\r\n Imask = cv2.bitwise_or(frame_1, frame_2)\r\n # Imask = cv2.inRange(hsv, color_range['blue_floor'][0], color_range['blue_floor'][1])\r\n else:\r\n print(\"no color\")\r\n\r\n # opened = cv2.morphologyEx(Imask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8)) # 开运算 去噪点\r\n # Imask = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)) # 闭运算 封闭连接\r\n\r\n # Imask = cv2.erode(Imask, None, iterations=2)\r\n Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)\r\n\r\n _, cnts, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓\r\n\r\n cnt_sum, area_max = getAreaMaxContour1(cnts) # 找出最大轮廓\r\n C_percent = round(area_max * 100 / (r_w * r_h), 2) # 最大轮廓百分比\r\n cv2.drawContours(frame, cnt_sum, -1, (255, 0, 255), 3)\r\n\r\n if cnt_sum is not None:\r\n see = True\r\n rect = cv2.minAreaRect(cnt_sum) # 最小外接矩形\r\n box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点\r\n bottom_right = cnt_sum[0][0] # 右下角点坐标\r\n bottom_left = cnt_sum[0][0] # 左下角点坐标\r\n top_right = cnt_sum[0][0] # 右上角点坐标\r\n top_left = cnt_sum[0][0] # 左上角点坐标\r\n # #杨巍改的算矩形四个顶点位置的方法\r\n # left_x=cnt_sum[0][0][0]\r\n # right_x=cnt_sum[0][0][0]\r\n # up_y=cnt_sum[0][0][1]\r\n # down_y=cnt_sum[0][0][1]\r\n for c in cnt_sum:\r\n if c[0][0] + 1 * (r_h - c[0][1]) < bottom_left[0] + 1 * (r_h - bottom_left[1]):\r\n bottom_left = c[0]\r\n if c[0][0] + 1 * c[0][1] > bottom_right[0] + 1 * bottom_right[1]:\r\n bottom_right = c[0]\r\n\r\n if c[0][0] + 3 * c[0][1] < top_left[0] + 3 * top_left[1]:\r\n top_left = c[0]\r\n if (r_w - c[0][0]) + 3 * c[0][1] < (r_w - top_right[0]) + 3 * top_right[1]:\r\n top_right = c[0]\r\n # if c[0][0] < left_x:\r\n # left_point = c[0]\r\n # if c[0][0] > right_x:\r\n # right_point = c[0]\r\n # if c[0][1]down_y:\r\n # down_point = c[0]\r\n\r\n # if debug:\r\n # handling = ChestOrg_img.copy()\r\n # cv2.circle(handling, (c[0][0], c[0][1]), 5, [0, 255, 0], 2)\r\n # cv2.circle(handling, (bottom_left[0], bottom_left[1]), 5, [255, 255, 0], 2)\r\n # 
cv2.circle(handling, (bottom_right[0], bottom_right[1]), 5, [255, 0, 255], 2)\r\n # cv2.imshow('handling', handling) # 显示图像\r\n # cv2.waitKey(2)\r\n\r\n bottomcenter_x = (bottom_left[0] + bottom_right[0]) / 2 # 得到bottom中心坐标\r\n bottomcenter_y = (bottom_left[1] + bottom_right[1]) / 2\r\n\r\n topcenter_x = (top_right[0] + top_left[0]) / 2 # 得到top中心坐标\r\n topcenter_y = (top_left[1] + top_right[1]) / 2\r\n\r\n bottom_angle = -math.atan(\r\n (bottom_right[1] - bottom_left[1]) / (bottom_right[0] - bottom_left[0])) * 180.0 / math.pi\r\n top_angle = -math.atan((top_right[1] - top_left[1]) / (top_right[0] - top_left[0])) * 180.0 / math.pi\r\n if math.fabs(topcenter_x - bottomcenter_x) <= 1: # 得到连线的角度\r\n T_B_angle = 90\r\n else:\r\n T_B_angle = - math.atan(\r\n (topcenter_y - bottomcenter_y) / (topcenter_x - bottomcenter_x)) * 180.0 / math.pi\r\n\r\n if img_debug:\r\n detect_point=[[230,520],[250,520],[250,540],[230,540]]\r\n cv2.drawContours(frame_copy, [box], 0, (0, 255, 0), 2) # 将大矩形画在图上\r\n cv2.line(frame_copy, (bottom_left[0], bottom_left[1]), (bottom_right[0], bottom_right[1]),\r\n (255, 255, 0), thickness=2)\r\n cv2.line(frame_copy, (top_left[0], top_left[1]), (top_right[0], top_right[1]), (255, 255, 0),\r\n thickness=2)\r\n cv2.line(frame_copy, (int(bottomcenter_x), int(bottomcenter_y)),\r\n (int(topcenter_x), int(topcenter_y)), (255, 255, 255), thickness=2) # T_B_line\r\n\r\n cv2.putText(frame_copy, \"bottom_angle:\" + str(bottom_angle), (30, 450), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(frame_copy, \"top_angle:\" + str(top_angle), (30, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2)\r\n cv2.putText(frame_copy, \"T_B_angle:\" + str(T_B_angle), (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 255), 2)\r\n\r\n cv2.putText(frame_copy, \"bottomcenter_x:\" + str(bottomcenter_x), (30, 480),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(frame_copy, \"y:\" + str(int(bottomcenter_y)), (300, 480), cv2.FONT_HERSHEY_SIMPLEX, 0.65,\r\n (0, 0, 0), 2) # (0, 0, 255)BGR\r\n\r\n cv2.putText(frame_copy, \"topcenter_x:\" + str(topcenter_x), (30, 180), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n cv2.putText(frame_copy, \"topcenter_y:\" + str(int(topcenter_y)), (230, 180),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR\r\n\r\n cv2.putText(frame_copy, 'C_percent:' + str(C_percent) + '%', (30, 100), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.65, (0, 0, 0), 2)\r\n cv2.putText(frame_copy, \"step:\" + str(step), (30, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),\r\n 2) # (0, 0, 255)BGR\r\n\r\n cv2.circle(frame_copy, (int(topcenter_x), int(topcenter_y)), 5, [255, 0, 255], 2)\r\n cv2.circle(frame_copy, (int(bottomcenter_x), int(bottomcenter_y)), 5, [255, 0, 255], 2)\r\n cv2.circle(frame_copy, (top_right[0], top_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(frame_copy, (top_left[0], top_left[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(frame_copy, (bottom_right[0], bottom_right[1]), 5, [0, 255, 255], 2)\r\n cv2.circle(frame_copy, (bottom_left[0], bottom_left[1]), 5, [0, 255, 255], 2)\r\n cv2.imshow('Chest_Camera', frame_copy) # 显示图像\r\n cv2.imshow('chest_red_mask', Imask)\r\n cv2.waitKey(100)\r\n # 决策执行动作\r\n angle_ok_flag = False\r\n\r\n if step == 0: # 前进依据chest 调整大致位置,方向 看底边线调整角度\r\n\r\n if bottomcenter_y < 300:\r\n if bottom_angle > 3: # 需要左转\r\n if bottom_angle > 6:\r\n print(\"4085L 大左转一下 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"4088L bottom_angle 
> 3 需要小左转 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n elif bottom_angle < -3: # 需要右转\r\n if bottom_angle < -6:\r\n # print(\"4092L 右da旋转 turn001R < -6 \", Head_L_R_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"4095L bottom_angle < -3 需要小右转 turn001R \", bottom_angle)\r\n action_append(\"turn001R\")\r\n elif -3 <= bottom_angle <= 3: # 角度正确\r\n print(\"4098L 角度合适\")\r\n\r\n if topcenter_x > topcenter_x_setr or topcenter_x < topcenter_x_setl:\r\n if topcenter_x > topcenter_x_setr:\r\n print(\"微微右移,\", topcenter_x)\r\n action_append(\"Right3move\")\r\n elif topcenter_x < topcenter_x_setl:\r\n print(\"微微左移,\", topcenter_x)\r\n action_append(\"Left3move\")\r\n\r\n else:\r\n print(\"位置合适\")\r\n print(\"快步走,bottomcenter_y\", bottomcenter_y)\r\n action_append(\"fastForward04\")\r\n\r\n elif bottomcenter_y < 380:\r\n if bottom_angle > 3: # 需要左转\r\n if bottom_angle > 6:\r\n print(\"4116L 大左转一下 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"4119L bottom_angle > 3 需要小左转 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n elif bottom_angle < -3: # 需要右转\r\n if bottom_angle < -6:\r\n # print(\"4123L 右da旋转 turn001R < -6 \", Head_L_R_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"4126L bottom_angle < -3 需要小右转 turn001R \", bottom_angle)\r\n action_append(\"turn001R\")\r\n elif -3 <= bottom_angle <= 3: # 角度正确\r\n print(\"4129L 角度合适\")\r\n angle_ok_flag = True\r\n\r\n if angle_ok_flag:\r\n if topcenter_x > topcenter_x_setr or topcenter_x < topcenter_x_setl:\r\n if topcenter_x > topcenter_x_setr:\r\n print(\"微微右移,\", topcenter_x)\r\n action_append(\"Right02move\")\r\n elif topcenter_x < topcenter_x_setl:\r\n print(\"微微左移,\", topcenter_x)\r\n action_append(\"Left02move\")\r\n else:\r\n print(\"4141L 继续前行 forwardSlow0403\", bottomcenter_y)\r\n action_append(\"forwardSlow0403\")\r\n\r\n elif 380 <= bottomcenter_y < 430:\r\n if bottom_angle > 3: # 需要左转\r\n if bottom_angle > 6:\r\n print(\"4147L 大左转一下 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"4150L bottom_angle > 3 需要小左转 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n elif bottom_angle < -3: # 需要右转\r\n if bottom_angle < -6:\r\n # print(\"4154L 右da旋转 turn001R < -6 \", Head_L_R_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"4157L bottom_angle < -3 需要小右转 turn001R \", bottom_angle)\r\n action_append(\"turn001R\")\r\n elif -3 <= bottom_angle <= 3: # 角度正确\r\n print(\"4160L 角度合适\")\r\n angle_ok_flag = True\r\n\r\n if angle_ok_flag:\r\n if topcenter_x > topcenter_x_setr or topcenter_x < topcenter_x_setl:\r\n if topcenter_x > topcenter_x_setr:\r\n print(\"微微右移,\", topcenter_x)\r\n action_append(\"Right02move\")\r\n elif topcenter_x < topcenter_x_setl:\r\n print(\"微微左移,\", topcenter_x)\r\n action_append(\"Left02move\")\r\n else:\r\n print(\"4172L 变小步继续前行 Forwalk00\", bottomcenter_y)\r\n action_append(\"Forwalk01\")\r\n\r\n elif 430 <= bottomcenter_y <= 540:\r\n if bottom_angle > 3: # 需要左转\r\n if bottom_angle > 6:\r\n print(\"4178L 大左转一下 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n else:\r\n print(\"4181L bottom_angle > 3 需要小左转 turn001L \", bottom_angle)\r\n action_append(\"turn001L\")\r\n elif bottom_angle < -3: # 需要右转\r\n if bottom_angle < -6:\r\n print(\"4185L 右da旋转 turn001R < -6 \", bottom_angle)\r\n action_append(\"turn001R\")\r\n else:\r\n print(\"4188L bottom_angle < -3 需要小右转 turn001R \", bottom_angle)\r\n action_append(\"turn001R\")\r\n elif -3 <= bottom_angle <= 3: # 角度正确\r\n print(\"4191L 
角度合适\")\r\n angle_ok_flag = True\r\n\r\n if angle_ok_flag:\r\n if topcenter_x > topcenter_x_setr or topcenter_x < topcenter_x_setl:\r\n if topcenter_x > topcenter_x_setr:\r\n print(\"微微右移,\", topcenter_x)\r\n action_append(\"Right02move\")\r\n elif topcenter_x < topcenter_x_setl:\r\n print(\"微微左移,\", topcenter_x)\r\n action_append(\"Left02move\")\r\n else:\r\n print(\"4203L 到达上台阶边沿,变前挪动 Forwalk00 bottomcenter_x:\", bottomcenter_x)\r\n action_append(\"Forwalk00\")\r\n\r\n\r\n elif bottomcenter_y > 540:\r\n print(\"然后开始第二步------上第一节台阶\")\r\n step = 1\r\n angle_ok_flag = False\r\n else:\r\n print(\"error 前进 C_percent:\", C_percent)\r\n print(\"bottomcenter_y:\", bottomcenter_y)\r\n\r\n elif step == 1: # 看中线调整角度上台阶----第一阶\r\n\r\n if top_angle < -2: # 右转\r\n print(\"4236L 右转 turn001R top_angle:\", top_angle)\r\n action_append(\"turn001R\")\r\n time.sleep(0.5) # timefftest\r\n elif top_angle > 2: # 左转\r\n print(\"4240L 左转 turn001L top_angle:\", top_angle)\r\n action_append(\"turn001L\")\r\n time.sleep(0.5) # timefftest\r\n elif -2 <= top_angle <= 2:\r\n print(\"前走一小步\")\r\n action_append(\"Forwalk00\")\r\n time.sleep(0.5)\r\n print(\"4247L 上台阶 上台阶 UpBridge\")\r\n action_append(\"UpBridge16\")\r\n print(\"————————————————————————开始上第二节台阶\")\r\n time.sleep(0.5)\r\n\r\n step = 2\r\n\r\n\r\n elif step == 2: # 看中线调整角度上台阶----第二阶\r\n # if 0 < T_B_angle < 85: # 右转\r\n # print(\"4257L 右转 turn001R T_B_angle:\",T_B_angle)\r\n # action_append(\"turn001R\")\r\n # time.sleep(0.5) # timefftest\r\n # elif -85 < T_B_angle < 0: # 左转\r\n # print(\"4261L 左转 turn001L T_B_angle:\",T_B_angle)\r\n # action_append(\"turn001L\")\r\n # time.sleep(0.5) # timefftest\r\n # elif T_B_angle <= -85 or T_B_angle >= 85:\r\n # print(\"4265L 上台阶 上台阶 UpBridge\")\r\n # action_append(\"UpBridge\")\r\n\r\n # print(\"————————————————————————开始上第三节台阶\")\r\n # time.sleep(0.5)\r\n\r\n # step = 3\r\n\r\n if top_angle < -2: # 右转\r\n print(\"4274L 右转 turn001R top_angle:\", top_angle)\r\n action_append(\"turn001R\")\r\n time.sleep(0.5) # timefftest\r\n elif top_angle > 2: # 左转\r\n print(\"4278L 左转 turn001L top_angle:\", top_angle)\r\n action_append(\"turn001L\")\r\n time.sleep(0.5) # timefftest\r\n elif -2 <= top_angle <= 2:\r\n print(\"前走一小步\")\r\n action_append(\"Forwalk00\")\r\n time.sleep(0.5)\r\n print(\"4282L 上台阶 上台阶 UpBridge\")\r\n action_append(\"UpBridge16\")\r\n print(\"————————————————————————开始上第三节台阶\")\r\n time.sleep(0.5)\r\n\r\n step = 3\r\n\r\n elif step == 3: # 看中线调整角度上台阶----第三阶\r\n\r\n\r\n if top_angle < -2: # 右转\r\n print(\"4310L 右转 turn001R top_angle:\", top_angle)\r\n action_append(\"turn001R\")\r\n time.sleep(0.5) # timefftest\r\n elif top_angle > 2: # 左转\r\n print(\"4314L 左转 turn001L top_angle:\", top_angle)\r\n action_append(\"turn001L\")\r\n time.sleep(0.5) # timefftest\r\n elif -2 <= top_angle <= 2:\r\n print(\"前走一小步\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n time.sleep(0.5)\r\n print(\"4318L 上台阶 上台阶 UpBridge\")\r\n action_append(\"UpBridge16\")\r\n\r\n print(\"————————————————————————上台阶完毕,开始下台阶\")\r\n print(\"————————————————————————开始下第一节台阶\")\r\n time.sleep(0.5)\r\n\r\n step = 4\r\n\r\n # print(\"4328L 上台阶后,前进一步\")\r\n # action_append(\"Forwalk01\")\r\n\r\n # print(\"按键继续。。。\")\r\n # cv2.waitKey(0)\r\n\r\n elif step == 4: # 调整角度下台阶----第三阶\r\n time.sleep(0.5)\r\n if top_angle > 2: # 需要左转\r\n print(\"4337 top_angle > 2 需要小左转 \")\r\n action_append(\"turn001L\")\r\n elif top_angle < -2: # 需要右转\r\n print(\"4340 top_angle < -2 需要小右转 \")\r\n action_append(\"turn001R\")\r\n elif -2 <= top_angle 
<= 2: # 角度正确\r\n print(\"角度合适\")\r\n # percent_downstep1=two_color_analyze(frame,dec_point1,dec_point2,'red_floor','green_floor')\r\n # if percent_downstep1[1]>0.1 and percent_downstep1[3]>0.1:\r\n # if percent_downstep1[0]>0.1 and percent_downstep1[2]>0.1:\r\n # print(\"第一次下台阶啦\")\r\n # action_append(\"DownBridge\")\r\n # time.sleep(0.5)\r\n # step=5\r\n # else:\r\n # print(\"好像超出去太多了,退一点吧\")\r\n # action_append(\"Back3Run\")\r\n # else:\r\n # print(\"还不够,要再往前面走点\")\r\n # action_append(\"Forwalk00\")\r\n\r\n\r\n # 下面这个是哈工大代码原判断语句\r\n if topcenter_y < 385:\r\n print(\"微微前挪\")\r\n action_append(\"Forwalk00\")\r\n elif topcenter_y > 385:\r\n print(\"4348L 下台阶 下台阶 DownBridge topcenter_y:\", topcenter_y)\r\n action_append(\"DownBridge\")\r\n print(\"————————————————————————开始下第二节台阶\")\r\n time.sleep(0.5)\r\n step = 5\r\n\r\n elif step == 5: # 调整角度下台阶----第二阶\r\n time.sleep(0.5)\r\n if top_angle > 2: # 需要左转\r\n print(\"4357 top_angle > 2 需要小左转 \")\r\n action_append(\"turn001L\")\r\n elif top_angle < -2: # 需要右转\r\n print(\"4360 top_angle < -2 需要小右转 \")\r\n action_append(\"turn001R\")\r\n elif -2 <= top_angle <= 2: # 角度正确\r\n print(\"角度合适\")\r\n # percent_downstep2 = two_color_analyze(frame,dec_point1, dec_point2, 'green_floor', 'blue_floor')\r\n # if percent_downstep2[1] > 0.1 and percent_downstep2[3] > 0.1:\r\n # if percent_downstep2[0] > 0.1 and percent_downstep2[2] > 0.1:\r\n # print(\"第2次下台阶啦\")\r\n # action_append(\"DownBridge\")\r\n # time.sleep(0.5)\r\n # step = 5\r\n # else:\r\n # print(\"好像超出去太多了,退一点吧\")\r\n # action_append(\"Back3Run\")\r\n # else:\r\n # print(\"还不够,要再往前面走点\")\r\n # action_append(\"Forwalk00\")\r\n if topcenter_y < 380:\r\n print(\"微微前挪\")\r\n action_append(\"Forwalk00\")\r\n elif topcenter_y > 380:\r\n print(\"4368L 下台阶 下台阶 DownBridge topcenter_y:\", topcenter_y)\r\n action_append(\"DownBridge\")\r\n print(\"————————————————————————开始下第二节台阶\")\r\n time.sleep(0.5)\r\n step = 6\r\n\r\n elif step == 6: # 调整角度下斜坡----第三阶\r\n time.sleep(0.5)\r\n\r\n if top_angle > 2: # 需要左转\r\n print(\"4377 top_angle > 2 需要小左转 y:\", topcenter_y)\r\n action_append(\"turn001L\")\r\n elif top_angle < -2: # 需要右转\r\n print(\"4380 top_angle < -2 需要小右转 y:\", topcenter_y)\r\n action_append(\"turn001R\")\r\n elif -2 <= top_angle <= 2: # 角度正确\r\n\r\n print(\"角度合适\")\r\n print(\"topcenter_x=\", topcenter_x)\r\n if topcenter_x > topcenter_x_setr or topcenter_x < topcenter_x_setl:\r\n if topcenter_x > topcenter_x_setr:\r\n print(\"微微右移\", topcenter_x)\r\n action_append(\"Right02move\")\r\n elif topcenter_x < topcenter_x_setl:\r\n print(\"微微左移\", topcenter_x)\r\n action_append(\"Left02move\")\r\n\r\n # print(\"微微前挪 y:\",topcenter_y)\r\n # action_append(\"Forwalk00\")\r\n else:\r\n print(\"位置合适\")\r\n print(\"下斜坡,step=6.1,后倾预备\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Stand\")\r\n action_append(\"actBeforeXP\")\r\n step = 6.1\r\n\r\n elif step == 6.1:\r\n if area_max > 20:\r\n print(\"面积大于20则继续下坡走,XPforwalkSlow\")\r\n action_append(\"XPforwalkSlow\")\r\n else:\r\n print(\"再走几步就XP结束了\")\r\n\r\n action_append(\"XPforwalkSlow\")\r\n action_append(\"XPforwalkSlow\")\r\n action_append(\"XPforwalkSlow\")\r\n action_append(\"XPforwalkSlow\")\r\n step = 7\r\n\r\n elif step == 7: # 完成\r\n\r\n print(\"899L 完成floor\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n break\r\n elif step == 6.1:\r\n print(\"找不到轮廓了,证明下坡结束了\")\r\n action_append(\"XPforwalkSlow\")\r\n 
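# floor() above (like the hole-edge routine earlier) locates the four extreme
# contour points by minimising or maximising weighted sums of x and y: weight
# 1 on the bottom edge, weight 3 on the top edge, which biases the scan toward
# the upper corners. The same scan vectorised over an OpenCV contour of shape
# (N, 1, 2) -- a sketch with an illustrative name, not a change to the file:
import numpy as np

def extreme_corners(cnt, r_w, r_h):
    pts = cnt.reshape(-1, 2).astype(np.int64)
    x, y = pts[:, 0], pts[:, 1]
    bottom_left = pts[np.argmin(x + (r_h - y))]    # small x, large y
    bottom_right = pts[np.argmax(x + y)]           # large x, large y
    top_left = pts[np.argmin(x + 3 * y)]           # small x, small y
    top_right = pts[np.argmin((r_w - x) + 3 * y)]  # large x, small y
    return top_left, top_right, bottom_left, bottom_right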
action_append(\"XPforwalkSlow\")\r\n action_append(\"XPforwalkSlow\")\r\n action_append(\"XPforwalkSlow\")\r\n step = 7\r\n elif step == 7:\r\n print(\"899L 完成floor\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n break\r\n else:\r\n print(\"未找到第一届蓝色台阶\")\r\n action_append(\"Forwalk00\")\r\n\r\n\r\n###################### 起 点-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-\r\ndef start_door():\r\n global HeadOrg_img, ChestOrg_img, state, state_sel, step, img_debug, door_flag\r\n start_door_flag = 0\r\n state_sel = 'start_door'\r\n state = 1\r\n if state == 1: # 初始化\r\n print(\"/-/-/-/-/-/-/-/-/-进入door\")\r\n step = 0\r\n else:\r\n pass\r\n\r\n while state == 1:\r\n\r\n if step == 0: # 判断门是否抬起\r\n if HeadOrg_img is None:\r\n continue\r\n\r\n org_img_copy = HeadOrg_img.copy()\r\n # org_img_copy = np.rot90(org_img_copy)\r\n handling = org_img_copy.copy()\r\n\r\n border = cv2.copyMakeBorder(handling, 12, 12, 16, 16, borderType=cv2.BORDER_CONSTANT,\r\n value=(255, 255, 255)) # 扩展白边,防止边界无法识别\r\n handling = cv2.resize(border, (chest_r_width, chest_r_height), interpolation=cv2.INTER_CUBIC) # 将图片缩放\r\n frame_gauss = cv2.GaussianBlur(handling, (21, 21), 0) # 高斯模糊\r\n frame_hsv = cv2.cvtColor(frame_gauss, cv2.COLOR_BGR2HSV) # 将图片转换到HSV空间\r\n\r\n frame_door_yellow = cv2.inRange(frame_hsv, color_range['yellow_door'][0],\r\n color_range['yellow_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n frame_door_black = cv2.inRange(frame_hsv, color_range['black_door'][0],\r\n color_range['black_door'][1]) # 对原图像和掩模(颜色的字典)进行位运算\r\n\r\n frame_door = cv2.add(frame_door_yellow, frame_door_black)\r\n open_pic = cv2.morphologyEx(frame_door_yellow, cv2.MORPH_OPEN, np.ones((13, 13), np.uint8)) # 开运算 去噪点\r\n closed_pic = cv2.morphologyEx(open_pic, cv2.MORPH_CLOSE, np.ones((50, 50), np.uint8)) # 闭运算 封闭连接\r\n\r\n (image, contours, hierarchy) = cv2.findContours(closed_pic, cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_NONE) # 找出轮廓\r\n areaMaxContour, area_max = getAreaMaxContour1(contours) # 找出最大轮廓\r\n percent = round(100 * area_max / (chest_r_width * chest_r_height), 2) # 最大轮廓的百分比\r\n if areaMaxContour is not None:\r\n rect = cv2.minAreaRect(areaMaxContour) # 矩形框选\r\n box = np.int0(cv2.boxPoints(rect)) # 点的坐标\r\n if img_debug:\r\n cv2.drawContours(handling, [box], 0, (153, 200, 0), 2) # 将最小外接矩形画在图上\r\n\r\n if img_debug:\r\n cv2.putText(handling, 'area: ' + str(percent) + '%', (30, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.imshow('handling', handling) # 显示图像\r\n\r\n # cv2.imshow('frame_door_yellow', frame_door_yellow) # 显示图像\r\n # cv2.imshow('frame_door_black', frame_door_black) # 显示图像\r\n\r\n k = cv2.waitKey(10)\r\n if k == 27:\r\n cv2.destroyWindow('open_after_closed')\r\n cv2.destroyWindow('handling')\r\n break\r\n elif k == ord('s'):\r\n print(\"save picture123\")\r\n cv2.imwrite(\"picture123.jpg\", org_img_copy) # 保存图片\r\n\r\n # 根据比例得到是否前进的信息\r\n if percent > 0.8: # 检测到横杆\r\n print(percent, \"%\")\r\n print(\"有障碍 等待 contours len:\", len(contours))\r\n action_append(\"Stand\")\r\n start_door_flag = 1\r\n time.sleep(3)\r\n else:\r\n if start_door_flag == 0:\r\n print(percent)\r\n print(\"暂未发现横杆 等待检测\")\r\n action_append(\"Stand\")\r\n\r\n elif start_door_flag == 1 and percent < 1:\r\n print(percent)\r\n # print(\"3894L 执行3步\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n\r\n print(\"3899L 执行快走555\")\r\n 
action_append(\"fastForward03\")\r\n action_append(\"forwardSlow0403\")\r\n action_append(\"forwardSlow0403\")\r\n # action_append(\"forwardSlow0403\")\r\n action_append(\"Stand\")\r\n step = 1\r\n\r\n else:\r\n print(percent, \"%\")\r\n print(\"有障碍 等待 contours len:\", len(contours))\r\n action_append(\"Stand\")\r\n time.sleep(3)\r\n\r\n elif step == 1:\r\n break\r\n\r\n\r\ndef get_img():\r\n global ChestOrg_img, HeadOrg_img, HeadOrg_img, chest_ret\r\n global ret\r\n global cap_chest\r\n while True:\r\n if 1:\r\n # if not img_debug:\r\n if cap_chest.isOpened():\r\n\r\n chest_ret, ChestOrg_img = cap_chest.read()\r\n ret, HeadOrg_img = cap_head.read()\r\n if (chest_ret == False) or (ret == False):\r\n print(\"ret fail ------------------\")\r\n if HeadOrg_img is None:\r\n print(\"HeadOrg_img error\")\r\n if ChestOrg_img is None:\r\n print(\"ChestOrg_img error\")\r\n\r\n else:\r\n time.sleep(0.01)\r\n ret = True\r\n print(\"4568L pic error \")\r\n\r\n else:\r\n ChestOrg_img = cv2.imread(\"../img_dbg/1.jpg\")\r\n\r\n\r\n# 读取图像线程\r\n\r\nth1 = threading.Thread(target=get_img)\r\nth1.setDaemon(True)\r\nth1.start()\r\n\r\n\r\ndef move_action():\r\n global org_img\r\n global step, level\r\n global golf_angle_hole\r\n global golf_angle_ball, golf_angle\r\n global golf_dis, golf_dis_y\r\n global golf_angle_flag, golf_dis_flag\r\n global golf_angle_start, golf_dis_start\r\n global golf_ok\r\n global golf_hole, golf_ball\r\n\r\n if real_test:\r\n CMDcontrol.CMD_transfer()\r\n\r\n\r\n# 动作执行线程\r\nth2 = threading.Thread(target=move_action)\r\nth2.setDaemon(True)\r\nth2.start()\r\n\r\nif __name__ == '__main__':\r\n if real_test:\r\n while len(CMDcontrol.action_list) > 0:\r\n print(\"等待启动\")\r\n time.sleep(1)\r\n action_append(\"HeadTurnMM\") # yw:headturnmm 是把头部转到100位置,应该是归零(归位)\r\n\r\n while True:\r\n if ChestOrg_img is not None and chest_ret:\r\n k = cv2.waitKey(10) # yw:换行符\r\n if k == 27: # yw:ESC键\r\n cv2.destroyWindow('camera_test')\r\n break\r\n\r\n if single_debug: # yw:每执行一次动作停顿一下\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n print(\"start door START\")\r\n t1 = cv2.getTickCount() # yw:这个函数返回CPU的时间(但是得到的是周期数,要换成秒的话��要除以频率),取两次时间就可以得到时间差。t2-t1\r\n f = cv2.getTickFrequency() # yw:这个函数返回CPU时间的频率。\r\n start_door()\r\n t2 = cv2.getTickCount()\r\n print(\"start door Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n print(\"Single log bridge START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n Greenbridge('green_bridge')\r\n\r\n t2 = cv2.getTickCount()\r\n print(\"Single log bridge Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n cv2.destroyAllWindows()\r\n\r\n # yw:下面这几段代码,直接调用了相应的关卡函数,因为在关卡内部有识别该关卡的方法。\r\n print(\"Through obstacle START\") # yw:过雷阵\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n obstacle()\r\n t2 = cv2.getTickCount()\r\n print(\"Through obstacle Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n print(\"Through baffle START\") # yw:过挡板\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n baffle()\r\n t2 = cv2.getTickCount()\r\n print(\"Through baffle Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n 
cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n print(\"Into Door START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n into_the_door()\r\n t2 = cv2.getTickCount()\r\n print(\"Into Door START Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n while True:\r\n\r\n i = recognize()\r\n if i == 5 or i == 9:\r\n flag = 1\r\n print(\"Single log bridge START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n if i == 5:\r\n Greenbridge('green_bridge')\r\n elif i == 9:\r\n Greenbridge('blue_bridge')\r\n t2 = cv2.getTickCount()\r\n print(\"Single log bridge Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n cv2.destroyAllWindows()\r\n break\r\n\r\n elif i == 1 or i == 10:\r\n flag = 2\r\n print(\"Through Pit START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n if i == 1:\r\n hole_edge('green_hole_chest')\r\n elif i == 10:\r\n hole_edge('blue_hole_chest')\r\n t2 = cv2.getTickCount()\r\n print(\"Through Pit Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n cv2.destroyAllWindows()\r\n break\r\n elif i == 7:\r\n flag = 3\r\n print(\"判断当前关卡为台阶关卡\")\r\n print(\"Floor START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n floor()\r\n t2 = cv2.getTickCount()\r\n print(\"Floor Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n elif i == 0:\r\n print(\"Error! 未识别到有效关卡\")\r\n action_append(\"Forwalk00\")\r\n # cv2.imshow(HeadOrg_img)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n continue # yw:这里便是有无限循环的可能了,如果一直识别不到关卡,应该做一些其他动作来跳出该循环。可在进入该循环时设一个t0,t如果大于比如10s那么跳出循环去做点什么。\r\n\r\n print(\"Kick ball START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n kick_ball()\r\n t2 = cv2.getTickCount()\r\n print(\"Kick ball Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n while True:\r\n\r\n i = recognize()\r\n if i == 5 or i == 9:\r\n flag = 1\r\n print(\"Single log bridge START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n if i == 5:\r\n Greenbridge('green_bridge')\r\n elif i == 9:\r\n Greenbridge('blue_bridge')\r\n t2 = cv2.getTickCount()\r\n print(\"Single log bridge Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n cv2.destroyAllWindows()\r\n break\r\n\r\n elif i == 1 or i == 10:\r\n flag = 2\r\n print(\"Through Pit START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n if i == 1:\r\n hole_edge('green_hole_chest')\r\n elif i == 10:\r\n hole_edge('blue_hole_chest')\r\n t2 = cv2.getTickCount()\r\n print(\"Through Pit Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n cv2.destroyAllWindows()\r\n break\r\n elif i == 7:\r\n flag = 3\r\n print(\"判断当前关卡为台阶关卡\")\r\n print(\"Floor START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n floor()\r\n t2 = cv2.getTickCount()\r\n print(\"Floor Execution time: 
{}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n elif i == 0:\r\n print(\"Error! 未识别到有效关卡\")\r\n action_append(\"Forwalk00\")\r\n action_append(\"Forwalk00\")\r\n # cv2.imshow(HeadOrg_img)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n continue # yw:这里便是有无限循环的可能了,如果一直识别不到关卡,应该做一些其他动作来跳出该循环。可在进入该循环时设一个t0,t如果大于比如10s那么跳出循环去做点什么。\r\n\r\n print(\"End Door START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n end_door()\r\n t2 = cv2.getTickCount()\r\n print(\"End Door Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n print(\"End Door START\")\r\n t1 = cv2.getTickCount()\r\n f = cv2.getTickFrequency()\r\n end_door()\r\n t2 = cv2.getTickCount()\r\n print(\"End Door Execution time: {}\".format((t2 - t1) / f))\r\n if single_debug:\r\n print(\"Press any key to continue...\")\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n while (1):\r\n print(\"结束\")\r\n time.sleep(10000)\r\n\r\n\r\n else:\r\n print('image is empty chest_ret:', chest_ret)\r\n time.sleep(0.01)\r\n cv2.destroyAllWindows()\r\n","sub_path":"avatar_参赛版3.py","file_name":"avatar_参赛版3.py","file_ext":"py","file_size_in_byte":216949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225667564","text":"#!/usr/bin/env python\n'''\nAuthor: David Pierce Walker-Howell\n \n
    \"\"\".format(_(u'Применить'))))\n\n schools = course.school_set.all()\n\n context = {\n 'course': course,\n 'user_is_teacher': course.user_is_teacher(request.user),\n 'filter': f,\n 'school': schools[0] if schools else '',\n }\n return render_to_response('courses/queue.html', context, context_instance=RequestContext(request))\n\n\n@login_required\ndef gradebook(request, course_id, task_id=None, group_id=None):\n \"\"\"Page with course related information\n contexts:\n - tasklist\n - tasks_description\n \"\"\"\n user = request.user\n if not user.get_profile().is_active():\n raise PermissionDenied\n\n course = get_object_or_404(Course, id=course_id)\n if task_id:\n task = get_object_or_404(Task, id=task_id)\n else:\n task = None\n\n if group_id:\n group = get_object_or_404(Group, id=group_id)\n else:\n group = None\n\n schools = course.school_set.all()\n\n if course.private and not course.user_is_attended(request.user):\n return render_to_response('courses/course_forbidden.html',\n {\"course\": course,\n 'school': schools[0] if schools else '',\n 'invite_form': InviteActivationForm()},\n context_instance=RequestContext(request))\n\n tasklist_context = tasklist_shad_cpp(request, course, task, group)\n\n context = tasklist_context\n context['tasklist_template'] = 'courses/tasklist/shad_cpp.html'\n context['task_types'] = dict(Task().TASK_TYPE_CHOICES).items()\n context['group_gradebook'] = True if group else False\n context['show_hidden_tasks'] = request.session.get(\n str(request.user.id) + '_' + str(course.id) + '_show_hidden_tasks', False)\n context['school'] = schools[0] if schools else ''\n\n return render_to_response('courses/gradebook.html', context, context_instance=RequestContext(request))\n\n\n@login_required\ndef course_page(request, course_id):\n \"\"\"Page with course related information\n contexts:\n - tasklist\n - tasks_description\n \"\"\"\n user = request.user\n if not user.get_profile().is_active():\n raise PermissionDenied\n\n course = get_object_or_404(Course, id=course_id)\n schools = course.school_set.all()\n\n if course.private and not course.user_is_attended(request.user):\n return render_to_response('courses/course_forbidden.html',\n {\"course\": course,\n 'school': schools[0] if schools else '',\n 'invite_form': InviteActivationForm()},\n context_instance=RequestContext(request))\n course.can_edit = course.user_can_edit_course(user)\n if course.can_edit:\n groups = course.groups.all().order_by('name')\n tasks = [{'group': tgr.group, 'task': tgr.task} for tgr in\n TaskGroupRelations.objects.filter(task__course=course, group__in=groups, deleted=False).order_by(\n 'group', 'position')]\n else:\n groups = Group.objects.filter(students=user, course__in=[course])\n tasks = set([tgr.task for tgr in\n TaskGroupRelations.objects.filter(task__course=course, group__in=groups, deleted=False).order_by(\n 'group', 'position')])\n\n if StudentCourseMark.objects.filter(student=user, course=course):\n mark = StudentCourseMark.objects.get(student=user, course=course).mark\n else:\n mark = None\n\n context = {}\n\n context['course'] = course\n context['tasks'] = tasks\n context['mark'] = mark if mark else '--'\n context['visible_queue'] = course.user_can_see_queue(user),\n context['user_is_teacher'] = course.user_is_teacher(user)\n context['task_types'] = dict(Task().TASK_TYPE_CHOICES).items()\n context['show_hidden_tasks'] = request.session.get(\n str(request.user.id) + '_' + str(course.id) + '_show_hidden_tasks', False)\n context['school'] = schools[0] if schools else ''\n\n 
return render_to_response('courses/course.html', context, context_instance=RequestContext(request))\n\n\n@login_required\ndef seminar_page(request, course_id, task_id):\n \"\"\"Page with course related information\n contexts:\n - tasklist\n - tasks_description\n \"\"\"\n\n user = request.user\n if not user.get_profile().is_active():\n raise PermissionDenied\n\n course = get_object_or_404(Course, id=course_id)\n task = get_object_or_404(Task, id=task_id)\n schools = course.school_set.all()\n\n if course.private and not course.user_is_attended(request.user):\n return render_to_response('courses/course_forbidden.html',\n {\"course\": course,\n 'school': schools[0] if schools else '',\n 'invite_form': InviteActivationForm()},\n context_instance=RequestContext(request))\n course.can_edit = course.user_can_edit_course(user)\n\n if course.can_edit:\n groups = task.groups.all().order_by('name')\n tasks = [{'group': tgr.group, 'task': tgr.task} for tgr in\n TaskGroupRelations.objects.filter(task__parent_task=task, group__in=groups, deleted=False).order_by(\n 'group',\n 'position')]\n else:\n groups = Group.objects.filter(students=user, course__in=[course])\n tasks = set([tgr.task for tgr in\n TaskGroupRelations.objects.filter(task__parent_task=task, group__in=groups,\n deleted=False).order_by('group', 'position')])\n if Issue.objects.filter(task=task, student=user):\n mark = Issue.objects.get(task=task, student=user).mark\n else:\n mark = None\n\n context = {}\n context['course'] = course\n context['tasks'] = tasks\n context['mark'] = mark if mark else '--'\n context['visible_queue'] = course.user_can_see_queue(user),\n context['user_is_teacher'] = course.user_is_teacher(user)\n context['seminar'] = task\n context['task_types'] = dict(Task().TASK_TYPE_CHOICES).items()\n context['show_hidden_tasks'] = request.session.get(\n str(request.user.id) + '_' + str(course.id) + '_show_hidden_tasks', False)\n context['school'] = schools[0] if schools else ''\n\n return render_to_response('courses/course.html', context, context_instance=RequestContext(request))\n\n\ndef tasklist_shad_cpp(request, course, seminar=None, group=None):\n user = request.user\n user_is_attended = False\n user_is_attended_special_course = False\n is_seminar = False\n\n if seminar:\n is_seminar = True\n groups = seminar.groups.all().order_by('name')\n else:\n groups = course.groups.all().order_by('name')\n\n course.can_edit = course.user_can_edit_course(user)\n if course.can_be_chosen_by_extern:\n course.groups.add(course.group_with_extern)\n\n if group:\n groups = [group]\n\n group_x_student_x_task_takens = OrderedDict()\n group_x_task_list = {}\n group_x_max_score = {}\n default_teacher = {}\n show_hidden_tasks = request.session.get(str(request.user.id) + '_' + str(course.id) + '_show_hidden_tasks', False)\n\n for group in groups:\n student_x_task_x_task_takens = {}\n\n if is_seminar:\n tasks_for_groups = TaskGroupRelations.objects.filter(task__course=course, group=group, deleted=False,\n task__parent_task=seminar).order_by(\n 'position').select_related('task')\n else:\n tasks_for_groups = TaskGroupRelations.objects.filter(task__course=course, group=group, deleted=False,\n task__parent_task=None).order_by(\n 'position').select_related('task')\n\n if show_hidden_tasks:\n group_x_task_list[group] = [x.task for x in tasks_for_groups]\n else:\n group_x_task_list[group] = [x.task for x in tasks_for_groups if not x.task.is_hidden]\n\n group_x_max_score.setdefault(group, 0)\n\n for task in group_x_task_list[group]:\n\n if not 
task.is_hidden:\n if task.type == task.TYPE_SEMINAR:\n group_x_max_score[group] += sum([x.score_max for x in task.children.all()])\n else:\n group_x_max_score[group] += task.score_max\n if task.task_text is None:\n task.task_text = ''\n\n issues_students_in_group = Issue.objects.filter(task__in=group_x_task_list[group]).filter(\n student__group__in=[group]).order_by('student').select_related()\n\n from collections import defaultdict\n issues_x_student = defaultdict(list)\n for issue in issues_students_in_group.all():\n student_id = issue.student.id\n issues_x_student[student_id].append(issue)\n\n for student in group.students.filter(is_active=True):\n if user == student:\n user_is_attended = True\n user_is_attended_special_course = True\n\n student_task_takens = issues_x_student[student.id]\n\n task_x_task_taken = {}\n student_summ_scores = 0\n for task_taken in student_task_takens:\n task_x_task_taken[task_taken.task.id] = task_taken\n if not task_taken.task.is_hidden:\n student_summ_scores += task_taken.mark\n\n student_x_task_x_task_takens[student] = (task_x_task_taken, student_summ_scores)\n\n group_x_student_x_task_takens[group] = student_x_task_x_task_takens\n\n try:\n default_teacher[group] = DefaultTeacher.objects.get(course=course, group=group).teacher\n except DefaultTeacher.DoesNotExist:\n default_teacher[group] = None\n\n group_x_student_information = OrderedDict()\n for group, student_x_task_x_task_takens in group_x_student_x_task_takens.iteritems():\n group_x_student_information.setdefault(group, [])\n\n for student in sorted(student_x_task_x_task_takens.keys(),\n key=lambda x: u\"{0} {1}\".format(x.last_name, x.first_name)):\n if user == student:\n user_is_attended = True\n elif not course.user_can_see_transcript(user, student):\n continue\n\n mark_id, course_mark = get_course_mark(course, group, student)\n\n group_x_student_information[group].append((student,\n student_x_task_x_task_takens[student][0],\n student_x_task_x_task_takens[student][1],\n mark_id,\n course_mark))\n\n context = {\n 'course': course,\n 'course_mark_system_vals': course.mark_system.marks.all() if course.mark_system else None,\n 'group_information': group_x_student_information,\n 'group_tasks': group_x_task_list,\n 'group_x_max_score': group_x_max_score,\n 'default_teacher': default_teacher,\n\n 'user': user,\n 'user_is_attended': user_is_attended,\n 'user_is_attended_special_course': user_is_attended_special_course,\n 'user_is_teacher': course.user_is_teacher(user),\n\n 'seminar': seminar,\n 'visible_queue': course.user_can_see_queue(user),\n 'visible_hide_button': Task.objects.filter(Q(course=course) & Q(is_hidden=True)).count(),\n 'show_hidden_tasks': show_hidden_tasks\n }\n\n return context\n\n\ndef get_tasklist_context(request, course):\n return tasklist_shad_cpp(request, course)\n\n\ndef get_course_mark(course, group, student):\n mark_id = -1\n course_mark = '--'\n\n try:\n student_course_mark = StudentCourseMark.objects.get(course=course, group=group, student=student)\n if student_course_mark.mark:\n mark_id = student_course_mark.mark.id\n course_mark = unicode(student_course_mark)\n except StudentCourseMark.DoesNotExist:\n pass\n\n return mark_id, course_mark\n\n\ndef courses_list(request, year=None):\n if year is None:\n year_object = get_current_year()\n else:\n year_object = get_object_or_404(Year, start_year=year)\n\n if year_object is None:\n raise Http404\n\n courses_list = Course.objects.filter(year=year_object).order_by('name')\n\n context = {\n 'courses_list': courses_list,\n 'year': 
year_object,\n }\n\n return render_to_response('course_list.html', context, context_instance=RequestContext(request))\n\n\ndef edit_course_information(request):\n user = request.user\n\n if not request.method == 'POST':\n return HttpResponseForbidden()\n\n for key in ['course_id', 'course_information']:\n if key not in request.POST:\n return HttpResponseForbidden()\n\n try:\n course_id = int(request.POST['course_id'])\n course_information = request.POST['course_information'].strip()\n except ValueError: # not int\n return HttpResponseForbidden()\n\n course = get_object_or_404(Course, id=course_id)\n\n if not course.user_can_edit_course(user):\n return HttpResponseForbidden()\n\n if course_information and not course_information.startswith(u'
<div class=\"not-sanitize\">'):\n course_information = u'<div class=\"not-sanitize\">' + course_information + u'</div>
    '\n course.information = course_information\n course.save()\n\n return HttpResponse(json.dumps({'info': course_information}),\n content_type=\"application/json\")\n\n\n@login_required\ndef set_spectial_course_attend(request):\n user = request.user\n if not request.method == 'POST':\n return HttpResponseForbidden()\n\n try:\n course_id = int(request.POST['course_id'])\n action = request.POST['action']\n except ValueError: # not int\n return HttpResponseForbidden()\n\n course = get_object_or_404(Course, id=course_id)\n\n if action == \"add\":\n course.add_user_to_group_with_extern(user)\n\n if action == \"remove\":\n course.remove_user_from_group_with_extern(user)\n\n return HttpResponse(\"OK\")\n\n\ndef default_teachers_generate_form(course, post_data=None):\n groups_teacher = {}\n groups_forms = {}\n groups = course.groups.all().order_by('name')\n\n for default_teacher in DefaultTeacher.objects.filter(course=course).filter(group__in=groups):\n groups_teacher[default_teacher.group.id] = default_teacher.teacher\n\n for group in groups:\n teacher = groups_teacher.get(group.id)\n groups_forms[group] = default_teacher_forms_factory(course, group, teacher, post_data)\n return groups_forms\n\n\ndef get_filename_extensions(course):\n extensions = FilenameExtension.objects.all().order_by('name')\n course_extensions = course.filename_extensions.all()\n return [(ext, True) if ext in course_extensions else (ext, False) for ext in extensions]\n\n\n@login_required\ndef course_settings(request, course_id):\n course = get_object_or_404(Course, id=course_id)\n if not course.user_is_teacher(request.user):\n return HttpResponseForbidden()\n\n schools = course.school_set.all()\n\n tasks_with_contest = {}\n if course.is_contest_integrated():\n for task in course.task_set.filter(contest_integrated=True, is_hidden=False):\n tasks_with_contest[task.contest_id] = tasks_with_contest.get(task.contest_id, list()) + [task]\n\n context = {'course': course,\n 'visible_queue': course.user_can_see_queue(request.user),\n 'user_is_teacher': course.user_is_teacher(request.user),\n 'school': schools[0] if schools else '',\n 'tasks_with_contest': tasks_with_contest,\n }\n\n if request.method != \"POST\":\n form = DefaultTeacherForm(course)\n context['form'] = form\n context['file_extensions'] = get_filename_extensions(course)\n return render_to_response('courses/settings.html', context, context_instance=RequestContext(request))\n\n form = DefaultTeacherForm(course, request.POST)\n context['form'] = form\n\n if not form.is_valid():\n context['file_extensions'] = get_filename_extensions(course)\n return render_to_response('courses/settings.html', context, context_instance=RequestContext(request))\n\n for group_key, teacher_id in form.cleaned_data.iteritems():\n teacher_id = int(teacher_id)\n group = form.groups[group_key]\n if teacher_id == 0:\n DefaultTeacher.objects.filter(course=course).filter(group=group).delete()\n else:\n teacher = User.objects.get(pk=teacher_id)\n default_teacher, _ = DefaultTeacher.objects.get_or_create(course=course, group=group)\n default_teacher.teacher = teacher\n default_teacher.save()\n\n for issue in Issue.objects.filter(task__course=course, task__groups=group):\n issue.set_teacher(default=True, groups=[group])\n\n if 'rb_extensions[]' in request.POST:\n course.filename_extensions = request.POST.getlist('rb_extensions[]')\n else:\n course.filename_extensions.clear()\n\n if 'show_task_one_file_upload' in request.POST:\n course.show_task_one_file_upload = True\n else:\n 
course.show_task_one_file_upload = False\n\n if 'default_task_one_file_upload' in request.POST:\n course.default_task_one_file_upload = True\n else:\n course.default_task_one_file_upload = False\n\n if 'show_accepted_after_contest_ok' in request.POST:\n course.show_accepted_after_contest_ok = True\n else:\n course.show_accepted_after_contest_ok = False\n\n if 'default_task_one_file_upload' in request.POST:\n course.default_accepted_after_contest_ok = True\n else:\n course.default_accepted_after_contest_ok = False\n\n course.save()\n\n return HttpResponseRedirect('')\n\n\ndef change_visibility_hidden_tasks(request):\n if not request.method == 'POST':\n return HttpResponseForbidden()\n\n course = get_object_or_404(Course, id=int(request.POST['course_id']))\n if not course.user_is_teacher(request.user):\n return HttpResponseForbidden()\n\n session_var_name = str(request.user.id) + '_' + request.POST['course_id'] + '_show_hidden_tasks'\n request.session[session_var_name] = not request.session.get(session_var_name, False)\n\n return HttpResponse(\"OK\")\n\n\n@login_required\ndef set_course_mark(request):\n if request.method != 'POST':\n return HttpResponseForbidden()\n\n course = get_object_or_404(Course, id=request.POST['course_id'])\n group = get_object_or_404(Group, id=request.POST['group_id'])\n student = get_object_or_404(User, id=request.POST['student_id'])\n if request.POST['mark_id'] != '-1':\n mark = get_object_or_404(MarkField, id=request.POST['mark_id'])\n else:\n mark = MarkField()\n\n student_course_mark = StudentCourseMark()\n try:\n student_course_mark = StudentCourseMark.objects.get(course=course, group=group, student=student)\n except StudentCourseMark.DoesNotExist:\n student_course_mark.course = course\n student_course_mark.group = group\n student_course_mark.student = student\n\n student_course_mark.teacher = request.user\n student_course_mark.update_time = datetime.datetime.now()\n student_course_mark.mark = mark\n student_course_mark.save()\n\n return HttpResponse(json.dumps({'mark': unicode(mark), 'student_course_mark_id': student_course_mark.id}),\n content_type=\"application/json\")\n\n\n@login_required\ndef set_task_mark(request):\n if request.method != 'POST':\n return HttpResponseForbidden()\n\n task_id = request.POST['task_id']\n task = get_object_or_404(Task, id=task_id)\n if not task.course.user_is_teacher(request.user):\n return HttpResponseForbidden()\n\n issue, created = Issue.objects.get_or_create(task_id=task_id, student_id=request.POST['student_id'])\n\n mark = 0\n if request.POST['mark_value'] == '-':\n issue.set_status_by_tag(IssueStatus.STATUS_NEW)\n else:\n mark = float(request.POST['mark_value'])\n if mark <= 0:\n issue.set_status_by_tag(IssueStatus.STATUS_REWORK)\n else:\n issue.set_status_by_tag(IssueStatus.STATUS_ACCEPTED)\n\n issue.set_byname('mark', mark)\n\n return HttpResponse(json.dumps({'mark': mark,\n 'color': issue.status_field.color}),\n content_type=\"application/json\")\n\n\n@login_required\ndef change_table_tasks_pos(request):\n if request.method != 'POST':\n return HttpResponseForbidden()\n\n course = get_object_or_404(Course, id=int(request.POST['course_id']))\n if not course.user_is_teacher(request.user):\n return HttpResponseForbidden()\n\n group = get_object_or_404(Group, id=int(request.POST['group_id']))\n deleting_ids_from_groups = json.loads(request.POST['deleting_ids_from_groups'])\n if deleting_ids_from_groups:\n for task_id, group_ids in deleting_ids_from_groups.iteritems():\n\n group_ids = list(set(group_ids))\n task = 
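In the same settings handler, each boolean column gets its own `if '<flag>' in request.POST` block, and the second `'default_task_one_file_upload'` test looks like a copy-paste slip, since it sets `course.default_accepted_after_contest_ok`. A table-driven sketch avoids that class of error; `SimpleNamespace` and the `posted` dict below are stand-ins for the Course instance and `request.POST`:

from types import SimpleNamespace

course = SimpleNamespace()                    # stand-in for the Course model instance
posted = {'show_task_one_file_upload': 'on'}  # stand-in for request.POST
BOOL_FLAGS = (
    'show_task_one_file_upload',
    'default_task_one_file_upload',
    'show_accepted_after_contest_ok',
    'default_accepted_after_contest_ok',      # checked under its own key, exactly once
)
for flag in BOOL_FLAGS:
    setattr(course, flag, flag in posted)
assert course.show_task_one_file_upload is True
assert course.default_accepted_after_contest_ok is False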
get_object_or_404(Task, id=int(task_id))\n task_groups = task.groups.filter(id__in=group_ids)\n for tg in task_groups:\n if Issue.objects.filter(task=task, student__in=tg.students.all()).count():\n return HttpResponseForbidden()\n task.groups.remove(*task.groups.filter(id__in=group_ids))\n task.save()\n\n for task_relations in TaskGroupRelations.objects.filter(task=task, group__id__in=group_ids):\n task_relations.deleted = True\n task_relations.save()\n\n if 'task_deleted[]' in request.POST:\n task_deleted = map(lambda x: int(x), dict(request.POST)['task_deleted[]'])\n for task in Task.objects.filter(id__in=task_deleted):\n if not Issue.objects.filter(task=task).count():\n try:\n task.delete()\n TaskGroupRelations.objects.get(task=task, group=group).delete()\n except TaskGroupRelations.DoesNotExist:\n pass\n else:\n return HttpResponseForbidden()\n\n if 'task_order[]' in request.POST:\n task_order = map(lambda x: int(x), dict(request.POST)['task_order[]'])\n\n for task_relations in TaskGroupRelations.objects.select_related('task') \\\n .filter(task__id__in=task_order).filter(group=group):\n task_relations.position = task_order.index(task_relations.task.id)\n task_relations.save()\n\n return HttpResponse(\"OK\")\n\n\n@login_required\ndef ajax_update_contest_tasks(request):\n if not request.is_ajax():\n return HttpResponseForbidden()\n\n if 'tasks_with_contest[]' not in request.POST or 'contest_id' not in request.POST:\n return HttpResponseForbidden()\n\n contest_id = int(request.POST['contest_id'])\n\n response = {'is_error': False,\n 'contest_id': contest_id,\n 'error': '',\n 'tasks_title': {}}\n\n got_info, contest_info = get_contest_info(contest_id)\n if got_info:\n problem_req = FakeResponse()\n problem_req = requests.get(settings.CONTEST_API_URL + 'problems?contestId=' + str(contest_id),\n headers={'Authorization': 'OAuth ' + settings.CONTEST_OAUTH})\n problems = []\n if 'error' in problem_req:\n response['is_error'] = True\n if 'IndexOutOfBoundsException' in problem_req['error']['name']:\n response['error'] = _(u'Такого контеста не существует')\n else:\n response['error'] = _(u'Ошибка Я.Контеста: ') + problem_req['error']['message']\n if 'result' in problem_req.json():\n problems = problem_req.json()['result']['problems']\n\n contest_responses = [contest_info, problems]\n else:\n response['is_error'] = True\n if \"You're not allowed to view this contest.\" in contest_info:\n response['error'] = _(u\"У anytask нет прав на данный контест\")\n elif \"Contest with specified id does not exist.\" in contest_info:\n response['error'] = _(u'Такого контеста не существует')\n else:\n response['error'] = _(u'Ошибка Я.Контеста: ') + contest_info\n\n if not response['is_error']:\n for task in Task.objects.filter(id__in=dict(request.POST)['tasks_with_contest[]']):\n alias = task.problem_id\n if contest_id != task.contest_id:\n continue\n\n for problem in contest_responses[0]['problems']:\n if problem['alias'] == alias:\n task.title = problem['problemTitle']\n task.task_text = prettify_contest_task_text(problem['statement'])\n if 'endTime' in contest_responses[0]:\n deadline = contest_responses[0]['endTime'].split('+')[0]\n task.deadline_time = datetime.datetime.strptime(deadline, '%Y-%m-%dT%H:%M:%S.%f')\n else:\n task.deadline_time = None\n break\n\n for problem in contest_responses[1]:\n if problem['title'] == alias:\n if 'score' in problem:\n task.score_max = problem['score']\n\n task.save()\n response['tasks_title'][task.id] = task.title\n\n return HttpResponse(json.dumps(response),\n 
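`change_table_tasks_pos` above assigns positions with `task_order.index(task_relations.task.id)`, an O(n) scan per relation. Precomputing an id-to-position dict makes each lookup O(1); the ids here are invented for illustration:

task_order = [31, 7, 19]                      # task ids in their new display order
positions = {tid: pos for pos, tid in enumerate(task_order)}
assert positions == {31: 0, 7: 1, 19: 2}
# in the view this would replace list.index():
#   task_relations.position = positions[task_relations.task.id]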
content_type=\"application/json\")\n\n\n@login_required\ndef ajax_rejudge_contest_tasks(request):\n if not request.is_ajax():\n return HttpResponseForbidden()\n\n if 'tasks_with_contest[]' not in request.POST:\n return HttpResponseForbidden()\n\n for issue in Issue.objects.filter(task_id__in=dict(request.POST)['tasks_with_contest[]']):\n contest_rejudge(issue)\n\n return HttpResponse(\"OK\")\n","sub_path":"anytask/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":30038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"606767760","text":"#!/usr/bin/env python3\n\n\"\"\" \nLaelaps python modules setup.py script.\n\"\"\"\n\n## \\file \n##\n## $LastChangedDate: 2016-03-18 12:47:57 -0600 (Fri, 18 Mar 2016) $\n## $Rev: 4362 $\n##\n## \\brief Laelaps Python Setup Script.\n##\n## \\author Robin Knight (robin.knight@roadnarrows.com)\n## \n## \\par Copyright\n## \\h_copy 2015-2017. RoadNarrows LLC.\\n\n## http://www.roadnarrows.com\\n\n## All Rights Reserved\n##\n# @EulaBegin@\n# \n# Permission is hereby granted, without written agreement and without\n# license or royalty fees, to use, copy, modify, and distribute this\n# software and its documentation for any purpose, provided that\n# (1) The above copyright notice and the following two paragraphs\n# appear in all copies of the source code and (2) redistributions\n# including binaries reproduces these notices in the supporting\n# documentation. Substantial modifications to this software may be\n# copyrighted by their authors and need not follow the licensing terms\n# described here, provided that the new terms are clearly indicated in\n# all files where they apply.\n# \n# IN NO EVENT SHALL THE AUTHOR, ROADNARROWS LLC, OR ANY MEMBERS/EMPLOYEES\n# OF ROADNARROW LLC OR DISTRIBUTORS OF THIS SOFTWARE BE LIABLE TO ANY\n# PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL\n# DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,\n# EVEN IF THE AUTHORS OR ANY OF THE ABOVE PARTIES HAVE BEEN ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# \n# THE AUTHOR AND ROADNARROWS LLC SPECIFICALLY DISCLAIM ANY WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS ON AN\n# \"AS IS\" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO\n# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.\n# \n# @EulaEnd@\n\nimport os\nimport sys\nfrom distutils.core import setup, Extension\n\n## RN Package Root Directory (not python package)\npkgroot = '../..'\n\n#\n## Package pydoc additional information (required by rnmake utilities)\n#\nPyDocInfo = {\n 'org_initials': 'RNR',\n 'index_template': pkgroot+\"/docs/pydoc.html.tpl\",\n 'images_dir': pkgroot+\"/docs/images\",\n 'images': {\n 'ORG_LOGO': 'Logo.png',\n 'FAVICON': 'favicon.png',\n },\n}\n\n#\n## Package Information (required by setup and rnmake utilities)\n#\nPkgInfo = {\n 'name': 'Laelaps',\n 'version': '1.0.0',\n 'description': 'RoadNarrows Laelaps Python Package',\n 'long_description':\"\"\"\nThe Laelaps python package provides modules and plug-in extension for the\nLaelaps mobile robotic platform.\n\"\"\",\n 'author': 'Robin Knight',\n 'author_email': 'robin.knight@roadnarrows.com',\n 'maintainer': 'RoadNarrows LLC',\n 'url': 'http://www.roadnarrows.com/',\n 'platforms': \"any\",\n 'license':\"\"\"\nThis is free python modules and binary extensions software; see the source for\ncopying conditions. There is NO warranty; not even for MERCHANTABILITY or\nFITNESS FOR A PARTICULAR PURPOSE.\nCopyright (C) 2011-2013 RoadNarrows LLC\n\"\"\",\n 'packages': ['Laelaps', 'Laelaps.images'],\n 'package_dir': {'Laelaps':'modules/Laelaps',\n 'Laelaps.images':'modules/Laelaps/images'},\n 'package_data': {'Laelaps':['_ImuMspMsgs.*', '_RoboClawMsgs.*',\n '_WatchDogMsgs.*'],\n 'Laelaps.images':['*.png', '*.jpg']},\n 'scripts': [],\n}\n\n#\n## List of XML files\n#\nxmlfiles = [ ]\n\n\n## run\nif __name__ == \"__main__\":\n setup(**PkgInfo)\n","sub_path":"Laelaps/sw/pyModules/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40318411","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport geoip2.database\nimport MySQLdb\nimport csv\n\n# データベースの読み込み\nreader = geoip2.database.Reader('/usr/local/share/GeoIP/GeoLite2-City.mmdb')\n\nconnect = MySQLdb.connect(host = \"localhost\", db = \"macsdb\", user = \"root\",passwd=\"07140708\", charset=\"utf8\")\ncursor = connect.cursor()\n\n\n#sql = \"select distinct(inet_ntoa(ip_src)),cc_src from packet_tcp_160801;\"\n#sql = \"select distinct(inet_ntoa(ip_src)),cc_src from packet_tcp_161017;\"\nsql = \"select distinct(inet_ntoa(ip_src)),cc_src from packet_tcp_161020_2;\"\n\ncursor.execute(sql)\nresult = cursor.fetchall()\n\nf = open('geoip2.csv', 'ab')\n\ncsvWriter = csv.writer(f)\n\nfor row in result:\n listdata = []\n record = reader.city(row[0])\n #print row[0], row[1], record.country.name\n listdata.append(row[0])\n listdata.append(row[1])\n listdata.append(record.country.name)\n csvWriter.writerow(listdata)\n\nf.close()\ncursor.close()\nconnect.close()\n","sub_path":"geoip.py","file_name":"geoip.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316618985","text":"import random\r\n\r\nMINE = -2\r\nEMPTY = -1\r\nOCCUPIED = 1\r\nOUT = -3\r\nTAX = 10\r\ndx = [0, 1, 0, -1]\r\ndy = [1, 0, -1, 0]\r\n\r\nclass Minesweeper:\r\n def set_mines(self):\r\n number_of_mines = (self.n ** 2 * TAX) // 100\r\n self.mines = {}\r\n for i in range(number_of_mines):\r\n while True:\r\n x = random.randint(0, self.n 
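The GeoIP entry above opens its reader, cursor, and CSV file by hand. Both `geoip2.database.Reader` and file objects support `with`, which closes them even on error; a sketch of the same lookup-and-write flow, where `rows` stands in for the `(ip, country_code)` pairs fetched from MySQL:

import csv
import geoip2.database

rows = [('8.8.8.8', 'US')]                    # placeholder for the MySQL result set
with geoip2.database.Reader('/usr/local/share/GeoIP/GeoLite2-City.mmdb') as reader, \
        open('geoip2.csv', 'a', newline='') as f:
    writer = csv.writer(f)
    for ip, cc in rows:
        record = reader.city(ip)              # same lookup the entry performs
        writer.writerow([ip, cc, record.country.name])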
- 1)\r\n y = random.randint(0, self.n - 1)\r\n print(x, y)\r\n if self.board[x][y] == EMPTY:\r\n break\r\n self.board[x][y] = MINE\r\n\r\n def __init__(self, n):\r\n self.n = n;\r\n self.board = [0] * n\r\n for i in range(n):\r\n self.board[i] = [EMPTY] * n\r\n\r\n self.set_mines()\r\n\r\n \r\n def change(self, c):\r\n if c >= 0:\r\n return str(c)\r\n return '.'\r\n \r\n def __str__(self):\r\n ans = \" \" + \"\".join([str(i) for i in range(self.n)]) + \"\\n\"\r\n for i in range(self.n):\r\n ans += (str(i))\r\n ans += \"\".join(list(map(self.change, self.board[i])))\r\n ans += (str(i) + \"\\n\")\r\n ans += \" \" + \"\".join([str(i) for i in range(self.n)]) + \"\\n\"\r\n return ans\r\n\r\n def inside_board(self, x, y):\r\n return x >= 0 and x < self.n and y >= 0 and y < self.n\r\n \r\n def fill(self, x, y):\r\n cnt = 0\r\n for i in range(len(dx)):\r\n if self.inside_board(x + dx[i], y + dy[i]) and self.board[x + dx[i]][y + dy[i]] == MINE:\r\n cnt += 1\r\n return cnt\r\n\r\n def attack(self, x, y):\r\n if not self.inside_board(x, y):\r\n return [OUT, \"Posição fora do tabuleiro\"]\r\n if self.board[x][y] == MINE:\r\n return [MINE, \"MORREU!!\"]\r\n if self.board[x][y] == EMPTY:\r\n self.board[x][y] = self.fill(x, y)\r\n return [EMPTY, \"Posição atacada com sucesso!!\"]\r\n return [OCCUPIED, \"Voce já atacou essa posição\"]\r\n \r\n\r\nn = int(input(\"Tamanho da matriz (N x N): \"))\r\nmine = Minesweeper(n)\r\n\r\nprint(mine)\r\nwhile True:\r\n lin = [int(i) for i in input(\"Digite uma posicao (X, Y): \").split()]\r\n [flag, message] = mine.attack(lin[0], lin[1])\r\n print(message)\r\n if flag == MINE:\r\n break\r\n print(mine)\r\n","sub_path":"Python/Minesweeper.py","file_name":"Minesweeper.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"437067608","text":"from Body import Body\nfrom System import System\nimport numpy as np\n\nbodies = [Body(np.array([0., 0.]), np.array([0., 0.]), 1.989e30, 6.9551e11, 'Planet A'),\n Body(np.array([1.496e14, 0.]), np.array([0., 3e9]), 5.972e26, 6.371e9, 'Planet B'),\n Body(np.array([1.496e14, 2.25e14]), np.array([5e9, -4e9]), 2.972e28, 6.371e9, 'Planet C')]\nsystem = System(bodies)\n\ndef days(number):\n # given input of number of days, outputs number of seconds\n return 86400 * number\n\ntf = days(10)\ndt = days(0.001)\n\nsystem.simulate(tf, dt)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46846314","text":"#!usr/bin/python\n\n\"\"\"\nBefore running this script, make sure that you have sqlite3 \ninstalled in your user area. You can check this by typing \"$sqlite3 --version\"\nat the command line\n\nOnce the sqlite3 is installed, check that you are in the directory where you\nwish the database to be created. at the command prompt, enter \"sqlite3 nameofdatabase.db\"\n\nThis should create the database. You can check this by typing \".databases\"\nafter the sqlite3> prompt. 
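The Minesweeper entry above places mines by retrying `random.randint` until it lands on an empty cell; `random.sample` draws the required number of distinct cells in one call, with no retry loop. (Note also that the entry's `fill()` counts only the four orthogonal neighbours, not the usual eight.) A standalone sketch of the sampling approach, with the board size and mine tax taken from the entry:

import random

n, TAX = 8, 10
number_of_mines = (n ** 2 * TAX) // 100
cells = random.sample(range(n * n), number_of_mines)  # distinct flat indices, no retries
mines = [(idx // n, idx % n) for idx in cells]        # back to (row, col)
assert len(set(mines)) == number_of_mines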
It should display the information about the database created.\n\nThen, type \".quit\" to exit the database.\n\nTo create the flagstat database, you can use the script by typing \"python flagstat_db_2.py nameofdatabase.db\".\n\"\"\"\n\nimport os\nimport sys\nimport sqlite3\nimport time\n\ndef connect_flagstat_db(db_dir, db_file):\n\n#Create a connection to the SQLite database specified by the db_file\n\n #db_dir = os.path.dirname(db)\n try:\n conn = sqlite3.connect(db_file)\n #print \"connection to sqlite3 established\"\n return conn\n\n except Exception as err:\n print (err)\n with open(os.path.join(db_dir, 'connerrlog2.txt'), 'a') as connerrlog:\n connerrlog.writelines('Exception occurred: {} \\n'.format(err))\n\ndef execute_sql(db_dir, conn, sqlstatement):\n\n #print sqlstatement\n\n try:\n result = conn.cursor().execute(sqlstatement)\n\n conn.commit()\n return result\n\n except Exception as err:\n print (err)\n with open(os.path.join(db_dir,'exerrlog2.txt'), 'a') as exerrlog:\n exerrlog.writelines('Exception occurred: {} \\n'.format(err))\n raise\n\n\"\"\"Functions to create tables for flagstat data\"\"\"\n\ndef create_sample_table(db_dir, conn):\n\n print (\"creating sample table in\", db_dir)\n print (\"connecting to db:\", conn)\n\n action = \"\"\"\n CREATE TABLE IF NOT EXISTS sample (id INTEGER PRIMARY KEY AUTOINCREMENT, \n sample_name TEXT NOT NULL, in_total_passed INT NOT NULL, mapped_passed INT NOT NULL, duplicates_passed INT NOT NULL,\n import_date TEXT, runfolder_id INT NOT NULL, CONSTRAINT fk_runfolder FOREIGN KEY (runfolder_id) REFERENCES runfolder(id))\"\"\"\n #in_total_failed INT NOT NULL, duplicates_failed INT NOT NULL, mapped_failed INT NOT NULL\n \n execute_sql(db_dir, conn, action)\n\ndef create_runfolder_table(db_dir, conn):\n\n print (\"creating runfolder table in\", db_dir)\n print (\"connecting to db\", conn)\n\n action = \"\"\"\n CREATE TABLE IF NOT EXISTS runfolder (id INTEGER PRIMARY KEY AUTOINCREMENT, \n runfolder_name TEXT NOT NULL, total_reads INT, mapped_reads INT, duplicate_reads INT, import_date TEXT)\"\"\"\n #in_total_failed INT NOT NULL, duplicates_failed INT NOT NULL, mapped_failed INT NOT NULL\n \n execute_sql(db_dir, conn, action)\n\ndef main(db_file):\n\n start = time.clock()\n\n #provide directory to where database is stored\n db_path = os.path.abspath(db_file)\n db_dir = os.path.dirname(db_path)\n #print \"path to the directory: {}\".format(db_dir)\n\n #refer the connect_flagstat_db as db in other functions\n conn = connect_flagstat_db(db_dir, db_file)\n #print db\n\n # function calls to create tables in the database\n create_sample_table(db_dir, conn)\n\n create_runfolder_table(db_dir, conn)\n end = time.clock()\n time_taken = end - start\n with open(os.path.join(db_dir, 'time.txt'), 'a') as time_text:\n time_text.writelines(\"time taken to create the database : {} \\n\".format(time_taken))\n\n\nif __name__ == \"__main__\":\n \n main(sys.argv[1])\n \n \n \"\"\"from terminal navigate to the directory where the script is stored,\n \n enter \"python flagstat_db_2.py\"\n \n followed by the filepath to the database you wish to create- sys.argv[1]\"\"\"\n\n\n\n","sub_path":"flagstat_db_2.py","file_name":"flagstat_db_2.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541069007","text":"class Solution:\n def reverseBits(self, n):\n ans = 0\n for i in range(32):\n ans += n & 1\n n >>=1\n ans <<=1 \n return ans>>1\n\ns = 
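The flagstat script above times itself with `time.clock()`, which was deprecated in 3.3 and removed in Python 3.8; `time.perf_counter()` is the replacement. Using the sqlite3 connection as a context manager also commits on success and rolls back on error, folding the script's execute-then-commit helper into one block. A runnable sketch against an in-memory database:

import sqlite3
import time

start = time.perf_counter()
conn = sqlite3.connect(':memory:')            # placeholder for the real .db path
with conn:                                    # commit on success, rollback on exception
    conn.execute('CREATE TABLE IF NOT EXISTS sample (id INTEGER PRIMARY KEY AUTOINCREMENT)')
conn.close()
print('time taken to create the database : {:.6f}s'.format(time.perf_counter() - start))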
Solution()\nprint(s.reverseBits(43261596))\n'''\none line solution:\nreturn int(''.join(reversed(bin(n)[2:].zfill(32))),2)\n'''","sub_path":"190. Reverse Bits.py","file_name":"190. Reverse Bits.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74707628","text":"# Countdown\n# Create a function that accepts a number as an input. Return a new list that\n# counts down by one, from the number (as the 0th element) down to\n# 0 (as the last element).\n# Example: countdown(5) should return the list: [5,4,3,2,1,0]\ndef countdown(num):\n num_list = []\n for i in range(num, 0 - 1, -1):\n num_list.append(i)\n return num_list\n\ncountdown(5)\n\n\n# Print and Return\n# Create a function that will receive a list with two numbers.\n# Print the first value and return the second.\n# Example: print_and_return([1,2]) should print 1 and return 2\ndef print_and_return(list2):\n print(list2[0])\n return list2[1]\n\nprint_and_return([1,2])\n\n\n# First Plus Length\n# Create a function that accepts a list and returns the sum of the first value\n# in the list plus the list's length.\n# Example: first_plus_length([1,2,3,4,5]) should\n# return 6 (first value: 1 + length: 5)\ndef first_plus_length(alist):\n return alist[0] + len(alist)\n\nfirst_plus_length([1,2,3,4,5])\n\n\n# This Length, That Value\n# Write a function that accepts two integers as parameters: size and value.\n# The function should create and return a list whose\n# length is equal to the given size, and whose values are all the given value.\n# Example: length_and_value(4,7) should return [7,7,7,7]\n# Example: length_and_value(6,2) should return [2,2,2,2,2,2]\ndef length_and_value(size, value):\n alist = []\n for i in range(size):\n alist.append(value)\n return alist\n\nlength_and_value(4,7)\nlength_and_value(6,2)\n\n\n# Values Greater than Second (Optional)\n# Write a function that accepts a list and creates a new list containing only\n# the values from the original list that are greater than its 2nd value.\n# Print how many values this is and then return the new list.\n# If the list has less than 2 elements, have the function return False\n# Example: values_greater_than_second([5,2,3,2,1,4]) should print 3 and\n# return [5,3,4]\n# Example: values_greater_than_second([3]) should return False\ndef values_greater_than_second(alist):\n if len(alist) < 2:\n return False\n new_list = []\n for i in alist:\n if i > alist[1]:\n new_list.append(i)\n print(f\"There are {len(new_list)} values in the new list\")\n return new_list\n\nvalues_greater_than_second([5,2,3,2,1,4])\nvalues_greater_than_second([3])\n","sub_path":"basic_functions_II.py","file_name":"basic_functions_II.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529488303","text":"from skimage.filters import gabor\nfrom skimage import data, io\nfrom matplotlib import pyplot as plt \nimport numpy as np\nimport math\n#image = data.coins()\n\n\n\nclass frequency:\n\tdef __init__(self, image):\n\t\tself.e_i_list = []\n\t\tself.image = image\n\t\t\n\tdef calcul(self):\n\t\tfor i in range(7):\n\t\t\tfreq=1/(math.sqrt(2)*(i+1))\n\t\t\t#print freq\n\t\t\te_ij=[]#a list of imgs\n\t\t\tfor j in range(8):#j is theta\n\t\t\t\tthet=j/8*math.pi\n\t\t\t\te_j=np.ndarray(self.image.shape)#img (3)\t\t\n\t\t\t\tfilt_real, filt_imag = gabor(self.image, frequency=freq,theta=thet,sigma_x=1,sigma_y=1)\n\t\t\t\tfor y in 
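The reverse-bits entry above appends the low bit, shifts, and compensates for the one extra shift with `return ans >> 1`. Shifting before appending removes the compensation; a worked check on the classic input (43261596 is 0b00000010100101000001111010011100, and reversing its 32 bits gives 964176192):

def reverse_bits(n):
    ans = 0
    for _ in range(32):
        ans = (ans << 1) | (n & 1)  # shift result left, then append lowest bit of n
        n >>= 1
    return ans

assert reverse_bits(43261596) == 964176192
# agrees with the string-based one-liner quoted in the entry:
assert reverse_bits(43261596) == int(bin(43261596)[2:].zfill(32)[::-1], 2)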
range(filt_real.shape[0]):\n\t\t\t\t\tfor x in range(filt_real.shape[1]):\n\t\t\t\t\t\te_j[y][x]=math.sqrt(filt_real[y][x]**2+filt_imag[y][x]**2)\n\t\t\t\t#io.imshow(e_j)\n\t\t\t\t#io.show()\n\t\t\t\te_ij.append(e_j)\n\t\t\te_i=np.ndarray(e_ij[0].shape)\n\t\t\temax=0\n\t\t\temin=255\n\t\t\tfor y in range(e_ij[0].shape[0]):\n\t\t\t\tfor x in range(e_ij[0].shape[1]):\n\t\t\t\t\te_ijmax=0\n\t\t\t\t\te_ijmaxindex=0\n\t\t\t\t\tfor n in range(len(e_ij)):\n\t\t\t\t\t\tif e_ij[n][y][x]>e_ijmax:\n\t\t\t\t\t\t\te_ijmax=e_ij[n][y][x]\n\t\t\t\t\t\t\te_ijmaxindex=n\n\t\t\t\t\tif e_ijmaxindex==0:\n\t\t\t\t\t\te_i[y][x]=e_ijmax+e_ij[7][y][x]+e_ij[e_ijmaxindex+1][y][x]\n\t\t\t\t\telif e_ijmaxindex==7:\n\t\t\t\t\t\te_i[y][x]=e_ijmax+e_ij[e_ijmaxindex-1][y][x]+e_ij[0][y][x]\n\t\t\t\t\telse:\n\t\t\t\t\t\te_i[y][x]=e_ijmax+e_ij[e_ijmaxindex-1][y][x]+e_ij[e_ijmaxindex+1][y][x]\n\t\t\t\t\tif e_i[y][x]>emax:\n\t\t\t\t\t\temax=e_i[y][x]\n\t\t\t\t\tif e_i[y][x] 1 when there is shared ownership)\n\t\tknoedler_group = [knoedler]\n\t\tif shared_people:\n\t\t\tpeople = []\n\t\t\trights = []\n\t\t\trole = 'shared-buyer' if incoming else 'shared-seller'\n\t\t\tremaining = Fraction(1, 1)\n# \t\t\tprint(f'{1+len(shared_people)}-way split:')\n\t\t\tfor i, p in enumerate(shared_people):\n\t\t\t\tperson_dict = self.helper.copy_source_information(p, data)\n\t\t\t\tperson = self.helper.add_person(\n\t\t\t\t\tperson_dict,\n\t\t\t\t\trecord=sales_record,\n\t\t\t\t\trelative_id=f'{role}_{i+1}'\n\t\t\t\t)\n\t\t\t\tname = p.get('name', p.get('auth_name', '(anonymous)'))\n\t\t\t\tshare = p.get('share', '1/1')\n\t\t\t\ttry:\n\t\t\t\t\tshare_frac = Fraction(share)\n\t\t\t\t\tremaining -= share_frac\n\n\t\t\t\t\tright = self.ownership_right(share_frac, person)\n\n\t\t\t\t\trights.append(right)\n\t\t\t\t\tpeople.append(person_dict)\n\t\t\t\t\tknoedler_group.append(person)\n# \t\t\t\t\tprint(f' {share:<10} {name:<50}')\n\t\t\t\texcept ValueError as e:\n\t\t\t\t\twarnings.warn(f'ValueError while handling shared rights ({e}): {pprint.pformat(p)}')\n\t\t\t\t\traise\n\t\t\t\t\t\n# \t\t\tprint(f' {str(remaining):<10} {knoedler._label:<50}')\n\t\t\tk_right = self.ownership_right(remaining, knoedler)\n\t\t\trights.insert(0, k_right)\n\n\t\t\ttotal_right = vocab.OwnershipRight(ident='', label=f'Total Right of Ownership of {object_label}')\n\t\t\ttotal_right.applies_to = hmo\n\t\t\tfor right in rights:\n\t\t\t\ttotal_right.part = right\n\n\t\t\tracq = model.RightAcquisition(ident='')\n\t\t\tracq.establishes = total_right\n\t\t\ttx.part = racq\n\n\t\t\tdata['_people'].extend(people)\n\n\tdef _add_prov_entry_payment(self, data:dict, tx, knoedler_price_part, price_info, people, people_agents, shared_people, shared_people_agents, date, incoming):\n\t\tknoedler = self.helper.static_instances.get_instance('Group', 'knoedler')\n\t\tknoedler_group = [knoedler]\n\n\t\tsales_record = get_crom_object(data['_record'])\n\t\thmo = get_crom_object(data['_object'])\n\t\tsn_ident = self.helper.stock_number_identifier(data['_object'], date)\n\t\t\n\t\tprice_data = {}\n\t\tif price_info and 'currency' in price_info:\n\t\t\tprice_data['currency'] = price_info['currency']\n\t\t\n\t\tamnt = get_crom_object(price_info)\n\t\tknoedler_price_part_amnt = get_crom_object(knoedler_price_part)\n\t\t\n\t\tprice_amount = None\n\t\twith suppress(AttributeError):\n\t\t\tprice_amount = amnt.value\n\t\tparts = [(knoedler, knoedler_price_part_amnt)]\n\t\tif shared_people:\n\t\t\trole = 'shared-buyer' if incoming else 'shared-seller'\n\t\t\tfor i, p in 
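The Gabor entry above computes the response magnitude pixel by pixel in nested loops; NumPy does this in one vectorized call, since `skimage.filters.gabor` already returns the real and imaginary responses as arrays. A minimal sketch, with the frequency chosen arbitrarily:

import numpy as np
from skimage import data
from skimage.filters import gabor

image = data.coins()
filt_real, filt_imag = gabor(image, frequency=0.1)
e_j = np.hypot(filt_real, filt_imag)          # elementwise sqrt(real**2 + imag**2)
assert e_j.shape == image.shape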
enumerate(shared_people):\n\t\t\t\tperson_dict = self.helper.copy_source_information(p, data)\n\t\t\t\tperson = self.helper.add_person(\n\t\t\t\t\tperson_dict,\n\t\t\t\t\trecord=sales_record,\n\t\t\t\t\trelative_id=f'{role}_{i+1}'\n\t\t\t\t)\n\t\t\t\tknoedler_group.append(person)\n\t\t# Check if a joint owner is either a seller or a buyer\n\t\tpeople_ids = set([x.id for x in people])\n\t\tknoedler_group_ids = set([x.id for x in knoedler_group])\n\t\tjoint_owner_also_seller_or_buyer_id = people_ids.intersection(knoedler_group_ids)\n\n\t\tpaym = None\n\t\tif amnt:\n\t\t\ttx_uri = tx.id\n\t\t\tpayment_id = tx_uri + '-Payment'\n\t\t\tpaym = model.Payment(ident=payment_id, label=f'Payment for {sn_ident}')\n\t\t\ttx.part = paym\n\t\t\t# If a joint owner is a seller or a buyer the payment node is empty and all the monetary ammount's information is moved to \n\t\t\t# AttributeAssignment -> Monetary Ammount\n\t\t\t# P9->E13->p141->E97->P90->full_amount\n\t\t\t# P9->E13->p141->E97->P180->currency\n\t\t\t# P9->E13->p141->E97->p67i->E33->P190->note\n\t\t\tif not joint_owner_also_seller_or_buyer_id:\n\t\t\t\tpaym.paid_amount = amnt\n\t\t\t\tfor kp in knoedler_group:\n\t\t\t\t\tif incoming:\n\t\t\t\t\t\tpaym.paid_from = kp\n\t\t\t\t\telse:\n\t\t\t\t\t\tpaym.paid_to = kp\n\t\t\telse:\n\t\t\t\tassignment_id = tx_uri + '-Attribute assignment'\n\t\t\t\tassignment = model.AttributeAssignment(ident=assignment_id, label=f\"Attribute assignment for {sn_ident}\")\n\t\t\t\tassignment.assigned = amnt\n\t\t\t\tfor kp in knoedler_group:\n\t\t\t\t\tassignment.carried_out_by = kp\n\t\t\t\ttx.part = assignment\n\t\t\tfor p in shared_people_agents:\n\t\t\t\t# when an agent is acting on behalf of the buyer/seller, model their involvement in a sub-activity\n\t\t\t\tsubpaym_role = 'Buyer' if incoming else 'Seller'\n\t\t\t\tsubpaym = model.Activity(ident='', label=f\"{subpaym_role}'s agent's role in payment\")\n\t\t\t\tsubpaym.classified_as = vocab.instances[f'{subpaym_role}sAgent']\n\t\t\t\tsubpaym.carried_out_by = p\n\t\t\t\tpaym.part = subpaym\n\n\t\t\tfor i, partdata in enumerate(parts):\n\t\t\t\tperson, part_amnt = partdata\n\t\t\t\t# add the part is there are multiple parts (shared tx), or if\n\t\t\t\t# this is the single part we know about, but its value is not\n\t\t\t\t#the same as the whole tx amount\n\t\t\t\tdifferent_amount = False\n\t\t\t\twith suppress(AttributeError):\n\t\t\t\t\tif amnt.value != part_amnt.value:\n\t\t\t\t\t\tdifferent_amount = True\n\t\t\t\tif len(parts) > 1 or different_amount:\n\t\t\t\t\tshared_payment_id = tx_uri + f'-Payment-{i}-share'\n\t\t\t\t\tshared_paym = model.Payment(ident=shared_payment_id, label=f\"{person._label} share of payment for {sn_ident}\")\n\t\t\t\t\tif part_amnt:\n\t\t\t\t\t\tshared_paym.paid_amount = part_amnt\n\t\t\t\t\tif incoming:\n\t\t\t\t\t\tshared_paym.paid_from = person\n\t\t\t\t\t\t# Partial payment of share from Knoedler to joint owner who is also the seller\n\t\t\t\t\t\tif joint_owner_also_seller_or_buyer_id:\n\t\t\t\t\t\t\tfor purchase_buyer in people:\n\t\t\t\t\t\t\t\tshared_paym.paid_to = purchase_buyer\n\t\t\t\t\telse:\n\t\t\t\t\t\tshared_paym.paid_to = person\n\t\t\t\t\t\t# Partial payment of share to Knoedler from joint owner who is also the buyer\n\t\t\t\t\t\tif joint_owner_also_seller_or_buyer_id:\n\t\t\t\t\t\t\tfor sale_buyer in people:\n\t\t\t\t\t\t\t\tshared_paym.paid_from = sale_buyer\n\t\t\t\t\tpaym.part = shared_paym\n\t\t\t\t\t\n\t\t# If a joint owner is a seller or a buyer the payment node is empty\n\t\tif not 
joint_owner_also_seller_or_buyer_id:\n\t\t\tfor person in people:\n\t\t\t\tif paym:\n\t\t\t\t\tif incoming:\n\t\t\t\t\t\tpaym.paid_to = person\n\t\t\t\t\telse:\n\t\t\t\t\t\tpaym.paid_from = person\n\t\tfor p in people_agents:\n\t\t\t# when an agent is acting on behalf of the buyer/seller, model their involvement in a sub-activity\n\t\t\tif paym:\n\t\t\t\tsubpaym_role = 'Seller' if incoming else 'Buyer'\n\t\t\t\tsubpaym = model.Activity(ident='', label=f\"{subpaym_role}'s agent's role in payment\")\n\t\t\t\tsubpaym.classified_as = vocab.instances[f'{subpaym_role}sAgent']\n\t\t\t\tsubpaym.carried_out_by = p\n\t\t\t\tpaym.part = subpaym\n\n\tdef _add_prov_entry_acquisition(self, data:dict, tx, from_people, from_agents, to_people, to_agents, date, incoming, purpose=None):\n\t\trec = data['book_record']\n\t\tbook_id, page_id, row_id = record_id(rec)\n\n\t\thmo = get_crom_object(data['_object'])\n\t\tsn_ident = self.helper.stock_number_identifier(data['_object'], date)\n\n\t\tdir = 'In' if incoming else 'Out'\n\t\tif purpose == 'returning':\n\t\t\tdir_label = 'Knoedler return'\n\t\telse:\n\t\t\tdir_label = 'Knoedler Purchase' if incoming else 'Knoedler Sale'\n\t\tacq_id = self.helper.make_proj_uri('ACQ', dir, book_id, page_id, row_id)\n\t\tacq = model.Acquisition(ident=acq_id)\n\t\tif self.helper.transaction_contains_multiple_objects(data, incoming):\n\t\t\tmulti_label = self.helper.transaction_multiple_object_label(data, incoming)\n\t\t\ttx._label = f'{dir_label} of Stock Numbers {multi_label} ({date})'\n\t\t\tname = f'{dir_label} of {sn_ident}'\n\t\t\tacq._label = name\n\t\telse:\n\t\t\tsn_ident = self.helper.stock_number_identifier(data['_object'], date)\n\t\t\tname = f'{dir_label} of {sn_ident}'\n\t\t\ttx.identified_by = model.Name(ident='', content=name)\n\t\t\ttx._label = name\n\t\t\tacq._label = name\n\t\tacq.identified_by = model.Name(ident='', content=name)\n\t\tacq.transferred_title_of = hmo\n\t\t\n\t\tfor p in from_people:\n\t\t\tacq.transferred_title_from = p\n\t\tfor p in from_agents:\n\t\t\t# when an agent is acting on behalf of the seller, model their involvement in a sub-activity\n\t\t\tsubacq = model.Activity(ident='', label=\"Seller's agent's role in acquisition\")\n\t\t\tsubacq.classified_as = vocab.instances['SellersAgent']\n\t\t\tsubacq.carried_out_by = p\n\t\t\tacq.part = subacq\n\t\tfor p in to_people:\n\t\t\tacq.transferred_title_to = p\n\t\tfor p in to_agents:\n\t\t\t# when an agent is acting on behalf of the buyer, model their involvement in a sub-activity\n\t\t\tsubacq = model.Activity(ident='', label=\"Buyer's agent's role in acquisition\")\n\t\t\tsubacq.classified_as = vocab.instances['BuyersAgent']\n\t\t\tsubacq.carried_out_by = p\n\t\t\tacq.part = subacq\n\n\t\ttx.part = acq\n\n\tdef _prov_entry(self, data, date_key, participants, price_info=None, knoedler_price_part=None, shared_people=None, incoming=False, purpose=None, buy_sell_modifiers=None):\n\t\tTHROUGH = CaseFoldingSet(buy_sell_modifiers['through'])\n\t\tFOR = CaseFoldingSet(buy_sell_modifiers['for'])\n\n\t\tif shared_people is None:\n\t\t\tshared_people = []\n\n\t\tfor k in ('_prov_entries', '_people'):\n\t\t\tdata.setdefault(k, [])\n\n\t\tparenthetical_parts = []\n\t\tdate = implode_date(data[date_key]) if date_key in data else None\n\t\tif date:\n\t\t\tparenthetical_parts.append(date)\n\t\t\n\t\todata = data['_object']\n\t\tsales_record = get_crom_object(data['_record'])\n\n\t\ttx = self._empty_tx(data, incoming, purpose=purpose)\n\t\ttx_uri = tx.id\n\t\tif 'knoedler_number' not in 
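The provenance handlers above track joint-ownership shares with `fractions.Fraction`, parsing strings like '1/4' and deducting each named share from Knoedler's remainder before building the OwnershipRight parts. A standalone sketch with invented shares:

from fractions import Fraction

shares = {'shared-buyer_1': '1/4', 'shared-buyer_2': '1/8'}  # made-up example data
remaining = Fraction(1, 1)
for name, share in shares.items():
    remaining -= Fraction(share)              # Fraction('1/4') parses the string form
assert remaining == Fraction(5, 8)            # the share Knoedler retains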
odata:\n\t\t\ttx.referred_to_by = vocab.Note(ident='', content='No Knoedler stock number was assigned to the object that is the subject of this provenance activity.')\n\n\t\ttx_data = add_crom_data(data={'uri': tx_uri}, what=tx)\n\t\tif date_key:\n\t\t\tself.set_date(tx, data, date_key)\n\n\t\trole = 'seller' if incoming else 'buyer'\n\t\tpeople_data = [\n\t\t\tself.helper.copy_source_information(p, data)\n\t\t\tfor p in participants\n\t\t]\n\t\t\n\t\tpeople = []\n\t\tpeople_agents = []\n\t\tfor i, p_data in enumerate(people_data):\n\t\t\tmod = self.modifiers(p_data, 'auth_mod')\n\t\t\tperson = self.helper.add_person(\n\t\t\t\tp_data,\n\t\t\t\trecord=sales_record,\n\t\t\t\trelative_id=f'{role}_{i+1}'\n\t\t\t)\n\t\t\tif THROUGH.intersects(mod):\n\t\t\t\tpeople_agents.append(person)\n\t\t\telse:\n\t\t\t\tpeople.append(person)\n\t\t\n\t\tknoedler = self.helper.static_instances.get_instance('Group', 'knoedler')\n\t\tknoedler_group = [knoedler]\n\t\tknoedler_group_agents = []\n\t\tif shared_people:\n\t\t\t# these are the people that joined Knoedler in the purchase/sale\n\t\t\trole = 'shared-buyer' if incoming else 'shared-seller'\n\t\t\tfor i, p_data in enumerate(shared_people):\n\t\t\t\tmod = self.modifiers(p_data, 'auth_mod')\n\t\t\t\tperson_dict = self.helper.copy_source_information(p_data, data)\n\t\t\t\tperson = self.helper.add_person(\n\t\t\t\t\tperson_dict,\n\t\t\t\t\trecord=sales_record,\n\t\t\t\t\trelative_id=f'{role}_{i+1}'\n\t\t\t\t)\n\t\t\t\tif THROUGH.intersects(mod):\n\t\t\t\t\tknoedler_group_agents.append(person)\n\t\t\t\telse:\n\t\t\t\t\tknoedler_group.append(person)\n\n\t\tfrom_people = []\n\t\tfrom_agents = []\n\t\tto_people = []\n\t\tto_agents = []\n\t\tif incoming:\n\t\t\tfrom_people = people\n\t\t\tfrom_agents = people_agents\n\t\t\tto_people = knoedler_group\n\t\t\tto_agents = knoedler_group_agents\n\t\telse:\n\t\t\tfrom_people = knoedler_group\n\t\t\tfrom_agents = knoedler_group_agents\n\t\t\tto_people = people\n\t\t\tto_agents = people_agents\n\n\t\tif incoming:\n\t\t\tself._add_prov_entry_rights(data, tx, shared_people, incoming)\n\t\tself._add_prov_entry_payment(data, tx, knoedler_price_part, price_info, people, people_agents, shared_people, knoedler_group_agents, date, incoming)\n\t\tself._add_prov_entry_acquisition(data, tx, from_people, from_agents, to_people, to_agents, date, incoming, purpose=purpose)\n\n# \t\tprint('People:')\n# \t\tfor p in people:\n# \t\t\tprint(f'- {getattr(p, \"_label\", \"(anonymous)\")}')\n# \t\tprint('Shared People:')\n# \t\tfor p in shared_people:\n# \t\t\tprint(f'- {getattr(p, \"_label\", \"(anonymous)\")}')\n# # \t\tself._add_prov_entry_custody_transfer(data, tx, people, incoming)\n\n\t\tdata['_prov_entries'].append(tx_data)\n\t\tdata['_people'].extend(people_data)\n\t\treturn tx\n\n\tdef add_return_tx(self, data, buy_sell_modifiers):\n\t\trec = data['book_record']\n\t\tbook_id, page_id, row_id = record_id(rec)\n\n\t\tpurch_info = data.get('purchase')\n\t\tsale_info = data.get('sale')\n\t\tsellers = data['purchase_seller']\n\t\tshared_people = []\n\t\tfor p in sellers:\n\t\t\tself.helper.copy_source_information(p, data)\n\t\tin_tx = self._prov_entry(data, 'entry_date', sellers, purch_info, incoming=True, buy_sell_modifiers=buy_sell_modifiers)\n\t\tout_tx = self._prov_entry(data, 'entry_date', sellers, sale_info, incoming=False, purpose='returning', buy_sell_modifiers=buy_sell_modifiers)\n\t\treturn (in_tx, out_tx)\n\n\tdef add_incoming_tx(self, data, buy_sell_modifiers):\n\t\tprice_info = data.get('purchase')\n\t\tknoedler_price_part = 
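`_prov_entry` above routes each participant either into the transaction proper or into an agent role, depending on whether their modifier appears in the case-insensitive THROUGH set. The project's `CaseFoldingSet` is imported from elsewhere, so the class below is only a guessed stand-in illustrating the `intersects` check; the real implementation may differ:

class CaseFoldingSet(set):
    """Hypothetical stand-in: a set that compares members case-insensitively."""
    def __init__(self, values):
        super().__init__(v.casefold() for v in values)
    def intersects(self, values):
        return any(v.casefold() in self for v in values)

THROUGH = CaseFoldingSet(['through', 'thru'])
assert THROUGH.intersects({'THROUGH'})        # agent: acted through this person
assert not THROUGH.intersects({'for'})        # principal: bought/sold for themselves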
data.get('purchase_knoedler_share')\n\t\tshared_people = data.get('purchase_buyer')\n\t\tsellers = data['purchase_seller']\n\t\tfor p in sellers:\n\t\t\tself.helper.copy_source_information(p, data)\n\t\t\n\t\ttx = self._prov_entry(data, 'entry_date', sellers, price_info, knoedler_price_part, shared_people, incoming=True, buy_sell_modifiers=buy_sell_modifiers)\n\t\t\n\t\tprev_owners = data.get('prev_own', [])\n\t\tlot_object_key = self.helper.transaction_key_for_record(data, incoming=True)\n\t\tif prev_owners:\n\t\t\tself.model_prev_owners(data, prev_owners, tx, lot_object_key)\n\n\t\treturn tx\n\n\tdef model_prev_owners(self, data, prev_owners, tx, lot_object_key):\n\t\tsales_record = get_crom_object(data['_record'])\n\t\tfor i, p in enumerate(prev_owners):\n\t\t\trole = 'prev_own'\n\t\t\tperson_dict = self.helper.copy_source_information(p, data)\n\t\t\tperson = self.helper.add_person(\n\t\t\t\tperson_dict,\n\t\t\t\trecord=sales_record,\n\t\t\t\trelative_id=f'{role}_{i+1}'\n\t\t\t)\n\t\t\tdata['_people'].append(person_dict)\n\n\t\tts = None # TODO\n\t\tprev_post_owner_records = [(prev_owners, True)]\n\n\t\tdata['_record'] = data['_record']\n\t\thmo = get_crom_object(data['_object'])\n\t\tfor owner_data, rev in prev_post_owner_records:\n\t\t\tif rev:\n\t\t\t\trev_name = 'prev-owner'\n\t\t\telse:\n\t\t\t\trev_name = 'post-owner'\n# \t\t\tignore_fields = {'own_so', 'own_auth_l', 'own_auth_d'}\n\t\t\ttx_data = add_crom_data(data={}, what=tx)\n\t\t\tfor seq_no, owner_record in enumerate(owner_data):\n\t\t\t\trecord_id = f'{rev_name}-{seq_no+1}'\n# \t\t\t\tif not any([bool(owner_record.get(k)) for k in owner_record.keys() if k not in ignore_fields]):\n# \t\t\t\t\t# some records seem to have metadata (source information, location, or notes)\n# \t\t\t\t\t# but no other fields set these should not constitute actual records of a prev/post owner.\n# \t\t\t\t\tcontinue\n\t\t\t\tself.handle_prev_post_owner(data, hmo, tx_data, 'Sold', lot_object_key, owner_record, record_id, rev, ts, make_label=prov_entry_label)\n\n\tdef add_outgoing_tx(self, data, buy_sell_modifiers):\n\t\tprice_info = data.get('sale')\n\t\tknoedler_price_part = data.get('sale_knoedler_share')\n\t\tshared_people = data.get('purchase_buyer')\n\t\tbuyers = data['sale_buyer']\n\t\tfor p in buyers:\n\t\t\tself.helper.copy_source_information(p, data)\n\t\treturn self._prov_entry(data, 'sale_date', buyers, price_info, knoedler_price_part, shared_people, incoming=False, buy_sell_modifiers=buy_sell_modifiers)\n\n\t@staticmethod\n\tdef set_date(event, data, date_key, date_key_prefix=''):\n\t\t'''Associate a timespan with the event.'''\n\t\tdate = implode_date(data[date_key], date_key_prefix)\n\t\tif date:\n\t\t\tbegin = implode_date(data[date_key], date_key_prefix, clamp='begin')\n\t\t\tend = implode_date(data[date_key], date_key_prefix, clamp='eoe')\n\t\t\tbounds = [begin, end]\n\t\telse:\n\t\t\tbounds = []\n\t\tif bounds:\n\t\t\tts = timespan_from_outer_bounds(*bounds)\n\t\t\tts.identified_by = model.Name(ident='', content=date)\n\t\t\tevent.timespan = ts\n\nclass ModelDestruction(TransactionHandler):\n\thelper = Option(required=True)\n\tmake_la_person = Service('make_la_person')\n\tbuy_sell_modifiers = Service('buy_sell_modifiers')\n\ttransaction_classification = Service('transaction_classification')\n\n\tdef __call__(self, data:dict, make_la_person, buy_sell_modifiers, transaction_classification):\n\t\trec = data['book_record']\n\t\tdate = implode_date(data['sale_date'])\n\t\thmo = get_crom_object(data['_object'])\n\n\t\ttitle = 
self.helper.title_value(data['_object'].get('title'))\n\t\tshort_title = truncate_with_ellipsis(title, 100) or title\n\n\t\t# The destruction URI is just the object URI with a suffix. When URIs are\n\t\t# reconciled during prev/post sale rewriting, this will allow us to also reconcile\n\t\t# the URIs for the destructions (of which there should only be one per object)\n\t\tdest_uri = hmo.id + '-Destruction'\n\t\td = model.Destruction(ident=dest_uri, label=f'Destruction of “{short_title}”')\n\t\tif rec.get('verbatim_notes'):\n\t\t\td.referred_to_by = vocab.Note(ident='', content=rec['verbatim_notes'])\n\t\thmo.destroyed_by = d\n\n\t\tin_tx = self.add_incoming_tx(data, buy_sell_modifiers)\n\t\tin_tx_cl = transaction_classification.get('Purchase')\n\t\tin_tx.classified_as = model.Type(ident=in_tx_cl.get('url'), label=in_tx_cl.get('label'))\n\n\t\treturn data\n\nclass ModelTheftOrLoss(TransactionHandler):\n\thelper = Option(required=True)\n\tmake_la_person = Service('make_la_person')\n\tbuy_sell_modifiers = Service('buy_sell_modifiers')\n\ttransaction_classification = Service('transaction_classification')\n\n\tdef __call__(self, data:dict, make_la_person, buy_sell_modifiers, transaction_classification):\n\t\trec = data['book_record']\n\t\tpi_rec = data['pi_record_no']\n\t\thmo = get_crom_object(data['_object'])\n\t\tsn_ident = self.helper.stock_number_identifier(data['_object'], None)\n\t\t\n\t\tin_tx = self.add_incoming_tx(data, buy_sell_modifiers)\n\t\tin_tx_cl = transaction_classification.get('Purchase')\n\t\tin_tx.classified_as = model.Type(ident=in_tx_cl.get('url'), label=in_tx_cl.get('label'))\n\n\t\ttx_out = self._empty_tx(data, incoming=False)\n\n\t\ttx_type = rec['transaction']\n\t\tlabel_type = None\n\t\tif tx_type == 'Lost':\n\t\t\tlabel_type = 'Loss'\n\t\t\ttransfer_class = vocab.Loss\n\t\telse:\n\t\t\tlabel_type = 'Theft'\n\t\t\ttransfer_class = vocab.Theft\n\n\t\ttx_cl = transaction_classification.get(tx_type)\n\t\tif tx_cl:\n\t\t\tlabel = tx_cl.get('label')\t\n\t\t\turl = tx_cl.get('url')\n\t\t\ttx_out.classified_as = model.Type(ident=url,label=label)\n\t\telse:\n\t\t\twarnings.warn(f'*** No classification found for transaction type: {tx_type!r}')\n\n\t\ttx_out._label = f'{label_type} of {sn_ident}'\n\t\ttx_out.identified_by = model.Name(ident='', content=tx_out._label)\n\t\ttx_out_data = add_crom_data(data={'uri': tx_out.id, 'label': tx_out._label}, what=tx_out)\n\n\t\ttitle = self.helper.title_value(data['_object'].get('title'))\n\t\tshort_title = truncate_with_ellipsis(title, 100) or title\n\n\t\t# It's conceivable that there could be more than one theft of an object (if it was\n\t\t# recovered after the first theft). 
Therefore, the theft URI must not share a\n\t\t# prefix with the object URI, otherwise all such thefts would be merged during\n\t\t# URI reconciliation as part of the prev/post sale rewriting.\n\t\ttheft_uri = hmo.id.replace('#', f'#{label_type.upper()},')\n\n\t\twarnings.warn('TODO: parse Theft/Loss note for date and location')\n\t\t# Examples:\n\t\t# \"Dec 1947 Looted by Germans during war\"\n\t\t# \"July 1959 Lost in Paris f.111\"\n\t\t# \"Lost at Sea on board Str Europe lost April 4/74\"\n\n\t\tnotes = rec.get('verbatim_notes')\n\t\tif notes and 'Looted' in notes:\n\t\t\ttransfer_class = vocab.Looting\n\t\tt = transfer_class(ident=theft_uri, label=f'{label_type} of “{short_title}”')\n\t\tt.transferred_custody_from = self.helper.static_instances.get_instance('Group', 'knoedler')\n\t\tt.transferred_custody_of = hmo\n\n\t\tif notes:\n\t\t\tt.referred_to_by = vocab.Note(ident='', content=notes)\n\n\t\ttx_out.part = t\n\n\t\tdata['_prov_entries'].append(tx_out_data)\n\t\treturn data\n\nclass ModelFinalSale(TransactionHandler):\n\t'''\n\tAdd ProvenanceEntry/Acquisition modeling for the sale leading to the final\n\tknown location (owner) of an object.\n\t'''\n\thelper = Option(required=True)\n\tmake_la_person = Service('make_la_person')\n\tbuy_sell_modifiers = Service('buy_sell_modifiers')\n\n\tdef __call__(self, data:dict, make_la_person, buy_sell_modifiers):\n\t\tdata = data.copy()\n\t\t\n\t\t# reset prov entries and people because we're only interested in those\n\t\t# related to the final sale on this branch of the graph\n\t\todata = data['_object'].copy()\n\t\tsales_record = get_crom_object(data['_record'])\n\t\tself.helper.copy_source_information(data, odata)\n\n\t\todata.setdefault('_prov_entries', [])\n\t\todata.setdefault('_people', [])\n\t\thmo = get_crom_object(odata)\n\t\torg = odata.get('_final_org')\n\t\tif org:\n\t\t\todata['_record'] = data['_record']\n\t\t\trec = data['book_record']\n\t\t\tbook_id = rec['stock_book_no']\n\t\t\tpage_id = rec['page_number']\n\t\t\trow_id = rec['row_number']\n\n\t\t\tprice_info = None\n\t\t\tknoedler_price_part = None\n\t\t\tshared_people = [org]\n\t\t\tsellers = []\n\t\t\tdate_key = None\n\t\t\ttx = self._prov_entry(data, date_key, sellers, price_info, knoedler_price_part, shared_people, incoming=True, buy_sell_modifiers=buy_sell_modifiers)\n\t\t\n\t\t\tcurrent_tx = self.add_outgoing_tx(data, buy_sell_modifiers)\n\t\t\tcurrent_tx_data = add_crom_data(data={}, what=current_tx)\n\t\t\n\t\t\tlot_object_key = list(self.helper.transaction_key_for_record(data, incoming=True))\n\t\t\ttx_data = {'uri': tx.id, 'label': f'Event leading to the currently known location of {hmo._label}'}\n\t\t\tadd_crom_data(data=tx_data, what=tx)\n\t\t\t\n\t\t\tself.handle_prev_post_owner(odata, hmo, current_tx_data, 'Sold', lot_object_key, org, f'final-owner-1', False, None, make_label=prov_entry_label)\n\t\t\todata = {k: v for k, v in odata.items() if k in ('_prov_entries', '_people')}\n\t\t\tyield odata\n\nclass ModelSale(TransactionHandler):\n\t'''\n\tAdd ProvenanceEntry/Acquisition modeling for a sold object. 
This includes an acquisition\n\tTO Knoedler from seller(s), and another acquisition FROM Knoedler to buyer(s).\n\t'''\n\thelper = Option(required=True)\n\tmake_la_person = Service('make_la_person')\n\tbuy_sell_modifiers = Service('buy_sell_modifiers')\n\ttransaction_classification = Service('transaction_classification')\n\n\tdef __call__(self, data:dict, make_la_person, buy_sell_modifiers, transaction_classification, in_tx=None, out_tx=None):\n\t\tsellers = data['purchase_seller']\n\t\tif not in_tx:\n\t\t\tif len(sellers):\n\t\t\t\t# if there are sellers in this record, then model the incoming transaction.\n\t\t\t\tin_tx = self.add_incoming_tx(data, buy_sell_modifiers)\n\t\t\t\ttx_cl = transaction_classification.get('Purchase')\n\t\t\t\tin_tx.classified_as = model.Type(ident=tx_cl.get('url'), label=tx_cl.get('label'))\n\t\t\telse:\n\t\t\t\t# if there are no sellers, then this is an object that was previously unsold, and should be modeled as an inventory activity\n\t\t\t\tinv = self._new_inventorying(data)\n\t\t\t\tappraisal = self._apprasing_assignment(data)\n\t\t\t\tinv_label = inv._label\n\t\t\t\tin_tx = self._empty_tx(data, incoming=True)\n\t\t\t\tin_tx.part = inv\n\t\t\t\tif appraisal:\n\t\t\t\t\tin_tx.part = appraisal\n\t\t\t\tin_tx.identified_by = model.Name(ident='', content=inv_label)\n\t\t\t\tin_tx._label = inv_label\n\t\t\t\tin_tx_data = add_crom_data(data={'uri': in_tx.id, 'label': inv_label}, what=in_tx)\n\t\t\t\tdata.setdefault('_prov_entries', [])\n\t\t\t\tdata['_prov_entries'].append(in_tx_data)\n\n\t\tif not out_tx:\n\t\t\tout_tx = self.add_outgoing_tx(data, buy_sell_modifiers)\n\n\t\t\ttransaction = data['book_record']['transaction']\n\t\t\ttx_cl = transaction_classification.get(transaction)\n\t\t\tif tx_cl:\n\t\t\t\tlabel = tx_cl.get('label')\t\n\t\t\t\turl = tx_cl.get('url')\n\t\t\t\tout_tx.classified_as = model.Type(ident=url,label=label)\n\t\t\telse:\n\t\t\t\twarnings.warn(f'*** No classification found for transaction type: {transaction!r}')\n\n\t\tin_tx.ends_before_the_start_of = out_tx\n\t\tout_tx.starts_after_the_end_of = in_tx\n\t\tyield data\n\nclass ModelReturn(ModelSale):\n\thelper = Option(required=True)\n\tmake_la_person = Service('make_la_person')\n\tbuy_sell_modifiers = Service('buy_sell_modifiers')\n\ttransaction_classification = Service('transaction_classification')\n\n\tdef __call__(self, data:dict, make_la_person, buy_sell_modifiers, transaction_classification):\n\t\tsellers = data.get('purchase_seller', [])\n\t\tbuyers = data.get('sale_buyer', [])\n\t\tif not buyers:\n\t\t\tbuyers = sellers.copy()\n\t\t\tdata['sale_buyer'] = buyers\n\t\tin_tx, out_tx = self.add_return_tx(data, buy_sell_modifiers)\n\t\tin_tx_cl = transaction_classification.get('Purchase')\n\t\tin_tx.classified_as = model.Type(ident=in_tx_cl.get('url'), label=in_tx_cl.get('label'))\n\t\tyield from super().__call__(data, make_la_person, buy_sell_modifiers, transaction_classification,in_tx=in_tx, out_tx=out_tx)\n\nclass ModelUnsoldPurchases(TransactionHandler):\n\thelper = Option(required=True)\n\tmake_la_person = Service('make_la_person')\n\tbuy_sell_modifiers = Service('buy_sell_modifiers')\n\ttransaction_classification = Service('transaction_classification')\n\n\tdef __call__(self, data:dict, make_la_person, buy_sell_modifiers, transaction_classification):\n\t\trec = data['book_record']\n\t\tpi_rec = data['pi_record_no']\n\t\todata = data['_object']\n\t\tbook_id, page_id, row_id = record_id(rec)\n\t\tsales_record = get_crom_object(data['_record'])\n\t\tdate = 
implode_date(data['entry_date'])\n\t\t\n\t\tsellers = data['purchase_seller']\n\t\tif len(sellers) == 0:\n\t\t\t# if there are no sellers in this record (and it is \"Unsold\" by design of the caller),\n\t\t\t# then this is actually an Inventorying event, and handled in ModelInventorying\n\t\t\treturn\n\n\t\thmo = get_crom_object(odata)\n\t\tobject_label = f'“{hmo._label}”'\n\n\t\tsn_ident = self.helper.stock_number_identifier(odata, date)\n\n\t\tin_tx = self.add_incoming_tx(data, buy_sell_modifiers)\n\t\tin_tx_cl = transaction_classification.get('Purchase')\n\t\tin_tx.classified_as = model.Type(ident=in_tx_cl.get('url'), label=in_tx_cl.get('label'))\n\t\tyield data\n\nclass ModelInventorying(TransactionHandler):\n\thelper = Option(required=True)\n\tmake_la_person = Service('make_la_person')\n\tbuy_sell_modifiers = Service('buy_sell_modifiers')\n\ttransaction_classification = Service('transaction_classification')\n\n\tdef __call__(self, data:dict, make_la_person, buy_sell_modifiers, transaction_classification):\n\t\trec = data['book_record']\n\t\tpi_rec = data['pi_record_no']\n\t\todata = data['_object']\n\t\tbook_id, page_id, row_id = record_id(rec)\n\t\tsales_record = get_crom_object(data['_record'])\n\t\tdate = implode_date(data['entry_date'])\n\t\tfor k in ('_prov_entries', '_people'):\n\t\t\tdata.setdefault(k, [])\n\t\t\n\t\tsellers = data['purchase_seller']\n\t\tif len(sellers) > 0:\n\t\t\t# if there are sellers in this record (and it is \"Unsold\" by design of the caller),\n\t\t\t# then this is not an actual Inventorying event, and handled in ModelUnsoldPurchases\n\t\t\treturn\n\n\t\thmo = get_crom_object(odata)\n\t\tobject_label = f'“{hmo._label}”'\n\n\t\tsn_ident = self.helper.stock_number_identifier(odata, date)\n\n\t\tinv = self._new_inventorying(data)\n\t\tappraisal = self._apprasing_assignment(data)\n\t\tinv_label = inv._label\n\n\t\ttx_out = self._empty_tx(data, incoming=False)\n\t\ttx_out._label = inv_label\n\t\ttx_out.identified_by = model.Name(ident='', content=inv_label)\n\n\t\ttransaction = rec['transaction']\n\t\ttx_cl = transaction_classification.get(transaction)\n\t\tif tx_cl:\n\t\t\tlabel = tx_cl.get('label')\n\t\t\turl = tx_cl.get('url')\n\t\t\ttx_out.classified_as = model.Type(ident=url,label=label)\n\t\telse:\n\t\t\twarnings.warn(f'*** No classification found for transaction type: {transaction!r}')\n\n\t\tinv_uri = self.helper.make_proj_uri('INV', book_id, page_id, row_id)\n\t\tinv = vocab.Inventorying(ident=inv_uri, label=inv_label)\n\t\tinv.identified_by = model.Name(ident='', content=inv_label)\n\t\tinv.encountered = hmo\n\t\tinv.carried_out_by = self.helper.static_instances.get_instance('Group', 'knoedler')\n\t\tself.set_date(inv, data, 'entry_date')\n\n\t\ttx_out.part = inv\n\t\tif appraisal:\n\t\t\ttx_out.part = appraisal\n\t\tself.set_date(tx_out, data, 'entry_date')\n\n\t\ttx_out_data = add_crom_data(data={'uri': tx_out.id, 'label': inv_label}, what=tx_out)\n\t\tdata['_prov_entries'].append(tx_out_data)\n\n\t\tyield data\n\n#mark - Knoedler Pipeline class\n\nclass KnoedlerPipeline(PipelineBase):\n\t'''Bonobo-based pipeline for transforming Knoedler data from CSV into JSON-LD.'''\n\tdef __init__(self, input_path, data, **kwargs):\n\t\tproject_name = 'knoedler'\n\t\tself.input_path = input_path\n\t\tself.services = None\n\n\t\thelper = KnoedlerUtilityHelper(project_name)\n\t\tsuper().__init__(project_name, helper=helper)\n\t\thelper.static_instances = self.static_instances\n\n\t\tvocab.register_instance('form type', {'parent': model.Type, 'id': '300444970', 
'label': 'Form'})\n\n\t\tvocab.register_vocab_class('ConstructedTitle', {'parent': model.Name, 'id': '300417205', 'label': 'Constructed Title'})\n\n\t\tvocab.register_vocab_class('SaleAsReturn', {\"parent\": model.Activity, \"id\":\"300445014\", \"label\": \"Sale (Return to Original Owner)\"})\n\n\t\tvocab.register_vocab_class('EntryNumber', {\"parent\": model.Identifier, \"id\":\"300445023\", \"label\": \"Entry Number\"})\n\t\tvocab.register_vocab_class('PageNumber', {\"parent\": model.Identifier, \"id\":\"300445022\", \"label\": \"Page Number\"})\n\t\tvocab.register_vocab_class('BookNumber', {\"parent\": model.Identifier, \"id\":\"300445021\", \"label\": \"Book Number\"})\n\n\t\tvocab.register_vocab_class('PageTextForm', {\"parent\": model.LinguisticObject, \"id\":\"300194222\", \"label\": \"Page\", \"metatype\": \"form type\"})\n\t\tvocab.register_vocab_class('EntryTextForm', {\"parent\": model.LinguisticObject, \"id\":\"300438434\", \"label\": \"Entry\", \"metatype\": \"form type\"})\n\n\t\tself.graph = None\n\t\tself.models = kwargs.get('models', settings.arches_models)\n\t\tself.header_file = data['header_file']\n\t\tself.files_pattern = data['files_pattern']\n\t\tself.limit = kwargs.get('limit')\n\t\tself.debug = kwargs.get('debug', False)\n\n\t\tfs = bonobo.open_fs(input_path)\n\t\twith fs.open(self.header_file, newline='') as csvfile:\n\t\t\tr = csv.reader(csvfile)\n\t\t\tself.headers = [v.lower() for v in next(r)]\n\n\tdef _construct_same_object_map(self, same_objects):\n\t\t'''\n\t\tSame objects data comes in as a list of identity equivalences (each being a list of ID strings).\n\t\tID strings may appear in multiple equivalences. For example, these 3 equivalences\n\t\trepresent a single object with 4 ID strings:\n\n\t\t\t[['1','2','3'], ['1','3'], ['2','4']]\n\n\t\tThis function computes a dict mapping every ID string to a canonical\n\t\trepresentative ID for that object (being the first ID value, lexicographically):\n\n\t\t\t{\n\t\t\t\t'1': '1',\n\t\t\t\t'2': '1',\n\t\t\t\t'3': '1',\n\t\t\t\t'4': '1',\n\t\t\t}\n\t\t'''\n\t\tsame_objects_map = {}\n\t\tsame_objects_map = {k: sorted(l) for l in same_objects for k in l}\n\t\tfor k in same_objects_map:\n\t\t\tv = same_objects_map[k]\n\t\t\torig = set(v)\n\t\t\tvv = set(v)\n\t\t\tfor kk in v:\n\t\t\t\tvv |= set(same_objects_map[kk])\n\t\t\tif vv != orig:\n\t\t\t\tkeys = v + [k]\n\t\t\t\tfor kk in keys:\n\t\t\t\t\tsame_objects_map[kk] = sorted(vv)\n\n\t\tsame_object_id_map = {k: v[0] for k, v in same_objects_map.items()}\n\t\tleaders = set()\n\t\tfor k in same_objects_map:\n\t\t\tleaders.add(same_objects_map[k][0])\n\t\treturn same_object_id_map\n\n\tdef setup_services(self):\n\t\t'''Return a `dict` of named services available to the bonobo pipeline.'''\n\t\tservices = super().setup_services()\n\n\t\tpeople_groups = set()\n\t\tpg_file = pathlib.Path(settings.pipeline_tmp_path).joinpath('people_groups.json')\n\t\twith suppress(FileNotFoundError):\n\t\t\twith pg_file.open('r') as fh:\n\t\t\t\tdata = json.load(fh)\n\t\t\t\tfor key in data['group_keys']:\n\t\t\t\t\tpeople_groups.add(tuple(key))\n\t\tservices['people_groups'] = people_groups\n\n\t\tsame_objects = services.get('objects_same', {}).get('objects', [])\n\t\tsame_object_id_map = self._construct_same_object_map(same_objects)\n\t\tservices['same_objects_map'] = same_object_id_map\n\n\t\tdifferent_objects = services.get('objects_different', {}).get('knoedler_numbers', [])\n\t\tservices['different_objects'] = different_objects\n\n\t\tsellers_to_be_deleted = 
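`_construct_same_object_map` above propagates equivalences by repeatedly unioning sorted lists until they stabilize. The same canonical-leader map can be computed with a small union-find keyed on the lexicographic minimum; this is an alternative sketch, not the pipeline's code, and it reproduces the docstring's example:

def canonical_map(equivalences):
    parent = {}

    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]     # path halving keeps chains short
            x = parent[x]
        return x

    for group in equivalences:
        find(group[0])                        # register singleton groups too
        for k in group[1:]:
            ra, rb = find(group[0]), find(k)
            if ra != rb:
                parent[max(ra, rb)] = min(ra, rb)  # lexicographic leader wins
    return {k: find(k) for k in parent}

assert canonical_map([['1', '2', '3'], ['1', '3'], ['2', '4']]) == {
    '1': '1', '2': '1', '3': '1', '4': '1'}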
services.get('sellers_to_be_deleted', {}).get('pi_record_no', [])\n\t\tservices['sellers_to_be_deleted'] = sellers_to_be_deleted\n\n\t\t# make these case-insensitive by wrapping the value lists in CaseFoldingSet\n\t\tfor name in ('attribution_modifiers',):\n\t\t\tif name in services:\n\t\t\t\tservices[name] = {k: CaseFoldingSet(v) for k, v in services[name].items()}\n\n\t\tif 'attribution_modifiers' in services:\n\t\t\tattribution_modifiers = services['attribution_modifiers']\n\t\t\tPROBABLY = attribution_modifiers['probably by']\n\t\t\tPOSSIBLY = attribution_modifiers['possibly by']\n\t\t\tattribution_modifiers['uncertain'] = PROBABLY | POSSIBLY\n\n\t\tservices.update({\n\t\t\t# to avoid constructing new MakeLinkedArtPerson objects millions of times, this\n\t\t\t# is passed around as a service to the functions and classes that require it.\n\t\t\t'make_la_person': MakeLinkedArtPerson(),\n\t\t\t'make_la_lo': MakeLinkedArtLinguisticObject(),\n\t\t\t'make_la_hmo': MakeLinkedArtHumanMadeObject(),\n\t\t\t'make_la_org': MakeLinkedArtOrganization(),\n\t\t\t'counts': defaultdict(int)\n\t\t})\n\t\treturn services\n\n\tdef add_sales_chain(self, graph, records, services, serialize=True):\n\t\t'''Add transformation of sales records to the bonobo pipeline.'''\n\t\tsales_records = graph.add_chain(\n# \t\t\t\"star_record_no\",\n# \t\t\t\"pi_record_no\",\n\t\t\tPreserveCSVFields(key='star_csv_data', order=self.headers),\n\t\t\tKeyManagement(\n\t\t\t\tdrop_empty=True,\n\t\t\t\toperations=[\n\t\t\t\t\t{\n\t\t\t\t\t\t'group': {\n\t\t\t\t\t\t\t'present_location': {\n\t\t\t\t\t\t\t\t'postprocess': lambda x, _: strip_key_prefix('present_loc_', x),\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"present_loc_geog\",\n\t\t\t\t\t\t\t\t\t\"present_loc_inst\",\n\t\t\t\t\t\t\t\t\t\"present_loc_acc\",\n\t\t\t\t\t\t\t\t\t\"present_loc_note\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t'group_repeating': {\n\t\t\t\t\t\t\t'_artists': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t\"artist_name\": 'name',\n\t\t\t\t\t\t\t\t\t\"artist_authority\": 'auth_name',\n\t\t\t\t\t\t\t\t\t\"artist_nationality\": 'nationality',\n\t\t\t\t\t\t\t\t\t\"artist_attribution_mod\": 'attribution_mod',\n\t\t\t\t\t\t\t\t\t\"artist_attribution_mod_auth\": 'attrib_mod_auth',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'postprocess': [\n\t\t\t\t\t\t\t\t\tfilter_empty_person,\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t'prefixes': (\n\t\t\t\t\t\t\t\t\t\"artist_name\",\n\t\t\t\t\t\t\t\t\t\"artist_authority\",\n\t\t\t\t\t\t\t\t\t\"artist_nationality\",\n\t\t\t\t\t\t\t\t\t\"artist_attribution_mod\",\n\t\t\t\t\t\t\t\t\t\"artist_attribution_mod_auth\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'purchase_seller': {\n\t\t\t\t\t\t\t\t'postprocess': [\n\t\t\t\t\t\t\t\t\tlambda d, p: delete_sellers(d, p, services),\n\t\t\t\t\t\t\t\t\tfilter_empty_person,\n\t\t\t\t\t\t\t\t\tlambda x, _: strip_key_prefix('purchase_seller_', x),\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t'prefixes': (\n\t\t\t\t\t\t\t\t\t\"purchase_seller_name\",\n\t\t\t\t\t\t\t\t\t\"purchase_seller_loc\",\n\t\t\t\t\t\t\t\t\t\"purchase_seller_auth_name\",\n\t\t\t\t\t\t\t\t\t\"purchase_seller_auth_loc\",\n\t\t\t\t\t\t\t\t\t\"purchase_seller_auth_mod\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'purchase_buyer': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t'purchase_buyer_own': 'name',\n\t\t\t\t\t\t\t\t\t'purchase_buyer_share': 'share',\n\t\t\t\t\t\t\t\t\t'purchase_buyer_share_auth': 
'auth_name',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'postprocess': [filter_empty_person],\n\t\t\t\t\t\t\t\t'prefixes': (\n\t\t\t\t\t\t\t\t\t\"purchase_buyer_own\",\n\t\t\t\t\t\t\t\t\t\"purchase_buyer_share\",\n\t\t\t\t\t\t\t\t\t\"purchase_buyer_share_auth\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'prev_own': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t'prev_own': 'name',\n\t\t\t\t\t\t\t\t\t'prev_own_auth': 'auth_name',\n\t\t\t\t\t\t\t\t\t'prev_own_loc': 'loc',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'prefixes': (\n\t\t\t\t\t\t\t\t\t\"prev_own\",\n\t\t\t\t\t\t\t\t\t\"prev_own_auth\",\n\t\t\t\t\t\t\t\t\t\"prev_own_loc\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'sale_buyer': {\n\t\t\t\t\t\t\t\t'postprocess': [\n\t\t\t\t\t\t\t\t\tlambda x, _: strip_key_prefix('sale_buyer_', x),\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t'prefixes': (\n\t\t\t\t\t\t\t\t\t\"sale_buyer_name\",\n\t\t\t\t\t\t\t\t\t\"sale_buyer_loc\",\n\t\t\t\t\t\t\t\t\t\"sale_buyer_auth_name\",\n\t\t\t\t\t\t\t\t\t\"sale_buyer_auth_addr\",\n\t\t\t\t\t\t\t\t\t\"sale_buyer_auth_mod\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t},\n\t\t\t\t\t\t'group': {\n\t\t\t\t\t\t\t'consigner': {\n\t\t\t\t\t\t\t\t'postprocess': lambda x, _: strip_key_prefix('consign_', x),\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"consign_no\",\n\t\t\t\t\t\t\t\t\t\"consign_name\",\n\t\t\t\t\t\t\t\t\t\"consign_loc\",\n\t\t\t\t\t\t\t\t\t\"consign_ulan\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'object': {\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"knoedler_number\",\n\t\t\t\t\t\t\t\t\t\"title\",\n\t\t\t\t\t\t\t\t\t\"subject\",\n\t\t\t\t\t\t\t\t\t\"genre\",\n\t\t\t\t\t\t\t\t\t\"object_type\",\n\t\t\t\t\t\t\t\t\t\"materials\",\n\t\t\t\t\t\t\t\t\t\"dimensions\",\n\t\t\t\t\t\t\t\t\t\"present_location\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'sale_date': {\n\t\t\t\t\t\t\t\t'postprocess': lambda x, _: strip_key_prefix('sale_date_', x),\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"sale_date_year\",\n\t\t\t\t\t\t\t\t\t\"sale_date_month\",\n\t\t\t\t\t\t\t\t\t\"sale_date_day\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'entry_date': {\n\t\t\t\t\t\t\t\t'postprocess': lambda x, _: strip_key_prefix('entry_date_', x),\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"entry_date_year\",\n\t\t\t\t\t\t\t\t\t\"entry_date_month\",\n\t\t\t\t\t\t\t\t\t\"entry_date_day\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'purchase': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t\"purch_amount\": 'amount',\n\t\t\t\t\t\t\t\t\t\"purch_currency\": 'currency',\n\t\t\t\t\t\t\t\t\t\"purch_note\": 'note',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'postprocess': [lambda d, p: add_crom_price(d, p, services)],\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"purch_amount\",\n\t\t\t\t\t\t\t\t\t\"purch_currency\",\n\t\t\t\t\t\t\t\t\t\"purch_note\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'sale': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t\"price_amount\": 'amount',\n\t\t\t\t\t\t\t\t\t\"price_currency\": 'currency',\n\t\t\t\t\t\t\t\t\t\"price_note\": 'note',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'postprocess': [lambda d, p: add_crom_price(d, p, services)],\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"price_amount\",\n\t\t\t\t\t\t\t\t\t\"price_currency\",\n\t\t\t\t\t\t\t\t\t\"price_note\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'purchase_knoedler_share': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t\"knoedpurch_amt\": 
'amount',\n\t\t\t\t\t\t\t\t\t\"knoedpurch_curr\": 'currency',\n\t\t\t\t\t\t\t\t\t\"knoedpurch_note\": 'note',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'postprocess': [lambda d, p: add_crom_price(d, p, services)],\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"knoedpurch_amt\",\n\t\t\t\t\t\t\t\t\t\"knoedpurch_curr\",\n\t\t\t\t\t\t\t\t\t\"knoedpurch_note\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'sale_knoedler_share': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t\"knoedshare_amt\": 'amount',\n\t\t\t\t\t\t\t\t\t\"knoedshare_curr\": 'currency',\n\t\t\t\t\t\t\t\t\t\"knoedshare_note\": 'note',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'postprocess': [lambda d, p: add_crom_price(d, p, services)],\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"knoedshare_amt\",\n\t\t\t\t\t\t\t\t\t\"knoedshare_curr\",\n\t\t\t\t\t\t\t\t\t\"knoedshare_note\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'book_record': {\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"stock_book_no\",\n\t\t\t\t\t\t\t\t\t\"page_number\",\n\t\t\t\t\t\t\t\t\t\"row_number\",\n\t\t\t\t\t\t\t\t\t\"description\",\n\t\t\t\t\t\t\t\t\t\"folio\",\n\t\t\t\t\t\t\t\t\t\"link\",\n\t\t\t\t\t\t\t\t\t\"heading\",\n\t\t\t\t\t\t\t\t\t\"subheading\",\n\t\t\t\t\t\t\t\t\t\"verbatim_notes\",\n\t\t\t\t\t\t\t\t\t\"working_note\",\n\t\t\t\t\t\t\t\t\t\"transaction\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'post_owner': {\n\t\t\t\t\t\t\t\t'rename_keys': {\n\t\t\t\t\t\t\t\t\t\"post_owner\": 'name',\n\t\t\t\t\t\t\t\t\t\"post_owner_auth\": 'auth_name',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'properties': (\n\t\t\t\t\t\t\t\t\t\"post_owner\",\n\t\t\t\t\t\t\t\t\t\"post_owner_auth\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t),\n\t\t\tRecordCounter(name='records', verbose=self.debug),\n\t\t\t_input=records.output\n\t\t)\n\n\t\tbooks = self.add_book_chain(graph, sales_records)\n\t\tpages = self.add_page_chain(graph, books)\n\t\trows = self.add_row_chain(graph, pages)\n\t\tobjects = self.add_object_chain(graph, rows)\n\n\t\ttx = graph.add_chain(\n\t\t\tTransactionSwitch(),\n\t\t\t_input=objects.output\n\t\t)\n\t\treturn tx\n\n\tdef add_transaction_chains(self, graph, tx, services, serialize=True):\n\t\tinventorying = graph.add_chain(\n\t\t\tExtractKeyedValue(key='Unsold'),\n\t\t\tModelInventorying(helper=self.helper),\n\t\t\t_input=tx.output\n\t\t)\n\n\t\tunsold_purchases = graph.add_chain(\n\t\t\tExtractKeyedValue(key='Unsold'),\n\t\t\tModelUnsoldPurchases(helper=self.helper),\n\t\t\t_input=tx.output\n\t\t)\n\n\t\tsale = graph.add_chain(\n\t\t\tExtractKeyedValue(key='Sold'),\n\t\t\tModelSale(helper=self.helper),\n\t\t\t_input=tx.output\n\t\t)\n\n\t\treturned = graph.add_chain(\n\t\t\tExtractKeyedValue(key='Returned'),\n\t\t\tModelReturn(helper=self.helper),\n\t\t\t_input=tx.output\n\t\t)\n\n\t\tdestruction = graph.add_chain(\n\t\t\tExtractKeyedValue(key='Destroyed'),\n\t\t\tModelDestruction(helper=self.helper),\n\t\t\t_input=tx.output\n\t\t)\n\n\t\ttheft = graph.add_chain(\n\t\t\tExtractKeyedValue(key='Stolen'),\n\t\t\tModelTheftOrLoss(helper=self.helper),\n\t\t\t_input=tx.output\n\t\t)\n\n\t\tloss = graph.add_chain(\n\t\t\tExtractKeyedValue(key='Lost'),\n\t\t\tModelTheftOrLoss(helper=self.helper),\n\t\t\t_input=tx.output\n\t\t)\n\n\t\t# activities are specific to the inventorying chain\n\t\tactivities = graph.add_chain( ExtractKeyedValues(key='_activities'), _input=inventorying.output )\n\t\tif serialize:\n\t\t\tself.add_serialization_chain(graph, activities.output, 
model=self.models['Inventorying'])\n\n\t\t# people and prov entries can come from any of these chains:\n\t\tfor branch in (sale, destruction, theft, loss, inventorying, unsold_purchases, returned):\n\t\t\tprov_entry = graph.add_chain( ExtractKeyedValues(key='_prov_entries'), _input=branch.output )\n\t\t\tpeople = graph.add_chain( ExtractKeyedValues(key='_people'), _input=branch.output )\n\n\t\t\tif serialize:\n\t\t\t\tself.add_serialization_chain(graph, prov_entry.output, model=self.models['ProvenanceEntry'])\n\t\t\t\tself.add_person_or_group_chain(graph, people)\n\n\tdef add_book_chain(self, graph, sales_records, serialize=True):\n\t\tbooks = graph.add_chain(\n# \t\t\tadd_book,\n\t\t\tAddBook(static_instances=self.static_instances, helper=self.helper),\n\t\t\t_input=sales_records.output\n\t\t)\n\t\tphys = graph.add_chain(\n\t\t\tExtractKeyedValue(key='_physical_book'),\n\t\t\t_input=books.output\n\t\t)\n\t\ttext = graph.add_chain(\n\t\t\tExtractKeyedValue(key='_text_book'),\n\t\t\t_input=books.output\n\t\t)\n\t\tact = graph.add_chain( ExtractKeyedValues(key='_activities'), _input=text.output )\n\t\tif serialize:\n\t\t\tself.add_serialization_chain(graph, act.output, model=self.models['ProvenanceEntry'])\n\t\t\tself.add_serialization_chain(graph, phys.output, model=self.models['HumanMadeObject'])\n\t\t\tself.add_serialization_chain(graph, text.output, model=self.models['LinguisticObject'])\n\t\treturn books\n\n\tdef add_page_chain(self, graph, books, serialize=True):\n\t\tpages = graph.add_chain(\n\t\t\tAddPage(static_instances=self.static_instances, helper=self.helper),\n\t\t\t_input=books.output\n\t\t)\n\t\ttext = graph.add_chain(\n\t\t\tExtractKeyedValue(key='_text_page'),\n\t\t\t_input=pages.output\n\t\t)\n\t\tact = graph.add_chain( ExtractKeyedValues(key='_activities'), _input=text.output )\n\t\tif serialize:\n\t\t\tself.add_serialization_chain(graph, act.output, model=self.models['ProvenanceEntry'])\n\t\t\tself.add_serialization_chain(graph, text.output, model=self.models['LinguisticObject'])\n\t\treturn pages\n\n\tdef add_row_chain(self, graph, pages, serialize=True):\n\t\trows = graph.add_chain(\n\t\t\tAddRow(static_instances=self.static_instances, helper=self.helper),\n\t\t\t_input=pages.output\n\t\t)\n\t\ttext = graph.add_chain(\n\t\t\tExtractKeyedValue(key='_text_row'),\n\t\t\t_input=rows.output\n\t\t)\n\t\tact = graph.add_chain( ExtractKeyedValues(key='_activities'), _input=text.output )\n\t\tif serialize:\n\t\t\tself.add_serialization_chain(graph, act.output, model=self.models['ProvenanceEntry'])\n\t\t\tself.add_serialization_chain(graph, text.output, model=self.models['LinguisticObject'])\n\t\treturn rows\n\n\tdef add_object_chain(self, graph, rows, serialize=True):\n\t\tobjects = graph.add_chain(\n\t\t\tPopulateKnoedlerObject(helper=self.helper),\n\t\t\tAddArtists(helper=self.helper),\n\t\t\t_input=rows.output\n\t\t)\n\n\t\tpeople = graph.add_chain( ExtractKeyedValues(key='_people'), _input=objects.output )\n\t\thmos1 = graph.add_chain( ExtractKeyedValues(key='_physical_objects'), _input=objects.output )\n\t\thmos2 = graph.add_chain( ExtractKeyedValues(key='_original_objects'), _input=objects.output )\n\t\ttexts = graph.add_chain( ExtractKeyedValues(key='_linguistic_objects'), _input=objects.output )\n\t\tgroups1 = graph.add_chain( ExtractKeyedValues(key='_organizations'), _input=objects.output )\n\t\tgroups2 = graph.add_chain( ExtractKeyedValues(key='_organizations'), _input=hmos1.output )\n\t\todata = 
graph.add_chain(\n\t\t\tExtractKeyedValue(key='_object'),\n\t\t\t_input=objects.output\n\t\t)\n\t\tfinal_sale = graph.add_chain(\n\t\t\tModelFinalSale(helper=self.helper),\n\t\t\t_input=objects.output\n\t\t)\n\t\tprov_entry = graph.add_chain(\n\t\t\tExtractKeyedValues(key='_prov_entries'),\n\t\t\t_input=final_sale.output\n\t\t)\n\t\tpeople2 = graph.add_chain( ExtractKeyedValues(key='_people'), _input=final_sale.output )\n\t\towners = self.add_person_or_group_chain(graph, hmos1, key='_other_owners', serialize=serialize)\n\n\t\titems = graph.add_chain(\n\t\t\tExtractKeyedValue(key='_visual_item'),\n\t\t\tpipeline.linkedart.MakeLinkedArtRecord(),\n\t\t\t_input=hmos1.output\n\t\t)\n\n# \t\tconsigners = graph.add_chain( ExtractKeyedValue(key='_consigner'), _input=objects.output )\n\t\tartists = graph.add_chain(\n\t\t\tExtractKeyedValues(key='_artists'),\n\t\t\t_input=objects.output\n\t\t)\n\t\t\n\t\tif serialize:\n\t\t\tself.add_serialization_chain(graph, items.output, model=self.models['VisualItem'])\n\t\t\tself.add_serialization_chain(graph, hmos1.output, model=self.models['HumanMadeObject'])\n\t\t\tself.add_serialization_chain(graph, hmos2.output, model=self.models['HumanMadeObject'])\n\t\t\tself.add_serialization_chain(graph, texts.output, model=self.models['LinguisticObject'])\n# \t\t\tself.add_serialization_chain(graph, consigners.output, model=self.models['Group'])\n\t\t\tself.add_person_or_group_chain(graph, groups1)\n\t\t\tself.add_person_or_group_chain(graph, groups2)\n\t\t\tself.add_person_or_group_chain(graph, artists)\n\t\t\tself.add_person_or_group_chain(graph, people)\n\t\t\tself.add_person_or_group_chain(graph, people2)\n\t\t\tself.add_person_or_group_chain(graph, owners)\n\t\t\tself.add_person_or_group_chain(graph, odata, key='_organizations')\n\t\t\tself.add_serialization_chain(graph, prov_entry.output, model=self.models['ProvenanceEntry'])\n\t\t\t_ = self.add_places_chain(graph, odata, key='_locations', serialize=serialize, include_self=True)\n\t\treturn objects\n\n\tdef _construct_graph(self, services=None):\n\t\t'''\n\t\tConstruct bonobo.Graph object(s) for the entire pipeline.\n\t\t'''\n\t\tg = bonobo.Graph()\n\n\t\tcontents_records = g.add_chain(\n\t\t\tMatchingFiles(path='/', pattern=self.files_pattern, fs='fs.data.knoedler'),\n\t\t\tCurriedCSVReader(fs='fs.data.knoedler', limit=self.limit, field_names=self.headers),\n\t\t)\n\t\tsales = self.add_sales_chain(g, contents_records, services, serialize=True)\n\t\tself.add_transaction_chains(g, sales, services, serialize=True)\n\n\t\tself.graph = g\n\t\treturn sales\n\n\tdef get_graph(self, **kwargs):\n\t\t'''Return a single bonobo.Graph object for the entire pipeline.'''\n\t\tif not self.graph:\n\t\t\tself._construct_graph(**kwargs)\n\n\t\treturn self.graph\n\n\tdef run(self, services=None, **options):\n\t\t'''Run the Knoedler bonobo pipeline.'''\n\t\tif self.verbose:\n\t\t\tprint(f'- Limiting to {self.limit} records per file', file=sys.stderr)\n\t\tif not services:\n\t\t\tservices = self.get_services(**options)\n\n\t\tif self.verbose:\n\t\t\tprint('Running graph...', file=sys.stderr)\n\t\tgraph = self.get_graph(services=services, **options)\n\t\tself.run_graph(graph, services=services)\n\n\t\tif self.verbose:\n\t\t\tprint('Serializing static instances...', file=sys.stderr)\n\t\tfor model, instances in self.static_instances.used_instances().items():\n\t\t\tg = bonobo.Graph()\n\t\t\tnodes = self.serializer_nodes_for_model(model=self.models[model], use_memory_writer=False)\n\t\t\tvalues = instances.values()\n\t\t\tsource = 
g.add_chain(GraphListSource(values))\n\t\t\tself.add_serialization_chain(g, source.output, model=self.models[model], use_memory_writer=False)\n\t\t\tself.run_graph(g, services={})\n\n\nclass KnoedlerFilePipeline(KnoedlerPipeline):\n\t'''\n\tKnoedler pipeline with serialization to files based on Arches model and resource UUID.\n\n\tIf in `debug` mode, JSON serialization will use pretty-printing. Otherwise,\n\tserialization will be compact.\n\t'''\n\tdef __init__(self, input_path, data, **kwargs):\n\t\tsuper().__init__(input_path, data, **kwargs)\n\t\tself.writers = []\n\t\tself.output_path = kwargs.get('output_path')\n\n\tdef serializer_nodes_for_model(self, *args, model=None, use_memory_writer=True, **kwargs):\n\t\tnodes = []\n\t\tif self.debug:\n\t\t\tif use_memory_writer:\n\t\t\t\tw = MergingMemoryWriter(directory=self.output_path, partition_directories=True, compact=False, model=model)\n\t\t\telse:\n\t\t\t\tw = MergingFileWriter(directory=self.output_path, partition_directories=True, compact=False, model=model)\n\t\t\tnodes.append(w)\n\t\telse:\n\t\t\tif use_memory_writer:\n\t\t\t\tw = MergingMemoryWriter(directory=self.output_path, partition_directories=True, compact=True, model=model)\n\t\t\telse:\n\t\t\t\tw = MergingFileWriter(directory=self.output_path, partition_directories=True, compact=True, model=model)\n\t\t\tnodes.append(w)\n\t\tself.writers += nodes\n\t\treturn nodes\n\n\tdef run(self, **options):\n\t\t'''Run the Knoedler bonobo pipeline.'''\n\t\tstart = timeit.default_timer()\n\t\tservices = self.get_services(**options)\n\t\tsuper().run(services=services, **options)\n\t\tprint(f'Pipeline runtime: {timeit.default_timer() - start}', file=sys.stderr)\n\n\t\tcount = len(self.writers)\n\t\tfor seq_no, w in enumerate(self.writers):\n\t\t\tprint('[%d/%d] writers being flushed' % (seq_no+1, count))\n\t\t\tif isinstance(w, MergingMemoryWriter):\n\t\t\t\tw.flush()\n\n\t\tprint('====================================================')\n\t\tprint('Total runtime: ', timeit.default_timer() - start)\n","sub_path":"pipeline/projects/knoedler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":78050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"13938356","text":"# -*- coding: utf-8 -*-\n\nimport llbc\n\nclass pyllbcPacket(object):\n def __init__(self, svc, sender_svc_id, session_id, local_ip, local_port, peer_ip, peer_port, opcode, status, status_desc, data, extdata1, extdata2, extdata3, packet_cobj):\n self.svc = svc\n self.sender_svc_id = sender_svc_id\n self.session_id = session_id\n self.local_ip = local_ip\n self.local_port = local_port\n self.peer_ip = peer_ip\n self.peer_port = peer_port\n self.opcode = opcode\n self.status = status\n self.status_desc = status_desc\n self.data = data\n self.extdata1 = extdata1\n self.extdata2 = extdata2\n self.extdata3 = extdata3\n\n self._packet_cobj = packet_cobj\n\n self._str = None\n\n @property\n def recver_svc_id(self):\n return self.svc.id\n\n def __str__(self):\n if self._str is None:\n d = {\n 'sender_svc_id': self.sender_svc_id,\n 'recver_svc_id': self.recver_svc_id,\n 'session_id': self.session_id,\n 'local_addr': '{}:{}'.format(self.local_ip, self.local_port),\n 'peer_addr': '{}:{}'.format(self.peer_ip, self.peer_port),\n 'opcode': self.opcode,\n 'status': self.status,\n 'status_desc': self.status_desc,\n 'data': str(self.data),\n 'extdata1': self.extdata1,\n 'extdata2': self.extdata2,\n 'extdata3': self.extdata3,\n }\n self._str = str(d)\n return 
self._str\n\nllbc.Packet = pyllbcPacket\n\n","sub_path":"wrap/pyllbc/script/comm/Packet.py","file_name":"Packet.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"338709844","text":"# Copyright (c) Barefoot Networks, Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n\nfrom p4_hlir.hlir import p4_header_instance\nfrom ebpfType import EbpfType\nfrom compilationException import CompilationException\nfrom programSerializer import ProgramSerializer\nimport typeFactory\n\n\nclass EbpfInstanceBase(object):\n    def __init__(self):\n        pass\n\n\nclass SimpleInstance(EbpfInstanceBase):\n    # A header or a metadata instance (but not array elements)\n    def __init__(self, hlirInstance, factory, isMetadata):\n        super(SimpleInstance, self).__init__()\n        self.hlirInstance = hlirInstance\n        self.name = hlirInstance.base_name\n        self.type = factory.build(hlirInstance.header_type, isMetadata)\n\n    def declare(self, serializer):\n        assert isinstance(serializer, ProgramSerializer)\n        self.type.declare(serializer, self.name, False)\n\n\nclass EbpfHeader(SimpleInstance):\n    \"\"\" Represents a header instance from a P4 program \"\"\"\n    def __init__(self, hlirHeaderInstance, factory):\n        super(EbpfHeader, self).__init__(hlirHeaderInstance, factory, False)\n        if hlirHeaderInstance.metadata:\n            raise CompilationException(True, \"Metadata passed to EbpfHeader\")\n        if hlirHeaderInstance.index is not None:\n            self.name += \"_\" + str(hlirHeaderInstance.index)\n\n\nclass EbpfMetadata(SimpleInstance):\n    \"\"\"Represents a metadata instance from a P4 program\"\"\"\n    def __init__(self, hlirMetadataInstance, factory):\n        super(EbpfMetadata, self).__init__(hlirMetadataInstance, factory, True)\n        if not hlirMetadataInstance.metadata:\n            raise CompilationException(\n                True, \"Header instance passed to EbpfMetadata {0}\",\n                hlirMetadataInstance)\n        if hlirMetadataInstance.index is not None:\n            raise CompilationException(\n                True, \"Unexpected metadata array {0}\", self.hlirInstance)\n        if hasattr(hlirMetadataInstance, \"initializer\"):\n            self.initializer = hlirMetadataInstance.initializer\n        else:\n            self.initializer = None\n\n    def emitInitializer(self, serializer):\n        assert isinstance(serializer, ProgramSerializer)\n        if self.initializer is None:\n            self.type.emitInitializer(serializer)\n        else:\n            for key in self.initializer.keys():\n                serializer.appendFormat(\n                    \".{0} = {1},\", key, self.initializer[key])\n\n\nclass EbpfHeaderStack(EbpfInstanceBase):\n    \"\"\"Represents a header stack instance; there is one instance of\n    this class for each STACK, and not for each\n    element of the stack, as in the HLIR\"\"\"\n    def __init__(self, hlirInstance, indexVar, factory):\n        super(EbpfHeaderStack, self).__init__()\n\n        # indexVar: name of the ebpf variable that\n        # holds the current index for this stack\n        assert isinstance(indexVar, str)\n        assert isinstance(factory, typeFactory.EbpfTypeFactory)\n        assert isinstance(hlirInstance, p4_header_instance)\n\n        self.indexVar = indexVar\n        self.name = hlirInstance.base_name\n        self.basetype = factory.build(hlirInstance.header_type, False)\n        assert isinstance(self.basetype, EbpfType)\n        self.arraySize = hlirInstance.max_index + 1\n        self.hlirInstance = hlirInstance\n\n    def declare(self, serializer):\n        assert isinstance(serializer, ProgramSerializer)\n        self.basetype.declareArray(serializer, self.name, 
self.arraySize)\n","sub_path":"src/cc/frontends/p4/compiler/ebpfInstance.py","file_name":"ebpfInstance.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"18855432","text":"import GeneticAlgorithm as ga\nimport datetime\nimport random\nfrom decimal import Decimal\n\nShapeList = [1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4]\n\nWIDTH = ga.WIDTH\nHIGHT = ga.HIGHT\n\n# Length of the genome information\n# The genome length must be divisible by 5\nGENOM_LENGTH = 100\n\n# Size of the genome population\nMAX_GENOM_LIST = 500\n# Number of genomes selected\nSELECT_GENOM = 20\n# Individual mutation probability\nINDIVIDUAL_MUTATION = 0.1\n# Gene mutation probability\nGENOM_MUTATION = 0.1\n# Number of generations to run\nMAX_GENERATION = 80\n\n# Arrays recording average fitness : GROWTH\nGROWTH_min = []\nGROWTH_max = []\nGROWTH_ave = []\n\nene = 5\nene_count = 0\n# absolute coordinates\n\ndef create_genom(length):\n    \"\"\"\n    Generates random genome information of the length given by the argument and returns it stored in a genomClass.\n    Genes come in groups of 5:\n\n    genom[n + 0] = rotation angle (the four directions 0, 90, 180, 270) (0 to 3)\n    genom[n + 1] = moved y coordinate (-3 to 3 (relative y position))\n    genom[n + 2] = absolute x coordinate\n    genom[n + 3] = absolute y coordinate\n    genom[n + 4] = identification number of the kind of shape to produce\n      n groups (GENOM_LENGTH / 5)\n\n    :param length: length of the genome information\n    :return: the generated individual population genomClass\n    \"\"\"\n    abs_X = 0\n    abs_Y = 0\n    genome_list = []\n\n    countshape = 0\n\n    for i in range(int(float(length)/5)):\n\n        temp = [random.randint(0,3),random.randint(0,HIGHT-1),ShapeList[countshape]]\n\n\n        endflag = False\n        count = 0\n        while(not endflag):\n            endflag = True\n\n            abs_X += 0\n            abs_Y += 0\n\n        genome_list.append(temp[0])\n        genome_list.append(temp[1])\n        genome_list.append(abs_X)\n        genome_list.append(abs_Y)\n        genome_list.append(temp[2])\n\n        countshape += 1\n\n        # course-out handling (disabled): clamp y back onto the board\n        \"\"\"\n        if(genome_list[len(genome_list)-2]>=HIGHT):# when y falls off the top\n            genome_list[len(genome_list)-2]=HIGHT-1\n            genome_list[len(genome_list)-3]+=1\n            genome_list[len(genome_list)-1]=1\n        elif(genome_list[len(genome_list)-2]>0):# when y falls off the bottom:\n            genome_list[len(genome_list)-2]=0\n            genome_list[len(genome_list)-3]+=1\n        if(genome_list[len(genome_list)-2]>=HIGHT-2 and genome_list[len(genome_list)-1]>=2):# when y falls off the top\n            genome_list[len(genome_list)-1]=1\n        \"\"\"\n    return ga.genom(genome_list, 0)\n\n\ndef evaluation(ga):\n\n    # Top part: helper functions internal to the evaluation function\n    # getTime() : computes and returns the final elapsed time for each genome.\n    def getTime(stockShape):\n        time = 0\n        for x in stockShape:\n            if(x==1):\n                time += 1000\n            elif(x==2):\n                time += 2500\n            elif(x==3):\n                time += 3000\n            elif(x==4):\n                time += 6000\n        return time\n\n    # getDiversity() : judges whether the mix of mold shapes is skewed and returns an evaluation value.\n    def getDiversity(stockShape):\n        MaxP = GENOM_LENGTH / Decimal(5)\n        evaluation = 0\n        ShapeList = [0,0,0,0] #array6\n\n        for x in stockShape:\n            if(x==1):\n                ShapeList[0] += 1\n                evaluation += 100\n            elif(x==2):\n                ShapeList[1] += 1\n                evaluation += 200\n            elif(x==3):\n                ShapeList[2] += 1\n                evaluation += 600\n            elif(x==4):\n                ShapeList[3] += 1\n                evaluation += 500\n\n        if(ShapeList[0] > MaxP/Decimal(len(ShapeList))):\n            evaluation -= 100*(ShapeList[0]-MaxP/Decimal(len(ShapeList)))\n        if(ShapeList[1] > MaxP/Decimal(len(ShapeList))):\n            evaluation -= 200*(ShapeList[1]-MaxP/Decimal(len(ShapeList)))\n        if(ShapeList[2] > MaxP/Decimal(len(ShapeList))):\n            evaluation -= 300*(ShapeList[2]-MaxP/Decimal(len(ShapeList)))\n        if(ShapeList[3] > MaxP/Decimal(len(ShapeList))):\n            evaluation -= 1200*(ShapeList[3]-MaxP/Decimal(len(ShapeList)))\n\n        return evaluation\n\n\n\n    \"\"\"Evaluation function.\n    The smaller the coordinate differences between markers, the better.\n    Overlapping positions score negatively.\n    Falling off the steel plate scores a large penalty.\n    The less empty space, the higher the evaluation value.\n    The shorter the elapsed time, the higher the evaluation value (travel time weighs more heavily than differences between mold types).\n    The less skewed the mix of mold types, the higher the evaluation value (exceeding a fixed share scores negatively).\n    evalist: records the coordinates where a marker has already been placed\n    :param ga: the genomClass to evaluate\n    :return: returns the genomClass after evaluation\n    \"\"\"\n\n    eval = 0\n    SumMoveTime = 0\n    evalist = []\n    count = 0\n    checkX = 0\n    checkY = 0\n    checkRota = 0\n    endX = 0\n    checkShape = 0\n    StockShape = []\n    before = (0,0)\n\n    add = True\n\n\n    for x in ga.getGenom():\n\n        if(count % 5 == 4):\n            checkShape = x\n\n            if(checkY < HIGHT and checkX < WIDTH and checkY >= 0):\n                if(ga.getReservation(checkY,checkX)==1):\n                    now = (checkX,checkY)\n                    for x in range(len(evalist)):\n                        if(evalist[x]==now):\n                            add = False\n                            break\n\n                    if(add):\n                        Mult = 1\n                        AddMoveTime = 0\n                        evalist.append(now)\n                        if(checkShape==2):\n                            if(checkRota == 0 or checkRota == 2):\n                                evalist.append((now[0]+1,now[1]))\n                                evalist.append((now[0]+2,now[1]))\n                            elif(checkRota == 1 or checkRota == 3):\n                                evalist.append((now[0],now[1]+1))\n                                evalist.append((now[0],now[1]+2))\n                            Mult = 3\n                        if(checkShape==3):\n                            evalist.append((now[0]+1,now[1]))\n                            evalist.append((now[0],now[1]+1))\n                            evalist.append((now[0]+1,now[1]+1))\n                            Mult = 4\n                        if(checkShape==4):\n                            if(checkRota == 0 or checkRota == 2):\n                                evalist.append((now[0]+1,now[1]))\n                                evalist.append((now[0]+2,now[1]))\n                                evalist.append((now[0],now[1]+1))\n                                evalist.append((now[0]+1,now[1]+1))\n                                evalist.append((now[0]+2,now[1]+1))\n                            if(checkRota == 1 or checkRota == 3):\n                                evalist.append((now[0],now[1]+1))\n                                evalist.append((now[0],now[1]+2))\n                                evalist.append((now[0]+1,now[1]))\n                                evalist.append((now[0]+1,now[1]+1))\n                                evalist.append((now[0]+1,now[1]+2))\n                            Mult = 6\n\n                        eval += (5 *Mult)\n                        now = before\n                    else:\n                        eval -= 200\n\n            else:\n                eval -= 300\n            StockShape.append(checkShape)\n\n        if(count % 5 == 3):\n\n            checkY = x\n\n        elif(count % 5 == 2):\n            checkX = x\n            if(x>endX):\n                endX = x\n        elif(count % 5 == 0):\n            checkRota = x\n\n        count += 1\n\n    for y in range(HIGHT):\n        for x in range(endX+1):\n            if(ga.getReservation(y,x)==0):\n                eval -= 5\n\n    if(eval <= 0):\n        eval = 0\n\n    return eval\n\n\ndef select(ga, elite):\n    \"\"\"Selection function: elite selection.\n    :param ga: array of genomClass to select from\n    :return: returns the selected fixed number of elite genomClass\n    \"\"\"\n    # Sort the current-generation population by evaluation, highest first\n    sort_result = sorted(ga, reverse=True, key=lambda u: u.evaluation)\n    # Extract a fixed number of top individuals\n    result = [sort_result.pop(0) for i in range(elite)]\n    return result\n\n\ndef crossover(ga_one, ga_second):\n    \"\"\"Crossover function. Performs two-point crossover.\n    Since genes come in groups of 5 here, the crossover points are multiples of 5 so that no group is split.\n    :param ga: array of genomClass to cross\n    :param ga_one:\n    :param ga_second:\n    :return: returns a list holding the two offspring genomClass\n    \"\"\"\n    copy_put = [[0 for i in range(WIDTH)] for j in range(HIGHT)]\n    copy2put = [[0 for i in range(WIDTH)] for j in range(HIGHT)]\n    # create a list to store the offspring\n    genom_list = []\n    # set the two crossover points to swap between -> [1:25]\n\n    cross_one = (random.randint(0, GENOM_LENGTH//5)*5)# always a multiple of 5 (genes come in groups of 5)\n    cross_second = random.randint(cross_one, GENOM_LENGTH)\n\n\n    endflag = False\n    while(not endflag):\n        if(cross_second % 5 ==0):\n            endflag = True\n        else:\n            cross_second = random.randint(cross_one,GENOM_LENGTH)\n\n    # take out the genes\n    one = ga_one.getGenom()\n    second = ga_second.getGenom()\n    # cross them\n    progeny_one = one[:cross_one] + second[cross_one:cross_second] + one[cross_second:]\n    progeny_second = second[:cross_one] + one[cross_one:cross_second] + second[cross_second:]\n\n    # create genomClass instances and store the offspring in the list\n    genom_list.append(ga.genom(progeny_one, 0))\n    genom_list.append(ga.genom(progeny_second, 0))\n    return genom_list\n\n\ndef next_generation_gene_create(ga, ga_elite, ga_progeny):\n    \"\"\"\n    Generation replacement\n    :param ga: current-generation population\n    :param ga_elite: current-generation elite group\n    :param ga_progeny: current-generation offspring group\n    :return: next-generation population\n    \"\"\"\n    # Sort the current-generation population by evaluation, lowest first\n    next_generation_geno = sorted(ga, reverse=False, key=lambda u: u.evaluation)\n    # Remove as many individuals as the elite and offspring groups will add\n    for i in range(0, len(ga_elite) + len(ga_progeny)):\n        next_generation_geno.pop(0)\n    # Add the elite group and the offspring group to the next generation\n    next_generation_geno.extend(ga_elite)\n    next_generation_geno.extend(ga_progeny)\n    return next_generation_geno\n\n\ndef mutation(ga, individual_mutation, genom_mutation):\n    \"\"\"Mutation.\n    Absolute coordinates are left unchanged; everything else may mutate.\n\n    :param ga: genomClass\n    :return: returns the genomClass after mutation\"\"\"\n    ga_list = []\n    for i in ga:\n        # mutation occurs on an individual with a fixed probability\n        if individual_mutation > (random.randint(0, 100) / Decimal(100)):\n            genom_list = []\n            count = 0\n            for i_ in i.getGenom():\n                # mutation occurs on each element of the individual's gene information\n                if genom_mutation > (random.randint(0, 100) / Decimal(100)):\n                    if(count%5 == 4):\n                        genom_list.append(i_)\n                    elif(count%5 == 3):\n                        genom_list.append(i_)\n                    elif(count%5 == 2):\n                        genom_list.append(i_)\n                    elif(count%5 == 1):\n                        genom_list.append(random.randint(0,HIGHT-1))\n                    elif(count%5 == 0):\n                        genom_list.append(random.randint(0,3))\n                else:\n                    genom_list.append(i_)\n                count += 1\n            i.setGenom(genom_list)\n            ga_list.append(i)\n        else:\n            ga_list.append(i)\n    return ga_list\n\n\nif __name__ == '__main__':\n\n    # Generate the very first current-generation population.\n    current_generation_individual_group = []\n    for i in range(MAX_GENOM_LIST):\n        current_generation_individual_group.append(create_genom(GENOM_LENGTH))\n\n    for count_ in range(1, MAX_GENERATION + 1):\n\n        # Evaluate the genes of the current-generation population and store the result in each genomClass\n        for i in range(MAX_GENOM_LIST):\n            evaluation_result = evaluation(current_generation_individual_group[i])\n            current_generation_individual_group[i].setEvaluation(evaluation_result)\n        # Select the elite individuals\n        elite_genes = select(current_generation_individual_group,SELECT_GENOM)\n        # Cross the elite genes and store the offspring in a list\n        progeny_gene = []\n        for i in range(0, SELECT_GENOM):\n            progeny_gene.extend(crossover(elite_genes[i - 1], elite_genes[i]))\n        # Build the next-generation population from the current generation, the elite group and the offspring group\n        next_generation_individual_group = next_generation_gene_create(current_generation_individual_group,\n                                                                       elite_genes, progeny_gene)\n        # Apply mutation to every individual of the next-generation population.\n        if(len(GROWTH_ave) >= 2 and (GROWTH_ave[len(GROWTH_ave)-1]>=GROWTH_ave[len(GROWTH_ave)-2])):\n            ene_count += 1\n        elif(len(GROWTH_ave) >= 2 and 5*(-(GROWTH_ave[len(GROWTH_ave)-1]-GROWTH_ave[len(GROWTH_ave)-2])/ene)>=random.randint(1,100)):\n            ene_count +=1\n        '''\n        # for checking the acceptance probability\n        if(len(GROWTH_ave) >= 2):\n            print('----------Metropolis------------')\n            print(5*(-(GROWTH_ave[len(GROWTH_ave)-1]-GROWTH_ave[len(GROWTH_ave)-2])/ene))\n            print('----------Metropolis------------')\n        '''\n\n        if(ene_count>=5):\n            print('The temperature has dropped')\n            INDIVIDUAL_MUTATION = Decimal(INDIVIDUAL_MUTATION) - (Decimal(INDIVIDUAL_MUTATION)/Decimal(MAX_GENERATION))\n            GENOM_MUTATION = Decimal(GENOM_MUTATION) - (Decimal(GENOM_MUTATION)/Decimal(MAX_GENERATION))\n            ene -= (ene/Decimal(MAX_GENERATION))\n            ene_count = 0\n\n\n        next_generation_individual_group = mutation(next_generation_individual_group,INDIVIDUAL_MUTATION,GENOM_MUTATION)\n\n        # Evolutionary computation for one generation is done. Moving on to evaluation.\n\n        # Collect each individual's fitness into an array.\n\n\n        fits = [i.getEvaluation() for i in current_generation_individual_group]\n        fits.sort()\n        for ev in range(len(fits)):\n            if(ev == len(fits)/Decimal(2)):\n                current_generation_individual_group[ev].setGenom(current_generation_individual_group[ev].getGenom())\n                for y in range(HIGHT):\n                    for x in range(WIDTH):\n                        print(current_generation_individual_group[ev].getReservation(y,x),end=\"\")\n                    print(\"\")\n                break\n\n        # Evaluate the results of this evolution step\n        min_ = min(fits)\n        max_ = max(fits)\n        avg_ = sum(fits) / Decimal(len(fits))\n\n        # Print the evolution results of the current generation\n        print(\"-----Generation {} results-----\".format(count_))\n        print(\" Min:{}\".format(min_))\n        print(\" Max:{}\".format(max_))\n        print(\" Avg:{}\".format(avg_))\n\n        GROWTH_min.append(min_)\n        GROWTH_max.append(max_)\n        GROWTH_ave.append(avg_)\n\n        ene -= 1/Decimal(MAX_GENERATION)\n\n\n        # Swap the current generation for the next generation\n        current_generation_individual_group = next_generation_individual_group\n    # print the final result\n    print(\"The best individual is {}\".format(elite_genes[0].getGenom()))\n    elite_genes[0].setGenom(elite_genes[0].getGenom())\n    for y in range(HIGHT):\n        for x in range(WIDTH):\n            print(elite_genes[0].getReservation(y,x),end=\"\")\n        print(\"\")\n\n\n    # write out the plan file for the best individual\n    #data_path = 'data/plan.csv' # test\n    data_path = '../../steelcut_rota/steelcut/data/plan.csv' # production\n\n    with open(data_path,mode='w') as f:\n        EG = elite_genes[0].getGenom()\n        count = 1\n        while(count < len(EG)):\n            f.write(str(EG[count+1]))\n            f.write(',')\n            f.write(str(EG[count+2]))\n            f.write(',')\n            f.write(str(EG[count+3]))\n            f.write(',')\n            f.write(str(EG[count-1]))\n            f.write('\\n')\n            count += 5\n        f.close()\n\n    dt_now = datetime.datetime.now()\n    dateN = dt_now.strftime('FG%Y%m%d-%H-%M-%S')\n    growth_path = 'data/SA/'+str(dateN)+'.csv'\n\n    with open(growth_path,mode='w') as f:\n        for x in range(len(GROWTH_ave)):\n            f.write(str(GROWTH_min[x]))\n            f.write(',')\n            f.write(str(GROWTH_max[x]))\n            f.write(',')\n            f.write(str(GROWTH_ave[x]))\n            f.write('\\n')\n        f.close()\n","sub_path":"GA/gaAStray/annealing.py","file_name":"annealing.py","file_ext":"py","file_size_in_byte":17115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"513968350","text":"from queue import Queue\n\ntry:\n\tfhandle = open(\"test.txt\", 'r')\nexcept:\n\tprint(\"No such file.\")\n\texit()\n\npozycje = {'a': 1, 'b': 2, 'c':3, 'd': 4, 'e': 5, 'f': 6, 'g':7, 'h': 8}\n\nq = Queue(0)\n\ndef mate(state):\n\t#print(state)\n\twKingPos = state['wKing']\n\tbKingPos = state['bKing']\n\twRookPos = state['wRook']\n\tdistK = (wKingPos[0] - bKingPos[0])**2 + (wKingPos[1] - bKingPos[1])**2\n\t#left wall\n\tif bKingPos[0] == 1 and distK == 4 and wRookPos[0] == 1:\n\t\treturn True\n\t#right wall\n\tif bKingPos[0] == 8 and distK == 4 and wRookPos[0] == 8:\n\t\treturn True \n\t#ceiling\n\tif bKingPos[1] == 8 and distK == 4 and wRookPos[1] == 8:\n\t\treturn True\n\t#floor \n\tif bKingPos[1] == 1 and distK == 4 and wRookPos[1] == 1:\n\t\treturn True\n\treturn False\n\ndef is_empty(state, pos):\n\tfigury = ['wKing', 'wRook', 'bKing']\n\tfor figura in figury:\n\t\tif state[figura] == pos:\n\t\t\treturn False \n\treturn True\n\ndef inbound(pos):\n\tif pos[0] >=1 and pos[0] <=8 and pos[1] >=1 and pos[1] <=8:\n\t\treturn True\n\treturn False\n\ndef legal(pos, state, figure):\n\tif not is_empty(state, pos):\n\t\treturn False\n\tif figure == 'wRook':\n\t\t#trying to improve the speed by forcing rook to check every turn\n\t\t#if pos[0] != state['bKing'][0] and pos[1] != state['bKing'][1]:\n\t\t#\treturn False\n\t\t#checking collision\n\t\tif (pos[0] == state['bKing'][0] and pos[1] < state['bKing'][1] and state['wRook'][1] > state['bKing'][1]) or (pos[0] == state['bKing'][0] and pos[1] > state['bKing'][1] and state['wRook'][1] < state['bKing'][1]) or (pos[1] == state['bKing'][1] and pos[0] < state['bKing'][0] and state['wRook'][0] > state['bKing'][0]) or (pos[1] == state['bKing'][1] and pos[0] > state['bKing'][0] and state['wRook'][0] < state['bKing'][0]):\n\t\t\treturn False\n\t\tif (pos[0] 
== state['wKing'][0] and pos[1] > state['wKing'][1] and state['wRook'][1] < state['wKing'][1]) or (pos[1] == state['wKing'][1] and pos[0] < state['wKing'][0] and state['wRook'][0] > state['wKing'][0]) or (pos[1] == state['wKing'][1] and pos[0] > state['wKing'][0] and state['wRook'][0] < state['wKing'][0]):\n\t\t\treturn False\n\t\t#checking if at least one field away from bKing\n\t\tdist = (pos[0] - state['bKing'][0])**2 + (pos[1] - state['bKing'][1])**2\n\t\tif dist == 1:\n\t\t\treturn False\n\t\treturn True\n\tif figure == 'wKing':\n\t\tposB = state['bKing']\n\t\tdist = (pos[0] - posB[0])**2 + (pos[1] - posB[1])**2\n\t\tif dist >= 4:\n\t\t\treturn True\n\t\treturn False\n\tif figure == 'bKing':\n\t\tposW = state['wKing']\n\t\tdist = (pos[0] - posW[0])**2 + (pos[1] - posW[1])**2\n\t\tif dist >= 4 and pos[0] != state['wRook'][0] and pos[1] != state['wRook'][1]:\n\t\t\treturn True\n\t\treturn False\n \ndef kingSurroundings(pos):\n\treturn [(pos[0]-1, pos[1]-1),(pos[0]-1, pos[1]),(pos[0]-1, pos[1]+1),(pos[0], pos[1]-1),(pos[0], pos[1]+1),(pos[0]+1, pos[1]-1),(pos[0]+1, pos[1]),(pos[0]+1, pos[1]+1)]\n\ndef rookSurroundings(pos):\n\tresult = []\n\tfor x in range(1,9):\n\t\t\tif (x, pos[1]) != pos:\n\t\t\t\tresult.append((x,pos[1]))\n\t\t\tif (pos[0], x) != pos:\n\t\t\t\tresult.append((pos[0], x))\n\treturn result\n\ndef possible_moves(state, figure):\n\tif state['turn'] == 'white' and figure == 'bKing':\n\t\treturn []\n\tif state['turn'] == 'black' and figure != 'bKing':\n\t\treturn []\n\t#rozwazyc reszte przypadkow\n\tresult = []\n\tif figure == 'wKing':\n\t\tcurr_pos = state['wKing']\n\t\tcandidates = kingSurroundings(curr_pos)\n\t\tfor candidate in candidates:\n\t\t\t#need to check not only if inbound but also if legal\n\t\t\tif inbound(candidate) and legal(candidate, state, 'wKing'):\n\t\t\t\tresult.append(candidate)\n\t\treturn result\n\tif figure == 'wRook':\n\t\tcurr_pos = state['wRook']\n\t\tcandidates = rookSurroundings(curr_pos)\n\t\tfor candidate in candidates:\n\t\t\t#need to check not only if inbound but also if legal\n\t\t\tif inbound(candidate) and legal(candidate, state, 'wRook'):\n\t\t\t\tresult.append(candidate)\n\t\treturn result\n\tif figure == 'bKing':\n\t\tcurr_pos = state['bKing']\n\t\tcandidates = kingSurroundings(curr_pos)\n\t\tfor candidate in candidates:\n\t\t\t#need to check not only if inbound but also if legal\n\t\t\tif inbound(candidate) and legal(candidate, state, 'bKing'):\n\t\t\t\tresult.append(candidate)\n\t\treturn result\n\n\ndef switch_turn(turn):\n\tif turn == 'white':\n\t\treturn 'black'\n\treturn 'white'\n\ndef bfs(state):\n\tq.put(state)\n\tvisited_states = set()\n\n\twhile not q.empty():\n\t\tcurr_state = q.get()\n\t\tvisited_states.add(tuple(curr_state))\n\t\t\n\t\tif mate(curr_state):\n\t\t\treturn curr_state['moves']\n\n\t\t#Adding possible moves of white King\n\t\twKingMoves = possible_moves(curr_state, 'wKing')\n\t\tfor move in wKingMoves:\n\t\t\tnext_state = curr_state.copy()\n\t\t\tnext_state['wKing'] = move\n\t\t\tnext_state['moves'] = next_state.get('moves', 0) + 1\n\t\t\tnext_state['turn'] = switch_turn(next_state.get('turn', None))\n\t\t\ttup = []\n\t\t\tfor key in next_state.keys():\n\t\t\t\ttup.append(next_state[key])\n\t\t\tif not tuple(tup) in visited_states:\n\t\t\t\tq.put(next_state)\n\n\t\t#Adding possible moves of white Rook\n\t\twRookMoves = possible_moves(curr_state, 'wRook')\n\t\tfor move in wRookMoves:\n\t\t\tnext_state = curr_state.copy()\n\t\t\tnext_state['wRook'] = move\n\t\t\tnext_state['moves'] = next_state.get('moves', 0) 
+ 1\n\t\t\tnext_state['turn'] = switch_turn(next_state.get('turn', None))\n\t\t\ttup = []\n\t\t\tfor key in next_state.keys():\n\t\t\t\ttup.append(next_state[key])\n\t\t\tif not tuple(tup) in visited_states:\n\t\t\t\tq.put(next_state)\n\n\t\t#Adding possible moves of black King\n\t\tbKingMoves = possible_moves(curr_state, 'bKing')\n\t\tfor move in bKingMoves:\n\t\t\tnext_state = curr_state.copy()\n\t\t\tnext_state['bKing'] = move\n\t\t\tnext_state['moves'] = next_state.get('moves', 0) + 1\n\t\t\tnext_state['turn'] = switch_turn(next_state.get('turn', None))\n\t\t\ttup = []\n\t\t\tfor key in next_state.keys():\n\t\t\t\ttup.append(next_state[key])\n\t\t\tif not tuple(tup) in visited_states:\n\t\t\t\tq.put(next_state)\n\n\treturn -1\n\n\nfor line in fhandle:\n\tstart, white_king, white_rook, black_king = line.split()\n\n\tstate = {'wKing': (pozycje[white_king[0]], int(white_king[1])), 'wRook': (pozycje[white_rook[0]], int(white_rook[1])), 'bKing': (pozycje[black_king[0]], int(black_king[1])), 'turn': start, 'moves': 0}\n\n\n\n\tprint(bfs(state))\n","sub_path":"AI/P1/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626834224","text":"import mongoengine as me\nfrom flask_mongoengine import MongoEngine\nfrom mongoengine.fields import StringField, DateField, ListField, IntField, ObjectIdField, EmbeddedDocumentField, ReferenceField, DictField, DateTimeField\nfrom .db import mongoEngine\n\nclass Campaign(mongoEngine.Document):\n _id = ObjectIdField()\n name = StringField(required=True)\n description = StringField()\n startTime = DateTimeField(required=True)\n endTime = DateTimeField(required=True)\n keyword = StringField(required=True)\n links = ListField(mongoEngine.StringField())\n total_comments = IntField()\n total_pos = IntField()\n total_neg = IntField()\n total_neu = IntField()\n status = StringField(required=True)\n\nclass Comment(mongoEngine.Document):\n _id = ObjectIdField()\n text = StringField(required=True)\n post_id = StringField()\n label = StringField()\n date = DateTimeField()\n\nclass Post(mongoEngine.Document):\n _id = ObjectIdField()\n post_id = StringField()\n campaign = StringField()\n source = StringField()\n date = DateField()\n comments = DictField()\n url = StringField()\n text = StringField()\n total_comments = IntField()\n total_pos = IntField()\n total_neg = IntField()\n total_neu = IntField()\n reactions = IntField()\n like = IntField()\n love = IntField()\n haha = IntField()\n wow = IntField()\n sad = IntField()\n care = IntField()\n angry = IntField()","sub_path":"app/database/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"147790768","text":"# Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pgl\nimport paddle\n\n\ndef set_seed(seed):\n paddle.seed(seed)\n np.random.seed(seed)\n\n\ndef normalize(feat):\n return feat / np.maximum(np.sum(feat, -1, keepdims=True), 1)\n\n\ndef cheby(i, x):\n if i == 0:\n return 1\n elif i == 1:\n return x\n else:\n T0 = 1\n T1 = x\n for ii in range(2, i + 1):\n T2 = 2 * x * T1 - T0\n T0, T1 = T1, T2\n return T2\n\n\ndef random_splits(\n dataset,\n seed,\n train_rate=0.6,\n val_rate=0.2, ):\n #print(dataset.y.shape[0])\n percls_trn = int(\n round(train_rate * dataset.y.shape[0] / dataset.num_classes))\n val_lb = int(round(val_rate * dataset.y.shape[0]))\n\n index = [i for i in range(0, dataset.y.shape[0])]\n num_classes = dataset.num_classes\n train_idx = []\n rnd_state = np.random.RandomState(seed)\n for c in range(num_classes):\n class_idx = np.where(dataset.y == c)[0]\n if len(class_idx) < percls_trn:\n train_idx.extend(class_idx)\n else:\n train_idx.extend(\n rnd_state.choice(\n class_idx, percls_trn, replace=False))\n rest_index = [i for i in index if i not in train_idx]\n val_idx = rnd_state.choice(rest_index, val_lb, replace=False)\n test_idx = [i for i in rest_index if i not in val_idx]\n\n dataset.train_index = train_idx\n dataset.val_index = val_idx\n dataset.test_index = test_idx\n return dataset\n\n\ndef load_data(name, seed, normalized_feature=True):\n if name == 'cora':\n dataset = pgl.dataset.CoraDataset()\n elif name == \"pubmed\":\n dataset = pgl.dataset.CitationDataset(\"pubmed\", symmetry_edges=True)\n elif name == \"citeseer\":\n dataset = pgl.dataset.CitationDataset(\"citeseer\", symmetry_edges=True)\n else:\n raise ValueError(name + \" dataset doesn't exists\")\n\n dataset.graph.node_feat[\"words\"] = normalize(dataset.graph.node_feat[\n \"words\"])\n dataset.graph.tensor()\n\n dataset = random_splits(dataset, seed)\n\n train_index = dataset.train_index\n dataset.train_label = paddle.to_tensor(\n np.expand_dims(dataset.y[train_index], -1))\n dataset.train_index = paddle.to_tensor(np.expand_dims(train_index, -1))\n\n val_index = dataset.val_index\n dataset.val_label = paddle.to_tensor(\n np.expand_dims(dataset.y[val_index], -1))\n dataset.val_index = paddle.to_tensor(np.expand_dims(val_index, -1))\n\n test_index = dataset.test_index\n dataset.test_label = paddle.to_tensor(\n np.expand_dims(dataset.y[test_index], -1))\n dataset.test_index = paddle.to_tensor(np.expand_dims(test_index, -1))\n\n return dataset\n","sub_path":"examples/chebnetii/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"164371384","text":"import os\nimport json\nimport shutil\nimport asyncio\nimport tarfile\nimport paramiko\nimport settings\n\nimport logging\n\nimport argparse\n\nfrom aiopg.sa import create_engine\n\nfrom datetime import datetime, timedelta, timezone\n\nfrom dateutil.parser import parse as date_parse\nfrom 
dateutil.relativedelta import *\n\nfrom concurrent.futures import ProcessPoolExecutor\n\nfrom sqlalchemy import select, join, and_\n\nfrom collections import defaultdict\n\nfrom models import *\n\n\n# set up logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s:%(name)s %(levelname)s:%(message)s\")\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\ndef scp_target_file(host, template=None):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(host,\n username=settings.USERNAME,\n password=settings.PASSWORD)\n logger.info(\"search {} for {} template\".format(host, template))\n raw_cmd = 'find {} -name \"{}.tar.bz2\" -o -name \"*.log\"'.format(settings.CDR_SOURCE_FOLDER, template)\n\n stdin, stdout, stderr = ssh.exec_command(raw_cmd)\n file_list = stdout.read().splitlines()\n logger.info(\"found total: {} files\".format(len(file_list)))\n logger.info(\"found: {}\".format(file_list))\n ftp = ssh.open_sftp()\n filenames = []\n for index, f_ in enumerate(file_list):\n dir_path, filename = os.path.split(f_)\n dir_path = dir_path.decode(\"utf-8\").replace(settings.CDR_SOURCE_FOLDER, \"\")\n dir_path = dir_path.split(os.sep)[1]\n filename = filename.decode(\"utf-8\")\n logger.info(\"find file %s\" % filename)\n f_name = \"{}_{}\".format(index, filename)\n ftp.get(f_.decode(\"utf-8\"),\n os.path.join(settings.LOCAL_FILE_FOLDER,\n f_name))\n yield f_name, dir_path, filename\n\n\ndef _extract_data(f):\n for line in f:\n yield line\n\n\ndef _extract_stream(bytestream):\n for line in bytestream:\n yield line\n\n\ndef _dump_tmp(filename, data):\n with open(filename, \"w\") as out:\n r = {}\n for k, v in data.items():\n r[k] = v.to_json()\n out.write(json.dumps(r))\n\n\ndef _extract_tar(filename):\n tar = tarfile.open(filename)\n for t_info in tar:\n if \".cdr\" in t_info.name:\n yield tar, t_info\n\n\ndef _extract_multiple_data(line):\n line = line.decode(\"utf-8\").split(\"?\")\n dump = {\n \"ani\": line[12],\n \"dnis\": line[49],\n #\"lrn_dnis\": line[80],\n \"status\": line[7][:3] if line [7] != \"NULL\" else None,\n \"duration\": int(line[50]),\n \"call_id\": line[1],\n \"ring_time\": line[52],\n \"busy\": True if \"USER_BUSY\" in line[-1] else False,\n \"is_final\": line[99],\n \"non_zero_call\": 1 if int(line[50]) > 0 else 0,\n \"termination_trunk_id\": line[29],\n }\n\n dump[\"total_ingress_ani\"] = 1 if bool(line[99]) else 0\n dump[\"valid_ingress_ani\"] = 1 if bool(line[99]) and bool(line[29]) else 0\n\n dump[\"total_ingress_dnis\"] = 1 if bool(line[29]) else 0\n dump[\"valid_ingress_dnis\"] = 1 if bool(line[29]) and bool(line[10]) else 0\n\n if line[51] and int(line[51]) > 0:\n dump[\"num_call_ringtone\"] = 1\n else:\n dump[\"num_call_ringtone\"] = 0\n\n #dump['last_date'] = datetime.fromtimestamp(float(str(line[3][:10])))\n return dump\n\n\ndef __group_by_term(term, calls):\n res = defaultdict(list)\n for call in calls:\n res[call[term]].append(call)\n return res\n\n\n# # def __daterange(start_date, end_date):\n# # for n in range(int ((end_date - start_date).days)):\n# # yield start_date + relativedelta(n)\n\n\n# # def __get_time_range(start_dt, end_dt):\n# # return start_dt.hour, end_dt.hour,\n\n\ndef __get_file_names(start_dt, end_dt=None):\n\n if not end_dt:\n start_dt = date_parse(start_dt)\n return [start_dt.strftime(\"%Y-%m-%d\")], __get_time_range(start_dt, start_dt)\n\n start_dt = date_parse(start_dt)\n 
end_dt = date_parse(end_dt)\n\n dates = __daterange(start_dt, end_dt)\n time_tuple = __get_time_range(start_dt, end_dt)\n\n return [d.strftime(\"%Y-%m-%d\") for d in dates], time_tuple\n\n\ndef process_file(filename, folder=None):\n if not filename:\n raise Exception(\"File not found\")\n\n cur_file = None\n for tarfile, f_info in _extract_tar(filename):\n\n if cur_file != f_info.name:\n cur_file = f_info.name\n logger.info(\"Parsing: %s\" % cur_file)\n\n f = tarfile.extractfile(f_info)\n lines = (line for line in _extract_data(f))\n frames = []\n for line in lines:\n _line = _extract_multiple_data(line)\n _line[\"time\"] = \"{}:{}\".format(cur_file.split(\".\")[0][-4:-2], cur_file.split(\".\")[0][-2:])\n # call_info = {k:v for k,v in _line}\n\n def is_good_call(call):\n if call[\"duration\"] and call[\"duration\"] != \"0\":\n return True\n\n return bool(call[\"status\"] not in [\"487\", \"402\"] and\n int(call[\"num_call_ringtone\"]) > 500)\n\n _line[\"failed\"] = not is_good_call(_line)\n frames.append(_line)\n yield frames\n\n\nasync def create_row(frames, folder=None, raw_filename=None):\n try:\n raw_date = raw_filename.split(\".\")[0]\n await add_dnis_statistics(frames, folder, raw_date)\n await add_ani_statistics(frames, folder, raw_date)\n return raw_date\n except Exception as e:\n logger.exception(e)\n return []\n\n\nasync def _get_engine():\n engine = await create_engine(**settings.DB_CONNECTION)\n return engine\n\n\nasync def upsert(term, model, grouped_values, folder, raw_date):\n engine = await _get_engine()\n\n def merge_rows(d1, d2, good_fields, bad_fields=[]):\n merged = {}\n for k, v in d1.items():\n if k in d2:\n if k in good_fields:\n merged[k] = int(d1.get(k, 0) or 0) + int(d2.get(k, 0) or 0)\n return merged\n\n async with engine.acquire() as conn:\n for key, aggregated_values in grouped_values.items():\n aggregated_values[\"ip\"] = folder\n aggregated_values[\"date\"] = raw_date\n\n q_ = getattr(model.c, term)\n search_q_ = and_(and_(q_ == key, model.c.date == raw_date), model.c.ip == folder)\n result = await conn.execute(select(model.c).where(search_q_))\n result = [r for r in result]\n if result:\n try:\n good_fields = [\"total_ingress\", \"valid_ingress\", \"code_200\", \"code_404\",\n \"code_503\", \"code_486\", \"code_487\", \"code_402\", \"code_404\",\n \"code_other_4xx\", \"code_other_5xx\",\n \"num_call_ringtone\", \"duration\", \"non_zero_call\"]\n\n val = merge_rows(aggregated_values, result[0], good_fields=good_fields)\n await conn.execute(model.update().where(search_q_)\n .values(**val))\n except Exception as e:\n logger.exception(e)\n logger.error(val)\n else:\n try:\n await conn.execute(model.insert().values(**aggregated_values))\n except Exception as e:\n logger.exception(e)\n print (aggregated_values)\n\n\nSTATUSES = (\"200\", \"404\", \"503\", \"486\", \"487\", \"402\", \"480\")\ndef status_code(frame):\n status = frame.get(\"status\", None)\n\n if not status:\n return\n\n if frame[\"is_final\"] != \"1\":\n return\n\n if status in STATUSES:\n return \"code_%s\" % status\n else:\n if status and status[0] == \"4\":\n return \"code_other_4xx\"\n elif status and status[0] == \"5\":\n return \"code_other_5xx\"\n\n\ndef group_by(term, frames, exclude_list=[]):\n frames = __group_by_term(term, frames)\n grouped_by_key = {}\n\n for key, frameset in frames.items():\n result = {}\n for frame in frameset:\n\n if term == \"ani\" and not frame[\"is_final\"]:\n continue\n if term == \"dnis\" and not frame[\"termination_trunk_id\"]:\n continue\n\n 
frame[\"total_ingress\"] = frame.pop(\"total_ingress_{}\".format(term), 0)\n frame[\"valid_ingress\"] = frame.pop(\"valid_ingress_{}\".format(term), 0)\n\n # if term == \"ani\":\n # del frame[\"total_ingress_dnis\"]\n # del frame[\"valid_ingress_dnis\"]\n\n # if term == \"dnis\":\n # del frame[\"total_ingress_ani\"]\n # del frame[\"valid_ingress_ani\"]\n\n\n if not result:\n result = {k: v for k, v in frame.items() if k not in exclude_list and k}\n s_code = status_code(frame)\n if s_code and s_code != \"None\":\n result[s_code] = 1\n continue\n\n if s_code:\n if s_code in result:\n result[s_code] += 1\n else:\n result[s_code] = 1\n grouped_by_key[key] = result\n\n return grouped_by_key\n\n\nasync def add_dnis_statistics(frames, folder=None, raw_date=None):\n \"\"\"\n adds/updates statistics and counts statuses\n \"\"\"\n logger.info(\"add dnis statistics for %d frames\" % len(frames))\n dnis_exclude = (\"call_id\", \"lrn_dnis\", \"status\", \"is_final\", \"ani\", \"failed\", \"busy\", \"ring_time\",\n \"valid_ingress_dnis\", \"total_ingress_dnis\", \"valid_ingress_ani\", \"total_ingress_ani\",\n \"termination_trunk_id\")\n return await upsert(\"dnis\", DnisStatistics, group_by(\"dnis\", frames, dnis_exclude), folder, raw_date)\n\n\nasync def add_ani_statistics(frames, folder=None, raw_date=None):\n \"\"\"\n add statistics for ani\n \"\"\"\n\n logger.info(\"add ani statistics for %d frames\" % len(frames))\n ani_exclude = (\"call_id\", \"lrn_dnis\", \"status\", \"is_final\", \"dnis\", \"failed\", \"busy\", \"ring_time\",\n \"valid_ingress_dnis\", \"total_ingress_dnis\", \"valid_ingress_ani\", \"total_ingress_ani\",\n \"termination_trunk_id\")\n return await upsert(\"ani\", AniStatistics, group_by(\"ani\", frames, ani_exclude), folder, raw_date)\n\n\nasync def add_calls(frames):\n \"\"\"\n add new call row to database, if there is no such pair:\n dnis and ani.\n if there is such pair -> do update\n \"\"\"\n\n def group_by(frames):\n terms = defaultdict(list)\n\n for frame in frames:\n terms[\"{}_{}\".format(frame.get(\"dnis\", None),\n frame.get(\"ani\", None))].append(frame)\n grouped_terms = {}\n for term, mini_frames in terms.items():\n res = {}\n for m_fr in mini_frames:\n if not res:\n res = {k: v for k, v in m_fr.items() if k not in (\"lrn_dnis\", \"num_call_ringtone\", \"status\",)}\n continue\n res[\"call_id\"] = m_fr[\"call_id\"]\n res[\"failed\"] = m_fr[\"failed\"]\n if not res[\"failed\"]:\n egr_count = res.get(\"num_valid_egress\", 0)\n res[\"num_valid_egress\"] = egr_count + 1\n res[\"ring_time\"] = res.get(\"ring_time\", 0) + m_fr.get(\"ring_time\", 0)\n res[\"time\"] = m_fr[\"time\"]\n res[\"busy\"] = m_fr[\"busy\"]\n res[\"duration\"] = res.get(\"duration\", 0) + m_fr.get(\"duration\", 0)\n\n grouped_terms[term] = res\n\n return grouped_terms\n\n logger.info(\"add calls for %d frames\" % len(frames))\n grouped_frames = group_by(frames)\n\n engine = await _get_engine()\n async with engine.acquire() as conn:\n for term, val in grouped_frames.items():\n dnis, ani = term.split(\"_\")\n result = await conn.execute(select(Calls.c).where(and_(Calls.c.ani == ani,\n Calls.c.dnis == dnis)))\n result = [r for r in result]\n if result:\n try:\n await conn.execute(Calls.update().where(and_(Calls.c.ani == ani,\n Calls.c.dnis == dnis))\n .values(**val))\n\n except Exception as e:\n logger.exception(str(e))\n\n else:\n await conn.execute(Calls.insert().values(**val))\n return True\n\n\nasync def add_dnis(frames):\n \"\"\"\n add new dns row to database, if there is no such dnis in database\n 
\"\"\"\n def __diff(l1, l2):\n l1 = set(l1)\n l2 = set(l2)\n return list(l1.difference(l2))\n\n logger.info(\"add dnis\")\n engine = await _get_engine()\n\n dnis = __group_by_term(\"dnis\", frames)\n dnis_nums = list(dnis.keys())\n\n async with engine.acquire() as conn:\n result = await conn.execute(select([Dnis.c.dnis]).where(Dnis.c.dnis.in_(dnis_nums)))\n result = [r[0] for r in result]\n dnis_to_insert = __diff(dnis_nums, result)\n\n for dnis_ in dnis_to_insert:\n await conn.execute(Dnis.insert().values(dnis=dnis_,\n is_mobile=True,\n carrier=\"Any text\"))\n\n return dnis\n\nasync def get_file(host):\n\n if settings.DEBUG:\n folder = \"localhost\"\n file_path = os.path.join(settings.LOCAL_FILE_FOLDER, \"0_2016-07-24.tar.bz2\")\n for frame in process_file(file_path, folder):\n frame = await create_row(frame, folder, \"2016-07-24\")\n\n parser = argparse.ArgumentParser(description=\"add date\")\n parser.add_argument(\"--date\", help=\"add required date\")\n options = parser.parse_args()\n\n search_date = options.date or datetime.now().strftime(\"%Y-%m-%d\")\n\n logger.info(\"got {} from input\".format(search_date))\n logger.info(\"search on host: %s\" % host)\n\n for filename, ip_folder, raw_filename in scp_target_file(host, search_date):\n file_path = os.path.join(settings.LOCAL_FILE_FOLDER, filename)\n logger.info(\"got file: {} from {}\".format(file_path, ip_folder))\n\n for frame in process_file(file_path, ip_folder):\n frame = await create_row(frame, ip_folder, raw_filename)\n\n # try:\n # if os.path.isfile(file_path):\n # os.unlink(file_path)\n # except Exception as e:\n # print(e)\n\n return \"done\"\n\n\ndef enable_process(host):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(get_file(host))\n return loop\n\nif __name__ == \"__main__\":\n logger.info(\"Start CDR Bad Numbers\")\n loops = []\n with ProcessPoolExecutor(max_workers=4) as executor:\n for loop in executor.map(enable_process, settings.CDR_SERVICE_HOSTS):\n loops.append(loop)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"649345257","text":"#coding:utf8\nimport json\nimport tornado.web as t_web\nimport tornado.ioloop as t_io\nimport tornado.options as t_opt\nimport tornado.httpserver as t_http\nfrom tornado.web import RequestHandler,url,StaticFileHandler\nfrom tornado.options import options,define\nimport os\n\ndefine(\"port\",default=\"8000\",type=int,help=\"this is port\")\n\nclass IndexHandler(RequestHandler):\n def get(self):\n house_info={\n \"price\":398,\n \"title\": \"宽窄巷子+160平大空间+文化保护区双地铁\",\n \"score\": 5.4,\n \"comments\": 6,\n \"position\": \"北京市丰台区六里桥地铁\"\n }\n self.render(\"index.html\",**house_info)\n\nif __name__ == \"__main__\":\n current_path = os.path.dirname(__file__)\n app = t_web.Application(\n [\n #本质是目录拼接\n (r'^/$', IndexHandler),\n (r'^/view/(.*)$', StaticFileHandler,{\"path\":os.path.join(current_path,\"statics/html\")}),\n ],\n debug=True,\n #本目录下的statics目录,jingtai文件路径\n static_path=os.path.join(current_path, \"statics\"),\n #本目录下的templates目录,模板文件路径\n template_path=os.path.join(current_path, \"templates\"),\n )\n httpServer = t_http.HTTPServer(app)\n httpServer.listen(options.port)\n t_io.IOLoop.current().start()\n\n","sub_path":"tornado/tornado模板/templates1.py","file_name":"templates1.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"78519930","text":"# pygame template\nimport pygame\nimport random\nfrom pygame.locals import K_ESCAPE, KEYDOWN, QUIT, MOUSEBUTTONDOWN, K_BACKSPACE, K_1, K_2, K_3\n\n#Initialize\npygame.init()\n#Set screen dimensions\nWIDTH = 1000\nHEIGHT = 800\nSIZE = (WIDTH, HEIGHT)\n#Create screen\nscreen = pygame.display.set_mode(SIZE)\n#Create clock\nclock = pygame.time.Clock()\n#Name the program\npygame.display.set_caption(\"Faction Defence\")\n\n# ---------------------------\n# Fonts\nmenu_font = pygame.font.SysFont(\"Georgia\", 25, True, False)\nhealth_font = pygame.font.SysFont(\"Georgia\", 14, True, False)\ngame_font = pygame.font.SysFont(\"Georgia\", 14, False, False)\nfaction_name_font = pygame.font.SysFont(\"Georgia\", 32, True, False)\ngame_title_font = pygame.font.SysFont(\"Georgia\", 96, True, False)\n\n# Colours\nDEFAULT = (49, 51, 53)\ncurrent_faction_colour = (-1, -1, -1)\n\n# Buttons\n#General\nplay_button = pygame.Rect(400, 700, 200, 100) # (MIDDLE)\nview_back_button = pygame.Rect(300, 645, 100, 50) # (ON THE FACTION SELECT SCREEN)\n\n#Select Buttons\nselect_button_1 = pygame.Rect(300, 200, 190, 85)\nselect_button_2 = pygame.Rect(700, 200, 190, 85)\nselect_button_3 = pygame.Rect(300, 600, 190, 85)\nselect_button_4 = pygame.Rect(700, 600, 190, 85)\n\n#Info/View Buttons\nview_button_1 = pygame.Rect(300, 290, 190, 85)\nview_button_2 = pygame.Rect(700, 290, 190, 85)\nview_button_3 = pygame.Rect(300, 690, 190, 85)\nview_button_4 = pygame.Rect(700, 690, 190, 85)\n\n# ---------------------------\n#Game runs while running is True\nrunning = True\n#The player's selected faction - 0 is none\nfaction = 0\n# Level\nlevel = 1\n# Health\ntotal_health = 10000\ncurrent_health = 10000\n#XP for level\nrequired_XP = level*1000\ncurrent_XP = 0\n#Used to update the enemies (add new enemies)\noutlaw_counter = 0\nspeedy_counter = 0\nbrute_counter = 0\nshifter_counter = 0\nchief_counter = 0\n#Speed of the enemies (can be altered through abilities)\noutlaw_speed = 1\nspeedy_speed = 5\nbrute_speed = 0.5\nshifter_speed = 1\nchief_speed = 0.5\n#Cooldown Counters - 0 is on cooldown\npyro_1_cd = 0\npyro_2_cd = 0\npyro_3_cd = 0\nnaturo_1_cd = 0\nnaturo_2_cd = 0\nnaturo_3_cd = 0\ncryo_1_cd = 0\ncryo_2_cd = 0\ncryo_3_cd = 0\nelectro_1_cd = 0\nelectro_2_cd = 0\nelectro_3_cd = 0\n\n# ---------------------------\n\n#Store all of the outlaws (Costs 50 hp) (Has TBD hp)\noutlaws = []\n# Which outlaws need to be removed\npending_removal_outlaws = []\n\n#Store all of the speedy outlaws (Costs 50 hp) (Has TBD hp)\nspeedy = []\n# Which speedy need to be removed\npending_removal_speedy = []\n\n#Store all of the brutes (Costs 100 hp) (Has TBD hp)\nbrutes = []\n# Which brutes need to be removed\npending_removal_brutes = []\n\n#Store all of the shifters (Costs 25 hp) (Has TBD hp)\nshifters = []\n# Which shifters need to be removed\npending_removal_shifters = []\n\n#Store all of the chiefs (Costs 250 hp) (Has TBD hp)\nchiefs = []\n# Which outlaws need to be removed\npending_removal_chiefs = []\n\n\n\n# ---------------------------\ncolour_oscillator = 0\noscillation_direction = 1\n#Drawing Faction Icons\ndef faction_1_icon(x, y):\n global colour_oscillator\n global oscillation_direction\n pygame.draw.rect(screen, (255, 0, 0), [x, y, 190, 190], 0)\n name_text = faction_name_font.render(\"PYRO\", True, (colour_oscillator,0,0))\n screen.blit(name_text, [x + 50,y + 75])\n # Update colour\n if colour_oscillator == 0:\n oscillation_direction = 1\n colour_oscillator += 1\n elif colour_oscillator == 255:\n oscillation_direction = 
-1\n colour_oscillator -= 1\n else:\n colour_oscillator+=oscillation_direction\ndef faction_2_icon(x, y):\n global colour_oscillator\n global oscillation_direction\n pygame.draw.rect(screen, (0, 255, 0), [x, y, 190, 190], 0)\n name_text = faction_name_font.render(\"NATURO\", True, (0, colour_oscillator, 0))\n screen.blit(name_text, [x + 20, y + 75])\n # Update colour\n if colour_oscillator == 0:\n oscillation_direction = 1\n colour_oscillator += 1\n elif colour_oscillator == 255:\n oscillation_direction = -1\n colour_oscillator -= 1\n else:\n colour_oscillator += oscillation_direction\ndef faction_3_icon(x, y):\n global colour_oscillator\n global oscillation_direction\n pygame.draw.rect(screen, (0, 0, 255), [x, y, 190, 190], 0)\n name_text = faction_name_font.render(\"CRYO\", True, (0, 0, colour_oscillator))\n screen.blit(name_text, [x + 45, y + 75])\n # Update colour\n if colour_oscillator == 0:\n oscillation_direction = 1\n colour_oscillator += 1\n elif colour_oscillator == 255:\n oscillation_direction = -1\n colour_oscillator -= 1\n else:\n colour_oscillator += oscillation_direction\ndef faction_4_icon(x, y):\n global colour_oscillator\n global oscillation_direction\n pygame.draw.rect(screen, (255, 255, 0), [x, y, 190, 190], 0)\n name_text = faction_name_font.render(\"ELECTRO\", True, (colour_oscillator, colour_oscillator, 0))\n screen.blit(name_text, [x + 15, y + 75])\n # Update colour\n if colour_oscillator == 0:\n oscillation_direction = 1\n colour_oscillator += 1\n elif colour_oscillator == 255:\n oscillation_direction = -1\n colour_oscillator -= 1\n else:\n colour_oscillator += oscillation_direction\n\n# Faction 1 Info\ndef view_faction1():\n #Icon\n faction_1_icon(300,30)\n pygame.draw.rect(screen, (175, 0, 175), [300, 30, 190, 190], 2)\n #Faction Name\n faction_name_text = faction_name_font.render(\"Faction: PYRO\", True, (255,0,0))\n screen.blit(faction_name_text, [300, 225])\n #Faction Description\n line_1 = game_font.render(\"The Pyro wield the elementary power of heat and fire\", True, (0,0,0))\n line_2 = game_font.render(\"Strengths: High area damage and moderate sustain\", True, (0,0,0))\n line_3 = game_font.render(\"Weaknesses: No displacement and disruption effects\", True, (0,0,0))\n screen.blit(line_1, [300, 275])\n screen.blit(line_2, [300, 300])\n screen.blit(line_3, [300, 325])\n #Abilities icons\n abilities(300, 425, 0, 0, 0)\n #Abilities description\n #Basic Attack\n basic = game_font.render(\"Mouse clicks kill a single enemy and plunder experience\", True, (0,0,0))\n screen.blit(basic, [355, 425])\n #Ability 1\n ability_1_ln1 = game_font.render(\"Go on a fiery rampage!!!\", True, (0,0,0))\n ability_1_ln2 = game_font.render(\"Your next 5 basic attacks restore 100 health and reduce all cooldowns by 1 second\", True, (0,0,0))\n ability_1_cd = game_font.render(f\"Cooldown: 5 seconds\", True, (0,0,0))\n screen.blit(ability_1_ln1, [355, 480])\n screen.blit(ability_1_ln2, [355, 494])\n screen.blit(ability_1_cd, [355, 508])\n #Ability 2\n ability_2_ln1 = game_font.render(\"Launch a fireball with radius 100, killing all units hit\", True, (0,0,0))\n ability_2_ln2 = game_font.render(\"Note: Does not plunder experience\", True, (0,0,0))\n ability_2_cd = game_font.render(f\"Cooldown: 5 seconds\", True, (0, 0, 0))\n screen.blit(ability_2_ln1, [355, 535])\n screen.blit(ability_2_ln2, [355, 549])\n screen.blit(ability_2_cd, [355, 563])\n #Ability 3\n ability_3_ln1 = game_font.render(\"Instantly incinerate all enemies on the battlefield\", True, (0,0,0))\n ability_3_ln2 = 
game_font.render(\"Note: Does not plunder experience\", True, (0,0,0))\n ability_3_cd = game_font.render(f\"Cooldown: 20 seconds\", True, (0, 0, 0))\n screen.blit(ability_3_ln1, [355, 590])\n screen.blit(ability_3_ln2, [355, 604])\n screen.blit(ability_3_cd, [355, 618])\n# Faction 2 Info\ndef view_faction2():\n #Icon\n faction_2_icon(300,30)\n pygame.draw.rect(screen, (175, 0, 175), [300, 30, 190, 190], 2)\n #Faction Name\n faction_name_text = faction_name_font.render(\"Faction: NATURO\", True, (0,255,0))\n screen.blit(faction_name_text, [300, 225])\n #Faction Description\n line_1 = game_font.render(\"The Naturo wield the ancient ability to harness nature and heal\", True, (0,0,0))\n line_2 = game_font.render(\"Strengths: High sustain and moderate disruption effects\", True, (0, 0, 0))\n line_3 = game_font.render(\"Weaknesses: No damaging effects\", True, (0, 0, 0))\n screen.blit(line_1, [300, 275])\n screen.blit(line_2, [300, 300])\n screen.blit(line_3, [300, 325])\n #Abilities icons\n abilities(300, 425, 0, 0, 0)\n # Abilities description\n # Basic Attack\n basic = game_font.render(\"Mouse clicks kill a single enemy and plunder experience\", True, (0, 0, 0))\n screen.blit(basic, [355, 425])\n # Ability 1\n ability_1_ln1 = game_font.render(\"Harness natural vegetation to heal for a small amount\", True, (0, 0, 0))\n ability_1_ln2 = game_font.render(\"Instantly restore 250 health\", True, (0, 0, 0))\n ability_1_cd = game_font.render(f\"Cooldown: 2 seconds\", True, (0, 0, 0))\n screen.blit(ability_1_ln1, [355, 480])\n screen.blit(ability_1_ln2, [355, 494])\n screen.blit(ability_1_cd, [355, 508])\n # Ability 2\n ability_2_ln1 = game_font.render(\"Hijack the evolutionary tree to alter the enemies' size\", True, (0, 0, 0))\n ability_2_ln2 = game_font.render(\"All enemies are doubled in size for 3 seconds\", True, (0, 0, 0))\n ability_2_cd = game_font.render(f\"Cooldown: 5 seconds\", True, (0, 0, 0))\n screen.blit(ability_2_ln1, [355, 535])\n screen.blit(ability_2_ln2, [355, 549])\n screen.blit(ability_2_cd, [355, 563])\n # Ability 3\n ability_3_ln1 = game_font.render(\"Harness the inconceivably immense power of grapes to heal for a large amount\", True, (0, 0, 0))\n ability_3_ln2 = game_font.render(\"Instantly restore 50% of missing health\", True, (0, 0, 0))\n ability_3_cd = game_font.render(f\"Cooldown: 10 seconds\", True, (0, 0, 0))\n screen.blit(ability_3_ln1, [355, 590])\n screen.blit(ability_3_ln2, [355, 604])\n screen.blit(ability_3_cd, [355, 618])\n# Faction 3 Info\ndef view_faction3():\n #Icon\n faction_3_icon(300,30)\n pygame.draw.rect(screen, (175, 0, 175), [300, 30, 190, 190], 2)\n #Faction Name\n faction_name_text = faction_name_font.render(\"Faction: CRYO\", True, (0,0,255))\n screen.blit(faction_name_text, [300, 225])\n #Faction Description\n line_1 = game_font.render(\"The Cryo wield the freezing weapons of water and ice\", True, (0,0,0))\n line_2 = game_font.render(\"Strengths: High amount of disruption effects and moderate sustain\", True, (0, 0, 0))\n line_3 = game_font.render(\"Weaknesses: No damaging effects\", True, (0, 0, 0))\n screen.blit(line_1, [300, 275])\n screen.blit(line_2, [300, 300])\n screen.blit(line_3, [300, 325])\n #Abilities icons\n abilities(300, 425, 0, 0, 0)\n # Abilities description\n # Basic Attack\n basic = game_font.render(\"Mouse clicks kill a single enemy and plunder experience\", True, (0, 0, 0))\n screen.blit(basic, [355, 425])\n # Ability 1\n ability_1_ln1 = game_font.render(\"Apply cryotherapy to heal a moderate amount\", True, (0, 0, 0))\n 
ability_1_ln2 = game_font.render(\"Restore 1200 health over 10 seconds\", True, (0, 0, 0))\n ability_1_cd = game_font.render(f\"Cooldown: 15 seconds\", True, (0, 0, 0))\n screen.blit(ability_1_ln1, [355, 480])\n screen.blit(ability_1_ln2, [355, 494])\n screen.blit(ability_1_cd, [355, 508])\n # Ability 2\n ability_2_ln1 = game_font.render(\"Create a cold gust of wind\", True, (0, 0, 0))\n ability_2_ln2 = game_font.render(\"Slows all enemies by 75% for 4 seconds\", True, (0, 0, 0))\n ability_2_cd = game_font.render(f\"Cooldown: 6 seconds\", True, (0, 0, 0))\n screen.blit(ability_2_ln1, [355, 535])\n screen.blit(ability_2_ln2, [355, 549])\n screen.blit(ability_2_cd, [355, 563])\n # Ability 3\n ability_3_ln1 = game_font.render(\"Conjure a freezing blizzard\", True, (0, 0, 0))\n ability_3_ln2 = game_font.render(\"Freezes all enemies in place for 8 seconds\", True, (0, 0, 0))\n ability_3_cd = game_font.render(f\"Cooldown: 15 seconds\", True, (0, 0, 0))\n screen.blit(ability_3_ln1, [355, 590])\n screen.blit(ability_3_ln2, [355, 604])\n screen.blit(ability_3_cd, [355, 618])\n# Faction 4 Info\ndef view_faction4():\n #Icon\n faction_4_icon(300,30)\n pygame.draw.rect(screen, (175, 0, 175), [300, 30, 190, 190], 2)\n #Faction Name\n faction_name_text = faction_name_font.render(\"Faction: ELECTRO\", True, (255,255,0))\n screen.blit(faction_name_text, [300, 225])\n #Faction Description\n line_1 = game_font.render(\"The Electro wield the innovative tools of light and electricity\", True, (0,0,0))\n line_2 = game_font.render(\"Strengths: High area damage and moderate displacement effectd\", True, (0, 0, 0))\n line_3 = game_font.render(\"Weaknesses: No sustaining effects\", True, (0, 0, 0))\n screen.blit(line_1, [300, 275])\n screen.blit(line_2, [300, 300])\n screen.blit(line_3, [300, 325])\n #Abilities icons\n abilities(300, 425, 0, 0, 0)\n # Abilities description\n # Basic Attack\n basic = game_font.render(\"Mouse clicks kill a single enemy and plunder experience\", True, (0, 0, 0))\n screen.blit(basic, [355, 425])\n # Ability 1\n ability_1_ln1 = game_font.render(\"Warp space-time to concentrate all enemies on target location\", True, (0, 0, 0))\n ability_1_ln2 = game_font.render(\"Note: No damage is dealt to any enemies\", True, (0, 0, 0))\n ability_1_cd = game_font.render(f\"Cooldown: 10 seconds\", True, (0, 0, 0))\n screen.blit(ability_1_ln1, [355, 480])\n screen.blit(ability_1_ln2, [355, 494])\n screen.blit(ability_1_cd, [355, 508])\n # Ability 2\n ability_2_ln1 = game_font.render(\"Launch an electric ray in a line, killing all units whose centre is hit\", True, (0, 0, 0))\n ability_2_ln2 = game_font.render(\"Note: Does not plunder experience\", True, (0, 0, 0))\n ability_2_cd = game_font.render(f\"Cooldown: 8 seconds\", True, (0, 0, 0))\n screen.blit(ability_2_ln1, [355, 535])\n screen.blit(ability_2_ln2, [355, 549])\n screen.blit(ability_2_cd, [355, 563])\n # Ability 3\n ability_3_ln1 = game_font.render(\"Create a circular zap field which follows the cursor for 7 seconds, killing all units hit\", True, (0, 0, 0))\n ability_3_ln2 = game_font.render(\"Note: Does not plunder experience\", True, (0, 0, 0))\n ability_3_cd = game_font.render(f\"Cooldown: 30 seconds\", True, (0, 0, 0))\n screen.blit(ability_3_ln1, [355, 590])\n screen.blit(ability_3_ln2, [355, 604])\n screen.blit(ability_3_cd, [355, 618])\n\n#Display Faction select\ndef faction_select():\n # Faction Select Grid 2x2\n pygame.draw.line(screen, (0, 0, 0), [600, 0], [600, 800], 1)\n pygame.draw.line(screen, (0, 0, 0), [200, 400], [1000, 400], 
1)\n # Faction 1 Display\n faction_1_icon(300, 5)\n pygame.draw.rect(screen, (0, 0, 0), select_button_1, 0)\n select_text_1 = menu_font.render(\"Select\", True, (255, 255, 255))\n screen.blit(select_text_1, [355, 225])\n pygame.draw.rect(screen, (0, 0, 0), view_button_1, 0)\n view_text_1 = menu_font.render(\"View\", True, (255, 255, 255))\n screen.blit(view_text_1, [360, 315])\n # Faction 2 Display\n faction_2_icon(700, 5)\n pygame.draw.rect(screen, (0, 0, 0), select_button_2, 0)\n select_text_2 = menu_font.render(\"Select\", True, (255, 255, 255))\n screen.blit(select_text_2, [755, 225])\n pygame.draw.rect(screen, (0, 0, 0), view_button_2, 0)\n view_text_2 = menu_font.render(\"View\", True, (255, 255, 255))\n screen.blit(view_text_2, [760, 315])\n # Faction 3 Display\n faction_3_icon(300, 405)\n pygame.draw.rect(screen, (0, 0, 0), select_button_3, 0)\n select_text_3 = menu_font.render(\"Select\", True, (255, 255, 255))\n screen.blit(select_text_3, [355, 625])\n pygame.draw.rect(screen, (0, 0, 0), view_button_3, 0)\n view_text_3 = menu_font.render(\"View\", True, (255, 255, 255))\n screen.blit(view_text_3, [360, 715])\n # Faction 4 Display\n faction_4_icon(700, 405)\n pygame.draw.rect(screen, (0, 0, 0), select_button_4, 0)\n select_text_4 = menu_font.render(\"Select\", True, (255, 255, 255))\n screen.blit(select_text_4, [755, 625])\n pygame.draw.rect(screen, (0, 0, 0), view_button_4, 0)\n view_text_4 = menu_font.render(\"View\", True, (255, 255, 255))\n screen.blit(view_text_4, [760, 715])\n\n#Display Ability List\ndef abilities(x, y, r, g, b):\n # Basic Attack (Click)\n pygame.draw.rect(screen, (r, g, b), [x, y, 50, 50], 0)\n passive_text = game_font.render(\"B\", True, (255-r, 255-g, 255-b))\n screen.blit(passive_text, [x + 2, y])\n # Ability 1 icon\n pygame.draw.rect(screen, (r, g, b), [x, y+55, 50, 50], 0)\n a1_text = game_font.render(\"1\", True, (255 - r, 255 - g, 255 - b))\n screen.blit(a1_text, [x + 2, y + 55])\n # Ability 2 icon\n pygame.draw.rect(screen, (r, g, b), [x, y+110, 50, 50], 0)\n a2_text = game_font.render(\"2\", True, (255 - r, 255 - g, 255 - b))\n screen.blit(a2_text, [x + 2, y + 110])\n # Ultimate Ability icon\n pygame.draw.rect(screen, (r, g, b), [x, y+165, 50, 50], 0)\n ult_text = game_font.render(\"3\", True, (255 - r, 255 - g, 255 - b))\n screen.blit(ult_text, [x + 2, y + 165])\n\n # Ability CD display\n if faction == 1:\n a_1_cd_text = game_font.render(str(round(pyro_1_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_2_cd_text = game_font.render(str(round(pyro_2_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_3_cd_text = game_font.render(str(round(pyro_3_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n screen.blit(a_1_cd_text, [x + 2, y + 75])\n screen.blit(a_2_cd_text, [x + 2, y + 130])\n screen.blit(a_3_cd_text, [x + 2, y + 185])\n elif faction == 2:\n a_1_cd_text = game_font.render(str(round(naturo_1_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_2_cd_text = game_font.render(str(round(naturo_2_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_3_cd_text = game_font.render(str(round(naturo_3_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n screen.blit(a_1_cd_text, [x + 2, y + 75])\n screen.blit(a_2_cd_text, [x + 2, y + 130])\n screen.blit(a_3_cd_text, [x + 2, y + 185])\n elif faction == 3:\n a_1_cd_text = game_font.render(str(round(cryo_1_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_2_cd_text = game_font.render(str(round(cryo_2_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_3_cd_text = game_font.render(str(round(cryo_3_cd / 
60, 2)), True, (255 - r, 255 - g, 255 - b))\n screen.blit(a_1_cd_text, [x + 2, y + 75])\n screen.blit(a_2_cd_text, [x + 2, y + 130])\n screen.blit(a_3_cd_text, [x + 2, y + 185])\n elif faction == 4:\n a_1_cd_text = game_font.render(str(round(electro_1_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_2_cd_text = game_font.render(str(round(electro_2_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n a_3_cd_text = game_font.render(str(round(electro_3_cd / 60, 2)), True, (255 - r, 255 - g, 255 - b))\n screen.blit(a_1_cd_text, [x + 2, y + 75])\n screen.blit(a_2_cd_text, [x + 2, y + 130])\n screen.blit(a_3_cd_text, [x + 2, y + 185])\n\n#Display Left HUD\ndef left_hud(r,g,b):\n global current_health\n global total_health\n # LEFT BOARD\n pygame.draw.rect(screen, (0, 0, 0), [0, 0, 200, 800], 0)\n # Health Bar\n pygame.draw.rect(screen, (175, 0, 0), [5, 5, 190, 20], 0)\n pygame.draw.rect(screen, (34, 139, 34), [5, 5, 190 * (current_health / total_health), 20], 0)\n health_text = health_font.render(f\"{current_health}/{total_health}\", True, (0, 0, 0))\n screen.blit(health_text, [60, 5])\n # Faction Icon + Border\n pygame.draw.rect(screen, (r, g, b), [5, 30, 190, 190], 0)\n pygame.draw.rect(screen, (175, 0, 175), [5, 30, 190, 190], 2)\n # Display Abilities\n abilities(5, 225, 255, 255, 255)\n # Display Enemies\n enemies_info()\n\n#Display Level\ndef level_display():\n global required_XP\n global current_XP\n global level\n #Level Bar (XP)\n pygame.draw.rect(screen, (211, 211, 211), [5, 500, 190, 20], 0)\n pygame.draw.rect(screen, (0, 204, 255), [5, 500, 190*(current_XP/required_XP), 20], 0)\n #Experience Text (XP)\n xp_text = health_font.render(f\"{current_XP}/{required_XP}\", True, (0, 0, 0))\n screen.blit(xp_text, [60, 500])\n #Level Text\n level_text = health_font.render(f\"Level {level}\", True, (255, 255, 255))\n screen.blit(level_text, [60, 475])\n\n#Display the information relating to the enemies\ndef enemies_info():\n #Outlaw\n pygame.draw.circle(screen, (255, 255, 255), [30,550], 10)\n outlaw_text = health_font.render(\"Deals 100 Damage\", True, (255, 255, 255))\n screen.blit(outlaw_text, [57, 540])\n #Speedy\n pygame.draw.circle(screen, (150, 150, 150), [30,575], 10)\n speedy_text = health_font.render(\"Deals 500 Damage\", True, (255, 255, 255))\n screen.blit(speedy_text, [57, 565])\n #Brute\n pygame.draw.circle(screen, (150, 0, 0), [30,625], 25)\n brute_text = health_font.render(\"Deals 1000 Damage\", True, (255, 255, 255))\n screen.blit(brute_text, [57, 615])\n #Shifter\n temp_shifter_colour = random.randint(0, 255)\n pygame.draw.circle(screen, (temp_shifter_colour, temp_shifter_colour, temp_shifter_colour), [30,675], 15)\n shifter_text_1 = health_font.render(\"Deals 500 Damage\", True, (255, 255, 255))\n shifter_text_2 = health_font.render(\"Shifts Colour\", True, (255, 255, 255))\n screen.blit(shifter_text_1, [57, 660])\n screen.blit(shifter_text_2, [57, 675])\n #Chief\n pygame.draw.rect(screen, (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),[5, 700, 50, 50], 0)\n chief_text_1 = health_font.render(\"Deals 2500 Damage\", True, (255, 255, 255))\n chief_text_2 = health_font.render(\"Spawns Enemies\", True, (255, 255, 255))\n screen.blit(chief_text_1, [57, 710])\n screen.blit(chief_text_2, [57, 725])\n #Win condition\n wincon_text = health_font.render(\"Complete Level 10 to Win!\", True, (255, 255, 255))\n screen.blit(wincon_text, [5, 765])\n\n\n# ---------------------------\n#Pyro Abilities\n#Healing Attacks Counter - 0 means none left\nhealing_attacks = 
0\ndef pyro_1():\n global pyro_1_cd\n if pyro_1_cd == 0:\n global healing_attacks\n healing_attacks = 5\n #Update cooldown\n pyro_1_cd = 300\n\n#Animating the circle\ncircle_time = 0\ncircle_x = -1\ncircle_y = -1\ndef pyro_2(x,y):\n global circle_time\n global pyro_2_cd\n global circle_x\n global circle_y\n if pyro_2_cd == 0:\n circle_time = 20\n #For displaying the explosion circle\n circle_x = x\n circle_y = y\n aoe_circle = pygame.draw.circle(screen, (255, 125, 0), [x,y], 100)\n for i in range(len(outlaws)):\n aoe_hit = aoe_circle.collidepoint(outlaws[i])\n if aoe_hit == 1:\n pending_removal_outlaws.append((outlaws[i][0]-outlaw_speed, outlaws[i][1]))\n for i in range(len(speedy)):\n aoe_hit = aoe_circle.collidepoint(speedy[i])\n if aoe_hit == 1:\n pending_removal_speedy.append((speedy[i][0]-speedy_speed, speedy[i][1]))\n for i in range(len(brutes)):\n aoe_hit = aoe_circle.collidepoint(brutes[i])\n if aoe_hit == 1:\n pending_removal_brutes.append((brutes[i][0]-brute_speed, brutes[i][1]))\n for i in range(len(shifters)):\n aoe_hit = aoe_circle.collidepoint(shifters[i])\n if aoe_hit == 1:\n pending_removal_shifters.append((shifters[i][0]-shifter_speed, shifters[i][1]))\n for i in range(len(chiefs)):\n aoe_hit = aoe_circle.collidepoint(chiefs[i])\n if aoe_hit == 1:\n pending_removal_chiefs.append((chiefs[i][0]-chief_speed, chiefs[i][1]))\n # Update cooldown\n pyro_2_cd = 300\n\ndef pyro_ult():\n global pyro_3_cd\n if pyro_3_cd == 0:\n #Wipes the whole board\n outlaws.clear()\n speedy.clear()\n brutes.clear()\n shifters.clear()\n chiefs.clear()\n pyro_3_cd = 1200\n\n\n#Naturo Abilities\ndef naturo_1():\n global current_health\n global total_health\n global naturo_1_cd\n if naturo_1_cd == 0:\n amount_healed = 250\n if current_health+amount_healed>=total_health:\n current_health = total_health\n else:\n current_health += amount_healed\n # Update cooldown\n naturo_1_cd = 120\n\n#Time under polymorph\npolymorph_time_counter = 0\ndef naturo_2():\n global polymorph_time_counter\n global naturo_2_cd\n if naturo_2_cd == 0:\n polymorph_time_counter = 180\n # Update cooldown\n naturo_2_cd = 300\n\ndef naturo_ult():\n global current_health\n global total_health\n global naturo_3_cd\n if naturo_3_cd == 0:\n amount_healed = int(0.5*(total_health-current_health))\n current_health+=amount_healed\n # Update cooldown\n naturo_3_cd = 600\n\n\n#Cryo Abilities\n#Healing time\ncryo_time = 0\ndef cryo_1():\n global cryo_time\n global cryo_1_cd\n if cryo_1_cd == 0:\n cryo_time = 600\n # Update cooldown\n cryo_1_cd = 900\n\n#Slow time\nslow_time = 0\ndef cryo_2():\n global slow_time\n global cryo_2_cd\n if cryo_2_cd == 0:\n slow_time = 240\n # Update cooldown\n cryo_2_cd = 360\n#Freeze time\nfreeze_time = 0\ndef cryo_ult():\n global freeze_time\n global cryo_3_cd\n if cryo_3_cd == 0:\n freeze_time = 480\n # Update cooldown\n cryo_3_cd = 900\n\n#Electro Abilities\ndef electro_1(x, y):\n global electro_1_cd\n if electro_1_cd == 0:\n for i in range(len(outlaws)):\n outlaws[i] = (x, y)\n for i in range(len(speedy)):\n speedy[i] = (x, y)\n for i in range(len(brutes)):\n brutes[i] = (x, y)\n for i in range(len(shifters)):\n shifters[i] = (x, y)\n for i in range(len(chiefs)):\n chiefs[i] = (x, y)\n\n # Update cooldown\n electro_1_cd = 600\n\n#Animating the Line\nline_time = 0\nline_y = -1\ndef electro_2(y):\n global line_time\n global line_y\n global electro_2_cd\n if electro_2_cd == 0:\n line_time = 20\n aoe_line = pygame.draw.rect(screen, (255, 255, 0), [200, y-8, 800, 16], 0)\n\n #line_y is for displaying the line\n 
line_y = y\n for i in range(len(outlaws)):\n aoe_hit = aoe_line.collidepoint(outlaws[i])\n if aoe_hit == 1:\n pending_removal_outlaws.append((outlaws[i][0]-outlaw_speed, outlaws[i][1]))\n for i in range(len(speedy)):\n aoe_hit = aoe_line.collidepoint(speedy[i])\n if aoe_hit == 1:\n pending_removal_speedy.append((speedy[i][0]-speedy_speed, speedy[i][1]))\n for i in range(len(brutes)):\n aoe_hit = aoe_line.collidepoint(brutes[i])\n if aoe_hit == 1:\n pending_removal_brutes.append((brutes[i][0]-brute_speed, brutes[i][1]))\n for i in range(len(shifters)):\n aoe_hit = aoe_line.collidepoint(shifters[i])\n if aoe_hit == 1:\n pending_removal_shifters.append((shifters[i][0]-shifter_speed, shifters[i][1]))\n for i in range(len(chiefs)):\n aoe_hit = aoe_line.collidepoint(chiefs[i])\n if aoe_hit == 1:\n pending_removal_chiefs.append((chiefs[i][0]-chief_speed, chiefs[i][1]))\n\n # Update cooldown\n electro_2_cd = 480\n\n#Time left on the shockwave\nshockwave_time = 0\ndef electro_ult():\n global shockwave_time\n global electro_3_cd\n if electro_3_cd == 0:\n shockwave_time = 360\n # Update cooldown\n electro_3_cd = 1800\n\n\n\n# ---------------------------\n\n#Display entry screen with the left HUD and faction select\ndef entry_screen():\n #Health\n global faction\n global total_health\n global current_health\n #faction being viewed - 0 means none\n view = 0\n\n play = True\n while play and faction == 0:\n global current_faction_colour\n # EVENT HANDLING\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n play = False\n elif faction == 0 and view != 0 and event.key == K_BACKSPACE:\n view = 0\n elif event.type == QUIT:\n play = False\n elif event.type == MOUSEBUTTONDOWN:\n if faction == 0:\n #If a faction has not been selected yet (i.e. 
On Faction Selection Screen)\n select_1_hit = select_button_1.collidepoint(event.pos)\n select_2_hit = select_button_2.collidepoint(event.pos)\n select_3_hit = select_button_3.collidepoint(event.pos)\n select_4_hit = select_button_4.collidepoint(event.pos)\n view_1_hit = view_button_1.collidepoint(event.pos)\n view_2_hit = view_button_2.collidepoint(event.pos)\n view_3_hit = view_button_3.collidepoint(event.pos)\n view_4_hit = view_button_4.collidepoint(event.pos)\n if view == 0:\n if select_1_hit == 1:\n faction = 1\n current_faction_colour = (255, 0, 0)\n elif select_2_hit == 1:\n faction = 2\n current_faction_colour = (0, 255, 0)\n elif select_3_hit == 1:\n faction = 3\n current_faction_colour = (0, 0, 255)\n elif select_4_hit == 1:\n faction = 4\n current_faction_colour = (255, 255, 0)\n elif view_1_hit == 1:\n view = 1\n elif view_2_hit == 1:\n view = 2\n elif view_3_hit == 1:\n view = 3\n elif view_4_hit == 1:\n view = 4\n else:\n back_button_hit = view_back_button.collidepoint(event.pos)\n if back_button_hit == 1:\n view = 0\n\n # GAME STATE UPDATES\n # All game math and comparisons happen here\n\n # DRAWING\n screen.fill((175, 175, 175))# always the first drawing command\n\n # LEFT SCREEN\n left_hud(DEFAULT[0], DEFAULT[1], DEFAULT[2])\n # RIGHT SCREEN\n if faction == 0:\n if view == 0:\n faction_select()\n elif view == 1:\n view_faction1()\n #Back Button\n pygame.draw.rect(screen, (0,0,0), view_back_button, 0)\n back_text = menu_font.render(\"BACK\", True, (175, 0, 175))\n screen.blit(back_text, [310, 653])\n elif view == 2:\n view_faction2()\n #Back Button\n pygame.draw.rect(screen, (0,0,0), view_back_button, 0)\n back_text = menu_font.render(\"BACK\", True, (175, 0, 175))\n screen.blit(back_text, [310, 653])\n elif view == 3:\n view_faction3()\n #Back Button\n pygame.draw.rect(screen, (0,0,0), view_back_button, 0)\n back_text = menu_font.render(\"BACK\", True, (175, 0, 175))\n screen.blit(back_text, [310, 653])\n elif view == 4:\n view_faction4()\n #Back Button\n pygame.draw.rect(screen, (0,0,0), view_back_button, 0)\n back_text = menu_font.render(\"BACK\", True, (175, 0, 175))\n screen.blit(back_text, [310, 653])\n\n elif faction == 1:\n faction_1_icon(5, 30)\n pygame.draw.rect(screen, (175, 0, 175), [5, 30, 190, 190], 2)\n elif faction == 2:\n faction_2_icon(5, 30)\n pygame.draw.rect(screen, (175, 0, 175), [5, 30, 190, 190], 2)\n elif faction == 3:\n faction_3_icon(5, 30)\n pygame.draw.rect(screen, (175, 0, 175), [5, 30, 190, 190], 2)\n elif faction == 4:\n faction_4_icon(5, 30)\n pygame.draw.rect(screen, (175, 0, 175), [5, 30, 190, 190], 2)\n\n # Must be the last two lines\n # of the game loop\n pygame.display.flip()\n clock.tick(60)\n\n\n#Display menu function\ndef run_menu():\n global running\n global faction\n menu = True\n while menu and faction == 0:\n # EVENT HANDLING\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n menu = False\n running = False\n elif event.type == QUIT:\n menu = False\n running = False\n elif event.type == MOUSEBUTTONDOWN:\n play_hit = play_button.collidepoint(event.pos)\n if play_hit == 1:\n menu_to_game_transition()\n entry_screen()\n\n\n # GAME STATE UPDATES\n # All game math and comparisons happen here\n\n # DRAWING\n screen.fill((255, 255, 255)) # always the first drawing command\n\n #Load wallpaper\n image = pygame.image.load(\"castle_invasion.jpg\")\n screen.blit(image, (0, 0))\n # Title\n title_faction = game_title_font.render(\"Faction\", True, (0, 0, 0))\n title_wars = 
game_title_font.render(\"Defence\", True, (0, 0, 0))\n screen.blit(title_faction, [335, 5])\n screen.blit(title_wars, [320, 90])\n\n # BUTTON\n #Play button\n pygame.draw.ellipse(screen, (0, 0, 0), play_button, 0)\n play_text = menu_font.render(\"PLAY\", True, (255, 255, 0))\n screen.blit(play_text, [460, 735])\n\n # Must be the last two lines\n # of the game loop\n pygame.display.flip()\n clock.tick(60)\n\n#Used to do the below transition\ntransition_time = 60\ny_pos = 90\ndef menu_to_game_transition():\n global transition_time\n global y_pos\n y_pos = 90\n transition_time = 60\n while transition_time > 0:\n # #Load wallpaper\n image = pygame.image.load(\"castle_invasion.jpg\")\n screen.blit(image, (0, 0))\n # Title\n title_faction = game_title_font.render(\"Faction\", True, (0, 0, 0))\n title_wars = game_title_font.render(\"Defence\", True, (0, 0, 0))\n screen.blit(title_faction, [335, 5])\n screen.blit(title_wars, [320, y_pos*1.1])\n y_pos *= 1.1\n #Update transition_time\n transition_time-=1\n pygame.display.flip()\n clock.tick(60)\n\ndef ending_screen():\n global level\n global current_health\n\n playing = True\n won = False\n y_position = 1\n while playing:\n # EVENT HANDLING\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n playing = False\n elif event.type == QUIT:\n playing = False\n #Test if user won\n if level >= 11:\n won = True\n\n #DRAW\n screen.fill((255,255,255))\n # Different ending screens depending on how the game concludes\n\n if won:\n #Victory wallpaper\n image = pygame.image.load(\"victory_wallpaper.jpg\")\n screen.blit(image, (0, 0))\n #Victory text\n end_text = game_title_font.render(\"Victory!\", True, (0, 70, 0))\n #Bits - Rain/Hail\n for i in range(85):\n rand_x = random.randint(0, 1000)\n rand_y = random.randint(0, 800)\n pygame.draw.circle(screen, (255, 255, 255), [rand_x, rand_y], 1)\n else:\n #Defeat wallpaper\n image = pygame.image.load(\"defeat_wallpaper.jpg\")\n screen.blit(image, (0, 0))\n #Defeat text\n end_text = game_title_font.render(\"Defeat...\", True, (170, 0, 0))\n #Bits - Moths concentrated on lamp\n for i in range(85):\n rand_x = random.randint(250, 400)\n rand_y = random.randint(550, 700)\n pygame.draw.circle(screen, (0, 0, 0), [rand_x, rand_y], 2)\n #Bits - Random moths\n for i in range(170):\n rand_x = random.randint(0, 1000)\n rand_y = random.randint(0, 800)\n pygame.draw.circle(screen, (0, 0, 0), [rand_x, rand_y], 2)\n #Updare position\n if y_position <= 800:\n #Display it dropping\n screen.blit(end_text, [320, y_position * 1.1])\n y_position*=1.1\n else:\n #Display at center\n screen.blit(end_text, [320, 340])\n\n #Display\n pygame.display.flip()\n clock.tick(60)\n\n\n# ---------------------------\ndef run_outlaws():\n global outlaw_counter\n global level\n global current_health\n global current_XP\n global required_XP\n global pending_removal_outlaws\n # Add outlaws if outlaw counter is 0\n if outlaw_counter == 0:\n for i in range(level):\n outlaws.append((1000, random.randint(0, 800)))\n # Update current outlaws (Add them for removal if left hud is hit, otherwise move left)\n for i in range(len(outlaws)):\n if outlaws[i][0] <= 200:\n current_health -= 100\n pending_removal_outlaws.append(outlaws[i])\n else:\n outlaws[i] = (outlaws[i][0]-outlaw_speed, outlaws[i][1])\n # Remove everything that needs to be removed from the outlaws\n for i in range(len(pending_removal_outlaws)):\n # Test first (to protect against double clicks)\n if outlaws.__contains__(pending_removal_outlaws[i]):\n 
outlaws.remove(pending_removal_outlaws[i])\n # Clear the pending\n pending_removal_outlaws.clear()\n # Update the counter\n outlaw_counter = (outlaw_counter + 1) % 120\n # Update Level\n if current_XP >= required_XP:\n level += 1\n current_XP = 0\n required_XP = level * 1000\n\ndef run_speedy():\n global speedy_counter\n global level\n global current_health\n global current_XP\n global required_XP\n global pending_removal_speedy\n # Add speedy if speedy counter is 0\n if speedy_counter == 0:\n for i in range(level):\n speedy.append((1000, random.randint(0, 800)))\n # Update current speedy (Add them for removal if left hud is hit, otherwise move left)\n for i in range(len(speedy)):\n if speedy[i][0] <= 200:\n current_health -= 500\n #Remove by value and not index now\n pending_removal_speedy.append(speedy[i])\n else:\n speedy[i] = (speedy[i][0]-speedy_speed, speedy[i][1])\n # Remove everything that needs to be removed from the speedy\n for i in range(len(pending_removal_speedy)):\n # Test first (to protect against double clicks)\n if speedy.__contains__(pending_removal_speedy[i]):\n speedy.remove(pending_removal_speedy[i])\n\n # Clear the pending\n pending_removal_speedy.clear()\n # Update the counter\n speedy_counter = (speedy_counter + 1) % 720\n # Update Level\n if current_XP >= required_XP:\n level += 1\n current_XP = 0\n required_XP = level * 1000\n\ndef run_brutes():\n global brute_counter\n global level\n global current_health\n global current_XP\n global required_XP\n global pending_removal_brutes\n # Add brute if brute counter is 0\n if brute_counter == 0:\n for i in range(level):\n brutes.append((1000, random.randint(0, 800)))\n # Update current brutes (Add them for removal if left hud is hit, otherwise move left)\n for i in range(len(brutes)):\n if brutes[i][0] <= 200:\n current_health -= 1000\n pending_removal_brutes.append(brutes[i])\n else:\n brutes[i] = (brutes[i][0]-brute_speed, brutes[i][1])\n # Remove everything that needs to be removed from the outlaws\n for i in range(len(pending_removal_brutes)):\n # Test first (to protect against double clicks)\n if brutes.__contains__(pending_removal_brutes[i]):\n brutes.remove(pending_removal_brutes[i])\n # Clear the pending\n pending_removal_brutes.clear()\n # Update the counter\n brute_counter = (brute_counter + 1) % 480\n # Update Level\n if current_XP >= required_XP:\n level += 1\n current_XP = 0\n required_XP = level * 1000\n\n\ndef run_shifters():\n global shifter_counter\n global level\n global current_health\n global current_XP\n global required_XP\n global pending_removal_shifters\n # Add shifter if shifter counter is 0\n if shifter_counter == 0:\n for i in range(level):\n shifters.append((1000, random.randint(0, 800)))\n # Update current shifters (Add them for removal if left hud is hit, otherwise move left)\n for i in range(len(shifters)):\n if shifters[i][0] <= 200:\n current_health -= 500\n pending_removal_shifters.append(shifters[i])\n else:\n shifters[i] = (shifters[i][0]-shifter_speed, shifters[i][1])\n # Remove everything that needs to be removed from the shifters\n for i in range(len(pending_removal_shifters)):\n #Test first (to protect against double clicks)\n if shifters.__contains__(pending_removal_shifters[i]):\n shifters.remove(pending_removal_shifters[i])\n # Clear the pending\n pending_removal_shifters.clear()\n # Update the counter\n shifter_counter = (shifter_counter + 1) % 600\n # Update Level\n if current_XP >= required_XP:\n level += 1\n current_XP = 0\n required_XP = level * 1000\n\n\ndef 
run_chiefs():\n global chief_counter\n global level\n global current_health\n global current_XP\n global required_XP\n global pending_removal_chiefs\n # Add chief if chief counter is 0\n if chief_counter == 0:\n for i in range(level):\n chiefs.append((1000, random.randint(0, 750)))\n # Update current chiefs (Add them for removal if left hud is hit, otherwise move left)\n for i in range(len(chiefs)):\n if chiefs[i][0] <= 200:\n current_health -= 2500\n pending_removal_chiefs.append(chiefs[i])\n else:\n chiefs[i] = (chiefs[i][0]-chief_speed, chiefs[i][1])\n # Remove everything that needs to be removed from the chiefs\n for i in range(len(pending_removal_chiefs)):\n #Test first (to protect against double clicks)\n if chiefs.__contains__(pending_removal_chiefs[i]):\n chiefs.remove(pending_removal_chiefs[i])\n # Clear the pending\n pending_removal_chiefs.clear()\n # Update the counter\n chief_counter = (chief_counter + 1) % 6000\n # Update Level\n if current_XP >= required_XP:\n level += 1\n current_XP = 0\n required_XP = level * 1000\n\n\n\n# ---------------------------\n#Shifter rgb\nshifter_colour = 0\nshifter_direction = 1\n#ACTUAL CODE TO RUN GAME\nwhile running and current_health > 0 and level < 11:\n run_menu()\n # EVENT HANDLING\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n elif event.key == K_1:\n if faction == 1:\n pyro_1()\n elif faction == 2:\n naturo_1()\n elif faction == 3:\n cryo_1()\n else:\n electro_1(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n elif event.key == K_2:\n if faction == 1:\n pyro_2(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n elif faction == 2:\n naturo_2()\n elif faction == 3:\n cryo_2()\n else:\n electro_2(pygame.mouse.get_pos()[1])\n elif event.key == K_3:\n if faction == 1:\n pyro_ult()\n elif faction == 2:\n naturo_ult()\n elif faction == 3:\n cryo_ult()\n else:\n electro_ult()\n elif event.type == QUIT:\n running = False\n elif event.type == MOUSEBUTTONDOWN:\n #If the mouse hit ANY target\n hit = False\n #Test if something is hit\n for i in range(len(outlaws)):\n if polymorph_time_counter == 0:\n if not hit:\n outlaw_hit = pygame.draw.circle(screen, (0, 0, 0), outlaws[i], 10).collidepoint(event.pos)\n if outlaw_hit == 1:\n current_XP+=50\n pending_removal_outlaws.append((outlaws[i][0]-outlaw_speed, outlaws[i][1]))\n hit = True\n else:\n if not hit:\n outlaw_hit = pygame.draw.circle(screen, (0, 0, 0), outlaws[i], 20).collidepoint(event.pos)\n if outlaw_hit == 1:\n current_XP+=50\n pending_removal_outlaws.append((outlaws[i][0]-outlaw_speed, outlaws[i][1]))\n hit = True\n for i in range(len(speedy)):\n if polymorph_time_counter == 0:\n if not hit:\n speedy_hit = pygame.draw.circle(screen, (0, 0, 0), speedy[i], 10).collidepoint(event.pos)\n if speedy_hit == 1:\n current_XP+=100\n pending_removal_speedy.append((speedy[i][0]-speedy_speed, speedy[i][1]))\n hit = True\n else:\n if not hit:\n speedy_hit = pygame.draw.circle(screen, (0, 0, 0), speedy[i], 20).collidepoint(event.pos)\n if speedy_hit == 1:\n current_XP += 100\n pending_removal_speedy.append((speedy[i][0]-speedy_speed, speedy[i][1]))\n hit = True\n for i in range(len(brutes)):\n if polymorph_time_counter == 0:\n if not hit:\n brute_hit = pygame.draw.circle(screen, (0, 0, 0), brutes[i], 25).collidepoint(event.pos)\n if brute_hit == 1:\n current_XP+=250\n pending_removal_brutes.append((brutes[i][0]-brute_speed, brutes[i][1]))\n hit = True\n else:\n if not hit:\n brute_hit = pygame.draw.circle(screen, (0, 0, 0), brutes[i], 
50).collidepoint(event.pos)\n                        if brute_hit == 1:\n                            current_XP += 250\n                            pending_removal_brutes.append((brutes[i][0]-brute_speed, brutes[i][1]))\n                            hit = True\n            for i in range(len(shifters)):\n                if polymorph_time_counter == 0:\n                    if not hit:\n                        shifter_hit = pygame.draw.circle(screen, (0, 0, 0), shifters[i], 15).collidepoint(event.pos)\n                        if shifter_hit == 1:\n                            current_XP+=100\n                            pending_removal_shifters.append((shifters[i][0]-shifter_speed, shifters[i][1]))\n                            hit = True\n                else:\n                    if not hit:\n                        shifter_hit = pygame.draw.circle(screen, (0, 0, 0), shifters[i], 30).collidepoint(event.pos)\n                        if shifter_hit == 1:\n                            current_XP += 100\n                            pending_removal_shifters.append((shifters[i][0]-shifter_speed, shifters[i][1]))\n                            hit = True\n            for i in range(len(chiefs)):\n                if polymorph_time_counter == 0:\n                    if not hit:\n                        chief_hit = pygame.Rect([chiefs[i][0], chiefs[i][1], 50, 50]).collidepoint(event.pos)\n                        if chief_hit == 1:\n                            current_XP+=500\n                            pending_removal_chiefs.append((chiefs[i][0]-chief_speed, chiefs[i][1]))\n                            for j in range(level):\n                                outlaws.append((chiefs[i][0]-chief_speed, random.randint(0,800)))\n                                if j%2 == 0:\n                                    speedy.append((chiefs[i][0]-chief_speed, random.randint(0,800)))\n                                if j%4 == 0:\n                                    brutes.append(((chiefs[i][0]-chief_speed, random.randint(0,800))))\n                                if j%5 == 0:\n                                    shifters.append((chiefs[i][0]-chief_speed, random.randint(0,800)))\n                            hit = True\n                else:\n                    if not hit:\n                        chief_hit = pygame.Rect([chiefs[i][0], chiefs[i][1], 100, 100]).collidepoint(event.pos)\n                        if chief_hit == 1:\n                            current_XP += 500\n                            pending_removal_chiefs.append((chiefs[i][0]-chief_speed, chiefs[i][1]))\n                            for j in range(level):\n                                outlaws.append((chiefs[i][0]-chief_speed, random.randint(0,800)))\n                                if j%5 == 0:\n                                    speedy.append((chiefs[i][0]-chief_speed, random.randint(0,800)))\n                                if j%2 == 0:\n                                    brutes.append(((chiefs[i][0]-chief_speed, random.randint(0,800))))\n                                if j%4 == 0:\n                                    shifters.append((chiefs[i][0]-chief_speed, random.randint(0,800)))\n                            hit = True\n            if hit and faction==1 and healing_attacks!=0:\n                if current_health + 100 >= total_health:\n                    current_health = total_health\n                else:\n                    current_health+=100\n                #Reduces Cooldowns too\n                if pyro_1_cd > 60:\n                    pyro_1_cd-=60\n                else:\n                    pyro_1_cd = 0\n                if pyro_2_cd > 60:\n                    pyro_2_cd-=60\n                else:\n                    pyro_2_cd = 0\n                if pyro_3_cd > 60:\n                    pyro_3_cd-=60\n                else:\n                    pyro_3_cd = 0\n                #Decrease number left\n                healing_attacks-=1\n\n    # GAME STATE UPDATES\n    # All game math and comparisons happen here\n    #SHIFT COLOUR FOR SHIFTERS\n    if shifter_colour == 255:\n        shifter_direction = -1\n        shifter_colour-=1\n    elif shifter_colour == 0:\n        shifter_direction = 1\n        shifter_colour+=1\n    else:\n        shifter_colour+=shifter_direction\n\n    #Load the enemies\n    run_outlaws()\n    run_speedy()\n    run_brutes()\n    run_shifters()\n    run_chiefs()\n\n    #Update cds\n    if pyro_1_cd > 0:\n        pyro_1_cd -= 1\n    if pyro_2_cd > 0:\n        pyro_2_cd -= 1\n    if pyro_3_cd > 0:\n        pyro_3_cd -= 1\n    if naturo_1_cd > 0:\n        naturo_1_cd -= 1\n    if naturo_2_cd > 0:\n        naturo_2_cd -= 1\n    if naturo_3_cd > 0:\n        naturo_3_cd -= 1\n    if cryo_1_cd > 0:\n        cryo_1_cd -= 1\n    if cryo_2_cd > 0:\n        cryo_2_cd -= 1\n    if cryo_3_cd > 0:\n        cryo_3_cd -= 1\n    if electro_1_cd > 0:\n        electro_1_cd -= 1\n    if electro_2_cd > 0:\n        electro_2_cd -= 1\n    if electro_3_cd > 0:\n        electro_3_cd -= 1\n\n    # DRAWING\n    screen.fill((255, 255, 255)) # always the first drawing command\n    # LEFT SCREEN\n    if faction!=0:\n        left_hud(current_faction_colour[0], current_faction_colour[1], current_faction_colour[2])\n\n    # LEVEL + XP DISPLAY\n    level_display()\n\n    # DRAW ENEMIES\n    if polymorph_time_counter == 0:\n        for i in 
range(len(outlaws)):\n            pygame.draw.circle(screen, (0, 0, 0), outlaws[i], 10)\n        for i in range(len(speedy)):\n            pygame.draw.circle(screen, (150, 150, 150), speedy[i], 10)\n        for i in range(len(brutes)):\n            pygame.draw.circle(screen, (150, 0, 0), brutes[i], 25)\n        for i in range(len(shifters)):\n            pygame.draw.circle(screen, (shifter_colour, shifter_colour, shifter_colour), shifters[i], 15)\n        for i in range(len(chiefs)):\n            pygame.draw.rect(screen, (random.randint(0,255), random.randint(0,255), random.randint(0,255)), [chiefs[i][0], chiefs[i][1], 50, 50], 0)\n    else:\n        for i in range(len(outlaws)):\n            pygame.draw.circle(screen, (0, 0, 0), outlaws[i], 20)\n        for i in range(len(speedy)):\n            pygame.draw.circle(screen, (150, 150, 150), speedy[i], 20)\n        for i in range(len(brutes)):\n            pygame.draw.circle(screen, (150, 0, 0), brutes[i], 50)\n        for i in range(len(shifters)):\n            pygame.draw.circle(screen, (shifter_colour, shifter_colour, shifter_colour), shifters[i], 30)\n        for i in range(len(chiefs)):\n            pygame.draw.rect(screen, (random.randint(0,255), random.randint(0,255), random.randint(0,255)), [chiefs[i][0], chiefs[i][1], 100, 100], 0)\n        polymorph_time_counter-=1\n\n    #Draw circle for Pyro's ability 2\n    if circle_time > 0:\n        pygame.draw.circle(screen, (12*circle_time, 6*circle_time, 0), [circle_x, circle_y], 100)\n        circle_time-=1\n\n    #Apply healing for Cryo's ability 1\n    if cryo_time > 0:\n        if current_health!=total_health:\n            current_health+=2\n        cryo_time-=1\n\n    #Apply slow for Cryo's ability 2\n    if slow_time > 0:\n        outlaw_speed = 0.25\n        speedy_speed = 1.25\n        brute_speed = 0.125\n        shifter_speed = 0.25\n        chief_speed = 0.125\n        slow_time-=1\n    else:\n        outlaw_speed = 1\n        speedy_speed = 5\n        brute_speed = 0.5\n        shifter_speed = 1\n        chief_speed = 0.5\n\n    #Apply freeze for Cryo's ultimate\n    if freeze_time > 0:\n        outlaw_speed = 0\n        speedy_speed = 0\n        brute_speed = 0\n        shifter_speed = 0\n        chief_speed = 0\n        freeze_time-=1\n\n    outlaw_counter-=1\n    speedy_counter-=1\n    brute_counter-=1\n    shifter_counter-=1\n    chief_counter-=1\n\n\n\n    #Draw Line for Electro's ability 2\n    if line_time > 0:\n        pygame.draw.rect(screen, (255, 255, 0), [200, line_y-8, 800, 16], 0)\n        line_time-=1\n\n    #Apply shockwave purge for Electro's Ultimate\n    if shockwave_time > 0:\n        aoe_circle = pygame.draw.circle(screen, (255, 255, 0), [pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]], 100)\n        for i in range(len(outlaws)):\n            aoe_hit = aoe_circle.collidepoint(outlaws[i])\n            if aoe_hit == 1:\n                pending_removal_outlaws.append((outlaws[i][0]-outlaw_speed, outlaws[i][1]))\n        for i in range(len(speedy)):\n            aoe_hit = aoe_circle.collidepoint(speedy[i])\n            if aoe_hit == 1:\n                pending_removal_speedy.append((speedy[i][0]-speedy_speed, speedy[i][1]))\n        for i in range(len(brutes)):\n            aoe_hit = aoe_circle.collidepoint(brutes[i])\n            if aoe_hit == 1:\n                pending_removal_brutes.append((brutes[i][0]-brute_speed, brutes[i][1]))\n        for i in range(len(shifters)):\n            aoe_hit = aoe_circle.collidepoint(shifters[i])\n            if aoe_hit == 1:\n                pending_removal_shifters.append((shifters[i][0]-shifter_speed, shifters[i][1]))\n        for i in range(len(chiefs)):\n            aoe_hit = aoe_circle.collidepoint(chiefs[i])\n            if aoe_hit == 1:\n                pending_removal_chiefs.append((chiefs[i][0]-chief_speed, chiefs[i][1]))\n        shockwave_time-=1\n\n    # Must be the last two lines\n    # of the game loop\n    pygame.display.flip()\n    clock.tick(60)\n\n#Display screen 
end\nending_screen()\npygame.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":55697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546542565","text":"from collections import namedtuple\nimport traceback\nimport logging\nimport attr\nimport os\n\nfrom .. import repositories, entities, services\n\nlogger = logging.getLogger(name=__name__)\n\n\n@attr.s\nclass Dataset(entities.BaseEntity):\n \"\"\"\n Dataset object\n \"\"\"\n # dataset information\n id = attr.ib()\n url = attr.ib()\n name = attr.ib()\n annotated = attr.ib(repr=False)\n creator = attr.ib()\n projects = attr.ib(repr=False)\n itemsCount = attr.ib()\n metadata = attr.ib(repr=False)\n directoryTree = attr.ib(repr=False)\n export = attr.ib(repr=False)\n\n # name change when to_json\n created_at = attr.ib()\n items_url = attr.ib(repr=False)\n readable_type = attr.ib(repr=False)\n access_level = attr.ib(repr=False)\n driver = attr.ib(repr=False)\n\n # api\n _client_api = attr.ib(type=services.ApiClient, repr=False)\n _instance_map = attr.ib(default=None, repr=False)\n\n # entities\n _project = attr.ib(default=None, repr=False)\n\n # repositories\n _datasets = attr.ib(repr=False, default=None)\n _repositories = attr.ib(repr=False)\n\n # defaults\n _ontology_ids = attr.ib(default=None, repr=False)\n _labels = attr.ib(default=None, repr=False)\n _directory_tree = attr.ib(default=None, repr=False)\n\n @staticmethod\n def _protected_from_json(project: entities.Project,\n _json: dict,\n client_api: services.ApiClient,\n datasets=None,\n is_fetched=True):\n \"\"\"\n Same as from_json but with try-except to catch if error\n :param is_fetched: is Entity fetched from Platform\n :param _json: _json response from host\n :param project: dataset's project\n :param datasets: Datasets repository\n :param client_api: client_api\n :return: Dataset object\n \"\"\"\n try:\n dataset = Dataset.from_json(project=project,\n _json=_json,\n client_api=client_api,\n datasets=datasets,\n is_fetched=is_fetched)\n status = True\n except Exception:\n dataset = traceback.format_exc()\n status = False\n return status, dataset\n\n @classmethod\n def from_json(cls,\n project: entities.Project,\n _json: dict,\n client_api: services.ApiClient,\n datasets=None,\n is_fetched=True):\n \"\"\"\n Build a Dataset entity object from a json\n\n :param is_fetched: is Entity fetched from Platform\n :param _json: _json response from host\n :param project: dataset's project\n :param datasets: Datasets repository\n :param client_api: client_api\n :return: Dataset object\n \"\"\"\n inst = cls(metadata=_json.get('metadata', None),\n directoryTree=_json.get('directoryTree', None),\n readable_type=_json.get('readableType', None),\n access_level=_json.get('accessLevel', None),\n created_at=_json.get('createdAt', None),\n itemsCount=_json.get('itemsCount', None),\n annotated=_json.get('annotated', None),\n projects=_json.get('projects', None),\n creator=_json.get('creator', None),\n items_url=_json.get('items', None),\n export=_json.get('export', None),\n driver=_json.get('driver', None),\n name=_json.get('name', None),\n url=_json.get('url', None),\n id=_json.get('id', None),\n datasets=datasets,\n client_api=client_api,\n project=project)\n inst.is_fetched = is_fetched\n return inst\n\n def to_json(self):\n \"\"\"\n Returns platform _json format of object\n\n :return: platform json format of object\n \"\"\"\n _json = attr.asdict(self, 
filter=attr.filters.exclude(attr.fields(Dataset)._client_api,\n                                                    attr.fields(Dataset)._project,\n                                                    attr.fields(Dataset)._datasets,\n                                                    attr.fields(Dataset)._repositories,\n                                                    attr.fields(Dataset)._ontology_ids,\n                                                    attr.fields(Dataset)._labels,\n                                                    attr.fields(Dataset)._directory_tree,\n                                                    attr.fields(Dataset)._instance_map,\n                                                    attr.fields(Dataset).access_level,\n                                                    attr.fields(Dataset).readable_type,\n                                                    attr.fields(Dataset).created_at,\n                                                    attr.fields(Dataset).items_url))\n        _json.update({'items': self.items_url})\n        _json['readableType'] = self.readable_type\n        _json['createdAt'] = self.created_at\n        _json['accessLevel'] = self.access_level\n        return _json\n\n    @property\n    def labels(self):\n        if self._labels is None:\n            self._labels = self.recipes.list()[0].ontologies.list()[0].labels\n        return self._labels\n\n    @property\n    def labels_flat_dict(self):\n        flatten_dict = dict()\n\n        def add_to_dict(tag: str, father: entities.Label):\n            flatten_dict[tag] = father\n            for child in father.children:\n                add_to_dict('{}.{}'.format(tag, child.tag), child)\n\n        for label in self.labels:\n            add_to_dict(label.tag, label)\n        return flatten_dict\n\n    @property\n    def instance_map(self):\n        if self._instance_map is None:\n            labels = [label for label in self.labels_flat_dict]\n            labels.sort()\n            # each label gets index as instance id\n            self._instance_map = {label: (i_label + 1) for i_label, label in enumerate(labels)}\n        return self._instance_map\n\n    @instance_map.setter\n    def instance_map(self, value: dict):\n        \"\"\"\n        instance mapping for creating instance mask\n        :param value: dictionary {label: map_id}\n        \"\"\"\n        if not isinstance(value, dict):\n            raise ValueError('input must be a dictionary of {label_name: instance_id}')\n        self._instance_map = value\n\n    @property\n    def ontology_ids(self):\n        if self._ontology_ids is None:\n            self._ontology_ids = list()\n            if self.metadata is not None and 'system' in self.metadata and 'recipes' in self.metadata['system']:\n                recipe_ids = self.get_recipe_ids()\n                for rec_id in recipe_ids:\n                    recipe = self.recipes.get(recipe_id=rec_id)\n                    self._ontology_ids += recipe.ontologyIds\n        return self._ontology_ids\n\n    @_repositories.default\n    def set_repositories(self):\n        reps = namedtuple('repositories',\n                          field_names=['items', 'recipes', 'datasets', 'assignments', 'tasks', 'annotations',\n                                       'ontologies'])\n        if self._project is None:\n            datasets = repositories.Datasets(client_api=self._client_api, project=self._project)\n        else:\n            datasets = self._project.datasets\n\n        r = reps(items=repositories.Items(client_api=self._client_api, dataset=self, datasets=datasets),\n                 recipes=repositories.Recipes(client_api=self._client_api, dataset=self),\n                 assignments=repositories.Assignments(project=self._project, client_api=self._client_api, dataset=self),\n                 tasks=repositories.Tasks(client_api=self._client_api, project=self._project, dataset=self),\n                 annotations=repositories.Annotations(client_api=self._client_api, dataset=self),\n                 datasets=datasets,\n                 ontologies=repositories.Ontologies(client_api=self._client_api, dataset=self))\n        return r\n\n    @property\n    def items(self):\n        assert isinstance(self._repositories.items, repositories.Items)\n        return self._repositories.items\n\n    @property\n    def ontologies(self):\n        assert isinstance(self._repositories.ontologies, repositories.Ontologies)\n        return self._repositories.ontologies\n\n    @property\n    def recipes(self):\n        assert isinstance(self._repositories.recipes, repositories.Recipes)\n        return self._repositories.recipes\n\n    @property\n    def datasets(self):\n        assert isinstance(self._repositories.datasets, 
repositories.Datasets)\n return self._repositories.datasets\n\n @property\n def assignments(self):\n assert isinstance(self._repositories.assignments, repositories.Assignments)\n return self._repositories.assignments\n\n @property\n def tasks(self):\n assert isinstance(self._repositories.tasks, repositories.Tasks)\n return self._repositories.tasks\n\n @property\n def annotations(self):\n assert isinstance(self._repositories.annotations, repositories.Annotations)\n return self._repositories.annotations\n\n @property\n def project(self):\n if self._project is None:\n # get from cache\n project = self._client_api.state_io.get('project')\n if project is not None:\n # build entity from json\n p = entities.Project.from_json(_json=project, client_api=self._client_api)\n # check if dataset belongs to project\n if p.id in self.projects:\n self._project = p\n if self._project is None:\n self._project = repositories.Projects(client_api=self._client_api).get(project_id=self.projects[0],\n fetch=None)\n assert isinstance(self._project, entities.Project)\n return self._project\n\n @project.setter\n def project(self, project):\n if not isinstance(project, entities.Project):\n raise ValueError('Must input a valid Project entity')\n self._project = project\n\n @property\n def directory_tree(self):\n if self._directory_tree is None:\n self._directory_tree = self.project.datasets.directory_tree(dataset_id=self.id)\n assert isinstance(self._directory_tree, entities.DirectoryTree)\n return self._directory_tree\n\n def __copy__(self):\n return Dataset.from_json(_json=self.to_json(),\n project=self._project,\n client_api=self._client_api,\n is_fetched=self.is_fetched,\n datasets=self.datasets)\n\n def __get_local_path__(self):\n if self._project is not None:\n local_path = os.path.join(services.service_defaults.DATALOOP_PATH,\n 'projects',\n self.project.name,\n 'datasets',\n self.name)\n else:\n local_path = os.path.join(services.service_defaults.DATALOOP_PATH,\n 'datasets',\n '%s_%s' % (self.name, self.id))\n return local_path\n\n @staticmethod\n def serialize_labels(labels_dict):\n \"\"\"\n Convert hex color format to rgb\n\n :param labels_dict: dict of labels\n :return: dict of converted labels\n \"\"\"\n dataset_labels_dict = dict()\n for label, color in labels_dict.items():\n dataset_labels_dict[label] = '#%02x%02x%02x' % color\n return dataset_labels_dict\n\n def get_recipe_ids(self):\n \"\"\"\n Get dataset recipe Ids\n\n :return: list of recipe ids\n \"\"\"\n return self.metadata['system']['recipes']\n\n def delete(self, sure=False, really=False):\n \"\"\"\n Delete a dataset forever!\n\n :param sure: are you sure you want to delete?\n :param really: really really?\n :return:\n \"\"\"\n return self.datasets.delete(dataset_id=self.id,\n sure=sure,\n really=really)\n\n def update(self, system_metadata=False):\n \"\"\"\n Update dataset field\n\n :param system_metadata: bool - True, if you want to change metadata system\n :return:\n \"\"\"\n return self.datasets.update(dataset=self,\n system_metadata=system_metadata)\n\n def clone(self, clone_name, filters=None, with_items_annotations=True, with_metadata=True,\n with_task_annotations_status=True):\n \"\"\"\n Clone dataset\n\n :param clone_name: new dataset name\n :param filters: Filters entity or a query dict\n :param with_items_annotations: clone all item's annotations\n :param with_metadata: clone metadata\n :param with_task_annotations_status: clone task annotations status\n :return:\n \"\"\"\n return self.datasets.clone(dataset_id=self.id,\n 
filters=filters,\n clone_name=clone_name,\n with_metadata=with_metadata,\n with_items_annotations=with_items_annotations,\n with_task_annotations_status=with_task_annotations_status)\n\n def download_annotations(self,\n local_path=None,\n filters=None,\n annotation_options=None,\n overwrite=False,\n thickness=1,\n with_text=False,\n remote_path=None,\n num_workers=32):\n\n return self.datasets.download_annotations(dataset=self,\n local_path=local_path,\n overwrite=overwrite,\n filters=filters,\n annotation_options=annotation_options,\n thickness=thickness,\n with_text=with_text,\n remote_path=remote_path,\n num_workers=num_workers)\n\n def checkout(self):\n \"\"\"\n Checkout the dataset\n\n :return:\n \"\"\"\n self.datasets.checkout(dataset=self)\n\n def open_in_web(self):\n \"\"\"\n Open the dataset in web platform\n\n :return:\n \"\"\"\n self.datasets.open_in_web(dataset=self)\n\n def add_label(self, label_name, color=None, children=None, attributes=None, display_label=None, label=None,\n recipe_id=None, ontology_id=None):\n \"\"\"\n Add single label to dataset\n\n :param label_name:\n :param color:\n :param children:\n :param attributes:\n :param display_label:\n :param label:\n :param recipe_id: optional\n :param ontology_id: optional\n :return: label entity\n \"\"\"\n # get recipe\n if recipe_id is None:\n recipe_id = self.get_recipe_ids()[0]\n recipe = self.recipes.get(recipe_id=recipe_id)\n\n # get ontology\n if ontology_id is None:\n ontology_id = recipe.ontologyIds[0]\n ontology = recipe.ontologies.get(ontology_id=ontology_id)\n\n # add label\n added_label = ontology.add_label(label_name=label_name,\n color=color,\n children=children,\n attributes=attributes,\n display_label=display_label,\n label=label,\n update_ontology=True)\n\n return added_label\n\n def add_labels(self, label_list, ontology_id=None, recipe_id=None):\n \"\"\"\n Add labels to dataset\n\n :param label_list:\n :param recipe_id: optional\n :param ontology_id: optional\n :return: label entities\n \"\"\"\n # get recipe\n if recipe_id is None:\n recipe_id = self.get_recipe_ids()[0]\n recipe = self.recipes.get(recipe_id=recipe_id)\n\n # get ontology\n if ontology_id is None:\n ontology_id = recipe.ontologyIds[0]\n ontology = recipe.ontologies.get(ontology_id=ontology_id)\n\n # add labels to ontology\n added_labels = ontology.add_labels(label_list=label_list, update_ontology=True)\n\n return added_labels\n\n def download(\n self,\n filters=None,\n local_path=None,\n file_types=None,\n annotation_options=None,\n overwrite=False,\n to_items_folder=True,\n thickness=1,\n with_text=False,\n without_relative_path=None\n ):\n \"\"\"\n Download dataset by filters.\n Filtering the dataset for items and save them local\n Optional - also download annotation, mask, instance and image mask of the item\n\n :param local_path: local folder or filename to save to.\n :param filters: Filters entity or a dictionary containing filters parameters\n :param to_items_folder: Create 'items' folder and download items to it\n :param overwrite: optional - default = False\n :param file_types: a list of file type to download. 
e.g. ['video/webm', 'video/mp4', 'image/jpeg', 'image/png']\n        :param annotation_options: download annotations options: dl.ViewAnnotationOptions.list()\n        :param with_text: optional - add text to annotations, default = False\n        :param thickness: optional - line thickness, if -1 annotation will be filled, default =1\n        :param without_relative_path: string - remote path - download items without the relative path from platform\n        :return: Output (list)\n        \"\"\"\n        return self.items.download(filters=filters,\n                                   local_path=local_path,\n                                   file_types=file_types,\n                                   annotation_options=annotation_options,\n                                   overwrite=overwrite,\n                                   to_items_folder=to_items_folder,\n                                   thickness=thickness,\n                                   with_text=with_text,\n                                   without_relative_path=without_relative_path)\n\n    def delete_labels(self, label_names):\n        \"\"\"\n        Delete labels from dataset's ontologies\n\n        :param label_names: label object/ label name / list of label objects / list of label names\n        :return:\n        \"\"\"\n        for recipe in self.recipes.list():\n            for ontology in recipe.ontologies.list():\n                ontology.delete_labels(label_names=label_names)\n        self._labels = None\n","sub_path":"dtlpy/entities/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":19538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420601608","text":"# Introduction to NumPy\n\nimport numpy as np\n\nvector = np.array([10,20,30])\nmatrix = np.array([[5,10,15], [20,25,30], [35, 40, 45]])\n\nvector_shape = np.shape(vector) # Results in (3,)\nmatrix_shape = np.shape(matrix) # Results in (3,3)\n\n# NOTE: the snippets below assume world_alcohol is already loaded; a typical\n# loader for the course CSV (file name assumed here) would be:\nworld_alcohol = np.genfromtxt(\"world_alcohol.csv\", delimiter=\",\", dtype=\"U75\", skip_header=1)\n\n# all the rows and the first 2 columns of world_alcohol \n# the first 10 rows and the first column of world_alcohol \n# the first 10 rows and all of the columns of world_alcohol \nfirst_two_columns = world_alcohol[:,0:2]\nfirst_ten_years = world_alcohol[0:10,0]\nfirst_ten_rows = world_alcohol[0:10,:]\n\n# first 20 rows of the columns at index 1 and 2 of world_alcohol\nfirst_twenty_regions = world_alcohol[0:20,1:3]\n\n# Compare the third column of world_alcohol to the string Algeria\n# Select only the rows in world_alcohol where country_is_algeria is True\ncountry_is_algeria = world_alcohol[:,2] == \"Algeria\"\ncountry_algeria = world_alcohol[country_is_algeria,:]\n\n# Perform comparison for the first column of world_alcohol to the string \"1986\", AND (&) \n# the third column of world_alcohol to the string \"Algeria\".\n# Enclose each condition in parentheses, and join the conditions with &.\n# Assign the rows that is_algeria_and_1986 selects to rows_with_algeria_and_1986.\n\nis_algeria_and_1986 = (world_alcohol[:,0] == \"1986\") & (world_alcohol[:,2] == \"Algeria\")\nrows_with_algeria_and_1986 = world_alcohol[is_algeria_and_1986,:]\n\n\n# Replace all instances of the string 1986 in the first column of world_alcohol with the string 2014.\nchange_year = world_alcohol[:,0] == \"1986\"\nworld_alcohol[change_year,0] = \"2014\"\n\n# Replace all instances of the string Wine in the fourth column of world_alcohol with the string Grog.\nchange_bt = world_alcohol[:,3] == \"Wine\"\nworld_alcohol[change_bt,3] = \"Grog\"\n\n# Extract the fifth column from world_alcohol and convert it to the float data type\nalcohol_consumption = world_alcohol[:,4]\nalcohol_consumption = alcohol_consumption.astype(float)\n\n# Get the sum, mean, and maximum value for the \"alcohol_consumption\" vector\ntotal_alcohol = alcohol_consumption.sum()\naverage_alcohol = alcohol_consumption.mean()\nmax_alcohol = alcohol_consumption.max()\n\n\n
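The record above leans on one NumPy idea throughout: an elementwise comparison yields a boolean mask, and the mask both selects rows and drives in-place replacement. A minimal self-contained sketch of that pattern (the toy rows below are invented, not taken from the course data):

```python
import numpy as np

# Toy stand-in for the world_alcohol string matrix (rows are made up).
data = np.array([["1986", "Americas", "Canada", "Wine", "1.5"],
                 ["1986", "Africa", "Algeria", "Beer", "0.5"],
                 ["1987", "Africa", "Algeria", "Wine", ""]])

# Comparisons broadcast over a column; masks combine with &.
mask = (data[:, 0] == "1986") & (data[:, 2] == "Algeria")
print(data[mask, :])  # only the 1986 Algeria row

# The same masks drive replacement, e.g. the '' -> '0' cleanup pattern.
col = data[:, 4].copy()
col[col == ""] = "0"
print(col.astype(float).sum())  # 2.0
```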
# Create a matrix called canada_1986 that only contains the rows in world_alcohol where the first \n# column is the string 1986 and the third column is the string Canada. Extract the fifth column of \n# canada_1986, replace any empty strings ('') with the string 0, and convert the column to the \n# float data type. Assign the result to canada_alcohol, then get its sum and assign that to total_canadian_drinking.\n\nis_canada_1986 = (world_alcohol[:,0] == \"1986\") & (world_alcohol[:,2] == \"Canada\")\ncanada_1986 = world_alcohol[is_canada_1986,:]\ncanada_alcohol = canada_1986[:,4]\nblanks = canada_alcohol == \"\"\ncanada_alcohol[blanks] = \"0\"\ncanada_alcohol = canada_alcohol.astype(float)\ntotal_canadian_drinking = canada_alcohol.sum()\n\n\n\n\n\n\n### Introduction to Pandas\n\n# import pandas and read the \"food_info.csv\" file into a variable\nimport pandas\n\nfood_info = pandas.read_csv(\"food_info.csv\")\n\n# Select the first 20 rows of food_info and print them\nfirst_twenty = food_info.head(20)\nprint(first_twenty)\n\n# Assign the 100th row of food_info to a Series object\nhundredth_row = food_info.loc[99]\n\n# Select the last 5 rows of food_info and assign to the variable last_rows\nnum_rows = len(food_info)\nlast_rows = food_info.loc[num_rows-5:]\n\n# Assign the \"FA_Sat_(g)\" column to the variable saturated_fat.\n# Assign the \"Cholestrl_(mg)\" column to the variable cholesterol\n# Assign the 'Selenium_(mcg)' and 'Thiamin_(mg)' columns to selenium_thiamin\nsaturated_fat = food_info[\"FA_Sat_(g)\"]\ncholesterol = food_info[\"Cholestrl_(mg)\"]\nselenium_thiamin = food_info[['Selenium_(mcg)', 'Thiamin_(mg)']]\n\n## Select and display only the columns that use grams for measurement (that end with \"(g)\")\n\n# - Use the columns attribute to return the column names in food_info and convert to a list by calling the method tolist()\n# - Create a new list, gram_columns, containing only the column names that end in \"(g)\". 
The string method endswith() \n# returns True if the string object calling the method ends with the string passed into the parentheses.\n# - Pass gram_columns into bracket notation to select just those columns and assign the resulting dataframe to gram_df\n# - Then use the dataframe method head() to display the first 3 rows of gram_df\n\nget_columns = food_info.columns\ncolumn_list = get_columns.tolist()\n\n\ngram_columns = []\nfor cl in column_list:\n if cl.endswith(\"(g)\") == True:\n gram_columns.append(cl)\n\ngram_df = food_info[gram_columns]\nprint(gram_df.head(3))\n\n\n### Data Manipulation with pandas\n\n# Divide the \"Sodium_(mg)\" column by 1000 to convert the values to grams, and assign the result to sodium_grams.\n# Multiply the \"Sugar_Tot_(g)\" column by 1000 to convert to milligrams, and assign the result to sugar_milligrams.\nsodium_grams = food_info[\"Sodium_(mg)\"] / 1000\nsugar_milligrams = food_info[\"Sugar_Tot_(g)\"] * 1000\n\n# Calculate grams of protein per gram of water (\"Protein_(g)\" column divided by \"Water_(g)\" column) \ngrams_of_protein_per_gram_of_water = food_info[\"Protein_(g)\"] / food_info[\"Water_(g)\"]\n# Calculate calcium and iron (\"Calcium_(mg)\" column plus \"Iron_(mg)\" column)\nmilligrams_of_calcium_and_iron = food_info[\"Calcium_(mg)\"] + food_info[\"Iron_(mg)\"]\n\n# Calculate the forumla: Score = 2 x (Protein_(g)) - 0.75 x (Lipid_Tot_(g))\nweighted_protein = food_info[\"Protein_(g)\"]*2\nweighted_fat = food_info[\"Lipid_Tot_(g)\"]*-0.75\ninitial_rating = weighted_fat + weighted_protein \n\n# Normalize the \"Protein_(g)\" and \"Lipid_Tot_(g)\" columns\nmax_protein = food_info[\"Protein_(g)\"].max()\nmax_fat = food_info[\"Lipid_Tot_(g)\"].max()\n\nnormalized_protein = food_info[\"Protein_(g)\"] / max_protein\nnormalized_fat = food_info[\"Lipid_Tot_(g)\"] / max_fat\n\n# Actual normalized values\nnormalized_protein = (food_info[\"Protein_(g)\"] - food_info[\"Protein_(g)\"].min()) / (food_info[\"Protein_(g)\"].max() - food_info[\"Protein_(g)\"].min())\nnormalized_fat = (food_info[\"Lipid_Tot_(g)\"] - food_info[\"Lipid_Tot_(g)\"].min()) / (food_info[\"Lipid_Tot_(g)\"].max() - food_info[\"Lipid_Tot_(g)\"].min())\n\n# Add a new column to food_info for the normalized protein and fat arrays\nfood_info[\"Normalized_Protein\"] = normalized_protein\nfood_info[\"Normalized_Fat\"] = normalized_fat\n\n# Create a new column for Normalized Nutrition Index\nfood_info[\"Norm_Nutr_Index\"] = (2 * food_info[\"Normalized_Protein\"]) - (0.75*food_info[\"Normalized_Fat\"])\n\n# Sort food_info by \"Norm_Nutr_Index\" in descending order, in place\nfood_info.sort_values(\"Norm_Nutr_Index\", inplace = True, ascending = False)\n\n\n### Working with Missing Data\n\nimport pandas as pd\ntitanic_survival = pandas.read_csv(\"titanic_survival.csv\")\n\n# Create a Series of just the \"age\" column. Use it to create another series of True/False values determined by \n# whether or not each row is null. Create another series of rows containing only True values. 
Get a count of True\n# values by determining the length of that final series.\n\nage = titanic_survival[\"age\"]\nprint(age.loc[10:20])\n\nage_is_null = pandas.isnull(age)\nage_null_true = age[age_is_null]\nage_null_count = len(age_null_true)\nprint(age_null_count)\n\n# Create a vector that only contains values from the \"age\" column that aren't NaN, then calculate the mean of that vector\n\nage_is_null = pd.isnull(titanic_survival[\"age\"])\nhas_age = age_is_null == False\ntitanic_w_age = titanic_survival[\"age\"][has_age]\ncorrect_mean_age = sum(titanic_w_age) / len(titanic_w_age)\n\n# Get the average of \"fare\" with Series.mean()\ncorrect_mean_fare = titanic_survival[\"fare\"].mean()\n\n\n# Get the mean of all passengers in each class and store them in a dictionary\npassenger_classes = [1, 2, 3]\nfares_by_class = {}\nfor pc in passenger_classes:\n current_class = titanic_survival[titanic_survival[\"pclass\"] == pc]\n fares_by_class[pc] = current_class[\"fare\"].mean()\n \n\n# Get the mean age of all passengers, by class, using a pivot table\npassenger_age = titanic_survival.pivot_table(index=\"pclass\", values=\"age\")\n\n# Get a sum of the \"fare\" and \"survived\" values, grouped by embarkation port\nport_stats = titanic_survival.pivot_table(index=\"embarked\", values=[\"fare\", \"survived\"], aggfunc=np.sum)\n\n# Drop all columns in titanic_survival that have missing values \ndrop_na_columns = titanic_survival.dropna(axis = 1)\n\n# Drop all rows in titanic_survival where the columns \"age\" or \"sex\" have missing values\nnew_titanic_survival = titanic_survival.dropna(axis = 0, subset = [\"age\",\"sex\"])\n\n# Get the first ten rows from new_titanic_survival \nfirst_ten_rows = new_titanic_survival.iloc[0:10]\n# Get the fifth row from new_titanic_survival\nrow_position_fifth = new_titanic_survival.iloc[4]\n# Get the row with index label 25 from new_titanic_survival\nrow_index_25 = new_titanic_survival.loc[25]\n\n# Get the value at row index label 1100, column index label \"age\" from new_titanic_survival\nrow_index_1100_age = new_titanic_survival.loc[1100, \"age\"]\n# Get the value at row index label 25, column index label \"survived\"\nrow_index_25_survived = new_titanic_survival.loc[25, \"survived\"]\n# Get the first 5 rows and first three columns from new_titanic_survival\nfive_rows_three_cols = new_titanic_survival.iloc[0:5, 0:3]\n\n# Reindex the new_titanic_survival dataframe so the row indexes start from 0, and the old index is dropped\ntitanic_reindexed = new_titanic_survival.reset_index(drop = True)\n\n# Write a function that counts the number of null elements in a Series\n# Use the DataFrame.apply() method along with your function to run across all the columns in titanic_survival\ndef null_count(n):\n column_is_null = pd.isnull(n)\n column_null_true = n[column_is_null]\n return len(column_null_true)\n\ncolumn_null_count = titanic_survival.apply(null_count)\n\n\n# Create a function that returns the string \"minor\" if someone is under 18, \"adult\" if they are \n# equal to or over 18, and \"unknown\" if their age is null. 
Then, use the function along with\n# .apply() to find the correct label for everyone in titanic_survival\n\ndef is_minor(row):\n    if pd.isnull(row[\"age\"]):\n        return \"unknown\" \n    elif row[\"age\"] < 18:\n        return \"minor\"\n    else:\n        return \"adult\"\n    \nage_labels = titanic_survival.apply(is_minor, axis=1)\n# store the labels as a column so the pivot table below can group on it\ntitanic_survival[\"age_labels\"] = age_labels\n\n# Create a pivot table that calculates the mean survival chance(\"survived\") for each age \n# group (\"age_labels\") of the dataframe titanic_survival\nage_group_survival = titanic_survival.pivot_table(index=\"age_labels\", values=\"survived\", aggfunc=np.mean)\n\n\n### Challenge: Summarizing Data\n\nimport pandas as pd\nimport numpy as np\n\nall_ages = pd.read_csv(\"all-ages.csv\")\nrecent_grads = pd.read_csv(\"recent-grads.csv\")\n\n# Use the Total column to calculate the number of people who fall under each Major_category in each data set\n# Store the result as a separate dictionary for each data set. The key for the dictionary should be the Major_category, \n# and the value should be the total count. For the counts from all_ages, store the results as a dictionary named \n# aa_cat_counts. For the counts from recent_grads, store the results as a dictionary named rg_cat_counts.\naa_cat_counts = {}\nrg_cat_counts = {}\n\n# select the \"Total\" column so the dict maps Major_category -> count\n# (dict() on the full pivot_table result would map column names instead)\naa_cat_counts = dict(all_ages.pivot_table(index=\"Major_category\", values=\"Total\", aggfunc=np.sum)[\"Total\"])\n\nrg_cat_counts = dict(recent_grads.pivot_table(index=\"Major_category\", values=\"Total\", aggfunc=np.sum)[\"Total\"])\n\n# Use the Low_wage_jobs and Total columns to calculate the proportion of recent college graduates that worked low wage jobs\nlow_wage_proportion = float(recent_grads[\"Low_wage_jobs\"].sum())/float(recent_grads[\"Total\"].sum())\n\n
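As a cross-check on the pivot_table aggregation just above, groupby produces the same category-to-total mapping; a small sketch with invented data:

```python
import pandas as pd

# Invented stand-in for the majors tables.
df = pd.DataFrame({"Major_category": ["Arts", "Arts", "Science"],
                   "Total": [100, 50, 200]})

# Equivalent to dict(df.pivot_table(index="Major_category",
#                                   values="Total", aggfunc=sum)["Total"]).
counts = df.groupby("Major_category")["Total"].sum().to_dict()
print(counts)  # {'Arts': 150, 'Science': 200}
```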
# Use a for loop to iterate over majors. For each major, use Boolean filtering to find the corresponding row in both \n# DataFrames. Compare the values for Unemployment_rate to see which DataFrame has a lower value. Increment rg_lower_count \n# if the value for Unemployment_rate is lower for recent_grads than it is for all_ages.\nmajors = recent_grads['Major'].unique()\nrg_lower_count = 0\n\nfor m in majors:\n    ur_all_ages = float(all_ages[all_ages[\"Major\"] == m][\"Unemployment_rate\"])\n    ur_recent_grads = float(recent_grads[recent_grads[\"Major\"] == m][\"Unemployment_rate\"])\n    if ur_all_ages > ur_recent_grads:\n        rg_lower_count += 1\n    \nprint(rg_lower_count)\n\n\n\n\n### Pandas Internals: Series\n\nimport pandas as pd\n\nfandango = pd.read_csv(\"fandango_score_comparison.csv\")\nseries_film = fandango['FILM']\nseries_rt = fandango['RottenTomatoes']\n\n# Create a new Series object named series_custom that has a string index (based on the \n# values from film_names), and contains all of the Rotten Tomatoes scores from series_rt.\nfilm_names = series_film.values\nrt_scores = series_rt.values\n\nseries_custom = pd.Series(data=rt_scores, index=film_names)\n\n# Assign the values in series_custom at indexes 5 through 10 to the variable fiveten\nfiveten = series_custom[5:11]\n\n# Sort the original index via sorted(), then reindex series_custom with it\noriginal_index = series_custom.index.tolist()\nsorted_by_index = series_custom.reindex(sorted(original_index))\n\n# Sort series_custom by index, then by values\nsc2 = series_custom.sort_index()\nsc3 = series_custom.sort_values()\n\n# Normalize series_custom by dividing its values by 20\nseries_normalized = series_custom/20\n\n# Return a filtered Series object that only contains values between 50 and 75\ncriteria_one = series_custom > 50\ncriteria_two = series_custom < 75\n\nboth_criteria = series_custom[criteria_one & criteria_two]\n\n\n# Get the mean of RottenTomatoes critic and user scores for each movie, which are in separate Series objects\nrt_critics = pd.Series(fandango['RottenTomatoes'].values, index=fandango['FILM'])\nrt_users = pd.Series(fandango['RottenTomatoes_User'].values, index=fandango['FILM'])\n\nrt_mean = (rt_critics+rt_users)/2\n\n\n### Pandas Internals: Dataframes\n\nimport pandas as pd\n\n# Read CSV file into a variable, then print the first 2 rows, as well as the index of the dataframe\nfandango = pd.read_csv(\"fandango_score_comparison.csv\")\nprint(fandango.head(2))\nprint(fandango.index)\n\n\n# Return a dataframe containing just the first and last rows of fandango\nfirst_last = fandango.iloc[[0, len(fandango)-1]]\n\n# Assign the FILM column as the custom index for the dataframe. Also, specify that we don't want to drop \n# the FILM column from the dataframe. We want to keep the original dataframe, so assign the new one to fandango_films\nfandango_films = fandango.set_index(\"FILM\", drop=False)\n\n# Assign the FILM column as the custom index for the dataframe, but do it in place (as opposed to returning a new\n# dataframe). Let set_index drop the column\nfandango.set_index(\"FILM\", inplace=True)\n\n# Select \"The Lazarus Effect (2015)\", \"Gett: The Trial of Viviane Amsalem (2015)\", and \"Mr. Holmes (2015)\" from fandango_films\nbest_movies_ever = fandango_films.loc[[\"The Lazarus Effect (2015)\", \"Gett: The Trial of Viviane Amsalem (2015)\", \"Mr. Holmes (2015)\"]]\n\n\nimport numpy as np\n\n# returns the data types as a Series\ntypes = fandango_films.dtypes\n# filter data types to just floats, index attributes returns just column names\nfloat_columns = types[types.values == 'float64'].index\n# use bracket notation to filter columns to just float columns\nfloat_df = fandango_films[float_columns]\n# \`x\` is a Series object representing a column. This returns the max for each column\nmax_df = float_df.apply(lambda x: np.max(x))\n\n# Use the apply() method on float_df to divide each value by 2\nhalved_df = float_df.apply(lambda x: x/2)\n\n# Use the apply() method to calculate the average of each movie's values for RT_user_norm and Metacritic_user_nom \nrt_mt_user = float_df[['RT_user_norm', 'Metacritic_user_nom']]\nrt_mt_means = rt_mt_user.apply(lambda x: np.mean(x), axis=1)\nprint(rt_mt_means[0:5])\n\n","sub_path":"Step 2/Course 1/2.1.py","file_name":"2.1.py","file_ext":"py","file_size_in_byte":15647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
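One detail the Series section above relies on without spelling it out: arithmetic between two Series aligns on index labels, not positions, which is why (rt_critics+rt_users)/2 pairs scores movie by movie. A tiny sketch with invented titles:

```python
import pandas as pd

critics = pd.Series([80, 60], index=["Movie A", "Movie B"])
users = pd.Series([70, 90], index=["Movie B", "Movie A"])  # reversed order

# Alignment matches labels, so Movie A pairs 80 with 90, not with 70.
print((critics + users) / 2)  # Movie A -> 85.0, Movie B -> 65.0
```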
+{"seq_id":"412989326","text":"\"\"\" Given a binary tree, design an algorithm which creates a linked list\nof all nodes at each level (e.g. if you have a tree with depth D, you'll\nhave D linked lists). \"\"\"\n\n\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def insert(self, data):\n        if self.head is None:\n            self.head = Node(data)\n        else:\n            new_node = Node(data)\n            new_node.next = self.head\n            self.head = new_node\n\n    def __str__(self):\n        els = []\n        el = self.head\n        while el is not None:\n            els.append(el.data)\n            el = el.next\n        return \"->\".join(map(str, els))\n\n\nclass NodeTree:\n    def __init__(self, data):\n        self.data = data\n        self.left = None\n        self.right = None\n\n\nclass BinaryTree:\n    def __init__(self, data):\n        self.root = NodeTree(data)\n\n    def insert(self, data):\n        cur_node = self.root\n        while cur_node is not None:\n            if data > cur_node.data:\n                if cur_node.right is None:\n                    cur_node.right = NodeTree(data)\n                    break\n                else:\n                    cur_node = cur_node.right\n            else:\n                if cur_node.left is None:\n                    cur_node.left = NodeTree(data)\n                    break\n                else:\n                    cur_node = cur_node.left\n\n\ndef output_tree(tree, level=\" \"):\n    l_el = \"-\" if tree.left is None else tree.left.data\n    r_el = \"-\" if tree.right is None else tree.right.data\n    print(f\"{level} {tree.data} ({l_el} {r_el})\")\n    if tree.left is not None:\n        output_tree(tree.left, level=level*2)\n    if tree.right is not None:\n        output_tree(tree.right, level=level*2)\n\n\ndef traverse_tree(node, level, lls):\n    if node is None:\n        return\n    lls[level].insert(node.data)\n    traverse_tree(node.left, level+1, lls)\n    traverse_tree(node.right, level+1, lls)\n\n\ndef gather_linked_lists_by_level(tree):\n    # NOTE: assumes a depth of 3, which matches the demo tree built in __main__\n    lls = [LinkedList() for _ in range(3)]\n    traverse_tree(tree.root, level=0, lls=lls)\n    return lls\n\n\nif __name__ == \"__main__\":\n    tree = BinaryTree(5)\n    tree.insert(6)\n    tree.insert(4)\n    tree.insert(7)\n    tree.insert(5.5)\n    tree.insert(4.5)\n    tree.insert(1)\n    output_tree(tree.root)\n\n    lls = gather_linked_lists_by_level(tree)\n    print(\"LINKED LISTS: \")\n    for i, ll in enumerate(lls):\n        print(f\"Level {i}: {ll} \")\n","sub_path":"problems/27_list_of_depths.py","file_name":"27_list_of_depths.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
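The depth-of-3 assumption in gather_linked_lists_by_level above can be avoided with a breadth-first walk that grows one list per level as it goes. A hedged alternative sketch (plain lists instead of the LinkedList class, for brevity):

```python
from collections import deque

def values_by_level(root):
    """Collect node values per level from any object with .data/.left/.right."""
    levels, queue = [], deque([(root, 0)])
    while queue:
        node, depth = queue.popleft()
        if node is None:
            continue
        if depth == len(levels):  # first visit to this level
            levels.append([])
        levels[depth].append(node.data)
        queue.append((node.left, depth + 1))
        queue.append((node.right, depth + 1))
    return levels
```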
+{"seq_id":"461348470","text":"import pandas as pd\nfrom pyecharts.charts import Boxplot\n\n# Load the Excel file\ndf = pd.read_excel('Tips.xlsx')\ny_data=[list(df['总消费'])]\n\nboxplot=Boxplot() # create the box plot\n# Add the data to the box plot\nboxplot.add_xaxis([\"\"])\nboxplot.add_yaxis('',y_axis=boxplot.prepare_data(y_data))\n# Render the chart to an HTML file saved in the program's directory\nboxplot.render(\"myboxplot.html\")\n\n\n","sub_path":"Python数据分析从入门到精通/MR/Code/07/13/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162403328","text":"\nimport random\nfrom model.group import Group\n\n\n\ndef test_delete_some_group(app, db, json_groups, check_ui):\n    group = json_groups\n    if len(db.get_group_list()) == 0:\n        app.group.create(group)\n    old_groups = db.get_group_list()\n    group = random.choice(old_groups)\n    app.group.delete_group_by_id(group.id)\n    assert len(old_groups) - 1 == app.group.count()\n    new_groups = db.get_group_list()\n    old_groups.remove(group)\n    assert old_groups == new_groups\n    if check_ui:\n        assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)","sub_path":"test/test_del_group.py","file_name":"test_del_group.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"635899752","text":"\nfrom database import init_db, db_session\nfrom models import User, Post\nimport datetime\n\ninit_db()\n\n\n# User model example\n#db_session.add(User('admin3', 'admin3@localhost'))\n#result = User.query.filter_by(name='admin3').all()\n#db_session.commit()\n#print (result)\n\n\n\ndt = datetime.date.today()\n# Post model example\np = Post('test222', 'test1111222', dt)\ndb_session.add(p)\nres = db_session.query(Post).all()\ndb_session.commit()\nprint(Post.query.all())\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"493748801","text":"#! 
python\n\n\nrun_requests_api = False\nrun_swagger_client_api = False\nrun_graphics = True\n\n#region Test Requests Web API\nif run_requests_api:\n import requests\n # NOTE, 9000 is battleShips port\n games = requests.get('http://10.44.37.98:9000/games/')\n print(games.json())\n\n\n # gameids = requests.get('http://10.44.37.98:9000/games/140401855626936/')\n # print(gameids.json())\n#endregion\n\n\n#region Test Swagger_Client Web API\nif run_swagger_client_api:\n # default get start code from nsw yaml\n # from __future__ import print_function\n import sys # for using print('function: {}'.format(sys._getframe().f_code.co_name))\n import time\n import swagger_client\n from swagger_client.rest import ApiException\n from swagger_client import configuration\n from pprint import pprint\n\n # create an instance of the API class\n # api_instance = swagger_client.DefaultApi(swagger_client.ApiClient(configuration))\n api_instance = swagger_client.DefaultApi(swagger_client.ApiClient())\n game_id = 'game_id_example' # str | The ID of the game to return\n\n try:\n # Delete the given game\n api_instance.games_game_id_delete(game_id)\n except ApiException as e:\n print(\"Exception when calling DefaultApi->games_game_id_delete: %s\\n\" % e)\n#endregion\n\n\n#region Graphic Drawing\nif run_graphics:\n from generaltools import *\n from MyDialog import MyDialog_JoinGameDlg, MyDialog_StartGameDlg\n\n # game board\n number_of_cells = 11\n w = 20\n font_szie = 12\n\n # default properties\n grid_texts_default = [\n ['none','1', '2', '3', '4', '5', '6', '7', '8', '9', '10'],\n ['A', 'Hit', 'Hit', 'Hit', 'Hit', 'Hit', 'Hit', 'Hit', 'Hit', 'Hit', 'Hit'],\n ['B', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['C', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['D', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['E', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['F', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['G', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['H', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['I', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'],\n ['J', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?']]\n grid_colours_default = [\n ['grey','grey', 'grey', 'grey', 'grey', 'grey', 'grey', 'grey', 'grey', 'grey', 'grey'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white'],\n ['grey','white','white','white','white','white','white','white','white','white','white']]\n\n\n root = tkinter.Tk()\n root.wm_title(\"Embedding in Tk\")\n\n 
global_total_rows_in_plot = 1\n\n fig = Figure(figsize=(6, 3), dpi=100) # set figure size\n\n plotCanvas = FigureCanvasTkAgg(fig, master=root) # a tk.DrawingArea.\n plotCanvas.draw()\n plotCanvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\n\n color = \"grey\" # \"#ffffff\"\n toolbar = NavigationToolbar2Tk(plotCanvas, root)\n toolbar.config(background=color)\n toolbar._message_label.config(background=color)\n toolbar.update() # toolbar.pack(side=BOTTOM)\n plotCanvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\n\n def on_key_press(event):\n print(\"you pressed {}\".format(event.key))\n key_press_handler(event, plotCanvas, toolbar)\n\n\n plotCanvas.mpl_connect(\"key_press_event\", on_key_press)\n\n\n def _quit():\n root.quit() # stops mainloop\n root.destroy() # this is necessary on Windows to prevent\n # Fatal Python Error: PyEval_RestoreThread: NULL tstate\n\n\n button = tkinter.Button(master=root, text=\"Quit\", command=_quit)\n button.pack(side=tkinter.BOTTOM)\n\n def update_plot(number_of_cells, font_szie, w, total_rows_in_plot_param, fig_param, plot_canvas_param,\n grid_texts_default_param, grid_colours_default_param, game):\n player1_is_winner = False\n player2_is_winner = False\n if game.winner == game.player1.name:\n player1_is_winner = True\n player2_is_winner = False\n elif game.winner == game.player2.name:\n player1_is_winner = False\n player2_is_winner = True\n # player1_knowledge_param, player2_knowledge_param):\n grid_texts_1, grid_colours_1 = update_grid_text_and_color(\n grid_texts_default_param, grid_colours_default_param, game.player1.knowledge)\n grid_texts_2, grid_colours_2 = update_grid_text_and_color(\n grid_texts_default_param, grid_colours_default_param, game.player2.knowledge)\n image_1 = draw_game_board_on_image(number_of_cells, w, font_szie, grid_texts_1, grid_colours_1,\n player1_is_winner, draw_debug=False)\n image_title_1 = game.player1.name # \"player1\"\n image_2 = draw_game_board_on_image(number_of_cells, w, font_szie, grid_texts_2, grid_colours_2,\n player2_is_winner, draw_debug=False)\n image_title_2 = game.player2.name # \"player2\"\n pairs = dict()\n pairs[image_title_1] = image_1\n pairs[image_title_2] = image_2\n display_all_images_in_plot(total_rows_in_plot_param, pairs, fig_param, plot_canvas_param)\n\n\n # def get_random_move_without_player_knowledge(int_list, chr_val_list):\n # random.shuffle(int_list)\n # # NOTE, always use first number in the shuffled int_list as index to grid number, i.e. 0\n # # and use the last number in the shuffled int_list as the index to chr_val_list, i.e. len(int_list)-1\n # # NOTE, numbers in int_list are one-based because top row is used for labelling,\n # # when used for indexing chr_val_list, it has to be converted to zero-based\n # grid_ref = chr(chr_val_list[int_list[len(int_list) - 1] - 1]) + str(int_list[0])\n # return grid_ref\n\n\n def get_random_move_with_player_knowledge(int_list, chr_val_list, game):\n knowledge = list()\n if game.move == game.player1.name:\n knowledge = game.player1.knowledge\n elif game.move == game.player2.name:\n knowledge = game.player2.knowledge\n print('knowledge={}'.format(knowledge))\n candidate_hit_list = list()\n # NOTE, based on game.move, determine from which player's knoowledge to generate candidate hit list\n # and, based on that player's knowledge, if a cell already hit, ie. 'X', or Miss, i.e. 
'.'\n # don't add in the candidate hit list, only add if cell = '.'\n # The, randomly pick a hit from the candidate hit list\n for i in range(len(knowledge)):\n for j, char in enumerate(knowledge[i]):\n if char != '?':\n continue\n\n letter = chr(chr_val_list[i])\n int_str = str(int_list[j])\n grid_ref = letter + int_str\n candidate_hit_list.append(grid_ref)\n print('candidate_hit_list={}'.format(candidate_hit_list))\n # NOTE, candidate_hit_list is already a list of grid_ref strings,\n # doesn't really matter which one to sue, so, always use the first one\n\n grid_ref = random.choice(candidate_hit_list)\n print('randomly generated grid_ref={}'.format(grid_ref))\n return grid_ref\n\n\n def get_stratigic_move_with_player_knowledge(int_list, chr_val_list, game):\n knowledge = list()\n if game.move == game.player1.name:\n knowledge = game.player1.knowledge\n elif game.move == game.player2.name:\n knowledge = game.player2.knowledge\n print('knowledge={}'.format(knowledge))\n candidate_hit_list = list()\n candicate_hit_list_adjacent_to_hit_cell = list()\n total_rows = len(knowledge)\n total_cols = len(knowledge[0])\n for i in range(len(knowledge)):\n for j, char in enumerate(knowledge[i]):\n if char == '.':\n continue\n elif char == 'X':\n # if encounter a hit cell, check the adjacent cells, if a \"?\", most likely part of a ship\n # first check left and right\n if (j-1) >= 0 and knowledge[i][j-1] == '?':\n letter = chr(chr_val_list[i])\n int_str = str(int_list[j-1])\n grid_ref = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref)\n elif (j+1) < total_cols and knowledge[i][j+1] == '?':\n letter = chr(chr_val_list[i])\n int_str = str(int_list[j+1])\n grid_ref = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref)\n # then check upper and lower, note,\n # do not check upper left and upper right,\n # nor, lower left and lower right\n # ship doesn't go diagonal\n if (i-1) >= 0 and knowledge[i-1][j] == '?':\n letter = chr(chr_val_list[i-1])\n int_str = str(int_list[j])\n grid_ref = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref)\n elif (i+1) < total_rows and knowledge[i+1][j] == '?':\n letter = chr(chr_val_list[i+1])\n int_str = str(int_list[j])\n grid_ref = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref)\n else:\n letter = chr(chr_val_list[i])\n int_str = str(int_list[j])\n grid_ref = letter + int_str\n candidate_hit_list.append(grid_ref)\n print('candidate_hit_list={}\\ncandicate_hit_list_adjacent_to_hit_cell={}'.format(\n candidate_hit_list, candicate_hit_list_adjacent_to_hit_cell))\n if (len(candicate_hit_list_adjacent_to_hit_cell) != 0):\n grid_ref = candicate_hit_list_adjacent_to_hit_cell[0]\n print('grid_ref={}'.format(grid_ref))\n else:\n grid_ref = random.choice(candidate_hit_list)\n print('no candidate adjacent to the hit cell, randomly generated grid_ref={}'.format(grid_ref))\n return grid_ref\n\n\n def get_initial_fleet_state(game):\n import copy\n\n fleet_state = list()\n for ship in game.fleet:\n ship_state = dict()\n ship_state['name'] = ship.name\n ship_state['size'] = ship.size\n ship_state['occupied_grid_refs'] = []\n print('ship_state = {}'.format(ship_state))\n fleet_state.append(ship_state)\n\n print('fleet_state = {}'.format(fleet_state))\n return fleet_state\n\n\n def get_stratigic_move_with_player_knowledge_enforce_non_adjacent_ships(int_list, chr_val_list, game,\n player1_fleet_state,\n player2_fleet_state):\n import copy\n knowledge = list()\n fleet_state = list()\n if 
game.move == game.player1.name:\n knowledge = game.player1.knowledge\n fleet_state = player1_fleet_state\n elif game.move == game.player2.name:\n knowledge = game.player2.knowledge\n fleet_state = player2_fleet_state\n print('knowledge={}'.format(knowledge))\n candidate_hit_list = list()\n candicate_hit_list_adjacent_to_hit_cell = list()\n total_rows = len(knowledge)\n total_cols = len(knowledge[0])\n for i in range(len(knowledge)):\n for j, char in enumerate(knowledge[i]):\n if char == '.':\n continue\n elif char == 'X':\n # if encounter a hit cell, check the adjacent cells, if a \"?\", most likely part of a ship\n # first determine if to make a move, ie. check left and right, then up and down\n # if the hit cell in a ship that already marked as destroyed, ie. in consecutive 'X's and with\n # '.'s on either sides, close_end = True\n add_left = False\n add_right = False\n add_upper = False\n add_lower = False\n horizontal_len = 1 # NOTE, starting from X so len already 1\n vertical_len = 1 # NOTE, starting from X so len already 1\n\n left_col = j\n right_col = j\n left_end = ''\n right_end = ''\n # while (0 <= left_col) and (knowledge[i][left_col] == 'X'):\n # left_col -= 1\n # horizontal_len += 1\n # left_col += 1\n for left_col in range(j-1, -1, -1):\n if knowledge[i][left_col] == 'X':\n horizontal_len += 1\n else:\n break\n # NOTE, left_end can be 'X' if the hit cell reached the edge\n left_end = knowledge[i][left_col]\n for right_col in range(j+1, total_cols):\n if knowledge[i][right_col] == 'X':\n horizontal_len += 1\n else:\n break\n right_end = knowledge[i][right_col]\n # NOTE, only need to consider vertical if horizontal_len = 1\n upper_row = i\n lower_row = i\n upper_end = ''\n lower_end = ''\n for upper_row in range(i-1, -1, -1):\n if knowledge[upper_row][j] == 'X':\n vertical_len += 1\n else:\n break\n upper_end = knowledge[upper_row][j]\n for lower_row in range(i+1, total_rows):\n if knowledge[lower_row][j] == 'X':\n vertical_len += 1\n else:\n break\n lower_end = knowledge[lower_row][j]\n\n # NOTE, in non-adjacent layout, if horizontal_len > vertical_len vertical_len = 1\n if horizontal_len > vertical_len:\n start_col = left_col\n if left_end != 'X':\n start_col = left_col + 1\n end_col = right_col\n if right_end != 'X':\n end_col = right_col - 1\n size = end_col - start_col + 1\n letter = chr(chr_val_list[i])\n int_str = str(int_list[start_col])\n start_grid_ref = letter + int_str\n int_str = str(int_list[end_col])\n end_grid_ref = letter + int_str\n fleet_state_copy = copy.deepcopy(fleet_state)\n for item in fleet_state:\n if item['size'] == size:\n update_fleet_state = False\n # need to consider left_end == '' and right_end == '', as 'X' cell can be at grid edge\n if (left_end != '?' and right_end != '?' 
and\n len(item['occupied_grid_refs']) == 0):\n update_fleet_state = True\n # check if all ships with a size greater than the current size been\n # marksed as destroyed, if so, no need to make a move just mark the ship\n # with the current size as destroyed in fleet_state, because even\n # if one end has a '?', it's more likely be a miss cell\n else:\n all_longer_ships_destroyed = True\n for item_copy in fleet_state_copy:\n if item_copy['size'] > size and len(item_copy['occupied_grid_refs']) == 0:\n all_longer_ships_destroyed = False\n break\n if all_longer_ships_destroyed: # current continuous 'X' cells must be a ship\n update_fleet_state = True\n else:\n if left_end == '?':\n add_left = True\n if right_end == '?':\n add_right = True\n if update_fleet_state:\n item['occupied_grid_refs'] = [start_grid_ref, end_grid_ref]\n break\n # do not break here, as there are two size 2. and two size 1 ships\n\n # NOTE, in non-adjacent layout, if vertical_len > horizontal_len horizontal_len = 1\n elif vertical_len > horizontal_len:\n start_row = upper_row\n if upper_end != 'X':\n start_row = upper_row + 1\n end_row = lower_row\n if lower_end != 'X':\n end_row = lower_row - 1\n size = end_row - start_row + 1\n letter = chr(chr_val_list[start_row])\n int_str = str(int_list[j])\n start_grid_ref = letter + int_str\n letter = chr(chr_val_list[end_row])\n end_grid_ref = letter + int_str\n fleet_state_copy = copy.deepcopy(fleet_state)\n for item in fleet_state:\n if item['size'] == size:\n update_fleet_state = False\n # need to consider upper_end == '' and lower_end == '', as 'X' cell can be at grid edge\n if (upper_end != '?' and lower_end != '?' and\n len(item['occupied_grid_refs']) == 0):\n update_fleet_state = True\n # do not break here, as there are two size 2. and two size 1 ships\n # check if all ships with a size greater than the current size been\n # marksed as destroyed, if so, no need to make a move just mark the ship\n # with the current size as destroyed in fleet_state, because even\n # if one end has a '?', it's more likely be a miss cell\n else:\n all_longer_ships_destroyed = True\n for item_copy in fleet_state_copy:\n if item_copy['size'] > size and len(item_copy['occupied_grid_refs']) == 0:\n all_longer_ships_destroyed = False\n break\n if all_longer_ships_destroyed:\n update_fleet_state = True\n else:\n if upper_end == '?':\n add_upper = True\n if lower_end == '?':\n add_lower = True\n if update_fleet_state:\n item['occupied_grid_refs'] = [start_grid_ref, end_grid_ref]\n break\n # this is teh case horizontal_len == vertical_len = 1,\n # in which case, all four adjacent cells added as candidates if a '?'\n else:\n if (j - 1) >= 0 and knowledge[i][j - 1] == '?':\n add_left = True\n elif (j + 1) < total_cols and knowledge[i][j + 1] == '?':\n add_right = True\n if (i - 1) >= 0 and knowledge[i - 1][j] == '?':\n add_upper = True\n elif (i + 1) < total_rows and knowledge[i + 1][j] == '?':\n add_lower = True\n\n if add_left:\n letter = chr(chr_val_list[i])\n int_str = str(int_list[left_col])\n grid_ref_temp = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref_temp)\n if add_right:\n letter = chr(chr_val_list[i])\n int_index = right_col\n int_str = str(int_list[right_col])\n grid_ref_temp = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref_temp)\n if add_upper:\n letter = chr(chr_val_list[upper_row])\n int_str = str(int_list[j])\n grid_ref_temp = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref_temp)\n if add_lower:\n letter = 
chr(chr_val_list[lower_row])\n int_str = str(int_list[j])\n grid_ref_temp = letter + int_str\n candicate_hit_list_adjacent_to_hit_cell.append(grid_ref_temp)\n else: # in this case, char == '?'\n letter = chr(chr_val_list[i])\n int_str = str(int_list[j])\n grid_ref_temp = letter + int_str\n candidate_hit_list.append(grid_ref_temp)\n\n print('candidate_hit_list={}\\ncandicate_hit_list_adjacent_to_hit_cell={}'.format(\n candidate_hit_list, candicate_hit_list_adjacent_to_hit_cell))\n\n if (len(candicate_hit_list_adjacent_to_hit_cell) != 0):\n grid_ref = candicate_hit_list_adjacent_to_hit_cell[0]\n print('grid_ref={}'.format(grid_ref))\n else:\n grid_ref = random.choice(candidate_hit_list)\n print('no candidate adjacent to the hit cell, randomly generated grid_ref={}'.format(grid_ref))\n\n return grid_ref, player1_fleet_state, player2_fleet_state\n\n\n display_players = True\n display_graphic = True\n # start_new_game = False\n # play_with_another_player = False\n\n make_random_move = False\n enforce_non_adjacent_ships = True\n\n # root.withdraw() # hide the little root window\n\n def main():\n if display_players:\n # getting board information from the server\n # from __future__ import print_function\n import sys # for using print('function: {}'.format(sys._getframe().f_code.co_name))\n import time\n import swagger_client\n from swagger_client.rest import ApiException\n from swagger_client import configuration\n from pprint import pprint\n import swagger_client\n from swagger_client.models import GetGame, Move, Player, StartGame, GetPlayer\n import tkinter.messagebox as messageBox\n import copy\n\n # global start_new_game\n # global play_with_another_player\n start_new_game = False\n play_with_another_player = False\n # play_as_spacific_player = False\n\n default_server = '10.44.37.98:9000'\n game_id = ''\n player = ''\n server = ''\n player1 = ''\n player2 = ''\n\n api_instance_1 = swagger_client.DefaultApi(swagger_client.ApiClient())\n games = api_instance_1.games_get() # 'http://10.44.37.98:9000/games/')\n print('games=\\n{}'.format(games))\n\n if messageBox.askokcancel(\"BattleShips\", \"Start a new game?\", icon=\"question\"):\n # NOTE, the order of the params provide has to be in the same order as defined in\n # MyDialog_StartGameDlg.__init__\n start_new_game = True\n game_def = None\n d = MyDialog_StartGameDlg(\"Start a new game\", default_server, player1, player2, root)\n game_def = d.result\n if not game_def:\n messageBox.showerror('error', 'two player names are needed to start a new game', icon=\"error\")\n else:\n while game_def['start_another_game']:\n d = MyDialog_StartGameDlg(\"Start a new game\", default_server, player1, player2, root)\n game_def = d.result\n server = game_def['server']\n player1 = game_def['player1']\n player2 = game_def['player2']\n print('starting a new game: server={}, player1={}, player2={}\\n'.format(server, player1, player2))\n # NOTE, api_instance_1.games_post returns a game_id object, then id method returns the string\n game_id = api_instance_1.games_post(body=StartGame(player1, player2)).id\n print('new game started, game_id={}'.format(game_id))\n\n if messageBox.askokcancel(\"BattleShips\", \"Play with another player?\", icon=\"question\"):\n play_with_another_player = True\n\n # NOTE, the order of the params provide has to be in the same order as defined in\n # MyDialog_JoinGameDlg.__init__\n if ((start_new_game and play_with_another_player) or not start_new_game):\n game_def = None\n # NOTE, if started a new game, game_id has a value, else, empty\n d 
= MyDialog_JoinGameDlg(\"Join a game\", game_id, '',\n play_with_another_player, root)\n game_def = d.result\n # Rule I:\n # either game_id or player value must be provided, cannot be both empty\n # Rule II:\n # If play with another player, player name must be provided:\n # Rule III:\n # if player provided, it should be the self-player, and two scenarios entail:\n # 1. game_id provided, the player only plays with a specific game\n # 2. game_id not provided, the player plays multiple games that request the player's participation\n # Rules IV:\n # if player not provided, game_id must be provided, and the client plays both players\n if not game_def:\n errorMsg = 'current setting: play_with_another_player = {}, ' \\\n ' some information missing'.format(play_with_another_player)\n messageBox.showerror('error', errorMsg, icon=\"error\")\n else:\n game_id = game_def['game_id']\n player = game_def['player']\n\n if (player == ''):\n player_str = 'play two players'\n else:\n player_str = player\n print('\\njoined a game: game_id: {}, player: {}\\n'.format(game_id, player_str))\n\n # drawing graphics based on information from the server\n # NOTE, number range from 1 to 11, as last 11 is excluded,\n # chr value range from A to K, as last K is exlcluded\n # and top row and left col are for labelling\n int_list = list(range(1, 11))\n chr_val_list = list(range(ord('A'), ord('K')))\n print('int_list={}, chr_val_list={}'.format(int_list, chr_val_list))\n while (True): # keep the client alive checking if anyone requires a game\n game_ids = list()\n if game_id == '':\n player_info = api_instance_1.players_name_get(player)\n game_ids = player_info.games\n else:\n game_ids.append(game_id)\n print('current player = {}, game_ids = {}'.format(player, game_ids))\n for this_game_id in game_ids:\n print('this_game_id = {}, getting game information'.format(this_game_id))\n game = api_instance_1.games_game_id_get(this_game_id)\n print('game=\\n{}'.format(game))\n player1_fleet_state = get_initial_fleet_state(game)\n player2_fleet_state = copy.deepcopy(player1_fleet_state)\n if ((game.winner == '') and\n (not play_with_another_player or (play_with_another_player and game.move == player))):\n if make_random_move:\n grid_ref = get_random_move_with_player_knowledge(int_list, chr_val_list, game)\n elif enforce_non_adjacent_ships:\n grid_ref, player1_fleet_state, player2_fleet_state = \\\n get_stratigic_move_with_player_knowledge_enforce_non_adjacent_ships(\n int_list, chr_val_list, game, player1_fleet_state, player2_fleet_state)\n else:\n grid_ref = get_stratigic_move_with_player_knowledge(int_list, chr_val_list, game)\n print('this_game_id={}, grid_ref={}. 
game.move={}'.format(this_game_id,grid_ref, game.move))\n move_result = api_instance_1.games_game_id_grid_ref_put(this_game_id, grid_ref, body=Move(game.move))\n print('game.move={}\\ngrid_ref={}\\nmove_result=\\n{}'.format(game.move, grid_ref, move_result))\n game = api_instance_1.games_game_id_get(this_game_id)\n print('this_game_id={}\\ngame=\\n{}'.format(this_game_id, game))\n update_plot(number_of_cells, font_szie, w, global_total_rows_in_plot, fig, plotCanvas,\n grid_texts_default, grid_colours_default, game)\n elif display_graphic:\n test_realtime_display_graphics(number_of_cells, font_szie, w,\n global_total_rows_in_plot, fig, plotCanvas,\n grid_texts_default, grid_colours_default, sleep_param=0.5)\n else:\n test_title_img_pairs_list = generate_test_title_img_pairs_list()\n print('test_title_img_pairs_list = \\n{}'.format(test_title_img_pairs_list))\n test_realtime_display_group_of_images(global_total_rows_in_plot, test_title_img_pairs_list, fig, plotCanvas)\n\n root.mainloop()\n\n if __name__ == '__main__':\n main()\n#endregion\n\n\n","sub_path":"BattleShips/BattleShips.py","file_name":"BattleShips.py","file_ext":"py","file_size_in_byte":32441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"514122741","text":"from __future__ import print_function, absolute_import\n\nimport numpy as np\nimport torch\nfrom sklearn.cluster import KMeans\nfrom torch import nn\nfrom torch.autograd import Variable\n\n\ndef pair_euclidean_dist(inputs_x, inputs_y):\n n = inputs_x.size(0)\n m = inputs_y.size(0)\n xx = torch.pow(inputs_x, 2).sum(dim=1, keepdim=True).expand(\n n, m)\n yy = torch.pow(inputs_y, 2).sum(dim=1, keepdim=True).expand(m, n).t()\n dist = xx + yy\n dist.addmm_(1, -2, inputs_x, inputs_y.t())\n return dist\n\n\nclass KmeanLoss(nn.Module):\n def __init__(self, alpha=16, n_cluster=2, beta=0.5):\n super(KmeanLoss, self).__init__()\n self.alpha = alpha\n self.beta = beta\n self.n_clusters = n_cluster\n\n def cluster(self, inputs, targets):\n X = inputs.data.cpu().numpy()\n y = targets.data.cpu().numpy()\n\n result = dict()\n for y_ in set(y):\n # print(y_)\n idx_ = np.where(y == y_)\n # print(idx_[0])\n X_i = X[idx_[0]]\n # print(X_i)\n kmeans = KMeans(n_clusters=3, random_state=1).fit(X_i)\n pred_cluster = kmeans.labels_\n print(pred_cluster)\n\n for i in range(len(y)):\n\n k = str(y[i]) + ' ' + str(pred_cluster[i])\n if k in result:\n result[k].append(i)\n else:\n result[k] = [i]\n split_ = result.values()\n return split_\n\n def forward(self, inputs, targets):\n split_ = self.cluster(inputs, targets)\n\n num_dim = inputs.size(1)\n n = inputs.size(0)\n centers = []\n inputs_list = []\n targets_ = []\n\n cluster_mat = np.ones([n, len(split_)])\n for i, split_i in enumerate(split_):\n size_ = len(split_i)\n if size_ > 1:\n for k in split_i:\n cluster_mat[k][i] = float(size_ * size_) / ((size_ - 1) * (size_ - 1))\n targets_.append(targets[split_i[0]])\n input_ = torch.cat([inputs[i].resize(1, num_dim) for i in split_i], 0)\n centers.append(torch.mean(input_, 0))\n inputs_list.append(input_)\n\n if self.use_cuda:\n cluster_mat = Variable(torch.FloatTensor(cluster_mat)).cuda().detach()\n else:\n cluster_mat = Variable(torch.FloatTensor(cluster_mat)).detach()\n\n targets_ = torch.cat(targets_)\n\n centers = [center.resize(1, num_dim) for center in centers]\n centers = torch.cat(centers, 0)\n\n centers_dist = pair_euclidean_dist(inputs, centers) * cluster_mat\n\n loss = []\n\n for i, target in enumerate(targets):\n dist = centers_dist[i]\n 
pos_pair_mask = (targets_ == target)\n pos_pair = torch.masked_select(dist, pos_pair_mask)\n\n dist = torch.masked_select(dist, dist > 1e-3)\n pos_pair = torch.masked_select(pos_pair, pos_pair > 1e-3)\n\n base = (torch.max(dist) + torch.min(dist)).data[0] / 2\n pos_exp = torch.sum(torch.exp(-self.alpha * (pos_pair - base)))\n a_exp = torch.sum(torch.exp(-self.alpha * (dist - base)))\n loss_ = - torch.log(pos_exp / a_exp)\n loss.append(loss_)\n\n return torch.mean(torch.cat(loss))\n\n def cuda(self, device_id=None):\n \"\"\"Moves all model parameters and buffers to the GPU.\n\n Arguments:\n device_id (int, optional): if specified, all parameters will be\n copied to that device\n \"\"\"\n self.use_cuda = True\n return self._apply(lambda t: t.cuda(device_id))\n","sub_path":"losses/others/kmean_loss.py","file_name":"kmean_loss.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"192346911","text":"\"\"\"A Spawner for JupyterHub to allow the Hub to be run as non-root.\n\nThis spawns a mediator process with sudo, which then takes actions on behalf of the user.\n\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport sys\nsys.path.append(\"/etc\")\nfrom anywhere_server_details import server_id\n\nfrom pystalkd.Beanstalkd import Connection\nimport json\nimport os\n\nfrom tornado import gen\nfrom tornado.process import Subprocess\n\nfrom traitlets import List, Unicode, Bool\n\nfrom jupyterhub.spawner import LocalProcessSpawner\nfrom jupyterhub.utils import random_port\n\nclass SudoSpawner(LocalProcessSpawner):\n\n sudospawner_path = Unicode('sudospawner', config=True,\n help=\"Path to sudospawner script\"\n )\n sudo_args = List(['-nH'], config=True,\n help=\"Extra args to pass to sudo\"\n )\n debug_mediator = Bool(False, config=True,\n help=\"Extra log output from the mediator process for debugging\",\n )\n\n @gen.coroutine\n def do(self, action, **kwargs):\n \"\"\"Instruct the mediator process to take a given action\"\"\"\n kwargs['action'] = action\n try:\n os.makedirs(\"/sys/fs/cgroup/cpu,cpuacct/users/\" + self.user.name)\n except FileExistsError:\n pass\n cmd = [\"/usr/bin/cgexec\", \"-g\", \"cpu,cpuacct:users/\" + self.user.name, 'sudo', '-u', self.user.name]\n cmd.extend(self.sudo_args)\n cmd.append(self.sudospawner_path)\n if self.debug_mediator:\n cmd.append('--logging=debug')\n\n p = Subprocess(cmd, stdin=Subprocess.STREAM, stdout=Subprocess.STREAM)\n yield p.stdin.write(json.dumps(kwargs).encode('utf8'))\n p.stdin.close()\n data = yield p.stdout.read_until_close()\n if p.returncode:\n raise RuntimeError(\"Spawner subprocess failed with exit code: %r\" % p.returncode)\n try:\n return json.loads(data.decode('utf8'))\n except:\n print(\"Spawner child process returned:\", data.decode('utf-8'))\n raise\n\n @gen.coroutine\n def start(self):\n self.user.server.ip = self.ip\n self.user.server.port = random_port()\n self.db.commit()\n # only args, not the base command\n reply = yield self.do(action='spawn', args=self.get_args(), env=self.env)\n connection = Connection(\"localhost\")\n connection.use(\"tarpit_queue_for_server_{}\".format(server_id))\n connection.put(json.dumps(dict(username=self.user.name)))\n self.pid = reply['pid']\n\n @gen.coroutine\n def _signal(self, sig):\n reply = yield self.do('kill', pid=self.pid, signal=sig)\n return 
reply['alive']\n\n","sub_path":"sudospawner/spawner.py","file_name":"spawner.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108401725","text":"#!/usr/bin/env python\n'''\nCreated on May 14, 2014\n@author: reid\n\nModified on May 21, 2015\n'''\n\nimport re, sys, nltk, operator\nfrom nltk.parse import DependencyGraph\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n# Read the lines of an individual dependency parse\ndef read_dep(fh):\n dep_lines = []\n for line in fh:\n line = line.strip()\n if len(line) == 0:\n return \"\\n\".join(dep_lines)\n elif re.match(r\"^QuestionId:\\s+(.*)$\", line):\n # You would want to get the question id here and store it with the parse\n continue\n dep_lines.append(line)\n \n return \"\\n\".join(dep_lines) if len(dep_lines) > 0 else None\n \n\n# Read the dependency parses from a file\ndef read_dep_parses(depfile):\n fh = open(depfile, 'r')\n\n # list to store the results\n graphs = []\n \n # Read the lines containing the first parse.\n dep = read_dep(fh)\n \n # While there are more lines:\n # 1) create the DependencyGraph\n # 2) add it to our list\n # 3) try again until we're done\n while dep is not None:\n graph = DependencyGraph(dep)\n graphs.append(graph)\n \n dep = read_dep(fh)\n fh.close()\n \n return graphs \n \ndef find_main(graph):\n for node in graph.nodes.values():\n if node['rel'] == 'ROOT':\n return node\n return None\n \ndef find_node(word, graph):\n for node in graph.nodes.values():\n if node[\"word\"] == word:\n return node\n return None\n \ndef get_dependents(node, graph):\n results = []\n for item in node[\"deps\"]:\n address = node[\"deps\"][item][0]\n dep = graph.nodes[address]\n results.append(dep)\n results = results + get_dependents(dep, graph)\n \n return results\n\n\ndef find_answer(qgraph, sgraph):\n qmain = find_main(qgraph)\n qword = qmain[\"word\"]\n \n snode = find_node(qword, sgraph)\n \n for node in sgraph.nodes.values():\n #print(\"node in nodes.values():\", node)\n if node.get('head', None) == snode[\"address\"]:\n #print(\"Our parent is:\", snode)\n #print(\"Our relation is:\", node['rel'])\n if node['rel'] == \"prep\":\n deps = get_dependents(node, sgraph)\n deps = sorted(deps, key=operator.itemgetter(\"address\"))\n \n return \" \".join(dep[\"word\"] for dep in deps)\n\nif __name__ == '__main__':\n\t# Keeping data files in /data/ folder ~ mjgates\n text_file = \"data/fables-01.sch\"\n dep_file = \"data/fables-01.sch.dep\"\n q_file = \"data/fables-01.questions.dep\"\n \n # Read the dependency graphs into a list \n sgraphs = read_dep_parses(dep_file)\n qgraphs = read_dep_parses(q_file)\n\n # Get the first question\n qgraph = qgraphs[0] \n \n # The answer is in the second sentence\n # You would have to figure this out like in the chunking demo\n sgraph = sgraphs[1]\n \n lmtzr = WordNetLemmatizer()\n for node in sgraph.nodes.values():\n tag = node[\"tag\"]\n word = node[\"word\"]\n if word is not None:\n if tag.startswith(\"V\"):\n print(lmtzr.lemmatize(word, 'v'))\n else:\n print(lmtzr.lemmatize(word, 'n'))\n print()\n\n answer = find_answer(qgraph, sgraph)\n print(answer)\n\n","sub_path":"asg6/stub_code/dependency-demo-stub.py","file_name":"dependency-demo-stub.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"163599310","text":"import random\nfrom random import randint\n\n# db = {\"апельсин\": \"orange\", \"яблоко\": 
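# --- Editor's note: illustrative, self-contained sketch; assumes only that nltk is
# installed. read_dep_parses() in the dependency-demo record above rebuilds
# DependencyGraph objects from a dump file, but the same class also accepts an inline
# tab-separated (word, POS, head-index) block, which is handy for testing helpers such
# as find_main()/get_dependents() without any data files:
from nltk.parse import DependencyGraph

toy = DependencyGraph('John\tNNP\t2\nloves\tVBZ\t0\nMary\tNNP\t2')
# the root is the node whose head is the artificial node 0
root = next(n for n in toy.nodes.values() if n['head'] == 0 and n['word'])
print(root['word'])                 # -> loves
print(dict(toy.nodes[2]['deps']))   # addresses of the root's dependents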
\"apple\", \"картофель\": \"potato\", \"сок\": \"juice\", \"пингвин\": \"penguin\",\n# \"черепаха\": \"turtle\", \"молоко\": \"milk\", \"коробка\": \"box\", \"дверь\": \"door\", \"окно\": \"window\",\n# \"ручка\": \"pen\", \"небо\": \"sky\", \"банан\": \"banana\", \"дерево\": \"tree\", \"мяч\": \"ball\",\n# \"книга\": \"book\", \"бумага\": \"paper\", \"тростник\": \"cane\", \"корабль\": \"ship\", \"стакан\": \"glass\"}\n\n\n# db\n# file of scores def write_score\n# with open(\"file_of_scores.txt\", \"w\") as f:\n# f.write('')\ndb_sentence = {\"right\": \"You guessed ____, you spent 6 tries\",\n \"choose\": \"____ variance follow\",\n \"name\": \"my ____ is Max\",\n \"eated\": \"I ____ apple\",\n \"buys\": \"My friend ____ a black phone\"}\n\ndef load_db():\n \"\"\"load database from txt file\n path_db - path to txt file\"\"\"\n path_db = \"db_ru_en.txt\" # for Andy and Andemir\n db = {}\n with open(path_db, \"r\") as f:\n k = f.read()\n c = k.split('\\n')\n for i in c:\n kluch = i.split('-')[0]\n znach = i.split('-')[1]\n db[kluch] = znach\n return db\n\ndef write_score(name, point):\n \"\"\"\n :param name: name player\n :param point: score\n :return: None\n \"\"\"\n path = \"file_of_scores.txt\"\n with open(path, \"r\") as f:\n d = f.readlines()\n is_add = False\n for i in range(len(d)):\n if name in d[i]:\n new_point = int(d[i].split(\"-\")[1]) + point\n d[i] = name + \" - \" + str(new_point) + \"\\n\"\n is_add = True\n break\n\n if not is_add:\n text = name + \" - \" + str(point) + \"\\n\"\n d.append(text)\n\n text = \"\"\n for line in d:\n text += line\n\n with open(path, \"w\") as f:\n f.write(text)\n\ndef show_helps(letters, word):\n show = []\n for i in range(len(db[word])):\n show.append('_ ')\n for x in letters:\n show[x] = db[word][x]\n show_letters = \"\"\n for i in show:\n show_letters += i\n return show_letters\n\ndef guess_word(word):\n \"\"\"guess correct answer by word\n mode 2\"\"\"\n max_XP = 10\n help_letters = []\n while True:\n print(word, \"in english\")\n answer = input(\"\")\n if answer == db[word]:\n print(\"Excellent! You guessed right, you spent\", str(10 - max_XP + 1), \"tries\")\n return max_XP\n else:\n ran = randint(0, len(db[word])-1)\n if ran in help_letters:\n ran = randint(0, len(db[word])-1)\n help_letters.append(ran)\n print(\"Sorry, but you didn't guess right:( Try again\")\n print(show_helps(help_letters, word))\n if max_XP > 0:\n max_XP -= 1\n\ndef guess_word_by_variance(word):\n \"\"\"mode 1\"\"\"\n max_XP = 5\n print(word, \"in english, choose variance follow\")\n some_words = random.sample(list(db.values()), 4)\n some_words[randint(0, 3)] = db[word]\n print(some_words)\n while True:\n answer = input()\n if answer == db[word] or answer == str(some_words.index(db[word]) + 1):\n print(\"Excellent! You guessed right, you spent\", str(5 - max_XP + 1), \"tries\")\n return max_XP\n else:\n print(\"Sorry, but you didn't guess right:( Try again\")\n max_XP -= 1\n\ndef guess_word_by_sentence(word):\n XP = 0\n print(\"choose the correct option for this sentence: \", db_sentence[word])\n some_words = random.sample(list(db_sentence.keys()), 4)\n if word in some_words:\n print(some_words)\n else:\n some_words[randint(0, 3)] = word\n print(some_words)\n while True:\n answer = input(': ')\n if answer == word or answer == str(some_words.index(word) + 1):\n XP += 5\n print(\"Congratulation! 
You guessed right, you spent\", str(XP), \"tries\")\n break\n else:\n print(\"Sorry, but you didn't guess right:( Try again\")\n return XP\n\nif __name__ == \"__main__\":\n print(\"Hello! what's your name?\")\n name = input(\" \")\n XP = 0\n db = load_db()\n while True:\n print(\"\"\"what mode do you prefer?\n write 1 if guess word by variance, write 2 if guess word by letters, write 3 if guess word by sentence\n write exit to close program\n \"\"\")\n mode = input()\n if mode == '1':\n tasks = random.sample(db.keys(), 5)\n for word in tasks:\n XP += guess_word_by_variance(word)\n elif mode == '2':\n tasks = random.sample(db.keys(), 5)\n for word in tasks:\n XP += guess_word(word)\n elif mode == '3':\n tasks = random.sample(db_sentence.keys(), 5)\n for word in tasks:\n XP += guess_word_by_sentence(word)\n elif mode == \"exit\":\n break\n print(\"Congratulations! You scored {} XP\".format(XP))\n\n write_score(name, XP)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80816266","text":"#!/usr/bin/python\n# ex:set fileencoding=utf-8:\n\nfrom __future__ import unicode_literals\n\nfrom django.forms import ModelForm\n\nfrom .models import Invoice\n\n\nclass InvoiceUpdateForm(ModelForm):\n class Meta:\n model = Invoice\n exclude = ['invoice_number', 'products']\n\n\nclass InvoiceCreateForm(ModelForm):\n class Meta:\n model = Invoice\n exclude = [\n 'invoice_number', 'state', 'invoice',\n 'shipping_address', 'invoice_address',\n 'valid_until', 'products'\n ]\n","sub_path":"djangobmf/contrib/invoice/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615415102","text":"#! 
python3\nAPPID = 'cc42e57d84cce9cfc9718a7d08001229'\n#APPID = '54bdb867b4615b2a26b479636caf6721'\nimport json, requests, sys, textMyself\nimport pprint\n##if len(sys.argv) < 2:\n## print('Usage: getOpenWeather.py city_name, 2-letter_country_code')\n## sys.exit()\n##location = ' '.join(sys.argv[1:])\nurl ='https://api.openweathermap.org/data/2.5/onecall?lat=33.441792&lon=-94.037689&exclude=hourly,daily&appid=%s' % (APPID)\n##https://api.openweathermap.org/data/2.5/onecall?lat=33.441792&lon=-94.037689&\n##exclude=hourly,daily&appid=cc42e57d84cce9cfc9718a7d08001229\n#https://api.openweathermap.org/data/2.5/forecast/daily?q=San Francisco, CA&cnt=3&APPID=54bdb867b4615b2a26b479636caf6721\nresponse = requests.get(url)\nresponse.raise_for_status()\n#print(response.text)\nweatherData = json.loads(response.text)\n#pprint.pprint(weatherData)\n#print(weatherData)\n##w = weatherData['list']\n##print('Current weather in %s:' % (location))\n##print(w[0]['weather'][0]['main'], '-', w[0]['weather'][0]['description'])\n##print()\n##print('Tomorrow:')\n##print(w[1]['weather'][0]['main'], '-', w[1]['weather'][0]['description'])\n##print()\n##print('Day after tomorrow:')\n##print(w[2]['weather'][0]['main'], '-', w[2]['weather'][0]['description'])\n\nw = weatherData['current']['weather'][0]\nprint('Current weather for today...')\nprint(w)\n#textMyself.textmyself(str(w))\n","sub_path":"getOpenWeather.py","file_name":"getOpenWeather.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502297774","text":"\"\"\"\nFinds anagram\n\"\"\"\n\n\n# Time complexity: O(nLog n)\n# Space complexity: O(1)\n\ndef check_anagram(string1: str, string2: str):\n\n # if lengths are different then can not be anagram\n if len(string1) != len(string2):\n return False\n\n string1 = sorted(string1)\n string2 = sorted(string2)\n\n if string1 == string2:\n return True\n\n return False\n\n\n# Time complexity: O(n), Space complexity: O(n)\ndef check_anagram2(string1: str, string2: str):\n count1 = {}\n count2 = {}\n\n if len(string1) != len(string2):\n return False\n\n for ch1 in string1:\n ascaii = ord(ch1)\n\n value = count1.get(ascaii, 0) + 1\n\n count1.update({ascaii: value})\n for ch2 in string2:\n ascaii = ord(ch2)\n value = count2.get(ascaii, 0) + 1\n count2.update({ascaii: value})\n\n for (c1, v1) in count1.items():\n v2 = count2.get(c1, -1)\n if v1 != v2:\n return False\n\n return True\n\n\nif __name__ == '__main__':\n is_anagram = check_anagram2(\"abcd\", \"dabc\")\n print(is_anagram)\n\n\n","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"406212024","text":"import sys\r\nfrom queue import Queue\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nimport time\r\nimport subprocess\r\n# import queue as Queue\r\n# from queue import Empty\r\n# The new Stream Object which replaces the default stream associated with sys.stdout\r\n# This object just puts data in a queue!\r\n\r\nstart=''\r\n\r\nclass WriteStream(object):\r\n def __init__(self,queue):\r\n self.queue = queue\r\n\r\n def write(self, text):\r\n self.queue.put(text)\r\n\r\n# A QObject (to be run in a QThread) which sits waiting for data to come through a Queue.Queue().\r\n# It blocks until data is available, and one it has got something from the queue, it sends\r\n# it to the \"MainThread\" by 
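# --- Editor's note: illustrative alternative to check_anagram2() in the anagram record
# above. collections.Counter builds the same per-character histogram in one call, so the
# whole O(n) comparison collapses to a single equality test:
from collections import Counter

def check_anagram3(s1: str, s2: str) -> bool:
    return Counter(s1) == Counter(s2)

assert check_anagram3("abcd", "dabc")
assert not check_anagram3("abcd", "abce")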
emitting a Qt Signal \r\nclass MyReceiver(QObject):\r\n mysignal = pyqtSignal(str)\r\n\r\n def __init__(self,queue,*args,**kwargs):\r\n QObject.__init__(self,*args,**kwargs)\r\n self.queue = queue\r\n\r\n @pyqtSlot()\r\n def run(self):\r\n while True:\r\n text = self.queue.get()\r\n self.mysignal.emit(text)\r\n\r\n# An example QObject (to be run in a QThread) which outputs information with print\r\nclass LongRunningThing(QObject):\r\n @pyqtSlot()\r\n def run(self):\r\n # for i in range(1000):\r\n # print (i)\r\n subp=subprocess.Popen(start,shell=True,stdout=subprocess.PIPE)\r\n c=subp.stdout.readline()\r\n while c:\r\n try:\r\n c = str(c,encoding='gb18030') \r\n except:\r\n pass\r\n # c.replace(\"b'\",\"\")\r\n c = subp.stdout.readline()\r\n print (c.decode('gb18030'))\r\n # self.showDataWindow.setText(c)\r\n \r\n subp.wait()\r\n\r\n# An Example application QWidget containing the textedit to redirect stdout to\r\nclass MainUi(QWidget):\r\n def __init__(self,*args,**kwargs):\r\n QWidget.__init__(self,*args,**kwargs)\r\n self.layout = QVBoxLayout(self)\r\n self.setMinimumHeight(500) #窗体最小高度\r\n self.setMinimumWidth(1000) #窗体最小宽度\r\n self.textedit = QTextEdit()\r\n self.button = QPushButton('start sniff')\r\n self.button.clicked.connect(self.start_thread)\r\n self.layout.addWidget(self.textedit)\r\n self.layout.addWidget(self.button)\r\n self.textedit.setReadOnly(True)\r\n # self.layout.setStretchFactor(self.textedit,4)\r\n # self.layout.setStretchFactor(self.button,2)\r\n \r\n @pyqtSlot()\r\n # def run(self):\r\n # for i in range(1000):\r\n # print(i)\r\n @pyqtSlot(str)\r\n def append_text(self,text):\r\n self.textedit.moveCursor(QTextCursor.End)\r\n self.textedit.insertPlainText( text )\r\n\r\n @pyqtSlot()\r\n def start_thread(self):\r\n self.thread = QThread()\r\n self.long_running_thing = LongRunningThing()\r\n self.long_running_thing.moveToThread(self.thread)\r\n self.thread.started.connect(self.long_running_thing.run)\r\n self.thread.start()\r\n\r\ndef startSniff():\r\n global start\r\n env = sys.argv[1]\r\n ip = sys.argv[2]\r\n port = sys.argv[3]\r\n md5 = sys.argv[4]\r\n pack = sys.argv[5]\r\n flag = sys.argv[6]\r\n if(env == 'Windows'):\r\n if(ip == 'unknown' and port == 'unknown'):\r\n if(pack == 'unknown'):\r\n print('全局模式')\r\n start = 'MySniff.exe --eth=' + flag + ' --debug '\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('抓取敏感数据模式')\r\n start = 'MySniff.exe --eth=' + flag + ' -- ' + pack + '--debug'\r\n else:\r\n print(pack+' 协议模式')\r\n start = 'MySniff.exe --eth=' + flag + ' --' + pack + '--debug'\r\n elif(ip != 'unknown' and port == 'unknown'):\r\n if(pack == 'unknown'):\r\n print('IP全局模式')\r\n start = 'MySniff.exe --eth=' + flag + ' --debug ' + ' --ip ' + ip\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('IP抓取敏感数据模式')\r\n start = 'MySniff.exe --eth=' + flag + ' -- ' + pack + '--debug' + ' --ip ' + ip\r\n else:\r\n print(pack+' AND IP协议模式')\r\n start = 'MySniff.exe --eth=' + flag + ' --' + pack + '--debug' + ' --ip ' + ip\r\n elif(ip =='unknown' and port != 'unknown'):\r\n if(pack == 'unknown'):\r\n print('PORT全局模式')\r\n start = 'MySniff.exe --eth=' + flag + ' --debug ' + ' --port '+ port\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('PORT抓取敏感数据模式')\r\n start = 'MySniff.exe --eth=' + flag + ' -- ' + pack + '--debug' + ' --port '+ port\r\n else:\r\n print(pack+' AND PORT 协议模式')\r\n start = 
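# --- Editor's note: minimal, GUI-free sketch of the WriteStream/MyReceiver pattern in
# the record above: print() is redirected into a Queue through a tiny file-like object,
# and a consumer thread drains it. In the Qt version the consumer re-emits each chunk
# as a signal so the text reaches the UI thread safely.
import sys, threading, queue

class QueueStream:
    def __init__(self, q):
        self.q = q
    def write(self, text):
        self.q.put(text)
    def flush(self):                 # file-like objects are expected to offer flush()
        pass

q = queue.Queue()
sys.stdout = QueueStream(q)
print('hello from the producer')
sys.stdout = sys.__stdout__          # restore before the consumer writes

def drain():
    while not q.empty():
        sys.stdout.write(q.get())

t = threading.Thread(target=drain)
t.start()
t.join()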
'MySniff.exe --eth=' + flag + ' --' + pack + '--debug' + ' --port '+ port\r\n elif(ip != 'unknown' and port != 'unknown'):\r\n if(pack == 'unknown'):\r\n print('全局模式')\r\n start = 'MySniff.exe --eth=' + flag + ' --debug ' + ' --port '+ port + ' --ip ' + ip\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('抓取敏感数据模式')\r\n start = 'MySniff.exe --eth=' + flag + ' -- ' + pack + '--debug' + ' --port '+ port + ' --ip ' + ip\r\n else:\r\n print(pack+' 协议模式')\r\n start = 'MySniff.exe --eth=' + flag + ' --' + pack + '--debug' + ' --port '+ port + ' --ip ' + ip\r\n elif(env == 'Linux'):\r\n if(ip == 'unknown' and port == 'unknown'):\r\n if(pack == 'unknown'):\r\n print('全局模式')\r\n start = './MySniff --eth=' + flag + ' --debug '\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('抓取敏感数据模式')\r\n start = './MySniff --eth=' + flag + ' -- ' + pack + '--debug'\r\n else:\r\n print(pack+' 协议模式')\r\n start = './MySniff --eth=' + flag + ' --' + pack + '--debug'\r\n elif(ip != 'unknown' and port == 'unknown'):\r\n if(pack == 'unknown'):\r\n print('IP全局模式')\r\n start = './MySniff --eth=' + flag + ' --debug ' + ' --ip ' + ip\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('IP抓取敏感数据模式')\r\n start = './MySniff --eth=' + flag + ' -- ' + pack + '--debug' + ' --ip ' + ip\r\n else:\r\n print(pack+' AND IP协议模式')\r\n start = './MySniff --eth=' + flag + ' --' + pack + '--debug' + ' --ip ' + ip\r\n elif(ip =='unknown' and port != 'unknown'):\r\n if(pack == 'unknown'):\r\n print('PORT全局模式')\r\n start = './MySniff --eth=' + flag + ' --debug ' + ' --port '+ port\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('PORT抓取敏感数据模式')\r\n start = './MySniff --eth=' + flag + ' -- ' + pack + '--debug' + ' --port '+ port\r\n else:\r\n print(pack+' AND PORT 协议模式')\r\n start = './MySniff --eth=' + flag + ' --' + pack + '--debug' + ' --port '+ port\r\n elif(ip != 'unknown' and port != 'unknown'):\r\n if(pack == 'unknown'):\r\n print('全局模式')\r\n start = './MySniff --eth=' + flag + ' --debug ' + ' --port '+ port + ' --ip ' + ip\r\n elif(pack == 'tcp=fault' or pack == 'tcp=http' or pack == 'tcp=ftp' or pack == 'tcp=telnet'):\r\n print('抓取敏感数据模式')\r\n start = './MySniff --eth=' + flag + ' -- ' + pack + '--debug' + ' --port '+ port + ' --ip ' + ip\r\n else:\r\n print(pack+' 协议模式')\r\n start = './MySniff --eth=' + flag + ' --' + pack + '--debug' + ' --port '+ port + ' --ip ' + ip\r\n print(start)\r\n # Create Queue and redirect sys.stdout to this queue\r\n queue = Queue()\r\n sys.stdout = WriteStream(queue)\r\n\r\n # Create QApplication and QWidget\r\n qapp = QApplication(sys.argv) \r\n app = MainUi()\r\n app.show()\r\n\r\n # Create thread that will listen on the other end of the queue, and send the text to the textedit in our application\r\n thread = QThread()\r\n my_receiver = MyReceiver(queue)\r\n my_receiver.mysignal.connect(app.append_text)\r\n my_receiver.moveToThread(thread)\r\n thread.started.connect(my_receiver.run)\r\n thread.start()\r\n\r\n qapp.exec_()\r\n\r\nif __name__ == '__main__':\r\n startSniff()\r\n","sub_path":"beginSniff.py","file_name":"beginSniff.py","file_ext":"py","file_size_in_byte":8960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"549420993","text":"import functools\nimport logging\nimport uuid\n\nfrom operator import __or__ as 
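# --- Editor's note: illustrative refactor sketch; build_cmd() is a hypothetical helper,
# not part of the original. startSniff() above spells out every (env, ip, port, pack)
# combination by hand, and several branches concatenate "' -- ' + pack + '--debug'"
# without a separating space, which looks unintended. Composing the options
# independently produces consistent command lines without the duplicated branches:
def build_cmd(env, flag, pack='unknown', ip='unknown', port='unknown'):
    exe = 'MySniff.exe' if env == 'Windows' else './MySniff'
    parts = [exe, '--eth=' + flag]
    if pack != 'unknown':
        parts.append('--' + pack)
    parts.append('--debug')
    if port != 'unknown':
        parts += ['--port', port]
    if ip != 'unknown':
        parts += ['--ip', ip]
    return ' '.join(parts)

print(build_cmd('Linux', 'eth0', pack='tcp=http', ip='10.0.0.1'))
# -> ./MySniff --eth=eth0 --tcp=http --debug --ip 10.0.0.1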
OR\n\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import JSONField\nfrom django.contrib.postgres.fields.jsonb import KeyTransform\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils.functional import cached_property\n\nfrom constants.experiment_groups import ExperimentGroupLifeCycle\nfrom constants.experiments import ExperimentLifeCycle\nfrom db.models.abstract_jobs import TensorboardJobMixin\nfrom db.models.unique_names import GROUP_UNIQUE_NAME_FORMAT\nfrom db.models.utils import (\n DescribableModel,\n DiffModel,\n LastStatusMixin,\n NameableModel,\n PersistenceModel,\n RunTimeModel,\n StatusModel,\n TagModel\n)\nfrom libs.spec_validation import validate_group_hptuning_config, validate_group_spec_content\nfrom schemas.hptuning import HPTuningConfig, Optimization\nfrom schemas.specifications import GroupSpecification\n\n_logger = logging.getLogger('polyaxon.db.experiment_groups')\n\n\nclass ExperimentGroup(DiffModel,\n RunTimeModel,\n NameableModel,\n PersistenceModel,\n DescribableModel,\n TagModel,\n LastStatusMixin,\n TensorboardJobMixin):\n \"\"\"A model that saves Specification/Polyaxonfiles.\"\"\"\n STATUSES = ExperimentGroupLifeCycle\n\n uuid = models.UUIDField(\n default=uuid.uuid4,\n editable=False,\n unique=True,\n null=False)\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n related_name='+')\n project = models.ForeignKey(\n 'db.Project',\n on_delete=models.CASCADE,\n related_name='experiment_groups',\n help_text='The project this polyaxonfile belongs to.')\n content = models.TextField(\n null=True,\n blank=True,\n help_text='The yaml content of the polyaxonfile/specification.',\n validators=[validate_group_spec_content])\n hptuning = JSONField(\n help_text='The experiment group hptuning params config.',\n null=True,\n blank=True,\n validators=[validate_group_hptuning_config])\n code_reference = models.ForeignKey(\n 'db.CodeReference',\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name='+')\n status = models.OneToOneField(\n 'db.ExperimentGroupStatus',\n related_name='+',\n blank=True,\n null=True,\n editable=True,\n on_delete=models.SET_NULL)\n\n class Meta:\n app_label = 'db'\n unique_together = (('project', 'name'),)\n\n def __str__(self):\n return self.unique_name\n\n @property\n def unique_name(self):\n return GROUP_UNIQUE_NAME_FORMAT.format(\n project_name=self.project.unique_name,\n id=self.id)\n\n def can_transition(self, status):\n \"\"\"Update the status of the current instance.\n\n Returns:\n boolean: if the instance is updated.\n \"\"\"\n if not self.STATUSES.can_transition(status_from=self.last_status, status_to=status):\n _logger.info(\n '`%s` tried to transition from status `%s` to non permitted status `%s`',\n str(self), self.last_status, status)\n return False\n\n return True\n\n def set_status(self, status, message=None, traceback=None, **kwargs):\n if not self.can_transition(status):\n return\n\n ExperimentGroupStatus.objects.create(experiment_group=self,\n status=status,\n message=message,\n traceback=traceback)\n\n @cached_property\n def hptuning_config(self):\n return HPTuningConfig.from_dict(self.hptuning) if self.hptuning else None\n\n @cached_property\n def specification(self):\n return GroupSpecification.read(self.content) if self.content else None\n\n @cached_property\n def concurrency(self):\n if not self.hptuning_config:\n return None\n return self.hptuning_config.concurrency\n\n @cached_property\n def search_algorithm(self):\n if not 
self.hptuning_config:\n return None\n return self.hptuning_config.search_algorithm\n\n @cached_property\n def has_early_stopping(self):\n return bool(self.early_stopping)\n\n @cached_property\n def early_stopping(self):\n if not self.hptuning_config:\n return None\n return self.hptuning_config.early_stopping or []\n\n @property\n def scheduled_experiments(self):\n return self.experiments.filter(\n status__status=ExperimentLifeCycle.SCHEDULED).distinct()\n\n @property\n def succeeded_experiments(self):\n return self.experiments.filter(\n status__status=ExperimentLifeCycle.SUCCEEDED).distinct()\n\n @property\n def failed_experiments(self):\n return self.experiments.filter(\n status__status=ExperimentLifeCycle.FAILED).distinct()\n\n @property\n def stopped_experiments(self):\n return self.experiments.filter(\n status__status=ExperimentLifeCycle.STOPPED).distinct()\n\n @property\n def pending_experiments(self):\n return self.experiments.filter(\n status__status__in=ExperimentLifeCycle.PENDING_STATUS).distinct()\n\n @property\n def running_experiments(self):\n return self.experiments.filter(\n status__status__in=ExperimentLifeCycle.RUNNING_STATUS).distinct()\n\n @property\n def done_experiments(self):\n return self.experiments.filter(\n status__status__in=ExperimentLifeCycle.DONE_STATUS).distinct()\n\n @property\n def non_done_experiments(self):\n return self.experiments.exclude(\n status__status__in=ExperimentLifeCycle.DONE_STATUS).distinct()\n\n @property\n def n_experiments_to_start(self):\n \"\"\"We need to check if we are allowed to start the experiment\n If the polyaxonfile has concurrency we need to check how many experiments are running.\n \"\"\"\n return self.concurrency - self.running_experiments.count()\n\n @property\n def iteration(self):\n return self.iterations.last()\n\n @property\n def iteration_data(self):\n return self.iteration.data if self.iteration else None\n\n @property\n def current_iteration(self):\n return self.iterations.count()\n\n def should_stop_early(self):\n filters = []\n for early_stopping_metric in self.early_stopping:\n comparison = (\n 'gte' if Optimization.maximize(early_stopping_metric.optimization) else 'lte')\n metric_filter = 'metric__values__{}__{}'.format(\n early_stopping_metric.metric, comparison)\n filters.append({metric_filter: early_stopping_metric.value})\n if filters:\n return self.experiments.filter(functools.reduce(OR, [Q(**f) for f in filters])).exists()\n return False\n\n def get_annotated_experiments_with_metric(self, metric, experiment_ids=None):\n query = self.experiments\n if experiment_ids:\n query = query.filter(id__in=experiment_ids)\n annotation = {\n metric: KeyTransform(metric, 'metric__values')\n }\n return query.annotate(**annotation)\n\n def get_ordered_experiments_by_metric(self, experiment_ids, metric, optimization):\n query = self.get_annotated_experiments_with_metric(\n metric=metric,\n experiment_ids=experiment_ids)\n\n metric_order_by = '{}{}'.format(\n '-' if Optimization.maximize(optimization) else '',\n metric)\n return query.order_by(metric_order_by)\n\n def get_experiments_metrics(self, metric, experiment_ids=None):\n query = self.get_annotated_experiments_with_metric(\n metric=metric,\n experiment_ids=experiment_ids)\n return query.values_list('id', metric)\n\n @cached_property\n def search_manager(self):\n from hpsearch.search_managers import get_search_algorithm_manager\n\n return get_search_algorithm_manager(hptuning_config=self.hptuning_config)\n\n @cached_property\n def iteration_manager(self):\n from 
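# --- Editor's note: illustrative sketch of the functools.reduce(OR, [Q(**f) ...])
# pattern used by should_stop_early() above, shown with plain predicates so it runs
# without Django. Each early-stopping rule contributes one condition and reduce()
# folds them into a single "any rule matches" test (any() would do the same; reduce
# mirrors the Q-object version, where | merges the filters into one SQL clause):
from functools import reduce

rules = [lambda m: m.get('loss', 1.0) <= 0.1,
         lambda m: m.get('accuracy', 0.0) >= 0.99]

def should_stop(metrics):
    return reduce(lambda a, b: a or b, (rule(metrics) for rule in rules))

print(should_stop({'loss': 0.05}))     # True  -> stopping condition met
print(should_stop({'accuracy': 0.5}))  # False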
hpsearch.iteration_managers import get_search_iteration_manager\n\n return get_search_iteration_manager(experiment_group=self)\n\n @property\n def iteration_config(self):\n from hpsearch.schemas import get_iteration_config\n\n if self.iteration_data and self.search_algorithm:\n return get_iteration_config(\n search_algorithm=self.search_algorithm,\n iteration=self.iteration_data)\n return None\n\n def get_suggestions(self):\n iteration_config = self.iteration_config\n if iteration_config:\n return self.search_manager.get_suggestions(iteration_config=iteration_config)\n return self.search_manager.get_suggestions()\n\n\nclass ExperimentGroupIteration(DiffModel):\n experiment_group = models.ForeignKey(\n 'db.ExperimentGroup',\n on_delete=models.CASCADE,\n related_name='iterations',\n help_text='The experiment group.')\n data = JSONField(\n help_text='The experiment group iteration meta data.')\n\n class Meta:\n app_label = 'db'\n ordering = ['created_at']\n\n def __str__(self):\n return '{} <{}>'.format(self.experiment_group, self.created_at)\n\n\nclass ExperimentGroupStatus(StatusModel):\n \"\"\"A model that represents an experiment group status at certain time.\"\"\"\n STATUSES = ExperimentGroupLifeCycle\n\n experiment_group = models.ForeignKey(\n 'db.ExperimentGroup',\n on_delete=models.CASCADE,\n related_name='statuses')\n status = models.CharField(\n max_length=64,\n blank=True,\n null=True,\n default=STATUSES.CREATED,\n choices=STATUSES.CHOICES)\n\n class Meta:\n app_label = 'db'\n verbose_name_plural = 'Experiment group Statuses'\n\n def __str__(self):\n return '{} <{}>'.format(self.experiment_group.unique_name, self.status)\n","sub_path":"polyaxon/db/models/experiment_groups.py","file_name":"experiment_groups.py","file_ext":"py","file_size_in_byte":10202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"522080912","text":"# # Make IO faster\n# import sys\n# input = sys.stdin.readline\n\n# # get single (or) multiple str\n# X = input()\n\n# # get single int\n# N = int(input())\n# # get multiple int (e.g., 2)\n# X, Y = map(int, input().split())\n# # get multiple int (e.g., 2) for N lines\n# XY = [list(map(int, input().split())) for _ in range(N)]\n\n# from IPython import embed; embed(); exit();\n\n# 全部入り\nimport sys, re\nfrom collections import deque, defaultdict, Counter\nfrom math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians\nfrom itertools import accumulate, permutations, combinations, product\nfrom operator import itemgetter, mul\nfrom copy import deepcopy\nfrom string import ascii_lowercase, ascii_uppercase, digits\nfrom bisect import bisect, bisect_left\nfrom fractions import gcd\nfrom heapq import heappush, heappop\nfrom functools import reduce\nimport numpy as np\ndef input(): return sys.stdin.readline().strip()\ndef INT(): return int(input())\ndef MAP(): return map(int, input().split())\ndef LIST(): return list(map(int, input().split()))\ndef ZIP(n): return zip(*(MAP() for _ in range(n)))\nsys.setrecursionlimit(10 ** 9)\nINF = float('inf')\nmod = 10 ** 9 + 7\n\nN = INT()\nh = []\nfor i in range(N):\n h.append(INT())\n\n# L, R は自分の場所より手前の単調増加列の長さ\ninv_h = list(reversed(h))\nL = [INF for _ in range(N)]\nL[0] = 0\n\ninv_R = [INF for _ in range(N)]\ninv_R[0] = 0\n\nl_cnt = 0\nfor i in range(N - 1):\n if h[i] < h[i + 1]:\n l_cnt += 1\n else:\n l_cnt = 0\n L[i + 1] = l_cnt\n\nr_cnt = 0\nfor i in range(N - 1):\n if inv_h[i] < inv_h[i + 1]:\n r_cnt += 1\n else:\n r_cnt = 0\n inv_R[i + 1] = r_cnt\n\nR = 
list(reversed(inv_R))\n\nans_max = -INF\nfor i in range(N):\n    tmp = L[i] + R[i] + 1\n    if tmp > ans_max:\n        ans_max = tmp\nprint(ans_max)\n","sub_path":"contests/arc/03/036/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167368776","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n# python3.6\r\n\r\n\r\nedge_set = set()\r\n\r\nwith open('wiki_vote', mode='r', encoding='utf-8', newline='') as f:\r\n    for line in f.readlines():\r\n        for node in line.strip().split('\\t'):\r\n            edge_set.add(node)\r\n\r\nwith open('wiki_vote_node', mode='w', encoding='utf-8', newline='') as f:\r\n    for node in edge_set:\r\n        f.write(node + '\\n')\r\n","sub_path":"gen_node.py","file_name":"gen_node.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"113969325","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May  5 15:35:45 2017\n\n@author: SMP II\n\nProgram for recognizing the shape of the object, checking whether a specific pattern is present, and returning the corner points.\n\"\"\"\n\nimport cv2\nfrom Mustererkennung import *\nfrom Glyphdatenbank import *\n\n\n\ndef glyphe_erkennen(Bild):\n    \n    # Whether the glyph was found\n    gefunden = False\n    \n    # Convert the image to grayscale\n    grau = cv2.cvtColor(Bild, cv2.COLOR_BGR2GRAY)\n    \n    # Show the original and the grayscale image\n    cv2.imshow(\"1\", Bild)\n    cv2.imshow(\"2\", grau)\n    \n    # Detect edges in the image\n    kanten = cv2.Canny(Bild, 35, 125)\n    \n    # Find contours\n    _, konturen, _ = cv2.findContours(kanten, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n    \n    # Sort contours, largest contour first\n    konturen = sorted(konturen, key=cv2.contourArea, reverse=True)[:10]\n    \n    # Loop over the contours looking for a glyph\n    for kontur in konturen:\n        \n        # Check the shape of the object\n        # arcLength(line, closed) reports the perimeter of the closed shape\n        umfang = cv2.arcLength(kontur, True)\n\n        # cv2.approxPolyDP(line, how far the approximation may deviate from the original, whether it is closed)\n        annaeherung = cv2.approxPolyDP(kontur, 0.01*umfang, True)\n        \n        # Check whether the contour has 4 corners\n        if len(annaeherung) == 4:\n            \n            glyphmuster = glyphmuster_erkennen(grau, annaeherung.reshape(4,2))\n            \n            # Output\n            print(\"Es hat 4 Ecken\")  # \"It has 4 corners\"\n            # Print the pattern\n            print(glyphmuster)\n            \n            # Compare against the database: does the glyph match the one we are looking for?\n            gefunden = musterabgleich(glyphmuster)\n            \n            if gefunden == True:\n                \n                print(\"gefunden\")  # \"found\"\n                \n                # Return the 4 corner points\n                return (True, annaeherung.reshape(4,2))\n                \n                # Break out of the loop\n                break\n            \n    return (False, [[1,1],[1,1]])","sub_path":"DATABASE_EXTERN/DATABASE/Adds/vlacs/Glyphenerkennung.py","file_name":"Glyphenerkennung.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"290909256","text":"#! 
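# --- Editor's note: self-contained sketch of the contour -> approxPolyDP quad test
# used by glyphe_erkennen() in the record above, run on a synthetic image so no camera
# or sample file is needed (assumes opencv-python and numpy):
import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (40, 40), (160, 160), 255, -1)      # one filled white square

edges = cv2.Canny(img, 35, 125)
# [-2] picks the contour list on both the 3-tuple (OpenCV 3) and 2-tuple (OpenCV 4) APIs
contours = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
for contour in sorted(contours, key=cv2.contourArea, reverse=True):
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.01 * peri, True)
    if len(approx) == 4:                               # four corners -> candidate glyph
        print('quad corners:', approx.reshape(4, 2).tolist())
        break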
/usr/bin/python3\n\n# stm.py - seconds till midnight\n#\n# flags: -n = noon, not midnight\n# -o # = offset by # seconds\n\n\nimport getopt, sys\nfrom datetime import datetime\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"no:\", ['noon', 'offset='])\nexcept getopt.GetoptError as err:\n print(str(err))\n sys.exit(2)\n\noffset = 0\nnoon = False\n\nfor option, argument in opts:\n if option == '-n':\n noon = True\n elif option == '-o':\n offset = int(argument)\n else:\n assert False, \"unknown option\"\n\n\nnow = datetime.now()\n\n# seconds since midnight\nssm = (now - now.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()\n\nif noon:\n target = (12 * 3600) - ssm\nelse:\n target = (24 * 3600) - ssm\n\n\nprint(int(target) + offset)\n\n","sub_path":"LABTECH/hw9 Clock with buttons/stm.py","file_name":"stm.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"262674234","text":"import pandas\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input\nfrom keras.layers.core import Dense, Activation\nfrom sklearn.preprocessing import LabelEncoder\n\n# load dataset\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\ndataset = pd.read_csv(\"diabetes.csv\", header=None).values\nbc_dataset = pd.read_csv(\"Breas Cancer.csv\", header=1)\n# print(dataset)\nimport numpy as np\nX_train, X_test, Y_train, Y_test = train_test_split(dataset[:,0:8], dataset[:,8],\n test_size=0.25, random_state=87)\nnp.random.seed(155)\nmy_first_nn = Sequential() # create model\nmy_first_nn.add(Dense(20, input_dim=8, activation='relu')) # hidden layer\nmy_first_nn.add(Dense(15, input_dim=8, activation='relu')) # hidden layer\nmy_first_nn.add(Dense(1, activation='sigmoid')) # output layer\nmy_first_nn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nmy_first_nn_fitted = my_first_nn.fit(X_train, Y_train, epochs=100, verbose=0,\n initial_epoch=0)\n\n# my_second_nn = Sequential() # create model\n# my_second_nn.add(Dense(20, input_dim=8, activation='relu')) # hidden layer\n# my_second_nn.add(Dense(15, input_dim=8, activation='relu')) # hidden layer\n# my_second_nn.add(Dense(1, activation='sigmoid')) # output layer\n# my_second_nn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n# my_second_nn_fitted = my_second_nn.fit(X_train, Y_train, epochs=100, verbose=0,\n# initial_epoch=0)\n\ninput1 = Input(shape=(8,))\nhidden1 = Dense(20, activation='relu')(input1)\nhidden2 = Dense(15, activation='relu')(hidden1)\noutput1 = Dense(1, activation='sigmoid')(hidden2)\nnn = Model(inputs=input1, outputs=output1)\n\nnn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nnn_fitted = nn.fit(X_train, Y_train, epochs=100, verbose=0, initial_epoch=0)\n\nbc_dataset = bc_dataset.apply(LabelEncoder().fit_transform)\nbc_dataset = bc_dataset.values\nbc_X_train, bc_X_test, bc_Y_train, bc_Y_test = train_test_split(bc_dataset[:,2:], bc_dataset[:,1], test_size=0.25,\n random_state=87)\n\nbc_input = Input(shape=(30,))\nbc_hidden1 = Dense(80, activation='relu')(bc_input)\nbc_hidden2 = Dense(40, activation='relu')(bc_hidden1)\nbc_hidden3 = Dense(20, activation='relu')(bc_hidden2)\nbc_output = Dense(1, activation='sigmoid')(bc_hidden3)\nbc_model = Model(inputs=bc_input, outputs=bc_output)\nbc_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nbc_fitted = bc_model.fit(bc_X_train, bc_Y_train, epochs=100, 
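# --- Editor's note: illustrative variant of the stm.py arithmetic above. Instead of
# counting seconds since midnight and subtracting from 24*3600, build tomorrow's
# midnight directly and let timedelta do the subtraction:
from datetime import datetime, timedelta

now = datetime.now()
next_midnight = (now + timedelta(days=1)).replace(hour=0, minute=0,
                                                  second=0, microsecond=0)
print(int((next_midnight - now).total_seconds()))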
verbose=0, initial_epoch=0)\n\n\nprint(my_first_nn.summary())\nprint(my_first_nn.evaluate(X_test, Y_test, verbose=0))\n\n# print(my_second_nn.summary())\n# print(my_second_nn.evaluate(X_test, Y_test, verbose=0))\n\nprint(nn.summary())\nprint(nn.evaluate(X_test, Y_test, verbose=0))\n\nprint(bc_model.summary())\nprint(bc_model.evaluate(bc_X_test, bc_Y_test, verbose=0))\n","sub_path":"ICP_DL1/DeepLearning_Lesson1/basicOP.py","file_name":"basicOP.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391379664","text":"#!/usr/bin/python3 -W all\n\"\"\"\n tokenize.py: tokenize tweet text: return one line per line\n usage: tokenize.py < file\n 20171123 erikt(at)xs4all.nl\n\"\"\"\n\nimport nltk\nimport sys\n\ndef tokenize(text):\n tokenizedList = nltk.word_tokenize(text)\n tokenizedLine = \"\"\n for i in range(0,len(tokenizedList)):\n if i == 0: tokenizedLine = tokenizedList[i]\n else: tokenizedLine += \" \"+tokenizedList[i]\n return(tokenizedLine)\n\ndef main(argv):\n for line in sys.stdin:\n tokenizedLine = tokenize(line)\n print(tokenizedLine)\n sys.exit(0)\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"bin/tokenize.py","file_name":"tokenize.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"335708974","text":"import sqlite3\nfrom sqlite3 import Error\nimport os.path\n\n\nPATH_TO_DB = \"data/data.db\"\ndb = sqlite3.connect(PATH_TO_DB, check_same_thread=False)\nc = db.cursor()\n\n\ndef setup_db():\n q = '''\nCREATE TABLE ItemType (\n id integer NOT NULL CONSTRAINT ItemType_pk PRIMARY KEY,\n name varchar(50),\n est_fin_days double,\n categories_id integer NOT NULL\n);\n'''\n c.execute(q)\n q = '''\nCREATE TABLE Item (\n id integer NOT NULL CONSTRAINT Item_pk PRIMARY KEY,\n itemtype_id integer NOT NULL,\n exp_date date NOT NULL,\n qty integer\n);\n'''\n c.execute(q)\n q = '''\nCREATE TABLE List (\n id integer NOT NULL CONSTRAINT List_pk PRIMARY KEY,\n name varchar(50)\n);\n'''\n c.execute(q)\n q = '''\nCREATE TABLE ListItem (\n id integer NOT NULL CONSTRAINT ListItem_pk PRIMARY KEY,\n list_id integer NOT NULL,\n item_id integer NOT NULL\n);\n'''\n c.execute(q)\n q = '''\nCREATE TABLE Category (\n id integer NOT NULL CONSTRAINT Category_pk PRIMARY KEY,\n name character(50)\n);\n'''\n c.execute(q)\n db.commit()\n\n\ndef addItem(itemName, exp_date, qty):\n q = \"INSERT INTO Item(itemtype_id, exp_date, qty) VALUES (?, ?, ?);\"\n c.execute(q, (getItemTypeID(itemName), exp_date, qty))\n db.commit()\n\n#add item type\ndef createItem(categoryName, itemName):\n q = \"INSERT INTO ItemType(name, categories_id) VALUES (?, ?);\"\n c.execute(q, (itemName, getCategoryID(categoryName)))\n db.commit()\n\ndef createCategory(name):\n q = \"INSERT INTO Category(name) VALUES (?);\"\n c.execute(q, (name,))\n db.commit()\n\ndef getCategoryID(name):\n q = \"SELECT id FROM Category WHERE name='%s';\" % name\n return c.execute(q).fetchone()[0]\n\ndef getItemTypeID(name):\n q = \"SELECT id FROM ItemType WHERE name='%s';\" % name\n return c.execute(q).fetchone()[0]\n\n#gets names only\ndef getAllCategories():\n q = \"SELECT name FROM Category ORDER BY name ASC;\"\n return c.execute(q).fetchall()\n\ndef getAllItems():\n q = \"SELECT name FROM Item ORDER BY name ASC;\"\n return c.execute(q).fetchall()\n\ndef needToRegenTables():\n return not c.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND 
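# --- Editor's note: hedged rewrite sketch; table and column names follow the db_utils
# record above. getCategoryID()/getItemTypeID() there interpolate a user-supplied name
# straight into the SQL string, which breaks on quotes and invites SQL injection.
# sqlite3's '?' placeholders quote the value safely (createCategory() above already
# uses them); the cursor is passed in here instead of the module-level 'c':
def get_category_id(cursor, name):
    row = cursor.execute("SELECT id FROM Category WHERE name=?;", (name,)).fetchone()
    return row[0] if row else None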
name='Item';\").fetchone()\n \n'''\nif __name__ == \"__main__\":\n\n if c.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='Item';\").fetchone():\n print 'db existed'\n else:\n setup_db()\n createCategory(\"Food\")\n createCategory(\"Medicine\")\n createItem(\"Food\", \"Lettuce\")\n createItem(\"Food\", \"Tomato\")\n createItem(\"Medicine\", \"Xans\")\n createItem(\"Medicine\", \"Mol\")\n addItem(\"Tomato\", \"2017-12-03\", 5)\n addItem(\"Xans\", \"2018-02-02\", 2)\n\n c.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n'''\n","sub_path":"utils/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648149521","text":"import wx\nimport sys\nfrom os import path\n\nfrom page.CtrlPanel import CtrlPanel\nfrom page.InputPanel import InputPanel\nfrom page.OutputPanel import OutputPanel\nfrom component.Source import Source\nfrom event import EVT_NEW_DATA, EVT_SEND_DATA\nfrom version import VERSION\n\n\nclass MainFrame(wx.Frame):\n def __init__(self, *args, **kwargs):\n super(MainFrame, self).__init__(*args, **kwargs)\n\n # 顶部状态栏\n self.make_menu_bar()\n\n # Win下窗体图标\n icon = wx.Icon(name=path.join(path.dirname(sys.argv[0]), './resource/icon.ico'), type=wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n\n # 数据收发源\n self.source = Source(self, -1)\n self.source.Hide()\n\n # 收发面板排版\n self.ctrl_panel = CtrlPanel(self, -1)\n self.input_panel = InputPanel(self, -1)\n self.output_panel = OutputPanel(self, -1)\n io_box_sizer = wx.BoxSizer(wx.VERTICAL)\n io_box_sizer.Add(self.output_panel, proportion=3, flag=wx.ALIGN_TOP | wx.EXPAND)\n io_box_sizer.Add(self.input_panel, proportion=1, flag=wx.ALIGN_BOTTOM | wx.EXPAND)\n\n # 顶层排版\n root_box_sizer = wx.BoxSizer(wx.HORIZONTAL)\n root_box_sizer.Add(io_box_sizer, proportion=3, flag=wx.ALIGN_LEFT | wx.EXPAND)\n root_box_sizer.Add(self.ctrl_panel, proportion=1, flag=wx.ALIGN_RIGHT | wx.EXPAND)\n self.SetSizer(root_box_sizer)\n\n self.Bind(EVT_NEW_DATA, self.on_new_data)\n self.Bind(EVT_SEND_DATA, self.on_send_data)\n\n def __del__(self):\n self.source.__del__()\n\n def make_menu_bar(self):\n file_menu = wx.Menu()\n file_menu_exit = file_menu.Append(wx.ID_EXIT, '退出')\n\n help_menu = wx.Menu()\n help_menu_about = help_menu.Append(wx.ID_ABOUT, '关于')\n\n menu_bar = wx.MenuBar()\n menu_bar.Append(file_menu, \"文件(&F)\")\n menu_bar.Append(help_menu, \"帮助(&H)\")\n\n self.SetMenuBar(menu_bar)\n\n self.Bind(wx.EVT_MENU, self.on_menu_exit, file_menu_exit)\n self.Bind(wx.EVT_MENU, self.on_menu_about, help_menu_about)\n\n def on_menu_exit(self, evt):\n self.Close(True)\n\n def on_menu_about(self, evt):\n dlg = wx.MessageDialog(self, '无名串口调试助手\\n\\nv{}\\nGitHub@rusoa'.format(VERSION), '关于', wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n\n def on_new_data(self, evt):\n self.output_panel.GetEventHandler().ProcessEvent(evt)\n\n def on_send_data(self, evt):\n self.source.send(evt.GetEventArgs())\n","sub_path":"source/page/MainFrame.py","file_name":"MainFrame.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"260295716","text":"n, m = map(int, input().split())\r\ndct = {}\r\nfor i in range(1, n+1):\r\n for j in range(1, m+1):\r\n if (i+j) in dct:\r\n dct[i+j] += 1\r\n else:\r\n dct[i+j] = 1\r\n\r\nmxv = max(dct.values())\r\nmxk = [k for k, v in dct.items() if v == mxv]\r\nfor i in mxk:\r\n print(i)\r\n\r\n","sub_path":"Open/Python 
3/dicecup.py","file_name":"dicecup.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349302460","text":"from __future__ import absolute_import\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import filters, permissions, status, viewsets\n\nfrom measurements.models import Metric\n\nfrom api.v1 import pagination, serializers\n\n\nclass MetricViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Viewset to search metrics\n \"\"\"\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.MetricSerializer\n pagination_class = pagination.MetricPagination\n\n def get_queryset(self):\n return Metric.objects.filter(is_official=True)\n\n\nclass MetricTypesView(APIView):\n \"\"\"\n Views that explains the metric types that are allowed\n \"\"\"\n def get(self, request, format=None): \n return Response({\n \"type\": [\n {\n \"key\": \"boo\",\n \"name\": \"True/False\",\n \"example\": \"Worked out\",\n \"description\": \"True or False values only\",\n \"aggregation\": [\"sum\"]\n }, {\n \"key\": \"pos\",\n \"name\": \"Positive Integer\",\n \"example\": \"Number of donuts eaten\",\n \"description\": \"Whole numbers greater than zero\",\n \"aggregation\": [\"sum\", \"avg\", \"max\"]\n }, {\n \"key\": \"int\",\n \"name\": \"Integer\",\n \"example\": \"Net calories\",\n \"description\": \"Whole numbers (can be negative as well)\",\n \"aggregation\": [\"sum\", \"avg\", \"max\"]\n }, {\n \"key\": \"flo\",\n \"name\": \"Float\",\n \"example\": \"Number of miles ran\",\n \"description\": \"Numbers with decimal points\",\n \"aggregation\": [\"sum\", \"avg\", \"max\"]\n }, {\n \"key\": \"tim\",\n \"name\": \"Time\",\n \"example\": \"Time I woke up\",\n \"description\": \"Time of day\",\n \"aggregation\": [\"sum\", \"avg\", \"max\"]\n }, {\n \"key\": \"dur\",\n \"name\": \"Duration\",\n \"example\": \"Length of time asleep\",\n \"description\": \"Duration in days/hours/minutes/seconds\",\n \"aggregation\": [\"sum\", \"avg\", \"max\"]\n }\n ],\n \"aggregation\": [\n {\n \"key\": \"sum\",\n \"name\": \"Sum\",\n \"description\": \"When charting this metric we will aggregate based on sum of measurements in a given time period\",\n \"example\": \"Times worked out (this month)\"\n }, {\n \"key\": \"avg\",\n \"name\": \"Average\",\n \"description\": \"When charting this metric we will aggregate by averaging measurements in a given time period\",\n \"example\": \"Average weight (this week)\"\n }, {\n \"key\": \"max\",\n \"name\": \"Maximum\",\n \"description\": \"When charting this metric we will chart the maximum measurement in a given time period\",\n \"example\": \"Pull up record (this year)\"\n }\n ]\n })","sub_path":"src/api/v1/views/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615845827","text":"import logging,bs4,os,time,datetime,smtplib,requests\nfrom selenium import webdriver\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s- %(message)s',filename='save_avi.log')\n\n\ndef main():\n print (\"Recording video...\")\n\n #\n\n #url =\"\"\n #url='https://livestream.com/accounts/1160789/events/8853888/player?width=800&height=450&enableInfoAndActivity=true&defaultDrawer=&autoPlay=true&mute=false'\n url = 
\"https://livestream.com/accounts/1160789/events/8853888/videos/197808333/player?width=640&height=360&enableInfo=true&defaultDrawer=&autoPlay=true&mute=false\"\n logging.info(url)\n\n try:\n ## selenium\n ### build silent driver\n os.chdir(os.path.abspath('.'))\n logging.info(os.getcwd())\n path=os.path.abspath('./chromeDriver/chromedriver')\n ### access site and get source\n logging.info(path)\n options = webdriver.ChromeOptions()\n options.add_argument('--ignore-certificate-errors')\n options.add_argument('--incognito')\n options.add_argument('--headless')\n logging.info(options)\n driver = webdriver.Chrome(path,options=options)\n logging.info(driver)\n\n driver.get(url)\n time.sleep(3)\n page_source = driver.page_source\n \n ## BS4\n #soup = bs4.BeautifulSoup(http_req.content, 'html.parser')\n soup = bs4.BeautifulSoup(page_source, 'lxml')\n\n print(soup)\n print('\\n\\n\\n\\n\\n')\n soupA = soup.find_all(text=True)\n print(soupA)\n print('\\n\\n\\n\\n\\n')\n \n except Exception as error:\n logging.critical(error)\n\n\n\n\n\n \n #r1 = requests.get(url, stream=True)\n #filename = \"stream.avi\"\n#\n #num=0\n #if(r1.status_code == 200):\n # with open(filename,'wb') as f:\n # for chunk in r1.iter_content(chunk_size=1024):\n # num += 1\n # f.write(chunk)\n # if num>5000:\n # print('end')\n # break\n#\n #else:\n # print(\"Received unexpected status code {}\".format(r.status_code))\n\nif __name__ == \"__main__\":\n main()","sub_path":"save_avi.py","file_name":"save_avi.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"466125376","text":"from django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.views.generic import TemplateView\nfrom django.urls import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom blog.models import Blogpost\nfrom blog.forms import BlogpostForm, UserSignupForm\nfrom blog.tasks import send_welcome_mail\n\n# Create your views here.\n\nclass BlogpostView(TemplateView):\n template_name = 'blog/index.html'\n\n def get(self, request):\n posts = Blogpost.objects.all().order_by('id')\n\n response = [{\n 'id': p.id,\n 'title': p.title,\n 'author': p.author,\n } for p in posts]\n\n return self.render_to_response({'posts': response})\n\n\nclass BlogpostDetailView(TemplateView):\n template_name = 'blog/detail.html'\n\n def get(self, request, id):\n try:\n p = Blogpost.objects.get(id=id)\n except Blogpost.DoesNotExist:\n raise Http404()\n else:\n context = {\n 'title': p.title,\n 'author': p.author,\n 'body': p.body,\n }\n\n return self.render_to_response(context)\n\n\nclass BlogpostCreateView(LoginRequiredMixin, TemplateView):\n template_name = 'blog/create.html'\n\n def get(self, request):\n form = BlogpostForm()\n return self.render_to_response({'form': form})\n\n def post(self, request):\n form = BlogpostForm(data=request.POST)\n if not form.is_valid():\n return self.render_to_response({'errors': form.errors})\n\n blogpost = form.save(commit=False)\n blogpost.user = request.user\n blogpost.save()\n\n return HttpResponseRedirect(reverse('posts-detail', kwargs={'id': blogpost.id}))\n\n\nclass BlogpostEditView(LoginRequiredMixin, TemplateView):\n template_name = 'blog/edit.html'\n\n def get(self, request, id):\n # Equivalent to executing Blogpost.objects.get(id=id)\n blogpost = get_object_or_404(Blogpost, id=id)\n\n if blogpost.user != request.user:\n raise Http404\n\n form = 
BlogpostForm(instance=blogpost)\n\n return self.render_to_response({'form': form, 'id': id})\n\n def post(self, request, id):\n blogpost = get_object_or_404(Blogpost, id=id)\n\n if blogpost.user != request.user:\n raise Http404\n\n form = BlogpostForm(data=request.POST, instance=blogpost)\n if not form.is_valid():\n return self.render_to_response({'errors': form.errors})\n\n blogpost = form.save()\n\n return HttpResponseRedirect(reverse('posts-detail', kwargs={'id': blogpost.id}))\n\n\nclass SignupView(TemplateView):\n template_name = 'blog/signup.html'\n\n def get(self, request):\n form = UserSignupForm()\n return self.render_to_response({'form': form})\n\n def post(self, request):\n form = UserSignupForm(data=request.POST)\n if not form.is_valid():\n return self.render_to_response({'form': form})\n\n user = form.save()\n\n send_welcome_mail.delay(user.email)\n\n return HttpResponseRedirect(reverse('posts'))\n","sub_path":"_/website/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"340752209","text":"# -*- coding: utf-8 -*-\n\n\"\"\"News syncing, merging, splitting, and uploading.\"\"\"\n\nimport argparse\nimport logging\nimport os.path\nimport sys\n\nfrom datetime import timedelta, timezone\nfrom pathlib import Path\nfrom shutil import rmtree\nfrom subprocess import run\nfrom time import sleep\n\nfrom pytility import parse_date\n\nfrom .merge import merge_files\nfrom .split import split_files\nfrom .utils import date_from_file, now\n\nLOGGER = logging.getLogger(__name__)\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\ndef update_news(\n s3_src, path_feeds, path_merged, path_split, s3_dst, split_size=None, log_level=None\n):\n \"\"\"News syncing, merging, splitting, and uploading.\"\"\"\n\n path_feeds = Path(path_feeds).resolve()\n path_merged = Path(path_merged).resolve()\n path_split = Path(path_split).resolve()\n\n LOGGER.info(\n \"Sync from <%s>, merge from <%s> into <%s>, split into <%s>, upload to <%s>\",\n s3_src,\n path_feeds,\n path_merged,\n path_split,\n s3_dst,\n )\n\n LOGGER.info(\"Deleting existing dir <%s>\", path_split.parent)\n rmtree(path_split.parent, ignore_errors=True)\n\n path_feeds.mkdir(parents=True, exist_ok=True)\n path_merged.parent.mkdir(parents=True, exist_ok=True)\n path_split.parent.mkdir(parents=True, exist_ok=True)\n\n LOGGER.info(\"S3 sync from <%s> to <%s>\", s3_src, path_feeds)\n run([\"aws\", \"s3\", \"sync\", s3_src, os.path.join(path_feeds, \"\")], check=True)\n\n merge_files(\n in_paths=path_feeds.rglob(\"*.jl\"),\n out_path=path_merged,\n keys=\"article_id\",\n key_types=\"string\",\n latest=(\"published_at\", \"scraped_at\"),\n latest_types=(\"date\", \"date\"),\n latest_required=True,\n sort_latest=True,\n sort_descending=True,\n concat_output=True,\n log_level=log_level,\n )\n\n split_files(\n path_in=path_merged, path_out=path_split, size=split_size, exclude_empty=True\n )\n\n LOGGER.info(\"S3 sync from <%s> to <%s>\", path_split.parent, s3_dst)\n run(\n [\n \"aws\",\n \"s3\",\n \"sync\",\n \"--acl\",\n \"public-read\",\n \"--exclude\",\n \".gitignore\",\n \"--exclude\",\n \".DS_Store\",\n \"--exclude\",\n \".bucket\",\n \"--size-only\",\n \"--delete\",\n os.path.join(path_split.parent, \"\"),\n s3_dst,\n ],\n check=True,\n )\n\n LOGGER.info(\"Done updating news.\")\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description=\"News syncing, merging, splitting, and uploading.\"\n )\n 
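# --- Editor's note: self-contained sketch of the repeatable -v flag defined just below:
# action="count" tallies how many times the flag occurs, and the tally picks the log
# level, which is exactly the wiring main() applies later in this record.
import argparse, logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args(['-vv'])        # simulate "prog -vv"
logging.basicConfig(level=logging.DEBUG if args.verbose > 0 else logging.INFO)
logging.getLogger(__name__).debug('shown because -v was given %d times', args.verbose)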
parser.add_argument(\n \"--src-bucket\",\n \"-b\",\n default=\"scrape.news.recommend.games\",\n help=\"S3 bucket with scraped data\",\n )\n parser.add_argument(\n \"--dst-bucket\",\n \"-B\",\n default=\"news.recommend.games\",\n help=\"S3 bucket to upload to\",\n )\n parser.add_argument(\n \"--feeds\", \"-f\", default=BASE_DIR / \"feeds\" / \"news\", help=\"Scraped items\"\n )\n parser.add_argument(\n \"--merged\",\n \"-m\",\n default=BASE_DIR / \"feeds\" / \"news_merged.jl\",\n help=\"Merged file\",\n )\n parser.add_argument(\n \"--split\",\n \"-s\",\n default=BASE_DIR / \"feeds\" / \"news_hosting\" / \"news_{number:05d}.json\",\n help=\"Split file template\",\n )\n parser.add_argument(\n \"--split-size\",\n \"-S\",\n type=int,\n default=25,\n help=\"number of items in each result file\",\n )\n parser.add_argument(\n \"--dont-run-before\", \"-d\", help=\"Either a date or a file with date information\"\n )\n parser.add_argument(\n \"--interval\",\n \"-i\",\n type=int,\n default=10 * 60, # 10 minutes\n help=\"number of seconds to wait before next run\",\n )\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"count\",\n default=0,\n help=\"log level (repeat for more verbosity)\",\n )\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Command line entry point.\"\"\"\n\n args = _parse_args()\n\n logging.basicConfig(\n stream=sys.stderr,\n level=logging.DEBUG if args.verbose > 0 else logging.INFO,\n format=\"%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)s] %(message)s\",\n )\n\n LOGGER.info(args)\n\n dont_run_before = parse_date(\n args.dont_run_before, tzinfo=timezone.utc\n ) or date_from_file(args.dont_run_before, tzinfo=timezone.utc)\n\n if dont_run_before:\n LOGGER.info(\"Don't run before %s\", dont_run_before.isoformat())\n sleep_seconds = dont_run_before.timestamp() - now().timestamp()\n if sleep_seconds > 0:\n LOGGER.info(\"Going to sleep for %.1f seconds\", sleep_seconds)\n sleep(sleep_seconds)\n\n if args.interval and args.dont_run_before and not parse_date(args.dont_run_before):\n dont_run_before = now() + timedelta(seconds=args.interval)\n LOGGER.info(\n \"Don't run next time before %s, writing tag to <%s>\",\n dont_run_before.isoformat(),\n args.dont_run_before,\n )\n with open(args.dont_run_before, \"w\") as file_obj:\n file_obj.write(dont_run_before.isoformat())\n\n update_news(\n s3_src=f\"s3://{args.src_bucket}/\",\n path_feeds=args.feeds,\n path_merged=args.merged,\n path_split=args.split,\n s3_dst=f\"s3://{args.dst_bucket}/\",\n split_size=args.split_size,\n log_level=\"DEBUG\"\n if args.verbose > 1\n else \"INFO\"\n if args.verbose > 0\n else \"WARN\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"board_game_scraper/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237591678","text":"# Routines used for building cubes\nfrom __future__ import absolute_import, print_function\n\nimport sys\nimport time\nimport numpy as np\nimport math\nimport json\nimport os\n\n#from astropy.io import fits\nfrom ..associations import load_asn\nfrom .. 
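# --- Editor's note: stdlib-only sketch of the "don't run before" tag file that main()
# above reads and rewrites: the finishing run records the next allowed start time, the
# next run reads it back and sleeps out any remainder. The file name is illustrative.
from datetime import datetime, timedelta, timezone
import pathlib, time

tag = pathlib.Path('dont_run_before.txt')
tag.write_text((datetime.now(timezone.utc) + timedelta(seconds=2)).isoformat())

not_before = datetime.fromisoformat(tag.read_text())
wait = (not_before - datetime.now(timezone.utc)).total_seconds()
if wait > 0:
    time.sleep(wait)
print('proceeding at', datetime.now(timezone.utc).isoformat())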
import datamodels\n\n\nimport logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n#********************************************************************************\nclass DataTypes(object):\n#********************************************************************************\n\n \"\"\"\n Class to handle reading the input to the processing, which\n can be a single science exposure or an IFU cube association table.\n The input and output member info is loaded into an ASN table model.\n \"\"\"\n\n template = {\"asn_rule\": \"\",\n \"target\": \"\",\n \"asn_pool\": \"\",\n \"asn_type\": \"\",\n \"products\": [\n {\"name\": \"\",\n \"members\": [\n {\"exptype\": \"\",\n \"expname\": \"\"}\n ]\n }\n ]\n }\n\n def __init__(self, input,single,output_file,output_dir):\n\n self.input_models = []\n self.filenames = []\n self.output_name = None\n self.data_type = None # singleton, multi\n self.input_type = None # Model, File, ASN, Container\n\n # IF a single model or a single file is passed in then\n # self.filename & self.input_model hold the values for this singe dataset\n self.InputType = ''\n if isinstance(input, datamodels.IFUImageModel):\n# print('this is a single file passed as a Model')\n # It's a single image that's been passed in as a model\n # input is a model\n self.filenames.append(input.meta.filename)\n self.input_models.append(input)\n self.input_type = 'Model'\n self.data_type = 'singleton'\n self.output_name = self.build_product_name(self.filenames[0])\n\n elif isinstance(input,datamodels.ModelContainer):\n# print('this is a model container type')\n self.input_type='Container'\n self.data_type = 'multi'\n self.output_name = 'Temp'\n if not single: # find the name of the output file from the association\n with datamodels.ModelContainer(input) as input_model:\n self.output_name =input_model.meta.asn_table.products[0].name\n\n for i in range(len(input)):\n model = input[i]\n self.input_models.append(model)\n self.filenames.append(model.meta.filename)\n# print('number of models',len(self.filenames))\n\n elif isinstance(input, str):\n try:\n # The name of an association table\n # for associations - use Association.load\n # in cube_build_io.SetFileTable - set up:\n # input_model & filename lists\n iproduct = 0 # only one product found in association table\n with open(input, 'r') as input_fh:\n# print('read in association table')\n asn_table = load_asn(input_fh)\n self.input_type = 'ASN'\n self.data_type = 'multi'\n self.output_name = asn_table['products'][0]['name']\n for m in asn_table['products'][iproduct]['members']:\n self.filenames.append(m['expname'])\n self.input_models.append(datamodels.IFUImageModel(m['expname']))\n except:\n # The name of a single image file\n# print(' this is a single file read in filename')\n self.input_type = 'File'\n self.data_type = 'singleton'\n self.filenames.append(input)\n self.input_models.append(datamodels.IFUImageModel(input))\n self.output_name = self.build_product_name(self.filenames[0])\n\n else:\n raise TypeError\n\n# if the user has set the output name - strip out *.fits\n# later suffixes will be added to this name to designate the\n# channel, subchannel or grating,filter the data is covers.\n\n if output_file !=None :\n basename,ext = os.path.splitext(os.path.basename(output_file))\n# print('basename',basename)\n# root, ext = os.path.splitext(output_file)\n# default = root.find('cube_build') # the user has not provided a name\n self.output_name = basename\n\n\n if output_dir !=None :\n self.output_name= output_dir + '/' + 
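# A small sketch (not part of the original file) of the basename/suffix
# handling used just above: strip the directory and the ".fits"-style
# extension so later channel/grating/filter suffixes can be appended.
# The sample path is hypothetical.
import os

def output_stem(path):
    base, _ext = os.path.splitext(os.path.basename(path))
    return base

assert output_stem("/tmp/cube_build_out.fits") == "cube_build_out"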
self.output_name\n\n def build_product_name(self, filename):\n indx = filename.rfind('.fits')\n indx_try = filename.rfind('_rate.fits') # standard expected filename in CalSpec2\n indx_try2 = filename.rfind('_cal.fits') # standard expected filename\n\n\n if indx_try > 0:\n single_product = filename[:indx_try]\n elif indx_try2 > 0:\n single_product = filename[:indx_try2]\n else:\n single_product = filename[:indx]\n return single_product\n\n\n\n\n\n# TODO: Routines not used below - saved just in case we need them later - if not\n# remove.\n\n def interpret_image_model(self, model):\n \"\"\" Interpret image model as single member association data product.\n Currently this routien is not used by cube_build - it was left\n if needed down the road\n \"\"\"\n\n # An in-memory ImageModel for a single exposure was provided as input\n self.asn_table = self.template\n self.asn_table['target'] = model.meta.target.catalog_name\n self.asn_table['asn_rule'] = 'singleton'\n self.asn_table['asn_type'] = 'singleton'\n self.asn_table['products'][0]['name'] = self.build_product_name(self.filenames[0])\n self.rootname = self.filename[:self.filename.rfind('_')]\n self.asn_table['products'][0]['members'][0]['expname'] = self.filenames[0]\n\n def get_inputs(self, product=0):\n members = []\n for p in self.asn_table['products'][product]['members']:\n members.append(p['expname'])\n return members\n def get_outputs(self, product=0):\n return self.asn_table['products'][product]['name']\n\n\n","sub_path":"jwst/cube_build/data_types.py","file_name":"data_types.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"306415893","text":"def conversion(writePath, asciiString, halt = -1):\n\twith open(writePath, opentype) as writeFile:\n\t\tbyteString = bytearray()\n\t\tbinaryCount = 0\n\t\tnonbinaryCount = 0\n\t\thaltSatisfy = False\n\t\t\n\t\tfor asciiLetter in asciiString:\n\t\t\tif binaryCount >= halt > 0:\n\t\t\t\thaltSatisfy = True\n\t\t\t\tbreak\n\t\t\tif asciiLetter == '0':\n\t\t\t\tbyteString.append(0)\n\t\t\t\tbinaryCount += 1\n\t\t\telif asciiLetter == '1':\n\t\t\t\tbyteString.append(1)\n\t\t\t\tbinaryCount += 1\n\t\t\telse:\n\t\t\t\tnonbinaryCount += 1\n\t\t\t\t\n\t\twriteFile.write(byteString)\n\t\t\n\treturn haltSatisfy\n","sub_path":"ASCII_to_binary.py","file_name":"ASCII_to_binary.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"371159102","text":"import mysql.connector\r\nfrom mysql.connector import Error\r\n\r\ndef checkTableExists(dbcon, tablename):\r\n dbcur = dbcon.cursor()\r\n dbcur.execute(\"\"\"\r\n SELECT COUNT(*)\r\n FROM information_schema.tables\r\n WHERE table_name = '{0}'\r\n \"\"\".format(tablename.replace('\\'', '\\'\\'')))\r\n if dbcur.fetchone()[0] == 1:\r\n dbcur.close()\r\n return True\r\n\r\n dbcur.close()\r\n return False\r\n\r\ndef checkRowExists(dbcon, tablename, primary_key):\r\n dbcur = dbcon.cursor()\r\n dbcur.execute(\"\"\"\r\n SELECT COUNT(*)\r\n FROM \"\"\"+tablename+\"\"\"\r\n WHERE ID=\"\"\"+primary_key)\r\n if dbcur.fetchone()[0] == 1:\r\n dbcur.close()\r\n return True\r\n\r\n dbcur.close()\r\n return False\r\n\r\n\r\ntry:\r\n connection = mysql.connector.connect(host='localhost', database='for_python', user='python', password='python123')\r\n if connection.is_connected():\r\n cursor = connection.cursor()\r\n #test for table existed\r\n if checkTableExists(connection, 
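# A hedged sketch (not from the original script) of the same existence
# check with a parameterized query: letting the driver bind values avoids
# the manual quoting and SQL-injection risk of the concatenated statements
# used in this record. The connection is assumed to be mysql.connector's,
# as in the surrounding code.
def row_exists(connection, student_id):
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT COUNT(*) FROM Student_info WHERE ID = %s", (student_id,))
        return cursor.fetchone()[0] == 1
    finally:
        cursor.close()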
\"Student_info\"):\r\n SQL = \"drop table Student_info;\"\r\n result = cursor.execute(SQL)\r\n print(\"The table has already existed, drop table.\")\r\n SQL = \"create table Student_info(Name varchar(10), ID varchar(10) primary key, Class_num varchar(1), Score int(3));\"\r\n result = cursor.execute(SQL)\r\n record = cursor.fetchone() \r\n print(\"Create table successfully!\")\r\n \r\n list1=['Amy', '1010001', '1', '87']\r\n list2=['Billy', '1010002', '1', '85']\r\n list3=['Cindy', '1010003', '1', '92']\r\n lists=[list1, list2, list3]\r\n print(lists)\r\n for i in range(len(lists)):\r\n if checkRowExists(connection, \"Student_info\", lists[i][1]):\r\n print(\"Record exists!\")\r\n else:\r\n SQL = \"Insert into Student_info values(\\\"\" + lists[i][0] + \" \\\",\"+lists[i][1]+\",\\\"\"+lists[i][2]+\"\\\",\"+lists[i][3]+\");\"\r\n print(SQL)\r\n result = cursor.execute(SQL)\r\n connection.commit()\r\n #record = cursor.fetchone() \r\n print(\"Insert lists\" + str(i) + \" into table successfully!\")\r\n \r\n if checkRowExists(connection,\"Student_info\",list1[1]):\r\n print(\"Record exists!\")\r\n else:\r\n SQL = \"Insert into Student_info values(\\\"\" + list1[0] + \"\\\",\"+list1[1]+\",\\\"\"+list1[2]+\"\\\",\"+list1[3]+\");\"\r\n print(SQL)\r\n result = cursor.execute(SQL)\r\n connection.commit()\r\n #record = cursor.fetchone() \r\n print(\"Insert into table successfully!\")\r\nexcept Error as e:\r\n print(\"Error while connecting to MySQL\", e)\r\nfinally:\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n print(\"MySQL connection is closed!\")\r\n","sub_path":"NCHU/HW_test.py","file_name":"HW_test.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590574506","text":"import pygame, sys\nfrom pygame import mixer\n\n# inicializando el pygame\npygame.init()\n# titulo de pantalla\npygame.display.set_caption(\"Find Me\")\n# contienen el tamaño de la pantalla\nglobal display_width\ndisplay_width = 1240\nglobal display_height\ndisplay_height = 900\n# creacion de la pantalla de juego\nglobal screen\nscreen = pygame.display.set_mode((display_width,display_height))\n# si quiero full screen asi es\n#screen = pygame.display.set_mode((display_width,display_height), pygame.FULLSCREEN)\n# reloj\nclock = pygame.time.Clock()\n# boleano que maneja el inicio y fin del juego\ngame_on = True\n\n# Icono\nicon = pygame.image.load('images/lupa-32x32.png')\npygame.display.set_icon(icon)\n# fondo pantalla\nbackground = pygame.image.load('images/Background.png')\n# Musica\nmixer.music.load('Music/muse_algorithm.wav')\nmixer.music.play(-1)\n\nglobal mouse\nmouse = pygame.mouse.get_pos()\n\n\nclick_derecho = pygame.mouse.get_pressed()\n\n\ndef dibujo_el_fondo():\n screen.blit(background, (0,0))\n\n\nscore_actual = int(0)\ndef score():\n global score_actual\n score_actual = score_actual + 1\n\nfont_size= 32\nfont = pygame.font.Font(\"fonts/Carnevalee Freakshow.ttf\", font_size)\n\ntextX= 1070\ntextY= 10\nblack_text=(0,0,0)\n#funcion que muestra el score en pantalla\ndef show_score(x,y):\n scores = font.render(\"Score: \" + str(score_actual), True, black_text)\n screen.blit(scores, (x,y))\n\nobjetos= [ True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]\ndef enable_object_0(): \n global objetos\n if(objetos[0] == True):\n objetos[0] = False\n if(objetos[0] == False):\n return objetos[0]\ndef enable_object_1(): \n if(objetos[1] == True):\n objetos[1] = 
False\n if(objetos[1] == False):\n return objetos[1]\ndef enable_object_2(): \n if(objetos[2] == True):\n objetos[2] = False\n if(objetos[2] == False):\n return objetos[2]\ndef enable_object_3(): \n if(objetos[3] == True):\n objetos[3] = False\n if(objetos[3] == False):\n return objetos[3]\ndef enable_object_4(): \n if(objetos[4] == True):\n objetos[4] = False\n if(objetos[4] == False):\n return objetos[4]\ndef enable_object_5(): \n if(objetos[5] == True):\n objetos[5] = False\n if(objetos[5] == False):\n return objetos[5]\ndef enable_object_6():\n if(objetos[6] == True):\n objetos[6] = False\n if(objetos[6] == False):\n return objetos[6]\ndef enable_object_7():\n if(objetos[7] == True):\n objetos[7] = False\n if(objetos[7] == False):\n return objetos[7]\ndef enable_object_8():\n if(objetos[8] == True):\n objetos[8] = False\n if(objetos[8] == False):\n return objetos[8]\ndef enable_object_9():\n if(objetos[9] == True):\n objetos[9] = False\n if(objetos[9] == False):\n return objetos[9]\ndef enable_object_10():\n if(objetos[10] == True):\n objetos[10] = False\n if(objetos[10] == False):\n return objetos[10]\ndef enable_object_11():\n if(objetos[11] == True):\n objetos[11] = False\n if(objetos[11] == False):\n return objetos[11]\ndef enable_object_12():\n if(objetos[12] == True):\n objetos[12] = False\n if(objetos[12] == False):\n return objetos[12]\ndef enable_object_13():\n if(objetos[13] == True):\n objetos[13] = False\n if(objetos[13] == False):\n return objetos[13]\ndef enable_object_14():\n if(objetos[14] == True):\n objetos[14] = False\n if(objetos[14] == False):\n return objetos[14]\n\n\nfont_size_win= 37\ntextX_win = 120\ntextY_win = 430\nblack_text_win=(0,0,0)\nfont_win = pygame.font.Font(\"fonts/SuperMario256.ttf\", font_size_win)\ndef show_win_msg(x,y):\n win_msg = font_win.render(\"Has encontrado los 15 Objetos, Felicitaciones! 
\", True, black_text_win)\n screen.blit(win_msg, (x,y))\n\n \ndef you_win(value):\n if(int(value) == 15):\n show_win_msg(textX_win,textY_win)\n\n \nsituar_imagen_X=[40,160,259,367,457,532,608,690,710,820,900,980,980,1076,1158]\nsituar_imagen_Y=[770,770,770,770,770,770,770,740,790,770,770,720,810,770,780]\ndef situar_imagen(x,y):\n imagen_situada = pygame.image.load('images/marcador_objetos.png') \n screen.blit(imagen_situada, (x,y))\n\n \n \nimagenes_X= [1031, 1234, 389, 472, 429,480,674,777,550,596,747,804,727,757,10,66,149,243,751,844,590,653,996,1045,410,471,1080,1148,789,823]\nimagenes_Y= [558, 735, 133,223, 226,308, 476,521,430,451,136,198,285,330,612,637,663,707,566,638,126,223,421,466,427,456,302,349,18,47]\ndef score_up():\n global imagenes_X\n global imagenes_Y\n show_score(textX,textY)\n whereImI = pygame.mouse.get_pos()\n for event in pygame.event.get():\n # si el evento registrado es un click en la zona, suma 1\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[0] and whereImI[1] <= imagenes_Y[1] and whereImI[0] >= imagenes_X[0] and whereImI[0] <= imagenes_X[1]):\n situar_imagen(situar_imagen_X[0],situar_imagen_Y[0])\n if(objetos[0] == True):\n enable_object_0() \n score()\n you_win(score_actual) \n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[2] and whereImI[1] <= imagenes_Y[3] and whereImI[0] >= imagenes_X[2] and whereImI[0] <= imagenes_X[3]): \n if(objetos[1] == True):\n enable_object_1() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[4] and whereImI[1] <= imagenes_Y[5] and whereImI[0] >= imagenes_X[4] and whereImI[0] <= imagenes_X[5]): \n if(objetos[2] == True):\n enable_object_2() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[6] and whereImI[1] <= imagenes_Y[7] and whereImI[0] >= imagenes_X[6] and whereImI[0] <= imagenes_X[7]): \n if(objetos[3] == True):\n enable_object_3() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[8] and whereImI[1] <= imagenes_Y[9] and whereImI[0] >= imagenes_X[8] and whereImI[0] <= imagenes_X[9]): \n if(objetos[4] == True):\n enable_object_4() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[10] and whereImI[1] <= imagenes_Y[11] and whereImI[0] >= imagenes_X[10] and whereImI[0] <= imagenes_X[11]): \n if(objetos[5] == True):\n enable_object_5() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[12] and whereImI[1] <= imagenes_Y[13] and whereImI[0] >= imagenes_X[12] and whereImI[0] <= imagenes_X[13]): \n if(objetos[6] == True):\n enable_object_6() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[14] and whereImI[1] <= imagenes_Y[15] and whereImI[0] >= imagenes_X[14] and whereImI[0] <= imagenes_X[15]): \n if(objetos[7] == True):\n enable_object_7() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[16] and whereImI[1] <= imagenes_Y[17] and whereImI[0] >= imagenes_X[16] and whereImI[0] <= imagenes_X[17]): \n if(objetos[8] == True):\n enable_object_8() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[18] and whereImI[1] <= imagenes_Y[19] and whereImI[0] >= imagenes_X[18] and whereImI[0] <= imagenes_X[19]): \n if(objetos[9] == True):\n 
enable_object_9() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[20] and whereImI[1] <= imagenes_Y[21] and whereImI[0] >= imagenes_X[20] and whereImI[0] <= imagenes_X[21]): \n if(objetos[10] == True):\n enable_object_10() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[22] and whereImI[1] <= imagenes_Y[23] and whereImI[0] >= imagenes_X[22] and whereImI[0] <= imagenes_X[23]): \n if(objetos[11] == True):\n enable_object_11() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[24] and whereImI[1] <= imagenes_Y[25] and whereImI[0] >= imagenes_X[24] and whereImI[0] <= imagenes_X[25]): \n if(objetos[12] == True):\n enable_object_12() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[26] and whereImI[1] <= imagenes_Y[27] and whereImI[0] >= imagenes_X[26] and whereImI[0] <= imagenes_X[27]): \n if(objetos[13] == True):\n enable_object_13() \n score()\n you_win(score_actual)\n if (event.type == pygame.MOUSEBUTTONDOWN and whereImI[1] >= imagenes_Y[28] and whereImI[1] <= imagenes_Y[29] and whereImI[0] >= imagenes_X[28] and whereImI[0] <= imagenes_X[29]): \n if(objetos[14] == True):\n enable_object_14() \n score()\n you_win(score_actual) \n else:\n if event.type == pygame.QUIT:\n game_on = False #game_off\n pygame.quit()\n sys.exit()\n\n if(int(score_actual) == 15):\n show_win_msg(textX_win,textY_win)\n mixer.music.fadeout(5000)\n if(objetos[0] == False):\n situar_imagen(situar_imagen_X[0],situar_imagen_Y[0])\n if(objetos[1] == False):\n situar_imagen(situar_imagen_X[1],situar_imagen_Y[1])\n if(objetos[2] == False):\n situar_imagen(situar_imagen_X[2],situar_imagen_Y[2])\n if(objetos[3] == False):\n situar_imagen(situar_imagen_X[3],situar_imagen_Y[3])\n if(objetos[4] == False):\n situar_imagen(situar_imagen_X[4],situar_imagen_Y[4])\n if(objetos[5] == False):\n situar_imagen(situar_imagen_X[5],situar_imagen_Y[5])\n if(objetos[6] == False):\n situar_imagen(situar_imagen_X[6],situar_imagen_Y[6])\n if(objetos[7] == False):\n situar_imagen(situar_imagen_X[7],situar_imagen_Y[7])\n if(objetos[8] == False):\n situar_imagen(situar_imagen_X[8],situar_imagen_Y[8])\n if(objetos[9] == False):\n situar_imagen(situar_imagen_X[9],situar_imagen_Y[9])\n if(objetos[10] == False):\n situar_imagen(situar_imagen_X[10],situar_imagen_Y[10])\n if(objetos[11] == False):\n situar_imagen(situar_imagen_X[11],situar_imagen_Y[11])\n if(objetos[12] == False):\n situar_imagen(situar_imagen_X[12],situar_imagen_Y[12])\n if(objetos[13] == False):\n situar_imagen(situar_imagen_X[13],situar_imagen_Y[13])\n if(objetos[14] == False):\n situar_imagen(situar_imagen_X[14],situar_imagen_Y[14])\n \n \n\nwhile game_on:\n dibujo_el_fondo()\n score_up()\n pygame.display.flip()\n \n\n \n","sub_path":"Juego/FindMe.py","file_name":"FindMe.py","file_ext":"py","file_size_in_byte":11989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369004189","text":"article ='''\r\nBig data analytics and business analytics\r\nby Duan, Lian; Xiong, Ye\r\nOver the past few decades, with the development of automatic identification, data capture and storage technologies, \r\npeople generate data much faster and collect data much bigger than ever before in business, science, engineering, education and other areas. 
\r\nBig data has emerged as an important area of study for both practitioners and researchers. \r\nIt has huge impacts on data-related problems. \r\nIn this paper, we identify the key issues related to big data analytics and then investigate its applications specifically related to business problems.\r\n'''\r\n\r\nsplit = article.split()\r\nprint(split)\r\n\r\n#使用空格替换标点符号\r\narticle = article.replace(\",\",\"\").replace(\".\",\"\").replace(\":\",\"\").replace(\";\",\"\").replace(\"?\",\"\")\r\n\r\n\r\n#大写字母转换成小写字母\r\nexchange = article.lower();\r\nprint(exchange)\r\n\r\n#生成单词列表\r\nlist = exchange.split()\r\nprint(list)\r\n\r\n#生成词频统计\r\ndic = {}\r\nfor i in list:\r\n count = list.count(i)\r\n dic[i] = count\r\nprint(dic)\r\n\r\n#排除特定单词\r\nword = {'and','the','with','in','by','its','for','of','an','to'}\r\nfor i in word:\r\n del(dic[i])\r\nprint(dic)\r\n\r\n#排序\r\ndic1= sorted(dic.items(),key=lambda d:d[1],reverse= True)\r\nprint(dic1)\r\n\r\n#输出词频最大的前十位单词\r\nfor i in range(10):\r\n print(dic1[i])\r\n","sub_path":"WCJ165.py","file_name":"WCJ165.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40360671","text":"def cut_fruits(fruits):\r\n ret = []\r\n for item in fruits:\r\n if item in FRUIT_NAMES:\r\n halfway = len(item) // 2\r\n if len(item) % 2 == 1:\r\n halfway += 1\r\n part1, part2 = item[:halfway], item[halfway:]\r\n ret.append(part1)\r\n ret.append(part2)\r\n else:\r\n ret.append(item)\r\n return ret","sub_path":"7-kyu/i-guess-this-is-a-7kyu-kata-#6--fruit-ninja-i/python/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121965515","text":"# -*- coding: utf-8 -*-\n\n# ######################################################\n# File Name : test_question_info.py\n# Description : 测试爬取的问题信息是否正确\n# Author : Frank\n# Date : 2014.06.21\n# ######################################################\n\nimport os\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nrootPath = os.path.abspath('../')\nsys.path.append(rootPath)\nfrom codes import contentParse # 导入上级目录中的库\n\nfrom correct_answer_dict import checkList\n\nimport unittest\n\n\nclass MyTestCase(unittest.TestCase):\n u\"\"\"\n 先检测questionInfoDict\n \"\"\"\n def setUp(self):\n content = open(\"../htmlFile/questionContent/question1.html\", \"r\").read()\n testQuestion = contentParse.ParseQuestion(content)\n self.questionInfoDictList, self.answerDictList = testQuestion.getInfoDict()\n\n def failedTips(self, testedKey, index):\n u\"\"\"\n 测试失败时,输出提示语句\n :param testedkey:\n :param correctValue:\n :return tipString:\n \"\"\"\n testedKey = testedKey.encode(\"utf-8\")\n correctValue = checkList[index][testedKey]\n tipString = u\"\"\"\\n\n {testedKey} parse error\n self.answerDictList[{index}]['{testedKey}'] should equal {correctValue}\n but the self.answerDictList[{index}]['{testedKey}'] = {errorValue}\n \"\"\".encode(\"utf-8\").format(testedKey=testedKey, index=index,\n correctValue=correctValue, errorValue=self.answerDictList[index][testedKey])\n return tipString\n\n def test_answer_authorSign(self):\n u\"\"\"\n 测试用户签名的用例\n :return:\n \"\"\"\n index = 0\n for answer in self.answerDictList:\n self.assertEqual(answer[\"authorSign\"],\n checkList[index][\"authorSign\"],\n self.failedTips(\"authorSign\", index))\n index += 1\n\nif __name__ == '__main__':\n 
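# The word-frequency record above counts with list.count inside a loop
# (quadratic); collections.Counter does one pass. A small sketch, not part
# of either original file, with a stop-word set like the one used there:
from collections import Counter

def top_words(text, stop_words, n=10):
    counts = Counter(w for w in text.lower().split() if w not in stop_words)
    return counts.most_common(n)

print(top_words("big data and big ideas", {"and"}, n=2))  # [('big', 2), ('data', 1)]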
unittest.main()\n","sub_path":"unit/contentParseTest/test_question_info.py","file_name":"test_question_info.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"219560114","text":"import fresh_tomatoes\nimport media\n\n# Movie list including title, storyline, poster image,\n# YouTube trailer and release date\nblade_runner = media.Movie(\"Blade Runner\",\n \"Ö äußerst A dystopian future where a clone hunter \"\n \"faces his biggest challenge.\",\n \"https://upload.wikimedia.org/wikipedia/en/5/53/Blade_Runner_poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=4lW0F1sccqk\",\n \"1982\")\n\ndonnie_darko = media.Movie(\"Donnie Darko\",\n \"After an accident a disturbed teenager \"\n \"explores time, reality, and love.\",\n \"https://upload.wikimedia.org/wikipedia/en/d/db/Donnie_Darko_poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=ZZyBaFYFySk\",\n \"2001\")\n\nharold_maude = media.Movie(\"Harold and Maude\",\n \"A death obsessed teenager unexpectedly \"\n \"falls in love.\",\n \"https://upload.wikimedia.org/wikipedia/en/5/5f/Harold_and_Maude_%281971_film%29_poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=5mz3TkxJhPc\",\n \"1973\")\n\nnightmare_christmas = media.Movie(\"Nightmare Before Christmas\",\n \"Residents of spooky Halloweentown \"\n \"learn about Christmas.\",\n \"https://upload.wikimedia.org/wikipedia/en/9/9a/The_nightmare_before_christmas_poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=8qrB9I3DM80\", # NOQA\n \"1993\")\n\nprincess_bride = media.Movie(\"The Princess Bride\",\n \"An adventure story with pirates, a mythical \"\n \"kingdom, and true love.\",\n \"https://upload.wikimedia.org/wikipedia/en/d/db/Princess_bride.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=VYgcrny2hRs\",\n \"1987\")\n\nthe_hunger = media.Movie(\"The Hunger\",\n \"A story of ancient vampire love set in \"\n \"modern times.\",\n \"https://upload.wikimedia.org/wikipedia/en/d/d6/The_Hunger_film_poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=l9IDoAPC6Ps\",\n \"1983\")\n\n# Stored list of movies\nmovies = [blade_runner, donnie_darko, harold_maude,\n nightmare_christmas, princess_bride, the_hunger]\n\n# Converts movies to tiles and loads to page\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"Movie Trailers/entertainment_test.py","file_name":"entertainment_test.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"537638281","text":"import re\r\nimport csv\r\nanti={}\r\nc1=open(\"D:\\\\研究生\\\\tRNAseq\\\\GSM16248\\\\GSM1624821_TP4_TGACCA_L005_R1_001.txt\",'r+')\r\nfor line in c1:\r\n line=line.strip('\\n')\r\n s=re.match(r\".*-.*(\\w\\w\\w)\\s(\\d*)\",line)\r\n if s:\r\n codon=s.group(1)\r\n num=s.group(2)\r\n num=eval(num)\r\n else:\r\n continue\r\n anti.setdefault(codon,0)\r\n if anti[codon]:\r\n\r\n anti[codon]=anti[codon]+num\r\n else:\r\n\r\n anti[codon]=num\r\nwith open(\"D:\\\\研究生\\\\tRNAseq\\\\GSM16248\\\\merge21.csv\",'w+',newline='') as out1:\r\n csvwriter=csv.writer(out1)\r\n csvwriter.writerow(['anti_codon','num'])\r\n Santi=sorted(anti.items(),key=lambda x:x[1],reverse=True)\r\n for a in range(len(Santi)):\r\n 
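# A sketch (not the original script) of the codon tally above with
# collections.defaultdict, which replaces the setdefault-plus-branch
# bookkeeping with a single `+=`; sorting by count before writing the CSV
# rows stays the same.
from collections import defaultdict

def tally(pairs):
    counts = defaultdict(int)
    for codon, num in pairs:
        counts[codon] += num
    return sorted(counts.items(), key=lambda kv: kv[1], reverse=True)

assert tally([("TGA", 2), ("CCA", 1), ("TGA", 3)])[0] == ("TGA", 5)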
csvwriter.writerow([Santi[a][0],Santi[a][1]])\r\nc1.close()\r\nout1.close()","sub_path":"tRNAseq_processing/tRNA_merge.py","file_name":"tRNA_merge.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"598489110","text":"\"\"\"Hello resource routes.\"\"\"\n\n__author__ = 'neal'\n\nfrom flask import Blueprint, request\n\nfrom app.api import greetings\n\nHello = Blueprint('Hello', __name__)\n\n@Hello.route('/hello', defaults={'name': 'world'}, methods=['GET', 'POST'])\n@Hello.route('/hello/', methods=['GET', 'POST'])\ndef greet(name):\n\treturn {\n\t\t'GET': greetings.greet,\n\t\t'POST': greetings.set_greeting\n\t}[request.method](name)","sub_path":"app/routes/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"175539070","text":"import config\nimport sys\nimport os\nimport logging\nimport logging.config\nimport time\nimport gdax\nimport json\nfrom websocket_api.websocket_base import WebsocketClient_GDAX as wcg\nfrom concurrent.futures import ThreadPoolExecutor, wait\n\n\nclass Triangular():\n #loggerList = []\n \n def __init__(self):\n self.observers = []\n self.public_market = ''\n self.symbols = config.symbols\n self.bookData = []\n self.loggerList = []\n self.init_logger()\n self.threadpool = ThreadPoolExecutor(max_workers=1) # number of symbols\n self.init_observers(config.observers)\n self.init_market(config.market, config.sec_markets, config.socket)\n self.loggerList[0].info('Utilizing {} symbols'.format(config.symbols))\n self.use_primary_market = True\n self.time_awaited = 0\n\n def init_observers(self, _observers):\n if config.demo_mode:\n try:\n self.loggerList[0].info('Initializing demo bot ...')\n exec('import observers.logger')\n observer = eval('observers.logger.Logger()')\n self.observers.append(observer)\n self.loggerList[0].info('Finished initializing demo bot.')\n except(ImportError, AttributeError) as e:\n self.loggerList[0].error('Couldn\\'t initialize demo bot. Are you missing files?')\n self.loggerList[0].error('Error message: {}'.format(e))\n else:\n for observer_name in _observers: \n self.loggerList[0].info('Initializing production bots ...')\n try:\n exec('import observers.' + observer_name.lower())\n observer = eval('observers.' + observer_name.lower() + '.' + observer_name + '()')\n self.observers.append(observer)\n self.loggerList[0].info('Finished initializing bot: {}'.format(observer_name))\n except(ImportError, AttributeError) as e:\n self.loggerList[0].error('%s observer name is invalid. Please verify config file.' % observer_name) \n self.loggerList[0].error('Error message: {}'.format(e)) \n\n def init_market(self, market, secondary_markets, socket=\"\"):\n self.loggerList[0].info('Launching public markets ...')\n self.market = []\n try:\n self.loggerList[0].info(\"Starting public socket\")\n #exec('import websocket_api.websocket_base')\n #ws_client = eval('websocket_api.websocket_base.WebsocketClient_GDAX(url=\"wss://ws-feed.gdax.com\", products=\"BTC-EUR\")')\n\n # add main websocket support\n self.market.append(wcg(url=\"wss://ws-feed.gdax.com\", products=\"BTC-EUR\", channels=[\"level2\"]))\n self.market[0].__init_logger__(self.loggerList[0])\n self.market[0].max_amount = config.max_amount\n\n #self.market.append(ws_client)\n #self.market.append(gdax.OrderBook(\"BTC-EUR\"))\n #self.market.\n return\n \n exec('import public_markets.' 
+ market.lower() + ', public_markets.cryptowatch')\n\n # import market\n main = eval('public_markets.' + market.lower() + '.' + market + '(self.loggerList)')\n self.market.append(main)\n self.loggerList[0].info('Finished importing public market: {}'.format(market))\n\n # import secondary market sources\n for site in secondary_markets:\n secondary = eval('public_markets.' + site.lower() + '.' + site + '(self.loggerList)')\n self.market.append(secondary)\n self.loggerList[0].info('Finished importing public market: {}'.format(site))\n except(ImportError, AttributeError) as e:\n self.loggerList[0].error('Failed to import public market.')\n self.loggerList[0].error('Error: {}'.format(e))\n\n def init_logger(self):\n logging.config.fileConfig('logger.config')\n logger = logging.getLogger('main_logger')\n o_logger = logging.getLogger('opportunity_logger')\n self.loggerList = [logger, o_logger]\n\n def __get_triangle_pairs(self):\n # gets all possible triangle pairs in the given list of symbols from config.currency_pairs\n pairs = []\n first = config.currency_pref\n numOfPairs = len(config.currency_pairs[config.symbols]) - 1\n for pair in config.currency_pairs[config.symbols]:\n if first in pair:\n if first in pair[:3]:\n second = pair[-3:]\n else:\n second = pair[:3]\n else:\n continue\n while True:\n for pair2 in config.currency_pairs[config.symbols]:\n if second in pair2 and first not in pair2:\n if second in pair2[:3]:\n third = pair2[-3:]\n while True:\n for pair3 in config.currency_pairs[config.symbols]:\n if third in pair3 and first in pair3:\n pairs.append([pair, pair2, pair3])\n if config.currency_pairs[config.symbols].index(pair3) == numOfPairs:\n break\n if config.currency_pairs[config.symbols].index(pair3) == numOfPairs:\n break\n else:\n third = pair2[:3]\n while True:\n for pair3 in config.currency_pairs[config.symbols]:\n if third in pair3 and first in pair3:\n pairs.append([pair, pair2, pair3])\n if config.currency_pairs[config.symbols].index(pair3) == numOfPairs:\n break\n if config.currency_pairs[config.symbols].index(pair3) == numOfPairs:\n break\n if config.currency_pairs[config.symbols].index(pair2) == numOfPairs:\n break\n if config.currency_pairs[config.symbols].index(pair2) == numOfPairs:\n break\n self.loggerList[0].info('------------------------------------------------------')\n self.loggerList[0].info('---------------------TRIANGLES------------------------')\n for i in pairs:\n self.loggerList[0].info(i)\n self.loggerList[0].info('------------------------------------------------------')\n return pairs \n\n def __get_triangle_results(self, pairs, results):\n results[','.join(pairs)] = Triangle(pairs, self.bookData).main(self.loggerList)\n\n def _get_market_data(self):\n if self.use_primary_market:\n self.bookData = self.market[0].update_depth()\n else:\n self.bookData = self.market[1].update_depth()\n if self.bookData != {}:\n self.loggerList[0].info('----------------------PRICES-------------------------')\n for item in self.bookData:\n self.loggerList[0].info('{} : {:0.4f}'.format(item, ((self.bookData[item]['bids'][0]['price'] +\n self.bookData[item]['asks'][0]['price']) / 2)))\n self.loggerList[0].info('-----------------------------------------------------')\n else:\n self.use_primary_market = False # alternate to secondary market\n self.time_awaited = time.time()\n\n def update_cases(self, triangles):\n futures = []\n results = {}\n #results = self.__get_triangle_results(triangles[0]) #for testing\n 
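# The dynamic loading above builds import statements with exec() and
# instantiates classes with eval(); importlib achieves the same without
# executing strings. A hedged sketch — the "public_markets" package and
# the name-equals-class convention are taken from the surrounding code:
import importlib

def load_market(name, *args):
    module = importlib.import_module("public_markets." + name.lower())
    cls = getattr(module, name)  # each module exposes a class of the same name
    return cls(*args)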
self.loggerList[0].debug('----------------------RESULTS------------------------')\n for pairs in triangles:\n futures.append(self.threadpool.submit(self.__get_triangle_results, pairs, results))\n wait(futures, timeout=3)\n return results\n \n def loop(self):\n if time.time() - self.time_awaited < config.market_expiration_time:\n self.use_primary_market = True\n triangles = self.__get_triangle_pairs()\n self.market[0].start() # starts main socket\n self.market[1].start() # starts order book socket \n while True:\n if not config.demo_mode:\n self.observers[0].check_wallets()\n\n #self._get_market_data()\n time.sleep(5)\n self.loggerList[0].info(\"Order book: \" + str(self.market[1].get_ask()))\n #self.loggerList[0].info(\"Main: \" + self.market[0].\n time.sleep(20)\n self.market[0].close()\n self.market[1].close()\n if self.bookData != {}:\n self.triangles = self.update_cases(triangles)\n self.loggerList[0].info('-----------------------------------------------------')\n item = sorted(self.triangles.items(), key=lambda x: x[1]['profit'], reverse=True)\n if item != [] and item[0][1]['profit'] > 0:\n for pair in item:\n if pair[1]['profit'] > 0:\n item2 = [(pair[0], pair[1])] # simplifying item dict\n for observer in self.observers:\n observer.opportunity(item2, self.loggerList)\n else:\n self.loggerList[0].info('-----------------------------------------------------')\n self.loggerList[1].info('-----------------------------------------------------')\n break\n else:\n self.loggerList[0].info('No opportunity found.')\n self.loggerList[0].info('-----------------------------------------------------')\n time.sleep(config.refresh_rate)\n\n\n\nclass Triangle():\n def __init__(self, pairs, data):\n self.fee = config.fee\n self.slippage = config.slippage\n self.triangle_pairs = pairs\n self.data = data # ticker data\n self.depths = {}\n self.threadpool = ThreadPoolExecutor(max_workers=3) # number of pairs in the symbol\n\n def get_data(self): # get only related triangles\n depths = {}\n for pair in self.data:\n if self.triangle_pairs[0] in pair or self.triangle_pairs[1] in pair or self.triangle_pairs[2] in pair:\n depths.update({pair : self.data[pair]})\n return depths\n\n def pricer(self, triangle, amount, side, fee):\n vol = 0\n value = 0\n i = 0\n prepay = False\n\n # used in cases where the fee must be discounted from the amount bought (i.e. 
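# A self-contained sketch (not part of the trading code) of the
# submit-then-wait fan-out used by update_cases above: queue one job per
# item on a ThreadPoolExecutor, then wait() with a timeout so a single
# slow pair cannot stall the refresh; the with-block still joins any
# stragglers on exit.
from concurrent.futures import ThreadPoolExecutor, wait

def fan_out(items, worker, timeout=3):
    with ThreadPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit(worker, item) for item in items]
        done, _not_done = wait(futures, timeout=timeout)
        # Only results that finished inside the timeout are collected here.
        return [f.result() for f in done]

print(fan_out([1, 2, 3], lambda x: x * x))  # e.g. [1, 4, 9] in any order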
bitfinex model)\n if config.market.lower() == 'bitfinex':\n prepay = True\n amount = amount - (amount * self.fee)\n\n if side == 'buy':\n while i < len(self.depths[triangle]['asks']) and value < amount: # check if amount being bought is larger than trade size\n this_value = min(self.depths[triangle]['asks'][i]['price'] * self.depths[triangle]['asks'][i]['amount'], amount - value) # get total amount being sold\n this_vol = this_value / self.depths[triangle]['asks'][i]['price'] # convert currency\n value += this_value\n vol += this_vol\n i += 1\n if prepay:\n return value / vol, vol\n return value / vol, vol - (vol * fee)\n\n else:\n while i < len(self.depths[triangle]['bids']) and value < amount:\n this_value = min(self.depths[triangle]['bids'][i]['amount'], amount - value)\n this_vol = this_value * self.depths[triangle]['bids'][i]['price']\n value += this_value\n vol += this_vol\n i += 1\n if prepay:\n return value, vol\n return value, vol - (vol * fee)\n \n def main(self, logger):\n self.depths = self.get_data()\n\n # Used to define the flow of the trade\n phases = ['buy', 'buy', 'buy']\n firstProduct = config.currency_pref\n if firstProduct in self.triangle_pairs[0][:3]:\n phases[0] = 'sell'\n if self.triangle_pairs[1][:3] in self.triangle_pairs[0]:\n phases[1] = 'sell'\n if self.triangle_pairs[2][:3] in self.triangle_pairs[1]:\n phases[2] = 'sell'\n\n amt = config.min_amount\n best_case = 0\n results = []\n while amt <= config.max_amount:\n\n # calculate final balance\n balance = amt\n for i in range(3):\n # check if using gdax and current trade is btceur\n if self.triangle_pairs[i] == 'btceur' and config.market.lower() == 'coinbase':\n fee = 0.0025\n else:\n fee = self.fee\n\n new_balance = self.pricer(self.triangle_pairs[i], balance, phases[i], fee)\n balance = new_balance[1]\n \n best_profit = 0\n c1_profit = balance - amt\n c1_profit_percent = (c1_profit / amt) * 100\n\n results.append(round(c1_profit_percent, 2))\n\n if (c1_profit_percent / 100) > config.min_profit:\n best_case = 1\n best_profit = c1_profit\n best_amount = amt - (amt * config.fee)\n best_trades = [\n {\n 'pair':self.triangle_pairs[0],\n 'type':phaseOne,\n 'amount':round(best_amount, 2),\n #'rate':round(firstTicker[phaseOne[1]], 6)\n }, \n {\n 'pair':self.triangle_pairs[1],\n 'type':'buy',\n 'amount':round(firstBalance[1], 2),\n #'rate':round(secondTicker[phaseTwo[1]], 6)\n },\n {\n 'pair':self.triangle_pairs[2],\n 'type':'sell',\n 'amount':round(secondBalance[1], 2),\n #'rate':round(thirdTicker[phaseThree[1]], 6)\n },\n ]\n \n amt += config.increment\n\n # used to display results for debugging\n logger[0].debug(' ** {} :'.format(self.triangle_pairs))\n logger[0].debug(' ** ** {}'.format(results))\n\n if best_case > 0:\n case = \"{} -> {} -> {}\".format(self.triangle_pairs[0], self.triangle_pairs[1], self.triangle_pairs[2]) if best_case == 1 else \"{} -> {} -> {}\".format(self.triangle_pairs[0], self.triangle_pairs[1], self.triangle_pairs[2]) \n return {\n 'case':case,\n 'amount':best_amount,\n 'profit':best_profit,\n 'best_trades':best_trades,\n 'best_case':best_case\n } \n\n return {\n 'case':0,\n 'amount':0,\n 'profit':0,\n 'best_trades':[],\n 'best_case':0\n } \n\n\n","sub_path":"triangular.py","file_name":"triangular.py","file_ext":"py","file_size_in_byte":15352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501141849","text":"from django.utils.translation import ugettext_lazy as _\nimport os\n\n# Build paths inside the project like this: 
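# A tiny sketch matching the BASE_DIR comment above (not an addition to
# the real settings): resolve the project root from this file's location
# once, then join every other path against it so the settings work
# independently of the current working directory. Names suffixed _EXAMPLE
# are illustrative.
import os

BASE_DIR_EXAMPLE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR_EXAMPLE = os.path.join(BASE_DIR_EXAMPLE, "templates")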
os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nfrom django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = [\n 'suit',\n 'modeltranslation',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'ckeditor',\n 'bcks_files',\n 'contact',\n 'd_files',\n 'pages',\n 'django_extensions',\n 'easy_thumbnails',\n 'filer',\n 'mptt',\n 'rosetta',\n 'softhyphen',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bitnik_pw.urls'\n\n# TEMPLATE_DIRS = (\n# os.path.join(BASE_DIR, 'templates'),\n# )\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')]\n ,\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request', # django suit\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bitnik_pw.wsgi.application'\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLANGUAGE_CODE = 'en'\n\nLANGUAGES = (\n ('en', _('English')),\n ('de', _('German')),\n ('tr', _('Turkish')),\n)\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale').replace('\\\\', '/'),\n)\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static').replace('\\\\', '/')\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, \"assets\"),\n # '/var/www/static/',\n)\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media').replace('\\\\', '/')\nMEDIA_URL = '/media/'\n\n# ckeditor settings\nCKEDITOR_UPLOAD_PATH = 'uploads/'\n# CKEDITOR_UPLOAD_PATH = os.path.join(BASE_DIR, \"assets/media/uploads\")\nCKEDITOR_IMAGE_BACKEND = 'pillow'\n# CKEDITOR_JQUERY_URL = os.path.join(BASE_DIR, \"assets/scripts/jquery-2.1.3.min.js\")\nCKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'\nCKEDITOR_CONFIGS = {\n 'default': {\n # 'toolbar': 'full',\n # 'toolbar': 'basic',\n 
'height': 300,\n 'width': 600,\n },\n}\n\n# easy thumbnails + django filer\nTHUMBNAIL_HIGH_RESOLUTION = True # django filer\nTHUMBNAIL_PROCESSORS = (\n 'easy_thumbnails.processors.colorspace',\n 'easy_thumbnails.processors.autocrop',\n # 'easy_thumbnails.processors.scale_and_crop',\n 'filer.thumbnail_processors.scale_and_crop_with_subject_location',\n 'easy_thumbnails.processors.filters',\n)\nTHUMBNAIL_ALIASES = {\n 'pages.Page.image': {\n 'page_image': {'size': (300, 300), 'crop': True},\n },\n}\nTHUMBNAIL_DEBUG = True\n\n# rosetta\nROSETTA_MESSAGES_PER_PAGE = 30\n\n# django suit\nSUIT_CONFIG = {\n # header\n 'ADMIN_NAME': 'Bitniks',\n 'HEADER_DATE_FORMAT': 'l, j. F Y',\n 'HEADER_TIME_FORMAT': 'H:i',\n\n # forms\n 'CONFIRM_UNSAVED_CHANGES': False, # Default True\n\n 'MENU_ICONS': {\n 'sites': 'icon-leaf',\n 'auth': 'icon-lock',\n 'bcks_files': 'icon-globe',\n 'filer': 'icon-folder-open',\n 'd_files': 'icon-leaf',\n 'contact': 'icon-pencil',\n 'pages': 'icon-book',\n },\n\n 'MENU_OPEN_FIRST_CHILD': True,\n\n # misc\n 'LIST_PER_PAGE': 40\n}\n\n","sub_path":"bitnik_pw/settings_base.py","file_name":"settings_base.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459633848","text":"# -*- coding: ISO-8859-1\n\"\"\" Sistema de Gestión de Proyectos SGP\nGrupo Q03\nIngeniería de Software II\n@author: Mabel Peña - Alvaro Rodríguez\nAño: 2014\n\"\"\"\n\nfrom django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns('',\n # existing patterns here...\n url(r'^$', 'django.contrib.auth.views.login', {'template_name': 'login/index.html'},\n name='login', ),\n url(r'logout/', 'django.contrib.auth.views.logout_then_login', name='logout', ),\n)\n","sub_path":"SGP_Q03/login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"553125341","text":"def helpMessage():\r\n\toutput=\"\"\"[Hero] to see that hero's abilities.\r\n[Hero/level] for that hero's talents at that level.\r\n[Hero/hotkey] for the ability on that hotkey.\r\n[Hero/searchterm] to search for something in that hero's abilities or talents. 
& or -- in searchterm for AND and exclusions\r\n[Hero/info] for hero info\r\n[build/Hero] for hero builds/guides from Elitesparkle and others.\r\n[rotation] for free weekly rotation from Gnub.\r\n[patchnotes/hero] for patch notes from \r\nEmojis: [:Hero/emotion], where emotion is of the following: happy, lol, sad, silly, meh, angry, cool, oops, love, or wow.\r\nMock drafting: [draft/info].\r\n[battleground/X] and [core/X], where X is a battleground, for a map or a description of the core's abilities\r\nMy public repository: \"\"\"\r\n\treturn output\r\n\r\ndef getHeroes():#Returns an alphabetically sorted list of all heroes.\r\n\treturn ['Abathur', 'Alarak', 'Alexstrasza', 'Ana', 'Anduin', \"Anub'arak\", 'Artanis', 'Arthas', 'Auriel', 'Azmodan', 'Blaze', 'Brightwing', \r\n\t'Cassia', 'Chen', 'Cho', 'Chromie', 'D.Va', 'Deathwing', 'Deckard', 'Dehaka', 'Diablo', 'E.T.C.', 'Falstad', 'Fenix', 'Gall', 'Garrosh', \r\n\t'Gazlowe', 'Genji', 'Greymane', \"Gul'dan\", 'Hanzo', 'Illidan', 'Imperius', 'Jaina', 'Johanna', 'Junkrat', \"Kael'thas\", \"Kel'Thuzad\", \r\n\t'Kerrigan', 'Kharazim', 'Leoric', 'Li-Ming', 'Li_Li', 'Lt._Morales', 'Lúcio', 'Lunara', 'Maiev', \"Mal'Ganis\", 'Malfurion', 'Malthael', \r\n\t'Medivh', 'Mei', 'Mephisto', 'Muradin', 'Murky', 'Nazeebo', 'Nova', 'Orphea', 'Probius', 'Qhira', 'Ragnaros', 'Raynor', 'Rehgar', 'Rexxar', \r\n\t'Samuro', 'Sgt._Hammer', 'Sonya', 'Stitches', 'Stukov', 'Sylvanas', 'Tassadar', 'The_Butcher', 'The_Lost_Vikings', 'Thrall', 'Tracer', \r\n\t'Tychus', 'Tyrael', 'Tyrande', 'Uther', 'Valeera', 'Valla', 'Varian', 'Whitemane', 'Xul', 'Yrel', 'Zagara', 'Zarya', 'Zeratul', \"Zul'jin\"]\r\n\r\nasync def roll(text,message):\r\n\tif len(text)==1:\r\n\t\tn=6\r\n\telse:\r\n\t\tn=int(text[1])\r\n\tfrom random import randint\r\n\tfrom random import seed\r\n\tseed()\r\n\tawait message.channel.send(str(randint(1,n)))\r\n\r\nasync def getAvatar(client,channel,userMention):\r\n\tif '<' not in userMention:\r\n\t\tawait channel.send('Need a ping')\r\n\t\treturn\r\n\tuserString=userMention.replace(' ','')[2:-1].replace('!','')\r\n\tuser=client.get_user(int(userString))\r\n\tawait channel.send(user.avatar_url)\r\n\r\nasync def vote(message,text):\r\n\tif len(text)==2:\r\n\t\tn=int(text[1])\r\n\t\tif n<1 or n>9:\r\n\t\t\tawait message.channel.send('Out of range')\r\n\t\t\treturn\r\n\t\tfor i in range(1,n+1):\r\n\t\t\tawait message.add_reaction(str(i)+'\\N{combining enclosing keycap}')\r\n\telse:\r\n\t\tawait message.add_reaction('\\U0001f44d')\r\n\t\tawait message.add_reaction('\\U0001f44e')\r\n\r\nasync def deleteMessages(author,ping,client):\r\n\tguild=client.get_guild(535256944106012694)#Wind Striders\r\n\tif 557521663894224912 not in [role.id for role in author.roles]:\r\n\t\treturn\r\n\t\t\r\n\tuserId=int(ping.replace(' ','').replace('!','')[2:-1])\r\n\tdeletedCount=0\r\n\tfor channel in guild.text_channels:\r\n\t\ttry:\r\n\t\t\tasync for message in channel.history(limit=20):\r\n\t\t\t\tif message.author.id==userId:\r\n\t\t\t\t\tawait message.delete()\r\n\t\t\t\t\tdeletedCount+=1\r\n\t\texcept:\r\n\t\t\tpass\r\n\tawait guild.get_channel(576018992624435220).send('Deleted '+str(deletedCount)+' messages from '+ping)\r\n\r\nasync def removeEmbeds(message):#Some embeds are instant, others are edited in by discord. 
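# The vote() helper above composes number reactions as "digit + combining
# enclosing keycap"; this standalone sketch (no Discord connection needed)
# shows how those emoji strings are built and bounded to the 1-9 range:
def keycap_emojis(n):
    if not 1 <= n <= 9:
        raise ValueError("keycap reactions only cover 1-9")
    return [str(i) + "\N{COMBINING ENCLOSING KEYCAP}" for i in range(1, n + 1)]

assert len(keycap_emojis(3)) == 3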
Call in both on_message and on_message_edit\r\n\tif message.embeds:\r\n\t\tfor i in ['forums.blizzard.com','psionic-storm.com','heroespatchnotes.com']:#Forum embeds are huge image, psionic-storm builds/talent embeds link to wrong build number or blank calculator\r\n\t\t\tif i in message.content:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tawait message.edit(suppress=True)\r\n\t\t\t\texcept:\r\n\t\t\t\t\treturn\r\n\r\nasync def waitList(message,text,client):\r\n\tif len(text)==1:\r\n\t\tawait message.channel.send('Wait list: '+' ,'.join([i.name for i in client.waitList]))\r\n\telif text[1] in ['join','next']:\r\n\t\tclient.waitList.append(message.author)\r\n\t\tawait message.channel.send(message.author.name+' has been added to the wait list.')\r\n\telif text[1] in ['ping','here']:\r\n\t\tawait message.channel.send('Wait list: '+', '.join([i.mention for i in client.waitList]))\r\n\telif text[1]=='clear':\r\n\t\tclient.waitList=[]\r\n\telif text[1] in ['leave','unnext']:\r\n\t\tdel client.waitList[client.waitList.index(message.author)]\r\n\r\nasync def confidence(channel,text):\r\n\ttry:\r\n\t\twr,n=text[1].replace(' ','').split(',')\r\n\t\twr=float(wr)\r\n\t\tn=int(n)\r\n\t\ta=1.96*(wr*(100-wr)/n)**0.5\r\n\t\tlower=str(wr-a)[:4]\r\n\t\tupper=str(wr+a)[:4]\r\n\t\tawait channel.send('We are 95% confident that the winrate is between '+lower+'% and '+upper+'%.')\r\n\texcept:\r\n\t\tawait channel.send('Find a 95% confidence interval with [ci/winrate,games] \\n')","sub_path":"miscFunctions.py","file_name":"miscFunctions.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"603781930","text":"import logging\nimport time\nfrom datetime import datetime\n\n\ndef get_logger(name: str):\n \"\"\"\n ハンドラ設定をしたloggerオブジェクトを返却する。\n :param name: 実行ファイルのname要素\n :return: loggerオブジェクト\n \"\"\"\n logger = logging.getLogger(name)\n\n # logレベル設定\n logger.setLevel(logging.DEBUG)\n\n # ログ出力フォーマット設定\n handler_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # log出力先設定\n file_handler = logging.FileHandler('/log/pylog/py.log')\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(handler_format)\n\n # 標準出力設定\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.DEBUG)\n stream_handler.setFormatter(handler_format)\n\n # Handlerをセット\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n\n return logger\n\n\ndef stop_watch(message: str, logger):\n \"\"\"\n 時間計測デコレーター\n :param message: 任意のメッセージ\n :param logger: loggerオブジェクト\n \"\"\"\n\n def _stop_watch(func):\n def wrapper(*args, **kargs):\n start = time.time()\n logger.info(datetime.now())\n result = func(*args, **kargs)\n elapsed_time = time.time() - start\n logger.info(f\"{message}は{elapsed_time}秒かかりました\")\n return result\n\n return wrapper\n\n return _stop_watch\n","sub_path":"src/commonlib/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"82989278","text":"\"\"\" Usage:\n --in=IN_FILE --out=OUT_FILE [--debug]\n\"\"\"\n\nfrom collections import Counter\nimport logging\nfrom docopt import docopt\nimport spacy\nfrom spacy.tokens.token import Token\n\nfrom languages.util import GENDER, MORFEUSZ_GENDER_TYPES, MORFEUSZ_GENDER_TAG_POSITION\n\n\nclass MorfeuszPredictor:\n \"\"\"\n Class for Morfeusz -- Polish Morphology Analyzer\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Init spacy 
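# A runnable sketch of the stop_watch-style timing decorator defined in
# the log module above, trimmed to plain logging (the logger name and
# message format here are illustrative, not the original's):
import logging
import time
from functools import wraps

def timed(logger):
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            logger.info("%s took %.3fs", func.__name__, time.time() - start)
            return result
        return wrapper
    return decorate

@timed(logging.getLogger(__name__))
def slow_add(a, b):
    time.sleep(0.01)
    return a + b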
with morfeusz2.\n https://github.com/ipipan/spacy-pl\n http://morfeusz.sgjp.pl/\n \"\"\"\n import morfeusz2\n self.lang = \"pl\"\n self.cache = {} # Store calculated professions\n if spacy.util.is_package('pl_spacy_model_morfeusz_big'):\n self.nlp = spacy.load('pl_spacy_model_morfeusz_big', disable=[\"parser\", \"ner\"])\n elif spacy.util.is_package('pl_spacy_model_morfeusz'):\n self.nlp = spacy.load('pl_spacy_model_morfeusz', disable=[\"parser\", \"ner\"])\n else:\n raise FileNotFoundError(\"Spacy model with Morfeusz not found.\\n\"\n \"To run analysis for Polish install Morfeusz from: http://morfeusz.sgjp.pl/\\n\"\n \"and download Spacy model with Morfeusz from: https://github.com/ipipan/spacy-pl\")\n\n def get_gender(self, profession: str, translated_sent = None, entity_index = None, ds_entry = None) -> GENDER:\n \"\"\"\n Predict gender of an input profession.\n \"\"\"\n\n gold, src_index, src_sent, src_profession = ds_entry\n if profession not in self.cache:\n self.cache[profession] = self._get_gender(profession)\n\n return self.cache[profession]\n\n def _get_gender(self, profession: str) -> GENDER:\n \"\"\"\n Predict gender, without using cache\n \"\"\"\n if not profession.strip():\n # Empty string\n return GENDER.unknown\n\n tokens = self.nlp(profession)\n observed_genders = [self.get_gender_from_token(tok) for tok in tokens]\n observed_genders = [gender for gender in observed_genders if gender]\n\n if not observed_genders:\n # No observed gendered words - return neutral\n return GENDER.neutral\n\n # Return the most commonly observed gender\n return Counter(observed_genders).most_common()[0][0]\n\n @staticmethod\n def get_gender_from_token(token: Token):\n \"\"\"\n Get gender indication from spacy token with morfeusz, if it exists\n \"\"\"\n features = token._.feats.split(':')\n if len(features) < MORFEUSZ_GENDER_TAG_POSITION + 1:\n return None\n\n morfeusz_gender = features[MORFEUSZ_GENDER_TAG_POSITION]\n if morfeusz_gender not in MORFEUSZ_GENDER_TYPES:\n if features[MORFEUSZ_GENDER_TAG_POSITION - 1] in MORFEUSZ_GENDER_TYPES:\n morfeusz_gender = features[MORFEUSZ_GENDER_TAG_POSITION -1]\n else:\n return GENDER.neutral\n\n return MORFEUSZ_GENDER_TYPES[morfeusz_gender]\n\n\nif __name__ == \"__main__\":\n # Parse command line arguments\n args = docopt(__doc__)\n inp_fn = args[\"--in\"]\n out_fn = args[\"--out\"]\n debug = args[\"--debug\"]\n if debug:\n logging.basicConfig(level = logging.DEBUG)\n else:\n logging.basicConfig(level = logging.INFO)\n\n logging.info(\"DONE\")\n","sub_path":"src/languages/morfeusz_support.py","file_name":"morfeusz_support.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610304931","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 3 00:53:51 2021\n\n@author: Tang_Huanqiang\n\"\"\"\n\nfrom BaiduOcr import run\nfiles = ['result/result' + str(i) + '.png' for i in range(1,44)]\nfor i in range(len(files)):\n words = run(files[i])\n filename = 'result/words' + str(i+1) + '.txt'\n with open(filename,'w') as f:\n f.write(words)\n","sub_path":"self_ocr.py","file_name":"self_ocr.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"595891516","text":"__author__ = \"Dohoon Lee\"\r\n__copyright__ = \"Copyright 2018, Dohoon Lee\"\r\n__email__ = \"dohlee.bioinfo@gmail.com\"\r\n__license__ = \"MIT\"\r\n\r\n\r\nfrom snakemake.shell import shell\r\n\r\n# Extract 
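# The Morfeusz-based predictor nearby memoizes per-profession results in a
# hand-rolled dict cache; functools.lru_cache expresses the same idea
# declaratively. A sketch — the body below is a stand-in, not the real
# morphological lookup:
from functools import lru_cache

@lru_cache(maxsize=None)
def analyze(profession):
    # The expensive spacy/Morfeusz call would happen here.
    return profession.strip().lower()

analyze("Lekarz")  # computed once
analyze("Lekarz")  # second call is served from the cache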
log.\r\nlog = snakemake.log_fmt_shell(stdout=False, stderr=True)\r\n\r\n# Define exception classes.\r\nclass RuleInputException(Exception):\r\n pass\r\n\r\n# Extract parameters.\r\nextra = snakemake.params.get('extra', '')\r\n\r\n# Extract required arguments.\r\nfasta = snakemake.input.fasta\r\ngtf = snakemake.input.get('gtf', None)\r\ngff3 = snakemake.input.get('gff3', None)\r\n\r\nif (gtf is not None) and (gff3 is not None):\r\n raise RuleInputException('You cannot provide both GTF and GFF3 files.')\r\nannotation_option = ''\r\nif gtf is not None:\r\n annotation_option = '--gtf {gtf}'\r\nelif gff3 is not None:\r\n annotation_option = '--gff3 {gff3}'\r\n\r\noutput_prefix = snakemake.output[0].rstrip('.transcripts.fa')\r\nthreads = snakemake.threads\r\n\r\n# Execute shell command.\r\nshell(\r\n \"(\"\r\n \"rsem-prepare-reference \"\r\n \"{annotation_option} \"\r\n \"{extra} \"\r\n \"{fasta} \"\r\n \"{output_prefix} \"\r\n \")\"\r\n \"{log}\"\r\n)\r\n","sub_path":"rsem/prepare-reference/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"587325571","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author: Gillett Hernandez\n# @Date: 2016-03-21 22:33:46\n# @Last Modified by: Gillett Hernandez\n# @Last Modified time: 2017-08-10 13:35:04\n\nfrom euler_funcs import prime_factorization, get_primes, pf_with_primes_given, timed\n\n@timed\ndef main():\n N = 600851475143\n print(max(prime_factorization(N)))\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/problem_3.py","file_name":"problem_3.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"638243125","text":"import h5py\nimport numpy as np\nimport astropy.io.fits as fits\nfrom glob import glob\nimport re\nimport os\n\ndef sort_human(l):\n \"\"\"\n sort a list with indices and letters the way a human would\n :param l: a list of string\n \"\"\"\n convert = lambda text: float(text) if text.isdigit() else text\n alphanum = lambda key: [ convert(c) for c in re.split('([-+]?[0-9]*\\.?[0-9]*)', key) ]\n l.sort(key=alphanum)\n return l\n\ndef create_spi_irf_file(irf_database, file_name):\n \n irf_files = sort_human(glob(os.path.join(irf_database,\n 'spi_irf_rsp*.fits')))[-51:]\n\n\n energies = np.zeros(len(irf_files),dtype=np.float64)\n\n masks = []\n\n for i, irf_file in enumerate(irf_files):\n with fits.open(irf_file) as f:\n\n irf_ext = f['SPI.-IRF.-RSP']\n\n irf_header = irf_ext.header\n\n # we only need to grab this once because it is always the same\n\n if i == 0:\n\n irf_crpix2 = irf_header['CRPIX2']\n irf_crpix3 = irf_header['CRPIX3']\n irf_crval2 = irf_header['CRVAL2']\n irf_crval3 = irf_header['CRVAL3']\n irf_cdelt2 = irf_header['CDELT2']\n irf_cdelt3 = irf_header['CDELT3']\n irf_reg = irf_header['REGION']\n ndete = irf_header['NAXIS1']\n nx = irf_header['NAXIS2']\n ny = irf_header['NAXIS3']\n irf_xmin = np.deg2rad(irf_crval2 - np.floor(irf_crpix2-0.5) * irf_cdelt2)\n irf_ymin = np.deg2rad(irf_crval3 - np.floor(irf_crpix3-0.5) * irf_cdelt3)\n irf_xbin = np.deg2rad(irf_cdelt2)\n irf_ybin = np.deg2rad(irf_cdelt3)\n\n energies[i] = irf_header['ENERGY']\n\n # currently ignore the other two matrices\n\n masks.append(irf_ext.data.T[...,0])\n\n\n # now lots make one big matrix\n tmp = [len(masks)]\n tmp.extend(masks[0].shape)\n\n mask_matrix = np.zeros(tuple(tmp))\n\n for i,mask in enumerate(masks):\n\n 
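# A sketch (not the original routine) of the stack-and-store step nearby:
# np.stack builds the (n_masks, ny, nx) array in one call — all masks must
# share a shape — and h5py writes it gzip-compressed. The output file name
# is a placeholder.
import h5py
import numpy as np

def store_masks(masks, path="irfs_example.h5"):
    matrix = np.stack(masks)  # replaces preallocate-then-loop assignment
    with h5py.File(path, "w") as f:
        f.create_dataset("irfs", data=matrix, compression="gzip")
    return matrix.shape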
mask_matrix[i,...] = mask\n\n f = h5py.File(file_name, \"w\")\n \n irf_dataset = f.create_dataset(\"irfs\", \n mask_matrix.shape,\n dtype=mask_matrix.dtype,\n compression=\"gzip\")\n irf_dataset[...] = mask_matrix\n \n irf_dataset.attrs['irf_crpix2'] = irf_crpix2\n irf_dataset.attrs['irf_crpix3'] = irf_crpix3\n \n irf_dataset.attrs['irf_crval2'] = irf_crval2\n irf_dataset.attrs['irf_crval3'] = irf_crval3\n \n irf_dataset.attrs['irf_cdelt2'] = irf_cdelt2\n irf_dataset.attrs['irf_cdelt3'] = irf_cdelt3\n \n irf_dataset.attrs['irf_reg'] = irf_reg\n irf_dataset.attrs['ndete'] = ndete\n irf_dataset.attrs['nx'] = nx\n irf_dataset.attrs['ny'] = ny\n \n irf_dataset.attrs['irf_xmin'] = irf_xmin\n irf_dataset.attrs['irf_ymin'] = irf_ymin\n irf_dataset.attrs['irf_xbin'] = irf_xbin\n irf_dataset.attrs['irf_ybin'] = irf_ybin\n \n \n energies_dataset = f.create_dataset(\"energies\", \n energies.shape,\n dtype=energies.dtype,\n compression=\"gzip\")\n \n energies_dataset[...] = energies\n \n \n f.close()\n","sub_path":"pyspi/utils/create_spi_irf_database.py","file_name":"create_spi_irf_database.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"503212213","text":"#!/usr/bin/env python\n#coding:utf-8\nfrom django.utils.safestring import mark_safe\n\n\n\nclass PageInfo:\n def __init__(self,current_page,all_count,per_item=3):\n self.CurrentPage=current_page\n self.AllCount=all_count\n self.PerItem=per_item\n @property\n def start(self):\n return (self.CurrentPage-1)*self.PerItem\n \n @property\n def end(self):\n return self.CurrentPage*self.PerItem\n \n @property\n def all_page_count(self):\n temp = divmod(self.AllCount,self.PerItem)\n if temp[1] ==0:\n all_page_count=temp[0]\n else:\n all_page_count=temp[0]+1\n\n return all_page_count\n\n\n\ndef Page(page,all_page_count):\n # NOTE: the anchor markup in this file was stripped during extraction; the\n # '?page=%d' hrefs and the 'current' class below are assumed placeholders.\n page_html=[]\n \n first_html=\"<a href='?page=%d'>首页</a>\" %(1,)\n page_html.append(first_html)\n \n if page<=1:\n pre_html = \"前一页\"\n \n else:\n pre_html=\"<a href='?page=%d'>前一页</a>\"%(page-1,)\n page_html.append(pre_html)\n \n\n \n if all_page_count <5:\n begin = 0\n end = all_page_count\n else:\n if page <5:\n begin = 0\n end = 5\n else:\n if page +2 > all_page_count:\n begin = page-3\n end = all_page_count\n else:\n begin = page-3\n end = page+2\n \n for i in range(begin,end):\n if page == i+1:\n a_html = \"<a class='current' href='?page=%d'> %d </a>\" % (i+1,i+1)\n else:\n a_html = \"<a href='?page=%d'> %d </a>\" % (i+1,i+1)\n page_html.append(a_html)\n \n if page < all_page_count:\n aft_html =\"<a href='?page=%d'>下一页</a>\"% (page+1)\n else:\n aft_html =\"下一页\"\n page_html.append(aft_html)\n \n \n end_html = \"<a href='?page=%d'>尾页</a>\" % (all_page_count)\n page_html.append(end_html)\n \n return mark_safe(' '.join(page_html))\n","sub_path":"blog/html_helper.py","file_name":"html_helper.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"298078123","text":"import numpy as np\nimport random\n\n# a = sum((x - x_mean) * (y - y_mean)) / sum((x - x_mean) ** 2)\n\ndef leastsquare(x, y):\n\tx_mean = np.mean(x)\n\ty_mean = np.mean(y)\n\tup, down = 0.0, 0.0\n\tfor x_i, y_i in zip(x, y):\n\t\tup += (x_i - x_mean) * (y_i - y_mean)\n\t\tdown += (x_i - x_mean) ** 2\n\ta = up / down\n\tb = y_mean - a * x_mean\n\treturn a, b\n\nif __name__ == '__main__':\n print(\"hello
world!\")","sub_path":"linerregression/leastsquare.py","file_name":"leastsquare.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"51858071","text":"import shelve\r\nimport os\r\n\r\n\r\ndef load():\r\n os.chdir('shelve_database')\r\n db = shelve.open('people-classes')\r\n for key in db:\r\n print(key, '=>\\n ', db[key].__class__.__name__, vars(db[key]), '\\n')\r\n db.close()\r\n","sub_path":"zav/classes_interactive_db/load_db_classes.py","file_name":"load_db_classes.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"525461784","text":"from dumptruck import DumpTruck\nimport datetime\nimport re\nimport os\n\ndef _connect(dbname = 'data.sqlite'):\n 'Initialize the database (again). This is mainly for testing'\n global dt\n dt = DumpTruck(dbname = dbname, adapt_and_convert = False)\n\n_connect()\n\ndef execute(sqlquery, data=[], verbose=1):\n \"\"\" Emulate scraperwiki as much as possible by mangling dumptruck result \"\"\"\n # Allow for a non-list to be passed as data.\n if type(data) != list and type(data) != tuple:\n data = [data]\n\n result = dt.execute(sqlquery, data, commit=False)\n # None (non-select) and empty list (select) results\n if not result:\n return {u'data': [], u'keys': []}\n dtos = lambda d: str(d) if isinstance(d, datetime.date) else d\n # Select statement with results\n return {u'data': map(lambda row: map(dtos, row.values()), result),\n u'keys': result[0].keys()}\n\ndef save(unique_keys, data, table_name=\"data\", verbose=2, date=None):\n if not data:\n return\n dt.create_table(data, table_name = table_name, error_if_exists = False)\n if unique_keys != []:\n dt.create_index(unique_keys, table_name, unique = True, if_not_exists = True)\n return dt.upsert(data, table_name = table_name)\n\ndef commit(verbose=1):\n dt.commit()\n\ndef select(sqlquery, data=[], verbose=1):\n sqlquery = \"select %s\" % sqlquery # maybe check if select or another command is there already?\n result = dt.execute(sqlquery, data, commit = False)\n # Convert dates to strings to conform to scraperwiki classic\n if result != []:\n keys = result[0].keys()\n for row in result:\n for key in keys:\n if isinstance(row[key], datetime.date):\n row[key] = str(row[key])\n return result\n\ndef show_tables(dbname=\"\"):\n name = \"sqlite_master\"\n if dbname:\n name = \"`%s`.%s\" % (dbname, name)\n response = select('name, sql from %s where type = \"table\";' % name)\n return {row['name']: row['sql'] for row in response}\n\ndef save_var(name, value, verbose=2):\n data = dt.save_var(name, value)\n dt.execute(u\"CREATE TABLE IF NOT EXISTS swvariables (`value_blob` blob, `type` text, `name` text PRIMARY KEY)\", commit = False)\n dt.execute(u'INSERT OR REPLACE INTO swvariables SELECT `value`, `type`, `key` FROM `%s`' % dt._DumpTruck__vars_table, commit = False)\n dt.execute(u'DROP TABLE `%s`' % dt._DumpTruck__vars_table, commit = False)\n dt.commit()\n return data\n\ndef get_var(name, default=None, verbose=2):\n if 'swvariables' not in show_tables(): # this should be unecessary\n return default\n dt.execute(u\"CREATE TABLE IF NOT EXISTS swvariables (`value_blob` blob, `type` text, `name` text PRIMARY KEY)\", commit = False)\n dt.execute(u\"CREATE TEMPORARY TABLE IF NOT EXISTS %s (`value` blob, `type` text, `key` text PRIMARY KEY)\" % dt._DumpTruck__vars_table, commit = False)\n\n sql = u'INSERT INTO `%s` (value, type, key) 
SELECT `value_blob`, `type`, `name` FROM `swvariables`' % dt._DumpTruck__vars_table\n dt.execute(sql, commit = False)\n try:\n value = dt.get_var(name)\n except NameError:\n dt.connection.rollback()\n return default\n dt.execute(u'DROP TABLE `%s`' % dt._DumpTruck__vars_table, commit = False)\n dt.commit()\n return value\n","sub_path":"scraperwiki/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114622118","text":"# Author: NKTEAM - Ninise\n# class MenuFrame(QWidget)                - menu form class\n#\n# Methods:\n# init_ui(self)                           - init main options of form\n# center_frame(self)                      - places the form at the center of the screen\n# init_layout(self)                       - create layouts\n# options_form(self)                      - set options of form and of its components\n# mousePressEvent(self, event)            - event press on form\n# mouseMoveEvent(self, event)             - event move form\n# fade(self)                              - setWindowOpacity(0.5)\n# unfade(self)                            - setWindowOpacity(1)\n# options_buttons(self)                   - options of buttons\n# options_line(self)                      - options of lines\n# options_label(self)                     - options of labels\n# open_about_form(self)                   - open about form\n# open_settings_form(self)                - open settings form\n#\n\nimport sys\nimport os\n\nfrom PyQt5.QtWidgets import QApplication, QWidget, QDesktopWidget, QPushButton,\\\n QFrame, QVBoxLayout, QHBoxLayout, QLabel\nfrom PyQt5.QtCore import Qt, QTimer\nfrom PyQt5.QtGui import QIcon, QFont\n\nfrom frontend.about_form import AboutFrame\nfrom frontend.settings_form import SettingsFrame\n\n\nclass MenuFrame(QWidget):\n\n ICONS_DIR = os.path.abspath(os.curdir) + os.sep + 'icons' + os.sep\n FONT = QFont('Monospace', 10, QFont.Light)\n\n def __init__(self):\n super().__init__()\n\n self.btn_exit = QPushButton(self)\n self.btn_setting = QPushButton(self)\n self.btn_about = QPushButton(self)\n self.btn_up_exit = QPushButton(self)\n\n self.line_up = QFrame(self)\n\n self.lbl_title = QLabel(self)\n\n self.init_ui()\n\n def init_ui(self):\n self.options_form()\n self.show()\n self.center_frame()\n\n # ---------------------------------------------------- #\n # FORM OPTIONS #\n # ---------------------------------------------------- #\n\n def center_frame(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def init_layout(self):\n main_lay = QVBoxLayout(self)\n main_lay.setContentsMargins(0, 0, 0, 0)\n\n top_lay = QHBoxLayout()\n top_lay.addWidget(self.btn_up_exit)\n top_lay.addStretch(10)\n top_lay.addWidget(self.lbl_title)\n top_lay.addStretch(15)\n\n main_lay.addLayout(top_lay)\n main_lay.addWidget(self.line_up)\n main_lay.addWidget(self.btn_setting)\n main_lay.addWidget(self.btn_about)\n main_lay.addWidget(self.btn_exit)\n\n self.setLayout(main_lay)\n\n def options_form(self):\n self.options_buttons()\n self.options_line()\n self.options_label()\n self.init_layout()\n\n self.setStyleSheet(\"background: #1C1C1C\")\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.setFixedSize(300, 140)\n self.setWindowTitle(\"Menu\")\n\n # ---------------------------------------------------- #\n # CREATE FRAME FADE #\n # ---------------------------------------------------- #\n\n def mousePressEvent(self, event):\n self.offset = event.pos()\n\n def mouseMoveEvent(self, event):\n x = event.globalX()\n y = event.globalY()\n x_w = self.offset.x()\n y_w =
self.offset.y()\n self.move(x-x_w, y-y_w)\n self.fade()\n\n def fade(self):\n self.setWindowOpacity(0.5)\n QTimer.singleShot(500, self.unfade)\n\n def unfade(self):\n self.setWindowOpacity(1)\n\n # ---------------------------------------------------- #\n # CREATE FORM SIGNALS #\n # ---------------------------------------------------- #\n\n def open_about_form(self):\n self.af = AboutFrame()\n\n def open_settings_form(self):\n self.sf = SettingsFrame()\n\n # ---------------------------------------------------- #\n # OPTIONS OF COMPONENTS #\n # ---------------------------------------------------- #\n\n def options_buttons(self):\n self.btn_about.setText(\"About\")\n self.btn_about.setFocusPolicy(Qt.NoFocus)\n self.btn_about.setStyleSheet(self.style_sheet())\n self.btn_about.setFont(MenuFrame.FONT)\n self.btn_about.clicked.connect(self.open_about_form)\n\n self.btn_setting.setText(\"Settings\")\n self.btn_setting.setFocusPolicy(Qt.NoFocus)\n self.btn_setting.setStyleSheet(self.style_sheet())\n self.btn_setting.setFont(MenuFrame.FONT)\n self.btn_setting.clicked.connect(self.open_settings_form)\n\n self.btn_exit.setText(\"Exit\")\n self.btn_exit.setFocusPolicy(Qt.NoFocus)\n self.btn_exit.setStyleSheet(self.style_sheet())\n self.btn_exit.setFont(MenuFrame.FONT)\n self.btn_exit.clicked.connect(self.hide)\n\n self.btn_up_exit.setIcon(QIcon(MenuFrame.ICONS_DIR + \"exit_1.png\"))\n self.btn_up_exit.setFocusPolicy(Qt.NoFocus)\n self.btn_up_exit.clicked.connect(self.hide)\n self.btn_up_exit.setFlat(True)\n\n def options_label(self):\n self.lbl_title.setText(\"Menu\")\n self.lbl_title.setFont(MenuFrame.FONT)\n\n def options_line(self):\n self.line_up.setFrameShape(QFrame.HLine)\n self.line_up.setFrameShadow(QFrame.Raised)\n self.line_up.setStyleSheet(\"background: #75F09A11\")\n\n @staticmethod\n def style_sheet():\n return \"background: #504E4E; color: white\"\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n sf = MenuFrame()\n sys.exit(app.exec_())\n","sub_path":"frontend/menu_form.py","file_name":"menu_form.py","file_ext":"py","file_size_in_byte":6068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585749448","text":"import os,models,visitor\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\nurl = '/admin/preview'\nview = 'preview.html'\n\nclass Main(webapp.RequestHandler):\n def post(self):\n visitor.register(self.request)\n data={ \n 'options':models.getConfig(),\n 'post':{\n 'title':self.request.get('xtitle'),\n 'content':self.request.get('xcontent')\n }\n }\n path=os.path.join(os.path.dirname(__file__),view)\n self.response.out.write(template.render(path,data))\n\nif __name__ == \"__main__\": \n run_wsgi_app(webapp.WSGIApplication([(url,Main)],debug=True))\n","sub_path":"preview.py","file_name":"preview.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}